This article collects typical usage examples of the Pool.starmap method from Python's multiprocessing.dummy module, for anyone wondering what Pool.starmap does and how to use it. You can also read up on the containing class, multiprocessing.dummy.Pool, for more background.
The 15 code examples below show Pool.starmap in use; by default they are ordered by popularity.
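As a quick orientation before the examples: multiprocessing.dummy exposes the multiprocessing Pool API backed by threads instead of processes, and starmap unpacks each argument tuple into positional arguments for the worker function. The following is only a minimal sketch with made-up names (the add function and the (x, y) tuples); it is not taken from any of the examples below.

from multiprocessing.dummy import Pool  # thread-backed Pool with the multiprocessing API

def add(x, y):
    # starmap unpacks each (x, y) tuple into positional arguments
    return x + y

if __name__ == '__main__':
    pool = Pool(4)  # 4 worker threads
    results = pool.starmap(add, [(1, 2), (3, 4), (5, 6)])  # -> [3, 7, 11]
    pool.close()
    pool.join()
    print(results)

Like map, starmap blocks until every call has finished and returns the results in input order.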
Example 1: start
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def start(self):
    pool = ThreadPool(self.PROFILES_LOADER_POOL_SIZE)
    pool.starmap(self._do_search,
                 self._get_search_iterator())
    pool.close()
    pool.join()
Example 2: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def main():
    input_file = './input.txt'
    output_file = './output.txt'
    pool = Pool(1)
    lock = Lock()
    pool.starmap(process_chunk,
                 [(input_file, output_file, chunk_start, chunk_end, lock)
                  for chunk_start, chunk_end in chunkify(input_file, 10)])
    pool.close()
    pool.join()
Example 3: filterMsgS
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def filterMsgS(args, msgList):
    """
    Filter all dedicated msg entries into separate dedicated files.
    :param msgList: msg block definitions loaded from the json file
    :return:
    """
    pool = ThreadPool(cpu_count())
    # pool.starmap(filterItem, zip(itertools.repeat(args), msgList))
    pool.starmap(startPoint, zip(itertools.repeat(args), msgList))
    pool.close()
    pool.join()
Example 4: _pool_query
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def _pool_query(self, query, func, attr, callback):
    """Uses :code:`query` to perform :code:`func` with kwargs :code:`attr`
    in parallel against all configured geocoders. Performs :code:`callback`
    function on the result list of addresses or locations.

    Args:
        query (str): The query component of a reverse or forward geocode.
        func (function): Function to use to obtain an answer.
        attr (dict): Keyword arguments to pass to function for each
            geocoder.
        callback (func): Function to run over iterable result.

    Returns:
        Output of `callback`.
    """
    pool = ThreadPool()
    results = pool.starmap(func,
                           zip([g.geocoder for g in self.geocoders],
                               repeat(query),
                               [getattr(g, attr) for g in self.geocoders]))
    pool.close()
    pool.join()
    locations = []
    for location in results:
        if isinstance(location, list):
            locations.extend(location)
        else:
            locations.append(location)
    # locations = [item for sublist in results for item in sublist]
    return callback(locations)
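A recurring idiom in this and several later examples is building the argument tuples for starmap by zipping parallel sequences, with itertools.repeat supplying any argument that is the same for every call. Below is a minimal, hypothetical sketch of that idiom; the fetch function, host list, and timeout values are made up for illustration and are not part of the example above.

from itertools import repeat
from multiprocessing.dummy import Pool

def fetch(host, query, timeout):
    # placeholder worker; in example 4 this role is played by `func`
    return '{}?q={}&t={}'.format(host, query, timeout)

hosts = ['geocoder-a', 'geocoder-b']
timeouts = [1.0, 2.5]
pool = Pool()
# zip builds one (host, query, timeout) tuple per host; repeat() feeds the
# shared query into every call, mirroring repeat(query) above.
results = pool.starmap(fetch, zip(hosts, repeat('downtown'), timeouts))
pool.close()
pool.join()
print(results)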
Example 5: scrawl
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
async def scrawl(self, threads=5):
    logger.log('Scrawling Trackemon..', 'green')
    await self.client.wait_until_ready()
    # collect the ids of the channels to post to
    shout_out_channels = []
    for server in self.client.servers:
        for channel in server.channels:
            if channel.name in self.config.get('scrawl_channels', []):
                shout_out_channels.append(discord.Object(channel.id))
    if len(shout_out_channels) == 0:
        raise Exception("No channel to shout out!")
    while not self.client.is_closed:
        logger.log('Scrawling Trackemon..', 'green')
        self._retrieve_session_id()
        # fan the per-pokemon requests out over a thread pool
        if 'pokemons' in self.config.get('scrawl_trackemon'):
            pokemon_names = self.config.get('scrawl_trackemon')['pokemons']
            pool = ThreadPool(threads)
            messages = pool.starmap(self.scrawl_trackemon, zip(
                pokemon_names, itertools.repeat(self.session_id)))
            for message in messages:
                if len(message):
                    for channel in shout_out_channels:
                        await self.client.send_message(channel, message)
        # delay so the current task can finish before the next pass
        await asyncio.sleep(self.config.get('delay_scrawl', 300))
Example 6: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def main(genre):
    pool_of_threads = Pool(multiprocessing.cpu_count() - 1)  # let's hope you have more than 1 cpu core...
    numbers = list(range(1, pages))
    old_results = pool_of_threads.starmap(get_painting_list, zip(numbers, itertools.repeat(genre)))
    pool_of_threads.close()
    pool_of_threads.join()
    results = []
    for item in old_results:
        if item:
            for x in item:
                results.append(x)
    pool_of_threads = Pool(multiprocessing.cpu_count() - 1)
    pool_of_threads.starmap(downloader, zip(enumerate(results), itertools.repeat(genre)))
    pool_of_threads.close()
    pool_of_threads.join()
Example 7: topN_translates
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def topN_translates(self, bot, update, number):
    chat_id = update.message.chat_id
    bot.sendChatAction(chat_id, ChatAction.TYPING)
    languages = defaultdict(int)
    pool = ThreadPool(4)
    args = [('https://launchpad.net/{}/+translations'.format(project),
             languages) for project in self.projects]
    pool.starmap(self.check_project_untranslated, args)
    pool.close()
    pool.join()
    text = ['Language - Number of translated strings']
    top = sorted(languages.items(), key=lambda x: (x[1], x[0]),
                 reverse=True)[:number]
    for index, (lang, translated) in enumerate(top):
        text.append('{0}) {1} - {2}'.format(index + 1, lang, translated))
    bot.sendMessage(chat_id, text='\n'.join(text))
Example 8: pool_filter
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def pool_filter(
        candidates: List[Tuple[str, str]],
        compare_images: Callable[[str, str, float, float], bool],
        aspect_fuzziness: float, rms_error: float, chunk_size: float
) -> List[Tuple[str, str]]:
    pool = Pool(None)
    return [
        c
        for c, keep in zip(
            candidates,
            pool.starmap(
                partial(compare_images, aspect_fuzziness=aspect_fuzziness, rms_error=rms_error),
                candidates, chunksize=chunk_size
            )
        )
        if keep
    ]
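When some arguments are constant keyword arguments rather than per-item values, the example above binds them with functools.partial, so starmap only has to unpack each candidate pair; chunksize controls how many pairs a worker takes at a time. The following is a small, hypothetical sketch of that combination, with a made-up compare function and input pairs standing in for compare_images and candidates.

from functools import partial
from multiprocessing.dummy import Pool

def compare(a, b, tolerance=0.0):
    # stand-in for compare_images: "equal" if the numbers are within tolerance
    return abs(a - b) <= tolerance

pairs = [(1.0, 1.05), (2.0, 3.0), (5.0, 5.0)]
pool = Pool(None)  # None -> one worker per CPU, as in pool_filter above
keep = pool.starmap(partial(compare, tolerance=0.1), pairs, chunksize=1)
pool.close()
pool.join()
print([p for p, k in zip(pairs, keep) if k])  # [(1.0, 1.05), (5.0, 5.0)]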
Example 9: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'exepath', help='path to the trace dump executable', type=file_type)
    parser.add_argument('files', nargs='*')
    parser.add_argument('-j', type=parallel_arg_type,
                        help='number of parallel processes', default=1)
    args = parser.parse_args(argv[1:])
    exepath = args.exepath
    files = []
    for wildcard in args.files:
        files.extend(glob.glob(wildcard))
    if len(files) == 0:
        print("No input files found!")
        return 1
    # Test the executable first.
    valid = False
    try:
        p = subprocess.Popen([
            exepath,
            '--log_file=stdout',
        ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()
        if p.returncode == 5:
            # code 5 = invalid trace file / trace file unspecified
            valid = True
    except OSError:
        pass
    if not valid:
        print("Trace executable invalid!")
        return 1
    print("Processing...")
    pool = Pool(args.j)
    start_time = time.perf_counter()
    results = pool.starmap(run_dumper, zip(itertools.repeat(exepath), files))
    pool.close()
    pool.join()
    elapsed_time = time.perf_counter() - start_time
    print("entire runtime took %.3f seconds" % elapsed_time)
    return 0
Example 10: backup_all_servers
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def backup_all_servers():
    # Get the lists of up and down servers
    hosts_up, hosts_down = check_alive_hosts(ssh_test_cmd)
    # Prepare the list of two-argument tuples for starmap.
    hosts_up_two_args = []
    # Pair each host with the extra argument and append to a separate list.
    # Output should be something like [('a', 1), ('b', 1), ('c', 1)]
    for i in zip(hosts_up, repeat(rsync_stdout)):
        hosts_up_two_args.append(i)
    # Make the Pool of workers
    pool = ThreadPool(number_of_threads)
    results = pool.starmap(rsync_start, hosts_up_two_args)
    pool.close()
    pool.join()
Example 11: backup_one_server
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def backup_one_server(server_ip):
    # This function needs the parameter as a list.
    # Prepare the list of two-argument tuples for starmap.
    hosts_up = []
    hosts_up_two_args = []
    hosts_up.append(server_ip)
    # Pair each host with the extra argument and append to a separate list.
    # Output should be something like [('a', 1), ('b', 1), ('c', 1)]
    # Always run the single-server backup in the background (append 0)
    for i in zip(hosts_up, repeat(0)):
        hosts_up_two_args.append(i)
    # Make the Pool of workers
    pool = ThreadPool(number_of_threads)
    results = pool.starmap(rsync_start, hosts_up_two_args)
    pool.close()
    pool.join()
Example 12: open
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
        query += ' -query ' + fasta_file
        query += ' -out ' + blast_output_file + ' -outfmt 6'
        os.system(query)
        with open(blast_output_file, 'r', encoding='utf-8') as f2:
            while True:
                line = f2.readline().split('\t')
                if float(line[3]) > 0.9 * len(seq):
                    ofile.write(name + '\t' + line[3] + '\t' + line[4])
                    ofile.write('\t' + line[5] + '\t' + line[6] + '\t' + line[7])
                    ofile.write('\t' + line[8] + '\t' + line[9] + '\t' + line[10])
                else:
                    break
        os.remove(fasta_file)
        os.remove(blast_output_file)
        return(out)
    else:
        out = -1
        return(out)

with open(completeGenome, 'r') as f:
    t = 0
    o = 'output.txt'
    while True:
        t = oneSequenceBlast(f, t, o)
        if t == -1:
            break

pool = Pool()
results = pool.starmap(oneSequenceBlast, mailsList)
Example 13: processSpider
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
    return paramList

def processSpider(url, filePath, threadName):
    sTime = time.time()
    session = requests.session()
    session.headers['User-Agent'] = random.choice(UserAgent)
    response = session.get(url)
    # todo add message if response code != 200
    print(response.status_code)
    if response.status_code == 200:
        f = open(filePath, 'w+', encoding='utf-8')
        f.write(response.text)
        print(threadName + ": " + str(time.time() - sTime))
        f.close()

def getSession():
    session = requests.session()
    session.headers['User-Agent'] = random.choice(UserAgent)
    return session

# main()
if __name__ == '__main__':
    pool = ThreadPool(20)
    paramList = getParam()
    print(paramList)
    pool.starmap(processSpider, paramList)
Example 14: spider_kaiyuan
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
def spider_kaiyuan(start, end, password):
    file_name = '账号{0}-{1}.txt'.format(start, end)  # "accounts {start}-{end}.txt"
    with open(file_name, 'a+') as f:
        for account in range(int(start), int(end)):
            if auth_account(account=account, password=password):
                f.write('账号:{} 密码{}'.format(account, password))  # "account: ... password ..."
                f.flush()

def generate_func_args(start, end, password):
    return (start, end, password)

if __name__ == '__main__':
    pool = ThreadPool(5)
    args = []
    account_start = 888800000000
    # prompt: "enter the password to test; the tested range is {} - {}"
    password = input("输入测试密码 测试区间在{} -{}\n".format(account_start + 5 * 10000, account_start + 9 * 10000 + 9999))
    for i in range(5, 9):
        args_map = generate_func_args(account_start + i * 10000, account_start + i * 10000 + 9999, password)
        args.append(args_map)
    pool.starmap(spider_kaiyuan, args)
Example 15: str
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import starmap [as alias]
    eTime = time.time()
    logging.info(fileName + " takes time:" + str(eTime - sTime) + ", url:" + url)

def getDownloadParam(folder, suffix, list):
    paramList = []
    # for i in range(1, 1192):
    for i in list:
        index = str(i)
        url = mainUrl + index + endUrl
        fileName = 'file-' + index + '.' + suffix
        param = (url, folder, fileName)
        paramList.append(param)
    return paramList

if __name__ == '__main__':
    logging.basicConfig(filename="F:\Workspace\_data\log\\resumeSpider\logging-11-27-2.log", level=logging.INFO)
    folder = "H:\\resume4"
    startTime = time.time()
    pool = ThreadPool(20)
    errorRange = getNumbersFromErrLog()
    # normalRange = range(1, 1200)
    paramList = getDownloadParam(folder, "zip", errorRange)
    pool.starmap(downloadResume, paramList)
    endTime = time.time()
    logging.info('take time:' + str(endTime - startTime))