This article collects typical usage examples of the Python method multiprocessing.dummy.Pool.map. If you are unsure what Pool.map does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage of the class this method belongs to, multiprocessing.dummy.Pool.
The following presents 15 code examples of Pool.map, sorted by popularity by default.
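All of the examples share the same basic pattern, so here is a minimal sketch of it first. This sketch is illustrative and not taken from any of the projects below; the square function is a hypothetical stand-in for real, typically I/O-bound, work.

# multiprocessing.dummy exposes the multiprocessing.Pool API backed by
# threads instead of processes, which makes it a good fit for I/O-bound jobs.
from multiprocessing.dummy import Pool as ThreadPool

def square(n):
    # Hypothetical placeholder for real work, e.g. an HTTP request.
    return n * n

pool = ThreadPool(4)                   # four worker threads
results = pool.map(square, range(10))  # blocks until all items are processed
pool.close()                           # stop accepting new work
pool.join()                            # wait for the workers to finish
print(results)                         # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]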
Example 1: test_threading
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def test_threading(self):
    pool = ThreadPool(4)
    results = pool.map(self.parser.parse, Dictionary().version.terms)
    self.assertSetEqual({str(t) for t in results},
                        {'[{0}]'.format(str(t)) for t in Dictionary().version.terms})
    results = pool.map(script, Dictionary().version.terms)
    self.assertSetEqual({str(t) for t in results}, set(Dictionary().version.terms))
Example 2: load_rowdata_to_mongo_zh
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def load_rowdata_to_mongo_zh(is_incremental):
    print("start loading row data(zh) from JSON file to MongoDB...")
    all_start = timeit.default_timer()
    static = Static()
    bydim_dir = static.output_folder + static.dataset_bydim_folder

    client = MongoClient(static.mongo_url, static.mongo_port)
    db = client[static.database_name]
    dataset_col = db[static.dataset_col_name]
    if not is_incremental:
        dataset_col.drop()

    file_path_array = []
    for idx, file in enumerate(os.listdir(bydim_dir)):
        file_path = os.path.join(bydim_dir, file)
        if os.path.isfile(file_path):
            file_path_array.append(file_path)
    print(str(len(file_path_array)) + " files are loaded")

    counter = []
    mapfunc = partial(insert_by_dim, counter=counter, dataset_col=dataset_col, all_start=all_start)
    pool = ThreadPool(12)
    pool.map(mapfunc, file_path_array)
    pool.close()
    pool.join()

    print("All the threads are completed. Total number is " + str(len(counter)) + "\n")
    print("total time cost: " + str(round(timeit.default_timer() - all_start)) + 's')
Example 3: get_offline_user_data
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def get_offline_user_data():
    if DEBUG_MODE:
        print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'get_offline_user_data')
    if r_session.exists('api_error_info'):
        return
    if datetime.now().minute < 50:
        return

    offline_users = []
    for b_user in r_session.mget(*['user:%s' % name.decode('utf-8')
                                   for name in r_session.sdiff('users', *r_session.smembers('global:online.users'))]):
        user_info = json.loads(b_user.decode('utf-8'))
        username = user_info.get('username')
        if not user_info.get('active'):
            continue
        every_hour_key = 'user:%s:cron_queued' % username
        if r_session.exists(every_hour_key):
            continue
        offline_users.append(username)

    pool = ThreadPool(processes=5)
    pool.map(get_data, offline_users)
    pool.close()
    pool.join()
Example 4: simTrans
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def simTrans(hosts, prm):
    fname = str(prm.n) + 'nodes.' + str(prm.data_size) + 'MB.' + str(prm.pipes) + 'pipes.out'
    for h in hosts:
        full_name = "results/%04d/%s" % (int(h.name.split('h')[1]), fname)
        os.system("rm %s" % full_name)
        status[h.name] = [0 for i in range(prm.pipes)]
        ip[h.name] = h.IP()
        h.cmdPrint('iperf -s -f M >> %s &' % full_name)
    '''for h1 in hosts:
        for h2 in hosts:
            if h1 == h2:
                continue
            print "Testing %s and %s after running server" % (h1.name, h2.name)
            net.iperf( (h1, h2) )
    '''
    print(neib)
    status['h1'] = [2 for i in range(prm.pipes)]  # start node
    print(status)

    k = []
    for h in hosts:
        k.append((h, prm))
    pool = ThreadPool(50)
    pool.map(perNodeProc, k)
    pool.close()
    pool.join()

    for h in hosts:
        h.cmdPrint('kill %iperf')
Example 5: test_multi_threading
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def test_multi_threading():
    import time
    import random
    from multiprocessing.dummy import Pool

    def op_a(a, b):
        time.sleep(random.random() * .02)
        return a + b

    def op_b(c, b):
        time.sleep(random.random() * .02)
        return c + b

    def op_c(a, b):
        time.sleep(random.random() * .02)
        return a * b

    pipeline = compose(name="pipeline", merge=True)(
        operation(name="op_a", needs=['a', 'b'], provides='c')(op_a),
        operation(name="op_b", needs=['c', 'b'], provides='d')(op_b),
        operation(name="op_c", needs=['a', 'b'], provides='e')(op_c),
    )

    def infer(i):
        # data = open("616039-bradpitt.jpg").read()
        outputs = ["c", "d", "e"]
        results = pipeline({"a": 1, "b": 2}, outputs)
        assert tuple(sorted(results.keys())) == tuple(sorted(outputs)), (outputs, results)
        return results

    N = 100
    for i in range(20, 200):
        pool = Pool(i)
        pool.map(infer, range(N))
        pool.close()
Example 6: BurstDz
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def BurstDz(host, path, user, passfile):
    hostuser = host.split('.')
    hostuser = hostuser[len(hostuser) - 2]
    hostdir = [hostuser, hostuser + hostuser, 'admin' + hostuser, hostuser + '123',
               'manage' + hostuser, hostuser + '123456', hostuser + 'admin', '123' + hostuser]

    opts_list = []
    f = open(passfile, 'r')
    password = f.read().split()
    dic = password + hostdir
    pool = ThreadPool(10)
    host1 = host + path
    for x in range(len(dic)):
        mima = dic[x]
        opts = {
            'host': host1,
            'user': user,
            'password': mima
        }
        opts_list.append(opts)
        # print hostr
        # print result

    pool.map(LoginDisCuz, opts_list)
    # pool.join()
    print('All PassWord Run Over')
Example 7: get_proxy
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def get_proxy(self):
    self._parse_proxy()
    pool = ThreadPool(8)
    pool.map(self._check_proxy, self.proxies)
    pool.close()
    pool.join()
    return self.checked_proxies
Example 8: test_ip_speed
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def test_ip_speed(self):
    # t0l = time.time()
    # pooll = Pool(processes=20)
    # for item in self.ip_items:
    #     pooll.apply(self.ping_one_ip, args=(item.ip,))
    #     # pooll.map(self.ping_one_ip, self.ip_items)
    # pooll.close()
    # pooll.join

    t0 = time.time()
    # multithreaded version
    pool = ThreadPool(processes=20)
    pool.map(self.ping_one_ip, range(len(self.ip_items)))
    pool.close()
    pool.join()
    t1 = time.time()
    # print("Total time running multi: %s seconds" % str(t0 - t0l))
    print("Total time running multi: %s seconds" % str(t1 - t0))

    # single-threaded version, for comparison
    # for index in range(len(self.ip_items)):
    #     self.ping_one_ip(index)
    # t2 = time.time()
    # print("Total time running multi: %s seconds" % str(t1 - t0))
    # print("Total time running single: %s seconds" % str(t2 - t1))

    print(len(self.ip_items))
    self.ip_items = [item for item in self.ip_items
                     if item.speed >= 0 and item.speed < 1500.0]  # keep IPs that answered within the 1.5 s timeout
    print(len(self.ip_items))
Example 9: start_download
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def start_download(page_url):
    links = get_pattern_group(image_pattern, get_url_content(page_url))
    pool = ThreadPool(3)
    pool.map(download, links)
    pool.close()
    pool.join()
Example 10: get_web_dict_multithreading
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def get_web_dict_multithreading(self):
    if POOL_NUM > 1 and POOL_NUM < 50:
        pool = Pool(POOL_NUM)
        logging.info("You are using multiple threads to get info from web:"
                     " [ %d ]\n" % POOL_NUM)
    else:
        pool = Pool()

    if os.path.isfile(LOCAL_JSON_CACHE_FILE):
        with open(LOCAL_JSON_CACHE_FILE, 'r') as f:
            local_web_cache_dict = json.loads(f.read())
        species_in_local_json_cache = local_web_cache_dict.keys()
        logging.info(
            '[ CACHE ] Get cache from local JSON file:\n  |- %s' %
            '\n  |- '.join(species_in_local_json_cache))
    else:
        species_in_local_json_cache = []
        local_web_cache_dict = {}

    species_not_in_cache = list(
        set(self.non_repeatitive_species_name_list)
        .difference(set(species_in_local_json_cache)))
    pool.map(self._single_query, species_not_in_cache)
    _web_data_cache_dict.update(local_web_cache_dict)
    with open(LOCAL_JSON_CACHE_FILE, 'w') as f:
        json.dump(_web_data_cache_dict, f,
                  indent=4, separators=(',', ': '))
    logging.info(
        '[ CACHE ] Write all cache to local JSON file:\n  |- %s' %
        '\n  |- '.join(_web_data_cache_dict.keys()))
    pool.close()
    pool.join()
Example 11: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i', help='comma separated input map files from HMP', required=True)
    parser.add_argument('-s', help='comma separated list of body sites', required=True)
    parser.add_argument('-j', help='threads', type=int, default=1)
    args = parser.parse_args()

    logname = 'import-%s' % strftime("%Y%m%d%H%M")
    logger = make_log(logname)

    map_files = args.i.split(',')
    locs_list = args.s.split(',')
    mapper = {}
    map_to_write = []
    for filename in map_files:
        with open(filename, 'r') as f:
            r = csv.reader(f, delimiter='\t')
            header = next(r)
            for row in r:
                this_mapper = dict(zip(header, row))
                rid = this_mapper['RunID']
                bs = this_mapper['HMPBodySubsiteHMPBodySite']
                sid = this_mapper['SRS_SampleID']
                if bs not in locs_list:
                    continue
                map_to_write.append(make_map_to_write_row(this_mapper))
                if rid not in mapper:
                    mapper[rid] = this_mapper
                else:
                    logger.warning('found two entries for %s saved just one fastq file but both %s and %s will be added to map' % (rid, sid, mapper[rid]['SRS_SampleID']))

    pool = ThreadPool(args.j)
    holder = [[mapper_item, logger] for nom, mapper_item in mapper.items()]
    pool.map(dummy, holder)
    pool.close()
    pool.join()
    write_map(map_to_write, logger)
    logger.info('done')
Example 12: test_multithread
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def test_multithread(self):
    logger = HitLogger(300)

    def single_thread_process(logger):
        time.time = mock.Mock(return_value=0)
        logger.log_hit()
        time.sleep(1)
        time.time = mock.Mock(return_value=10)
        logger.get_hits()
        logger.log_hit()
        logger.get_hits()
        logger.log_hit()
        time.sleep(1)
        time.time = mock.Mock(return_value=11)
        logger.get_hits()
        time.sleep(1)
        time.time = mock.Mock(return_value=100)
        logger.log_hit()
        time.sleep(1)
        time.time = mock.Mock(return_value=200)
        logger.log_hit()
        logger.get_hits()
        time.sleep(1)
        time.time = mock.Mock(return_value=300)
        logger.log_hit()
        logger.get_hits()
        time.sleep(1)
        time.time = mock.Mock(return_value=310)
        logger.get_hits()

    pool = Pool(5)
    pool.map(single_thread_process, [logger] * 5)
    input('Press any key to exit...')
Example 13: spider
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def spider(self):
    pages = self.__total_pages()
    pool = ThreadPool(16)
    pool.map(self.make_market_data, range(pages))
    pool.close()
    pool.join()
Example 14: test_generate_in_progress_resizer_option_true
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def test_generate_in_progress_resizer_option_true(
        redis_cache,
        resizetarget_opts,
        image1_data,
        image1_name,
        tmpdir
):
    config = Config(
        root=str(tmpdir),
        url='/',
        redis_host=redis_cache.redis,
        raise_on_generate_in_progress=True
    )
    resizer = flask_resize.make_resizer(config)

    # Save original file
    resizer.storage_backend.save(image1_name, image1_data)

    def run(x):
        return resizer(image1_name)

    pool = Pool(2)

    with pytest.raises(flask_resize.exc.GenerateInProgress):
        pool.map(run, [None] * 2)
Example 15: e_cal
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import map [as alias]
def e_cal(l, cores):
    '''
    e calculator

    Receives the number of digits of precision to use, prints progress
    while working, and returns the computed value of e.
    '''
    global LOOPS
    p = Pool()
    getcontext().prec = l
    e = Decimal(0)
    i = 0
    temp = 0
    c = 0
    while True:
        fact = p.map(math.factorial, range(i, i + cores))  # compute factorials in parallel
        e += sum(p.map(one_div, fact))  # accumulate the reciprocals of the factorials
        i += cores
        c += 1
        LOOPS += 1
        sys.stdout.write("\r%i loops passed." % (c))  # print loop status
        sys.stdout.flush()
        # print(i, "loops passed.")
        if e == temp:
            break
        temp = e
    sys.stdout.write("\r%i loops passed.\n" % (c))
    print(i)
    p.close()
    p.join()
    return e