本文整理汇总了Python中multiprocess.Pool.close方法的典型用法代码示例。如果您正苦于以下问题:Python Pool.close方法的具体用法?Python Pool.close怎么用?Python Pool.close使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocess.Pool
的用法示例。
在下文中一共展示了Pool.close方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: zte_gpon_svlan_check
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def zte_gpon_svlan_check():
    """Collect GPON svlan entries from every ZTE OLT that carries a GTGO card.

    Queries the graph DB for each OLT's ip plus the slots of its GTGO cards,
    then fans the per-OLT svlan collection out over an 8-worker process pool.
    Results are persisted by ``svlan_entry`` under a shared Manager lock.
    """
    clear_log()
    nodes = graph.cypher.execute(
        "match(n:Olt)--(c:Card) where c.name='GTGO' return n.ip,collect(c.slot)")
    olts = ((x[0], x[1]) for x in nodes)

    # Named function instead of an assigned lambda (PEP 8 E731).
    def lzte_gpon_svlan(pair):
        ip, slots = pair
        return zte_gpon_svlan(ip=ip, slots=slots)

    pool = Pool(8)
    # A Manager lock (unlike a plain Lock) can be shared with pool workers.
    lock = Manager().Lock()
    func = partial(svlan_entry, lock)
    # list() forces the map results so all work finishes before close().
    list(pool.map(compose(func, lzte_gpon_svlan), olts))
    pool.close()
    pool.join()
示例2: prime_calculate
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def prime_calculate(self):
    """Split the range [0, maximum_prime] into one contiguous chunk per core
    and check each chunk for primes in a separate worker process.

    Each finished chunk reports back through ``self.update_num`` via the
    ``apply_async`` callback; ``join`` blocks until every worker is done.
    """
    break_points = []  # per-worker {"start": ..., "stop": ...} ranges
    span = self.maximum_prime + 1
    for i in range(cores):
        # Float division keeps chunk boundaries evenly spread before ceil().
        break_points.append({
            "start": int(math.ceil(span / float(cores) * i)),
            "stop": int(math.ceil(span / float(cores) * (i + 1))),
        })
    p = Pool(cores)  # number of worker processes to create
    for kwds in break_points:
        # Fire-and-forget: the callback collects results as workers finish.
        # (The original bound the async result to an unused variable.)
        p.apply_async(self.prime_calculator, kwds=kwds, callback=self.update_num)
    p.close()  # no more tasks may be submitted
    p.join()   # wait for all worker processes to end
示例3: interface_check_m
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def interface_check_m():
    """Fetch the interface table of every switch in parallel and log it."""
    clear_log()
    query = "match(s: Switch) return s.ip, s.model"
    targets = [(rec[0], rec[1]) for rec in graph.cypher.execute(query)]
    workers = Pool(16)
    shared_lock = Manager().Lock()
    writer = partial(output_interface_m, shared_lock)
    # Drain the map so every switch is processed before the pool is closed.
    for _ in workers.map(compose(writer, get_interface), targets):
        pass
    workers.close()
    workers.join()
示例4: svlan_check
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def svlan_check():
    """Collect svlan records from every OLT using a 16-worker pool."""
    clear_log()
    olt_nodes = graph.find('Olt')
    targets = [(node['ip'], node['company'], node['area']) for node in olt_nodes]
    workers = Pool(16)
    shared_lock = Manager().Lock()
    record = partial(svlan_entry, shared_lock)
    # Drain the map so every OLT is processed before the pool is closed.
    for _ in workers.map(compose(record, get_svlan), targets):
        pass
    workers.close()
    workers.join()
示例5: main
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def main(args):
    """Fan ``calls2d`` out across all slab models on ``args.nCores`` workers."""
    filedate = args.filedate
    database = args.database
    slablist = ['alu', 'cal', 'cam', 'car', 'cas', 'cot', 'hal', 'hel',
                'him', 'hin', 'izu', 'jap', 'ker', 'kur', 'mak', 'man',
                'mue', 'pam', 'png', 'phi', 'puy', 'ryu', 'sam', 'sco',
                'sol', 'sul', 'sum', 'van']
    # Workers receive only an index; the slab list itself is baked into the
    # partial so each process looks up its own slab name.
    worker = partial(calls2d, database, filedate, slablist)
    pool1 = Pool(args.nCores)
    pts = pool1.map(worker, range(len(slablist)))
    pool1.close()
    pool1.join()
示例6: add_infs
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def add_infs():
    """Pull interface lists from every OLT (ZTE or Huawei) and store them."""
    dispatch = {'zte': Zte.get_infs, 'hw': Huawei.get_infs}
    get_infs = partial(_company, dispatch)
    clear_log()
    records = graph.cypher.execute(
        'match (n:Olt) return n.ip as ip,n.company as company')
    targets = [dict(ip=rec['ip'], company=rec['company']) for rec in records]
    workers = Pool(128)
    shared_lock = Manager().Lock()
    saver = partial(_add_infs, shared_lock)
    # Drain the map so every OLT is processed before the pool is closed.
    for _ in workers.map(compose(saver, get_infs), targets):
        pass
    workers.close()
    workers.join()
示例7: hostname_check
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def hostname_check():
    """Fetch every OLT's hostname in parallel, then copy the collected
    "ip,hostname" pairs from the result file back onto the Olt nodes.
    """
    clear_log()
    nodes = graph.find('Olt')
    olts = [(x['ip'], x['company']) for x in nodes]
    pool = Pool(16)
    lock = Manager().Lock()
    func = partial(hostname_entry, lock)
    list(pool.map(compose(func, get_hostname), olts))
    pool.close()
    pool.join()
    # Close the result file deterministically instead of leaking the handle.
    with open(result_file) as rf:
        ip_hostname = [line.split(',') for line in rf]
    cmd = "match (n:Olt) where n.ip={ip} set n.hostname={hostname}"
    for rec in ip_hostname:
        # NOTE(review): rec[1] keeps any trailing newline from the file —
        # confirm entries are written without one, or strip() here.
        graph.cypher.execute(cmd, ip=rec[0], hostname=rec[1])
示例8: get_vlan_usersP
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def get_vlan_usersP(bras):
    """Query per-vlan user counts from every BRAS in parallel and merge the
    per-device dicts into one total with key-wise sums.

    :param bras: iterable of (ip, model, inf) triples.
    :returns: dict mapping vlan -> summed user count; {} when *bras* is
        empty (the original raised on empty input).
    """
    def _get_vlan_users(bas):
        # Model-specific dispatch (m6k vs me60).
        funcs = {'m6k': M6k.get_vlan_users,
                 'me60': ME60.get_vlan_users}
        _gvu = partial(_model, funcs)
        return _gvu(bas)

    bras = [dict(ip=x[0], model=x[1], inf=x[2])
            for x in bras]
    if not bras:
        # Pool(0) raises, and reduce() over no items raises — short-circuit.
        return {}
    pool = Pool(len(bras))  # one worker per device
    temp = pool.map(_get_vlan_users, bras)
    pool.close()
    pool.join()
    # Keep only successful payloads, then merge with key-wise sum. The {}
    # initializer makes reduce safe when every query failed.
    temp = [x[1] for x in temp if x[1]]
    rslt = reduce(lambda x, y: merge_with(sum, x, y), temp, {})
    return rslt
示例9: calculate
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def calculate(self, data):
    """Execute the whole computation graph on *data*.

    Nodes are evaluated layer by layer in topological order: within each
    layer every node is first loaded with its inputs from ``self._data``,
    then the layer is run — on a process pool when ``self._parallel`` is
    set, sequentially otherwise — and each node's outputs are written back
    into ``self._data`` under its declared output names.

    Returns the result of the last node processed.  NOTE(review): when the
    final layer holds several nodes, which node's result is returned
    depends on iteration order — confirm this is intended.
    """
    t1 = dt.datetime.utcnow()
    LOGGER.info('Starting calculation...')
    # Work on a private copy so the caller's input dict is never mutated.
    self._data = deepcopy(data)
    self._check_inputs(data)
    dep = self._dependencies()
    sorted_dep = topological_sort(dep)
    for items in sorted_dep:
        # loading node with inputs
        for item in items:
            node = self._get_node(item)
            # Positional inputs are the declared names not supplied as kwargs.
            args = [i_name for i_name in node.input_names if i_name not in node.kwargs]
            data_to_pass = []
            for arg in args:
                data_to_pass.append(self._data[arg])
            kwargs_to_pass = {}
            for kwarg in node.kwargs:
                kwargs_to_pass[kwarg] = self._data[kwarg]
            node.load_inputs(data_to_pass, kwargs_to_pass)
        # running nodes
        if self._parallel:
            # Run the whole layer on a pool. Graph.run_node must yield
            # (node id, result) pairs — they are dict-ified just below.
            pool = Pool(self._pool_size)
            results = pool.map(
                Graph.run_node,
                [self._get_node(i) for i in items]
            )
            pool.close()
            pool.join()
            results = {k: v for k, v in results}
        else:
            results = {}
            for item in items:
                node = self._get_node(item)
                res = node.run_with_loaded_inputs()
                results[node.id] = res
        # save results
        for item in items:
            node = self._get_node(item)
            res = results[node.id]
            if len(node.output_names) == 1:
                self._data[node.output_names[0]] = res
            else:
                # Multi-output node: the result is unpacked positionally.
                for i, out in enumerate(node.output_names):
                    self._data[out] = res[i]
    t2 = dt.datetime.utcnow()
    LOGGER.info('Calculation finished in {}'.format(t2-t1))
    return res
示例10: zhongji_check
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def zhongji_check():
    """Collect trunk (zhongji) port info from every OLT, then record the
    Olt -[:HAS]-> Etrunk -[:Include]-> Port structure in the graph DB.
    """
    clear_log()
    nodes = graph.find('Olt')
    olts = [(x['ip'], x['company']) for x in nodes]
    pool = Pool(16)
    lock = Manager().Lock()
    func = partial(zhongji_entry, lock)
    list(pool.map(compose(func, get_zhongji), olts))
    pool.close()
    pool.join()
    cmd = """match(n: Olt) where n.ip = {ip}
merge(n) - [:HAS]->(m: Etrunk{name: {sm}})
merge(m) - [:Include]->(p: Port{name: {interface}})"""
    # Close the result file deterministically instead of leaking the handle.
    with open(result_file) as rf:
        for rec in (line.split(',') for line in rf):
            graph.cypher.execute(cmd, ip=rec[0], sm=rec[1], interface=rec[2])
示例11: parallel_cdist
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def parallel_cdist(data1, data2, n_rows_per_job=100):
    """Compute cdist(data1, data2) by splitting data1 into row bands of
    *n_rows_per_job* rows and farming each band out to a 12-process pool.

    :returns: distance matrix of shape (len(data1), len(data2)).
    """
    from scipy.spatial.distance import cdist
    data1 = np.array(data1)
    data2 = np.array(data2)
    start_indices = np.arange(0, data1.shape[0], n_rows_per_job)
    end_indices = start_indices + n_rows_per_job - 1  # inclusive band ends
    pool = Pool(12)
    # Python 3 removed tuple-parameter lambdas ("lambda (si, ei):", PEP 3113)
    # — the original was a SyntaxError on py3 — so unpack inside the body.
    # .copy() gives each worker a contiguous, independently picklable band.
    partial_distance_matrices = pool.map(
        lambda bounds: cdist(data1[bounds[0]:bounds[1] + 1].copy(), data2),
        zip(start_indices, end_indices))
    pool.close()
    pool.join()
    distance_matrix = np.concatenate(partial_distance_matrices)
    return distance_matrix
示例12: add_power_info
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def add_power_info():
    """Collect power-supply info from every SNMP-reachable switch."""
    dispatch = {'S8508': S85.get_power_info,
                'S8505': S85.get_power_info,
                'T64G': T64.get_power_info,
                'S8905': S89.get_power_info,
                'S8905E': S8905E.get_power_info,
                'S9306': S93.get_power_info,
                'S9303': S93.get_power_info}
    get_power_info = partial(_model, dispatch)
    records = graph.cypher.execute(
        "match (s:Switch) where s.snmpState='normal' return s.ip as ip,s.model as model")
    targets = [dict(ip=rec['ip'], model=rec['model']) for rec in records]
    workers = Pool(processor)
    shared_lock = Manager().Lock()
    saver = partial(_add_power_info, shared_lock)
    # Drain the map so every switch is processed before the pool is closed.
    for _ in workers.map(compose(saver, get_power_info), targets):
        pass
    workers.close()
    workers.join()
示例13: add_traffics
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def add_traffics():
    """Gather interface traffic counters from every SNMP-reachable switch."""
    dispatch = {'S8508': S85.get_traffics,
                'S8505': S85.get_traffics,
                'T64G': T64.get_traffics,
                'S8905': S89.get_traffics,
                'S8905E': S8905E.get_traffics,
                'S9306': S93.get_traffics,
                'S9303': S93.get_traffics}
    get_traffics = partial(_model, dispatch)
    records = graph.cypher.execute(
        "match (s:Switch)--(i:Inf) where s.snmpState='normal' return s.ip as ip,collect(i.name) as infs,s.model as model")
    targets = [dict(ip=rec['ip'], infs=rec['infs'], model=rec['model'])
               for rec in records]
    workers = Pool(processor)
    shared_lock = Manager().Lock()
    saver = partial(_add_traffics, shared_lock)
    # Drain the map so every switch is processed before the pool is closed.
    for _ in workers.map(compose(saver, get_traffics), targets):
        pass
    workers.close()
    workers.join()
示例14: compute_jaccard_pairwise
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
def compute_jaccard_pairwise(indices, square_form=True, parallel=True, return_poses=False):
    """Compute pairwise Jaccard scores over *indices*.

    Each element is scored against every later element (upper triangle).
    When *square_form* is true the concatenated scores are expanded to a
    full square matrix; *return_poses* additionally returns the pose data
    that accompanies each score.
    """
    n = len(indices)
    # One task per row: (this index, all indices after it).
    tasks = [(indices[i], indices[i + 1:]) for i in range(n)]
    if parallel:
        pool = Pool(16)
        scores_poses_tuples = pool.map(
            lambda task: compute_jaccard_i_vs_list(task[0], task[1]), tasks)
        pool.close()
        pool.join()
    else:
        scores_poses_tuples = [compute_jaccard_i_vs_list(first, rest)
                               for first, rest in tasks]
    pairwise_scores = np.array([s for s, p in scores_poses_tuples])
    if square_form:
        pairwise_scores = squareform(np.concatenate(pairwise_scores))
    if return_poses:
        poses = np.array([p for s, p in scores_poses_tuples])
        return pairwise_scores, poses
    return pairwise_scores
示例15: enumerate
# 需要导入模块: from multiprocess import Pool [as 别名]
# 或者: from multiprocess.Pool import close [as 别名]
# Tile origins: the image was processed as a grid of 5000x5000 tiles,
# enumerated row-major (x fastest) to match the ordering of input_fps below.
# NOTE(review): `origins`, `img_h`, `img_w`, `input_fps`, `labelmap_alltiles`,
# `input_img_fp` and `filenames` are defined earlier in the file (not visible
# in this chunk).
for iy, y0 in enumerate(np.arange(0, img_h, 5000)):
    for ix, x0 in enumerate(np.arange(0, img_w, 5000)):
        origins.append((x0, y0))

alg = 'cellprofiler'

# Stitch the per-tile label maps into one full-size map, offsetting each
# tile's labels by the running maximum `n` so labels stay globally unique.
big_labelmap = np.zeros((img_h, img_w), dtype=np.int64)
n = 0
for i, input_fp in enumerate(input_fps):
    prefix = os.path.splitext(input_fp)[0]
    labelmap = labelmap_alltiles[i].astype(np.int64) # astype(np.int64) is important, otherwise results in negative label values.
    x0, y0 = origins[i]
    # Only overwrite labelled (non-zero) pixels so tiles don't erase neighbours.
    big_labelmap[y0:y0+5000, x0:x0+5000][labelmap != 0] = labelmap[labelmap != 0] + n
    n += labelmap.max()

# Persist the stitched map next to the input image, then push it to S3.
labelmap_fp = os.path.splitext(input_img_fp)[0] + '_labelmap_%(alg)s.bp' % dict(alg=alg)
bp.pack_ndarray_file(big_labelmap, labelmap_fp)
upload_to_s3(labelmap_fp)

# Remove the per-tile intermediates once the stitched map is saved.
for fp in input_fps:
    execute_command('rm ' + fp)

t = time.time()
# NOTE(review): NUM_CORES/2 is a float under Python 3 — Pool() needs an int;
# use NUM_CORES // 2 if this script is ported.
pool = Pool(NUM_CORES/2)
pool.map(detect_cells, filenames)
pool.close()
pool.join()
sys.stderr.write('Overall time: %.2f seconds.\n' % (time.time()-t))