This article collects typical usage examples of the Python method multiprocessing.Pool.terminate. If you have been wondering what exactly Pool.terminate does, how to use it, or what it looks like in real code, the curated examples below may help. You can also read more about its containing class, multiprocessing.Pool.
Shown below are 15 code examples of Pool.terminate, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
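Before the examples, here is a minimal, self-contained sketch (not taken from any example below) of the core semantics: close() lets already-queued tasks finish, while terminate() stops the worker processes immediately without waiting for outstanding work. The function and values are illustrative only.

    from multiprocessing import Pool
    import time

    def slow_square(x):
        time.sleep(0.5)
        return x * x

    if __name__ == '__main__':
        pool = Pool(processes=2)
        results = [pool.apply_async(slow_square, (i,)) for i in range(10)]
        first = results[0].get()  # wait only for the first task
        pool.terminate()          # abandon the remaining tasks immediately
        pool.join()               # wait for the worker processes to exit
        print(first)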
Example 1: run_repeated
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def run_repeated(name, num_trials, num_examples, incorrect):
    manager = Manager()
    q = manager.Queue()
    pool = Pool(NUM_WORKERS + 1)  # Add one process for the listener
    # Start a listener that writes to the output file.
    filename = '{}-{}-{}.results'.format(name, num_examples, num_trials)
    pool.apply_async(listener, (filename, q))
    # Start worker jobs that run trials.
    jobs = []
    for i in range(num_trials):
        job = pool.apply_async(worker, (name, num_examples, q))
        jobs.append(job)
    results = []
    for job in jobs:
        results.append(job.get())
    # Check results for early exit
    num_incorrect = 0
    for result in results:
        if is_incorrect(result, incorrect):
            num_incorrect += 1
        if num_incorrect > NUM_TRIALS - MIN_CORRECT:
            pool.terminate()  # Kill the rest of the jobs in the pool.
            break
    q.put('kill')  # Tell the listener to stop running.
    pool.close()
    print()
    return results
Example 2: YaraJobPool
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
class YaraJobPool(object):
    """
    Yara pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=3):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_yara_task,
                         (self.message_queue,))
        atexit.register(self.clear)

    def add_yara_task(self, yara_task):
        """
        Adds the yara task.
        """
        self.message_queue.put(yara_task)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
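Example 2 passes the worker function and its arguments positionally to the Pool constructor, which is easy to misread. Below is a hedged sketch of the same pattern spelled out with keyword arguments; consume is a hypothetical stand-in for execute_yara_task. Note that each worker runs the initializer forever and never returns to serve normal pool tasks, which is exactly why the class above cleans up with terminate() rather than close().

    from multiprocessing import Pool, Queue
    import time

    def consume(message_queue):
        # Hypothetical worker loop: runs once per pool process at startup
        # and keeps pulling tasks from the shared queue.
        while True:
            task = message_queue.get()
            print('processing', task)

    if __name__ == '__main__':
        message_queue = Queue()
        # Keyword form of Pool(max_instances, execute_yara_task, (queue,)):
        pool = Pool(processes=3, initializer=consume,
                    initargs=(message_queue,))
        message_queue.put('job-1')
        time.sleep(1)     # give a worker a chance to pick the task up
        pool.terminate()  # workers never return from consume(), so terminate
        pool.join()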
Example 3: _doFastPoW
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def _doFastPoW(target, initialHash):
    import shared
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    while True:
        if shared.shutdown:
            pool.terminate()
            while True:
                time.sleep(10)  # Don't let this thread return here; it will return nothing and cause an exception in bitmessagemain.py
            return
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                return result[0], result[1]
        time.sleep(0.2)
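Examples 3 and 8 both poll AsyncResult.ready() in a loop and terminate the pool as soon as any worker finds an answer. Here is a stripped-down sketch of that "first result wins" idiom; the search function and its arguments are illustrative placeholders, not part of the original code.

    import time
    from multiprocessing import Pool

    def search(seed):
        # Illustrative placeholder: smaller seeds "find" an answer sooner.
        time.sleep(seed)
        return seed * seed

    if __name__ == '__main__':
        pool = Pool(processes=4)
        pending = [pool.apply_async(search, (i,)) for i in range(1, 5)]
        answer = None
        while answer is None:
            for res in pending:
                if res.ready():
                    answer = res.get()
                    break
            time.sleep(0.2)  # avoid a busy spin while waiting
        pool.terminate()     # the race is over; kill the slower workers
        pool.join()
        print(answer)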
Example 4: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def main():
    if len(sys.argv) != 3:
        print 'Usage: {} <file> <save_dir>'.format(sys.argv[0])
        sys.exit(1)
    csv_file, save_dir = sys.argv[1], sys.argv[2]
    frame = pandas.read_csv(csv_file, sep='\t', header=None)
    counters = map(Counter, [frame[i] for i in range(len(frame.columns))])
    pool = Pool()
    for i in range(len(counters)):
        for j in range(len(counters)):
            nr_keys = len(counters[i]) * len(counters[j])
            if len(counters[i]) > 200 or len(counters[j]) > 200 or nr_keys > 200 * 200:
                print 'too many keys columns `{},{}\': {}'.format(
                    i, j, nr_keys)
            else:
                print 'columns `{},{}\' passed'.format(i, j)
                pool.apply_async(do_main, [
                    csv_file, frame, i, j, os.path.join(
                        save_dir, '{}-{}.png'.format(i, j))])
    pool.close()
    pool.join()
    pool.terminate()  # a no-op at this point: join() already waited for the workers to exit
Example 5: MultiProcessScheduler
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
class MultiProcessScheduler(LocalScheduler):
    def __init__(self, threads):
        LocalScheduler.__init__(self)
        self.threads = threads
        self.tasks = {}
        from multiprocessing import Pool
        self.pool = Pool(self.threads or 2)

    def start(self):
        pass

    def submitTasks(self, tasks):
        def callback(args):
            logger.debug("got answer: %s", args)
            tid, reason, result, update = args
            task = self.tasks.pop(tid)
            self.taskEnded(task, reason, result, update)
        for task in tasks:
            logger.debug("put task async: %s", task)
            self.tasks[task.id] = task
            self.pool.apply_async(run_task_in_process,
                                  [task, self.nextAttempId(), env.environ],
                                  callback=callback)

    def stop(self):
        self.pool.terminate()
        self.pool.join()
        logger.debug("process pool stopped")
Example 6: loop
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def loop(self, argv=sys.argv):
    # Parse arguments
    self.parse(argv)
    # Check for help option
    if self.isParameter('info'):
        self.info()
        return
    # Call start function
    self.start()
    # Number of processors
    nproc = int(self.getParameter('nproc', '1'))
    # Run with multiprocessing if requested
    if nproc > 1:
        # Create the log directory if needed
        if not os.path.exists('%s/logs' % Common.NeatDirectory):
            os.makedirs('%s/logs' % Common.NeatDirectory)
        # Create a pool of workers
        pool = Pool(processes=nproc)
        try:
            # Loop over the channels
            for set in self.__loopsets:
                pool.apply_async(self.wrapper, (set,))
                time.sleep(1)
            pool.close(); pool.join()
        except KeyboardInterrupt:
            pool.terminate(); pool.join()
    else:
        # Run the processes sequentially, without multiprocessing
        for set in self.__loopsets:
            self.process(set)
    # Call end function
    self.end()
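The except KeyboardInterrupt: pool.terminate(); pool.join() pattern above recurs in Examples 9, 12, and 15: Ctrl-C raises KeyboardInterrupt in the parent process, and terminating the pool keeps orphaned workers from lingering. A minimal sketch of just that idiom, with an illustrative work function; using map_async(...).get(timeout=...) instead of a bare map() is a commonly cited workaround for older Pythons where a blocking map() could delay delivery of the interrupt.

    from multiprocessing import Pool
    import time

    def work(item):
        time.sleep(1)
        return item

    if __name__ == '__main__':
        pool = Pool(4)
        try:
            results = pool.map_async(work, range(100)).get(timeout=3600)
            pool.close()
            pool.join()
        except KeyboardInterrupt:
            pool.terminate()  # kill workers right away on Ctrl-C
            pool.join()       # then reap them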
Example 7: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def main():
    parser = ArgumentParser(description="Speed up your SHA. A different hash style.")
    parser.add_argument("-1", "--sha1", action="store_true")
    parser.add_argument("-2", "--sha224", action="store_true")
    parser.add_argument("-3", "--sha256", action="store_true")
    parser.add_argument("-4", "--sha384", action="store_true")
    parser.add_argument("-5", "--sha512", action="store_true")
    parser.add_argument("-f", "--file", type=str, help="The path to the file")
    if len(sys.argv) == 1:
        parser.print_help()
        return
    global args
    args = parser.parse_args()
    hashtree = ""
    big_file = open(args.file, "rb")
    pool = Pool(multiprocessing.cpu_count())
    for chunk_hash in pool.imap(hashing, chunks(big_file)):
        hashtree = hashtree + chunk_hash
    pool.terminate()
    if os.path.getsize(args.file) < 20971520:  # 20 MiB
        print(hashtree)
    else:
        print(str(hashing(hashtree)))
Example 8: _doFastPoW
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def _doFastPoW(target, initialHash):
    logger.debug("Fast PoW start")
    import time
    from multiprocessing import Pool, cpu_count
    try:
        pool_size = cpu_count()
    except:
        pool_size = 4
    try:
        maxCores = config.getint('bitmessagesettings', 'maxcores')
    except:
        maxCores = 99999
    if pool_size > maxCores:
        pool_size = maxCores
    pool = Pool(processes=pool_size)
    result = []
    for i in range(pool_size):
        result.append(pool.apply_async(_pool_worker, args=(i, initialHash, target, pool_size)))
    while True:
        if shutdown >= 1:
            pool.terminate()
            raise Exception("Interrupted")
        for i in range(pool_size):
            if result[i].ready():
                result = result[i].get()
                pool.terminate()
                pool.join()  # Wait for the workers to exit...
                logger.debug("Fast PoW done")
                return result[0], result[1]
        time.sleep(0.2)
Example 9: harmony_search
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def harmony_search(objective_function, num_processes, num_iterations, initial_harmonies=None):
    """
    Here, we use multiprocessing.Pool to do multiple harmony searches simultaneously. Since HS is stochastic (unless random_seed is set),
    multiple runs can find different results. We run the specified number of iterations on the specified number of processes and return
    an instance of HarmonySearchResults.
    """
    pool = Pool(num_processes)
    try:
        start = datetime.now()
        pool_results = [pool.apply_async(worker, args=(objective_function, initial_harmonies,)) for i in range(num_iterations)]
        pool.close()  # no more tasks will be submitted to the pool
        pool.join()   # wait for all tasks to finish before moving on
        end = datetime.now()
        elapsed_time = end - start

        # find best harmony from all iterations
        best_harmony = None
        best_fitness = float('-inf') if objective_function.maximize() else float('+inf')
        harmony_memories = list()
        harmony_histories = list()
        for result in pool_results:
            # a multiprocessing.pool.AsyncResult is returned for each process, so call get() to pull out the value
            harmony, fitness, harmony_memory, harmony_history = result.get()
            if (objective_function.maximize() and fitness > best_fitness) or (not objective_function.maximize() and fitness < best_fitness):
                best_harmony = harmony
                best_fitness = fitness
            harmony_memories.append(harmony_memory)
            harmony_histories.append(harmony_history)

        return HarmonySearchResults(elapsed_time=elapsed_time, best_harmony=best_harmony, best_fitness=best_fitness,
                                    harmony_memories=harmony_memories, harmony_histories=harmony_histories)
    except KeyboardInterrupt:
        pool.terminate()
Example 10: replaceText
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def replaceText(text_test, apikey):
    urls = []
    urls_in_order = []
    for url in re.findall(r'(https?://[^\s]+)', text_test):
        newurl = url.split('"')[0].split('<')[0]
        while newurl[-1] == '.' or newurl[-1] == ')' or newurl[-1] == '!':
            newurl = newurl[:-1]
        if not apikey:
            urls.append(newurl)
        else:
            urls.append((newurl, apikey))
        urls_in_order.append(newurl)
    f = getWebArchiveLink
    if apikey:
        f = getPermaccLink
    p = Pool(cpu_count())
    conversion = {}
    for result in p.map(f, list(set(urls))):
        conversion[result[0]] = result[1]
    p.terminate()
    print conversion
    curPos = 0
    for url in urls_in_order:
        if url in text_test[curPos:]:
            print url
            print conversion[url]
            print text_test[curPos:]
            newPos = text_test.index(url)
            text_test = text_test[0:curPos] + text_test[curPos:].replace(url, conversion[url], 1)
            curPos = newPos
    return text_test
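A side note on this example: by the time p.map() returns, every task has already completed, so terminate() here only tears down the now-idle workers. A sketch of the equivalent, slightly tidier cleanup (same p as above):

    p.close()  # no more tasks will be submitted
    p.join()   # reap the already-finished workers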
Example 11: pricing
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def pricing(dual):
    '''process for getting new columns'''
    cpus = cpu_count() - int(argv[2])
    final = pow(2, K)
    if K < 23:
        section = final
    else:
        section = 100 * cpus  # try different values
    to = 0
    since = 1
    manager = Manager()
    elements = manager.list([RETAILERS, DCS, PLANTS])
    out = manager.Queue()  # queue with the result from each worker
    while to < final:
        p = Pool(cpus)
        to = min(since + section, final)
        boss = p.apply_async(coordinator, (out,))
        workers = [p.apply_async(work, (k, elements, dual, out)) for k in xrange(since, to)]
        enviados = 0
        for w in workers:
            enviados += w.get()
        out.put('ok')
        a = boss.get()
        assert a.counter == enviados
        since = to + 1
        p.terminate()
    return a
Example 12: main
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def main():
    logging.basicConfig(level=logging.DEBUG)
    urls = []
    with open('urls-50k.csv') as csvfile:
        reader = csv.DictReader(csvfile, ['url', 'cnt'])
        urls = [row['url'] for row in reader]
    urls = urls[1:]  # strip out header row
    # normalize URL encoding of '://'
    urls = [url.replace('%3A%2F%2F', '://') for url in urls]
    # dedupe
    urls = set(urls)
    pool = Pool(processes=200)
    promise = pool.map_async(consume, urls)
    results = []
    try:
        results = promise.get()
    except KeyboardInterrupt:
        logging.error('Terminating worker pool')
        pool.terminate()
        pool.join()
        return
    print "--ALL RESULTS--"
    print results
    print "--RESULTS THAT MATCH--"
    print [result['url'] for result in results
           if result and result['magic_viewport']]
    print "Number of URLs scanned:", len(urls)
    print "Failed checks", len([True for result in results if not result])
    print "Have it:", len([True for result in results
                           if result and result['magic_viewport']])
    print "Don't have it:", len([False for result in results
                                 if result and not result['magic_viewport']])

if __name__ == '__main__':
    main()
Example 13: plot_zphot_zspec
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def plot_zphot_zspec(Nthreads):
    from multiprocessing import Pool
    match = filter('GAMA-MATCHED')
    n_samples = match['ID'].shape[0]
    pool = Pool(Nthreads)
    mapfn = pool.map
    Nchunk = np.ceil(1. / Nthreads * n_samples).astype(np.int)
    arglist = [None] * Nthreads
    for i in range(Nthreads):
        s = int(i * Nchunk)
        e = int(s + Nchunk)
        if i == Nthreads - 1: e = 203024
        print s, e
        arglist[i] = (s, e)
    result = list(mapfn(match_index, [ars for ars in arglist]))
    result = np.concatenate(result)
    #print result.flatten()
    np.savetxt("zphot_matched.txt", result.flatten())
    pool.close()
    pool.terminate()  # forcibly stops the now-idle workers right after close()
    pool.join()
    return None
Example 14: crack
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def crack(ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue):
    foundPassQ = Queue()
    try:
        timeA = datetime.now()
        startSize = passQueue.qsize()
    except:
        pass
    pool = Pool(numOfPs, crackProcess, (ssid, clientMac, APMac, Anonce, Snonce, mic, data, passQueue, foundPassQ))
    while True:
        sleep(1)
        try:
            timeB = datetime.now()
            currentSize = passQueue.qsize()
            print str(100 - 100.0 * currentSize / startSize) + "% done. " + str((startSize - currentSize) / (timeB - timeA).total_seconds()) + " hashes per second"
        except:
            pass
        if foundPassQ.empty():
            if passQueue.empty():
                returnVal = False
                break
        else:
            passphrase = foundPassQ.get()
            returnVal = passphrase
            break
    pool.terminate()
    return returnVal
Example 15: run
# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import terminate [as alias]
def run(args):
    print >> sys.stderr, "processing input from {0}...".format(args.logpath)
    source, xzproc = source_prepare(args.logpath)
    d = {'ticks': {}, 'nodes': {}}
    m = {'mem': 0, 'hours': 0}
    p = Pool(args.nprocesses)
    try:
        lines = []
        for line in source:
            if args.tee: sys.stdout.write(line)
            lines.append(line)
            if len(lines) > args.nprocesses * NUMLINES:
                d, m = do_reduce(d, m, do_map(p, lines))
                lines = []
        if len(lines) > 0: d, m = do_reduce(d, m, do_map(p, lines))
        p.close()
    except KeyboardInterrupt:
        print >> sys.stderr, "interrupted, terminating process pool"
        p.terminate()
        p.join()
        sys.exit()
    source_cleanup(args.logpath, source, xzproc)
    print >> sys.stderr, "done processing input: simulation ran for {0} hours and consumed {1} GiB of RAM".format(m['hours'], m['mem'])
    print >> sys.stderr, "dumping stats in {0}".format(args.prefix)
    dump(d, args.prefix, SHADOWJSON)
    print >> sys.stderr, "all done!"