本文整理汇总了Python中multiprocessing.Manager类的典型用法代码示例。如果您正苦于以下问题:Python Manager类的具体用法?Python Manager怎么用?Python Manager使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Manager类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: controller_failure_unit_test
def controller_failure_unit_test():
    """Unit test for controller-failure detection.

    Starts two detector processes, lets them run for a random interval,
    then terminates the first one and waits for both to finish.  Shared
    failure flags/lists are handed to the detectors through Manager proxies.
    (Python 2 syntax: uses `print` statements.)
    """
    s = ["1001"]
    s1 = ["1002"]
    # Reset switch/controller configuration before the test run.
    clear_config(s)
    clear_config(s1)
    # One Manager per detector so each has independent shared state.
    manager1 = Manager()
    manager2 = Manager()
    failure1 = manager1.Value('i', 0)      # failure flag for detector 1
    failed_list1 = manager1.list([])       # ids detected as failed by detector 1
    failure2 = manager2.Value('i', 0)
    failed_list2 = manager2.list([])
    processes = []
    # NOTE(review): both detectors are given `s`; `s1` is cleared above but
    # never monitored — confirm whether the second detector should get `s1`.
    process2 = mp.Process(target=controller_failure_detection, args=(s, '1', failure1, failed_list1,))
    processes.append(process2)
    process4 = mp.Process(target=controller_failure_detection, args=(s, '2', failure2, failed_list2,))
    processes.append(process4)
    for p in processes:
        p.start()
        print 'STARTING:', p, p.is_alive()
    # Let the detectors run for a random 1-10 s before simulating a failure.
    r = random.randint(1, 10)
    time.sleep(r)
    print 'terminated'
    t1 = time.time()
    logging.debug(str( ["controller failed at:"] + [t1]))
    # Simulate controller failure by killing the first detector process.
    processes[0].terminate()
    # Exit the completed processes
    for p in processes:
        p.join()
        print 'JOINED:', p, p.is_alive()
示例2: correction_terms_threaded
def correction_terms_threaded(self):
    '''Finds the correction terms associated to the quadratic form.

    For each of the equivalence classes it finds the maximum by iterating
    through the relation vectors of the group, fanning the work out over a
    multiprocessing.Pool.  Returns a list of Fractions, one per class.
    (Python 2 syntax: `print` statements, `xrange`, list-returning `map`.)
    '''
    print 'Using multiprocessing'
    pool = Pool() # default: processes=None => uses cpu_count()
    manager = Manager()
    start_time = time.time()
    coef_lists = lrange(self.group.structure)
    # representatives = elements of C_1(V) (np.matrix)
    representatives = map(lambda l: self.find_rep(l), coef_lists)
    # Shared list of per-class maxima, filled in by the workers.
    lst = manager.list([None for i in xrange(len(representatives))])
    alphalist = list(self.get_alpha()) # cannot pickle generators
    # NOTE(review): the AsyncResult from map_async is discarded, so worker
    # exceptions are silently dropped — confirm this is acceptable here.
    pool.map_async(functools.partial(process_alpha_outside, self,
                                     representatives, lst), alphalist)
    pool.close()
    pool.join() # wait for pool to finish
    # get corrterms via (|alpha|^2+b)/4
    print 'Computed from quadratic form in %g seconds' \
        % (time.time() - start_time)
    return [Fraction(Fraction(alpha, self.int_inverse[1]) + self.b, 4) \
            for alpha in lst]
示例3: processFiles
def processFiles(patch_dir):
    """Walk `patch_dir` for leaf data directories, process each one in a
    worker process (one per logical CPU), and write non-empty results to
    sequentially numbered ``resultN.txt`` files.

    Relies on the module-level ``glbl`` object for shared state
    (``glbl.data_dirs``, ``glbl.jobs``).  Python 2: the Manager dict proxy's
    ``keys()`` returns a plain list, which is then sorted in place.
    """
    root = os.getcwd()
    glbl.data_dirs = {}
    # Allow patch_dir to be given either relative to cwd or as cwd itself.
    if root != patch_dir: working_path = root+"/"+patch_dir
    else: working_path = root
    # Leaf directories (no subdirectories) are the units of work.
    for path, dirs, files in os.walk(working_path):
        if len(dirs) == 0: glbl.data_dirs[path] = ''
    # Multiprocessing Section
    #########################################
    Qids = glbl.data_dirs.keys()
    manager = Manager() # creates shared memory manager object
    results = manager.dict() # Add dictionary to manager, so it can be accessed across processes
    nextid = Queue() # Create Queue object to serve as shared id generator across processes
    for qid in Qids: nextid.put(qid) # Load the ids to be tested into the Queue
    for x in range(0,multiprocessing.cpu_count()): # Create one process per logical CPU
        p = Process(target=processData, args=(nextid,results)) # Assign process to processCBR function, passing in the Queue and shared dictionary
        glbl.jobs.append(p) # Add the process to a list of running processes
        p.start() # Start process running
    for j in glbl.jobs:
        j.join() # For each process, join them back to main, blocking on each one until finished
    # write out results
    c = 1
    sets = results.keys()
    sets.sort()
    for x in sets:
        # Note: workers store the string 'None' (not the None object) for
        # empty results, hence the string comparison.
        if results[x] != 'None':
            FINAL = open('result'+str(c)+'.txt','w')
            n = "\n************************************************************************************************\n"
            FINAL.write(n+"* "+x+' *\n'+n+results[x]+"\n")
            FINAL.close()
            c += 1
示例4: download
def download(self, sources, output_directory, filename):
    """Download a file from one of the provided sources.

    The sources will be ordered by least amount of errors, so most
    successful hosts will be tried first. In case of failure, the next
    source will be attempted, until the first successful download is
    completed or all sources have been depleted.

    Args:
        sources: A list of dicts with 'host_name' and 'url' keys.
        output_directory (str): Directory to save the downloaded file in.
        filename (str): Filename assigned to the downloaded file.

    Returns:
        A dict with 'host_name' and 'filename' keys if the download is
        successful, or an 'error' dict if no valid sources exist, or an
        empty dict otherwise.
    """
    valid_sources = self._filter_sources(sources)
    if not valid_sources:
        return {'error': 'no valid sources'}
    manager = Manager()
    successful_downloads = manager.list([])

    def f(source):
        # Stop attempting further hosts once any worker has succeeded.
        if not successful_downloads:
            result = self.download_from_host(source, output_directory, filename)
            if 'error' in result:
                # Track per-host failures so future calls try better hosts first.
                self._host_errors[source['host_name']] += 1
            else:
                successful_downloads.append(result)

    # Fix: the thread pool was created anonymously and never closed/joined,
    # leaking worker threads on every call.  Close and join it explicitly.
    pool = multiprocessing.dummy.Pool(len(valid_sources))
    try:
        pool.map(f, valid_sources)
    finally:
        pool.close()
        pool.join()
    return successful_downloads[0] if successful_downloads else {}
示例5: main
def main():
    """Download a VK user's audio library with a bounded pool of worker
    processes, printing progress until every track is fetched.

    (Python 2 syntax: `print` statement.)
    """
    init_params()
    vk = connect_to_vk(LOGIN, PASSWORD)
    audio_list = vk.method('audio.get', {})
    total = len(audio_list)
    if not os.path.exists(DOWNLOAD_DIR):
        os.makedirs(DOWNLOAD_DIR)
    # Manager proxies let workers report progress back to this process.
    manager = Manager()
    workers_list = []
    progress_list = manager.dict()             # per-track progress
    downloaded_tracks = manager.Value('i', 0)  # completed-track counter
    lock = Lock()
    # Seed the pool with an initial batch of downloads.
    # NOTE(review): the initial batch is WORKERS_COUNT - 1 tracks while the
    # loop below tops up to WORKERS_COUNT — confirm the off-by-one is intended.
    for f in audio_list[:WORKERS_COUNT - 1]:
        start_download_process(f, workers_list, progress_list, downloaded_tracks, lock)
    del audio_list[:WORKERS_COUNT - 1]
    # Poll: refill the worker pool, print progress, and reap finished
    # workers until both the queue and the pool are empty.
    while any(worker.is_alive() for worker in workers_list) or len(audio_list):
        if audio_list and len(workers_list) < WORKERS_COUNT:
            f = audio_list.pop(0)
            start_download_process(f, workers_list, progress_list, downloaded_tracks, lock)
        print_progress(progress_list, downloaded_tracks.value, total)
        clean_workers(workers_list)
        time.sleep(0.1)
    print "Done!"
示例6: __init__
def __init__(self, port):
    """Create shared state and launch the SocketManager daemon process.

    `status` and `sendbuf` are Manager proxies shared with the child so
    both sides can observe connection state and queue outgoing data.
    """
    shared = Manager()
    self.status = shared.dict()
    self.sendbuf = shared.list()
    proc = Process(target=SocketManager,
                   args=(port, self.status, self.sendbuf))
    proc.daemon = True  # do not outlive the parent process
    self.p = proc
    self.p.start()
示例7: run
def run(args):
    """Run the baseline speed-test suite described by `args`.

    For every (possibly framework-expanded) config, trains the model in a
    subprocess pinned to a single GPU, parses the timing log the subprocess
    leaves behind, and saves the parsed speeds to the results database.
    The temporary log directory is always cleaned up.
    """
    # Limit it to a single GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    conn = create_db(args.db)
    m = Manager()
    logs = args.logging
    datasets = args.datasets
    embeddings = args.embeddings
    settings = args.settings
    # So we don't litter the fs
    dir_ = tempfile.mkdtemp(prefix='baseline-speed-test-')
    try:
        configs = get_configs(args.config)
        if not args.single:
            # Expand each base config into one variant per framework/option.
            full_configs = []
            for config in configs:
                full_configs.extend(edit_config(config, args.frameworks, args.no_crf, args.no_attn))
            configs = full_configs
        if args.verbose:
            for config in configs:
                pprint(config)
            print()
            print()
        steps = len(configs)
        pg = create_progress_bar(steps)
        for config in configs:
            # Save the unmodified config; the copy run below gets its epoch
            # count overridden by the number of trials.
            write_config = deepcopy(config)
            config['train']['epochs'] = args.trials
            task_name = config['task']
            system_info = m.dict()  # filled in by the child process
            p = Process(
                target=run_model,
                args=(
                    system_info,
                    config,
                    logs,
                    settings,
                    datasets,
                    embeddings,
                    task_name,
                    dir_,
                    int(args.gpu)
                )
            )
            p.start()
            pid = p.pid
            p.join()
            # The child writes its timing log keyed by its pid.
            log_file = os.path.join(dir_, 'timing-{}.log'.format(pid))
            speeds = parse_logs(log_file)
            save_data(conn, speeds, write_config, system_info)
            pg.update()
        pg.done()
    finally:
        # Always remove the temp log directory, even on failure.
        shutil.rmtree(dir_)
示例8: solve
def solve(iterations, proc_count):
    """Distribute `iterations` across `proc_count` worker processes.

    The iteration partition is loaded into a JoinableQueue, followed by one
    None sentinel per worker so each process knows when to stop.  Workers
    append into a Manager list; elapsed wall time and results are printed.

    Fix: the body referred to an undefined name ``process_count`` (the
    parameter is ``proc_count``), which raised NameError at runtime.
    """
    queue = JoinableQueue()
    partition = get_iterations_partition(iterations, proc_count)
    for iteration in partition:
        queue.put(iteration)
    # One sentinel per worker signals shutdown.
    for i in range(proc_count):
        queue.put(None)
    manager = Manager()
    result = manager.list()
    processes = []
    cur_time = time.time()
    for i in range(proc_count):
        proc = Process(target=worker, args=(queue, result,))
        proc.start()
        processes.append(proc)
    queue.join()
    for proc in processes:
        proc.join()
    cur_time = time.time() - cur_time
    print_results(cur_time, result, iterations)
示例9: multi_download
def multi_download(url_and_name_list, num_threads=8):
''' accepts list of tuples, where t[0] = url and t[1] = filename '''
manager = Manager()
#pylint: disable=no-member
m_list = manager.list()
#pylint: enable=no-member
log = logging.getLogger('multi_dl')
log.debug('starting pool with ' + str(num_threads) + ' workers')
monitor_thread = Process(target=download_monitor,
args=((m_list, len(url_and_name_list)),))
monitor_thread.start()
workers = Pool(processes=num_threads)
work = workers.map_async(single_download,
zip(url_and_name_list, repeat(m_list)))
# this hack makes the async_map respond to ^C interrupts
try:
work.get(0xFFFF)
monitor_thread.join()
sys.stdout.write('\n\n')
except KeyboardInterrupt:
print 'parent received control-c'
exit()
示例10: spawn
def spawn(self, n=GAME_CT):
    """Run `n` games, splitting the work across CPU cores when it pays off.

    Falls back to a single in-process run when multiprocessing is not
    importable or when parallelism would not help (single core / small n).
    Results are collected through a Manager list on `self.results`.
    """
    try:
        from multiprocessing import Process, Manager, cpu_count
    except ImportError:
        return self.run(n)
    # For low n multiprocessing does not gain much speed up.
    if cpu_count() <= 1 or n < 500:
        return self.run(n)
    self.results = Manager().list()
    ncpu = cpu_count()
    # Even split across cores; the division remainder goes to the last one.
    shares = [n // ncpu] * ncpu
    shares[-1] += n % ncpu
    workers = []
    for share in shares:
        w = Process(target=self.run, args=(share,))
        w.start()
        workers.append(w)
    for w in workers:
        w.join()
示例11: run
def run():
    """Train an async actor-critic agent on a maze MDP across multiple
    processes, then print timing, state values, and reward plots.

    Weights and results are shared between worker processes through
    Manager dict/list proxies.  (Python 2 syntax: `print` statement.)
    """
    # build the mdp
    start = time.time()
    room_size = 3
    num_rooms = 5
    mdp = maze_mdp.MazeMDP(room_size=room_size, num_rooms=num_rooms)
    # build the agent
    m = Manager()
    # One weight entry per (state, action) pair; None is the terminal action.
    init_dict = {(s, a): 0 for s in mdp.states for a in mdp.ACTIONS + [None]}
    shared_weights = m.dict(init_dict)
    shared_value_weights = m.dict(init_dict)
    agent = async_actor_critic.AsyncActorCritic(actions=mdp.ACTIONS, discount=mdp.DISCOUNT,
        weights=shared_weights, value_weights=shared_value_weights, tau=.3, learning_rate=.5)
    # build a single experiment
    rewards = m.list()
    start_state_values = m.list()
    # Upper bound on episode length: square of the maze diameter.
    max_steps = (2 * room_size * num_rooms) ** 2
    exp = experiment.Experiment(mdp=mdp, agent=agent, num_episodes=800, max_steps=max_steps,
        rewards=rewards, start_state_values=start_state_values)
    # run the experiment
    multiexperiment = experiment.MultiProcessExperiment(experiment=exp, num_agents=NUM_PROCESSES)
    multiexperiment.run()
    # report results
    end = time.time()
    print 'took {} seconds to converge'.format(end - start)
    mdp.print_state_values(shared_value_weights)
    # Best achievable return, used as a reference line in the plots.
    optimal = mdp.EXIT_REWARD + (2 * room_size * num_rooms * mdp.MOVE_REWARD)
    utils.plot_values(rewards, optimal, 'rewards')
    utils.plot_values(start_state_values, optimal, 'start state value')
示例12: __init__
def __init__(self):
    """Initialise cross-process shared state for flow tracking."""
    shared = Manager()
    # Flow -> state mapping, visible to every worker process.
    self.flow_to_state_map = shared.dict()
    self.flow_to_state_map.clear()
    # Shared integer flags.
    self.trigger = shared.Value('i', 0)
    self.comp = shared.Value('i', 0)  # sequential = 0, parallel = 1
示例13: scanner_network
def scanner_network(self,gateway):
    """Scan an IP range on the given gateway's subnet with one worker
    process per address, then populate the Qt results table.

    Live hosts are reported back through a Manager dict as
    "ip|mac"-formatted strings.  (Python 2 syntax: `xrange`; Py2 dict
    `.keys()` list semantics.)
    """
    # Strip the last character of the gateway to get the subnet prefix,
    # e.g. "192.168.0." from "192.168.0.x".
    get_ip = len(gateway)-1
    gateway = gateway[:get_ip]
    # UI field like "1-254" gives the host-number range to probe.
    ranger = str(self.ip_range.text()).split("-")
    self.control = True
    jobs = []
    manager = Manager()
    on_ips = manager.dict()  # shared: filled by self.working with live hosts
    for n in xrange(int(ranger[0]),int(ranger[1])):
        ip="%s{0}".format(n)%(gateway)
        p = Process(target=self.working,args=(ip,on_ips))
        jobs.append(p)
        p.start()
    for i in jobs: i.join()
    # Each entry is "ip|mac"; split and append to the table's data model.
    for i in on_ips.values():
        Headers = []
        n = i.split("|")
        self.data['IPaddress'].append(n[0])
        self.data['MacAddress'].append(n[1])
        self.data['Hostname'].append("<unknown>")
        # NOTE(review): the table is rebuilt once per discovered host;
        # confirm whether this refresh was meant to run once after the loop.
        for n, key in enumerate(reversed(self.data.keys())):
            Headers.append(key)
            for m, item in enumerate(self.data[key]):
                item = QTableWidgetItem(item)
                item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                self.tables.setItem(m, n, item)
    self.scanner_OFF(False,"txt_status")
    Headers = []
    for key in reversed(self.data.keys()):
        Headers.append(key)
    self.tables.setHorizontalHeaderLabels(Headers)
def crackTicket(ticket, label, hashList):
    """Attempt to decrypt a base64-encoded Kerberos ticket with each NTLM
    hash in `hashList`, printing the hash that succeeds.

    Returns "FAIL<label>" strings on parse errors and "" otherwise.
    (Python 2 syntax: `print` statements, str-as-bytes, `.decode('hex')`.)
    """
    try:
        data = base64.b64decode(ticket)
    except:
        #print "DEBUG\n" + str(ticket) + "DEBUG\n\n"
        return "FAIL" + str(label) + "\n"
    manager = Manager()
    enctickets = manager.list()
    # 0x76 is the ASN.1 APPLICATION 22 tag of a raw KRB-CRED blob; pull out
    # the encrypted ticket part via the decoded structure.
    if data[0] == '\x76':
        try:
            enctickets.append((str(decoder.decode(data)[0][2][0][3][2])))
        except:
            #print "DEBUG\n" + str(ticket) + "DEBUG\n\n"
            return "FAIL" + str(label)
    # NOTE(review): this branch compares the first two *characters* to "6d",
    # i.e. a hex-text dump (each line hex-decoded below) rather than raw
    # bytes — confirm the expected input format.
    elif data[:2] == '6d':
        for ticket in data.strip().split('\n'):
            try:
                enctickets.append((str(decoder.decode(ticket.decode('hex'))[0][4][3][2])))
            except:
                #print "DEBUG\n" + str(ticket) + "DEBUG\n\n"
                return "FAIL" + str(label)
    print "\nAccount: " + label
    # Try each candidate hash until one decrypts the ticket (etype 2 = RC4).
    for currentHash in hashList:
        ntlmHash_hex = binascii.unhexlify(currentHash)
        kdata, nonce = kerberos.decrypt(ntlmHash_hex, 2, enctickets[0])
        if kdata:
            print "NTLM Hash: " + currentHash
            break
    return ""
示例15: func_thread
def func_thread():
    """Demo: intersect index sets of two random arrays and fan the work out
    to worker processes sharing a Manager dict.

    (Python 2 syntax: `print` statements; `time.clock` was removed in
    Python 3.8.)
    """
    a = numpy.random.rand(1000000)
    b = numpy.random.rand(1000000)
    nodata = 0.3
    print "here"
    manager = Manager()
    lock = Lock()
    d = manager.dict()  # shared result dict written by f_thread workers
    ps = []
    start_time = time.clock()
    # NOTE(review): numpy.where on a 1-D condition returns a 1-tuple, so each
    # of these loops runs exactly once with the full index array — confirm
    # whether `numpy.where(...)[0]` iteration was intended.
    for i in numpy.where((a > 0.7) & (a < 0.9) & (a != nodata)):
        for j in numpy.where((b > 0.5) & (b < 0.9) & (b != nodata)):
            # Indices matching both conditions, split into two halves.
            index = numpy.intersect1d(i, j)
            length = len(index)/2
            array1 = index[:length]
            array2 = index[length:]
            # NOTE(review): both workers receive array1; array2 is unused —
            # presumably the second process should get array2. Verify.
            for processes in range(2):
                p = Process(target=f_thread, args=(d, a, b, array1, lock))
                ps.append(p)
                p.start()
    for p in ps:
        p.join()
    print time.clock() - start_time, "seconds"
    print len(d)