This article collects typical usage examples of the multiprocessing.Manager.get method in Python. If you are wondering how Manager.get is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the class it belongs to, multiprocessing.Manager.
The following shows 15 code examples of the Manager.get method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: run_finetuning
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def run_finetuning(self, epochs):
"""
Run the train and test error evaluation and the backpropagation using conjugate gradient to optimize the
weights in order to make the DBN perform better.
@param epochs: The number of epochs to run the finetuning for.
"""
self.train_error = {}
self.test_error = {}
dbn.save_dbn(self.weight_matrices_added_biases, self.train_error, self.test_error, self.fout())
for epoch in range(epochs):
self.fout('Backprop: Epoch ' + str(epoch + 1))
result_queue = Manager().Queue()
w_queue = Manager().Queue()
# Start backprop process
proc = Process(target=self.backpropagation, args=(epoch, self.weight_matrices_added_biases, w_queue,))
proc.start()
# Start error eval processes
evaluations = []
evaluations.append((
self.weight_matrices_added_biases, epoch, True, data_processing.get_batch_list(training=True),
result_queue,
self.binary_output))
evaluations.append((
self.weight_matrices_added_biases, epoch, False, data_processing.get_batch_list(training=False),
result_queue, self.binary_output))
p = Pool(cpu_count())
p.map_async(error, evaluations)
p.close()
# Join multiple processes
p.join()
proc.join()
self.weight_matrices_added_biases = w_queue.get()
# Print and save error estimates
for e in range(2):
out = result_queue.get()
if out[0]:
self.train_error[epoch] = out[2]
self.fout(out[1])
else:
self.test_error[epoch] = out[2]
self.fout(out[1])
# Save DBN
dbn.save_dbn(self.weight_matrices_added_biases, self.train_error, self.test_error, self.fout())
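The core pattern in Example 1 is handing a Manager().Queue() to a worker Process and retrieving its result with get() after join(). Below is a minimal, self-contained sketch of just that pattern; the worker function and its fake "weight update" are made-up stand-ins, not the DBN code above.
from multiprocessing import Manager, Process

def worker(epoch, weights, out_queue):
    # Stand-in for backpropagation: pretend to update the weights, then report back.
    out_queue.put([w * 0.9 for w in weights])

if __name__ == '__main__':
    weights = [0.5, 1.5, 2.5]
    w_queue = Manager().Queue()
    proc = Process(target=worker, args=(0, weights, w_queue))
    proc.start()
    proc.join()
    weights = w_queue.get()  # blocking get of the worker's result
    print(weights)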
Example 2: _feed_helper
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def _feed_helper(self, feed_name, feed_config):
feed_provider_name = feed_config['name']
feed_func = FEEDS_MAPPING.get(feed_provider_name)
LOG.debug("Geting feeds from '%s'", feed_name)
if not feed_func:
LOG.warning('Unknown feed provider %s', feed_provider_name)
return
feed_result = Manager().dict()
feed_func_timeout = feed_config.get('timeout') or feed_func.get('timeout')
p = Process(target=feed_func.get('func'), args=(feed_config, feed_result))
p.start()
p.join(feed_func_timeout)
if p.is_alive():
p.terminate()
status = 255
else:
status = p.exitcode
if status:
LOG.error("Fail to execute feed provider '%s' for '%s', exitcode '%d'", feed_provider_name, feed_name, status)
return None
else:
return feed_result.get('sensor_data', None)
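Example 2 combines a Manager().dict() with a bounded join() so a hung feed provider can be terminated, then reads the result with dict.get() and a default. A minimal sketch of that shape follows; fetch_feed and its config are hypothetical stand-ins for the real FEEDS_MAPPING entries.
from multiprocessing import Manager, Process

def fetch_feed(config, result):
    # Stand-in for a real feed provider: store its data under a known key.
    result['sensor_data'] = {'source': config['name'], 'items': [1, 2, 3]}

if __name__ == '__main__':
    feed_result = Manager().dict()
    p = Process(target=fetch_feed, args=({'name': 'demo'}, feed_result))
    p.start()
    p.join(5)                 # wait at most 5 seconds for the provider
    if p.is_alive():
        p.terminate()         # treat a hung provider as a failure
        data = None
    else:
        data = feed_result.get('sensor_data', None)
    print(data)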
Example 3: upload_test
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def upload_test(self):
start_time = time.time()
q = Manager().Queue()
plist = []
for i in range(self.upload_user):
proc = Process(target=self.upload_one_user, args=(q,))
plist.append(proc)
for proc in plist:
proc.start()
for proc in plist:
proc.join()
while True:
if q.empty():
break
else:
if q.get() == 0:
self.upload_success += 1
else:
self.upload_fail += 1
use_time = time.time() - start_time
table = PrettyTable(["key", "value"])
table.add_row(["One File Size (M)", self.upload_file_size])
table.add_row(["All File Size (M)", self.upload_file_size * self.upload_number * self.upload_user])
table.add_row(["Process Count(user)", self.upload_user])
table.add_row(["Upload Count", self.upload_number * self.upload_user])
table.add_row(["Interval Time(s)", self.upload_time])
table.add_row(["Success count", self.upload_success])
table.add_row(["Fail count", self.upload_fail])
table.add_row(["Success ratio (%)",
(round(self.upload_success / float(self.upload_number * self.upload_user), 4) * 100)])
table.add_row(["Use time (s)", "%.2f" % use_time])
print table
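Draining the queue with empty()/get() as in Example 3 is safe only because every producer has already been joined. A Python 3 sketch of the same drain using get_nowait() and the queue.Empty exception, which does not depend on empty() being accurate, is shown below; upload_one_user is reduced to a stub.
import queue  # only needed for the Empty exception
from multiprocessing import Manager, Process

def upload_one_user(q):
    q.put(0)  # 0 = success, anything else = failure (illustrative)

if __name__ == '__main__':
    q = Manager().Queue()
    procs = [Process(target=upload_one_user, args=(q,)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    success = fail = 0
    while True:
        try:
            code = q.get_nowait()  # drain without blocking
        except queue.Empty:
            break
        if code == 0:
            success += 1
        else:
            fail += 1
    print(success, fail)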
Example 4: __init__
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
class InMemoryStorage:
def __init__(self):
self.storage = Manager().dict()
def keys(self):
return self.storage.keys()
def set_val(self, key, val):
self.storage[key] = val
def get_val(self, key):
return self.storage[key]
def append_val(self, key, val):
# self.storage.setdefault(key, []).append(val)  # does not work with Manager()
# t=self.storage.setdefault(key, []) # !!!
# t.append(val)
# self.storage[key]=t
if key in self.storage:
self.storage[key]+=[val]
else:
self.storage[key]=[val]
def get_list(self, key):
return self.storage.get(key, [])
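The reassignment in append_val matters because indexing a Manager().dict() returns a plain copy: mutating that copy in place is never written back to the manager, while assigning to the key is. A short demonstration, assuming the standard DictProxy behaviour:
from multiprocessing import Manager

if __name__ == '__main__':
    storage = Manager().dict()
    storage['k'] = []
    storage['k'].append(1)             # mutates a local copy; the proxy is unchanged
    print(storage['k'])                # prints []
    storage['k'] = storage['k'] + [1]  # reassignment goes through the proxy
    print(storage['k'])                # prints [1]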
Example 5: behavior_stat_to_db
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def behavior_stat_to_db(param=Const.TRAIN, other=Const.OTHER, process_num=12):
global left_time
global X, user_item_pair
print 'Counting behavior......... '
x_data = Data(other.PROCESSED_DATA_PATH)
X = x_data.query(select_table_sql(param.X_TABLE), index=['user_id', 'item_id'])
X = X['behavior_type']
set_index = list(set(X.index))
user_item_pair = pd.DataFrame(set_index, columns=['user_id', 'item_id'])
print user_item_pair
user_item_len = len(user_item_pair)
test_num = user_item_len
core_num = process_num
# queue
queue = Manager().Queue()
q_result = Manager().Queue()
# split data to different processes
interval = test_num/core_num
left_time = interval
task_list = [i*interval for i in range(core_num)]
task_list.append(test_num)
####################################
start_CPU = time.clock()
start_time = time.time()
p = Pool(core_num+1)
for i in range(core_num):
p.apply_async(m_ui_pc, args=(task_list[i], task_list[i+1], queue))
p.apply_async(read_queue, args=(queue, q_result, core_num))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
value = q_result.get(True)
final = value.sort_index()
print final
end_CPU = time.clock()
end_time = time.time()
print '%f CPU second' % (end_CPU - start_CPU)
print '%f real second' % (end_time - start_time)
print
temp = Data(other.PROCESSED_DATA_PATH)
final.iloc[0:test_num].to_sql(param.X_STATI_BEHAVIOR_TABLE, temp.conn, if_exists='replace')
show = pd.read_sql_query(select_table_sql(param.X_STATI_BEHAVIOR_TABLE), temp.conn, index_col='index')
print show
x_data.close()
temp.close()
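Example 5 uses Manager().Queue() rather than a plain multiprocessing.Queue because a plain Queue cannot be pickled into Pool.apply_async arguments, whereas a manager queue proxy can. A stripped-down sketch of the same producer/collector layout follows; produce and collect are hypothetical stand-ins for m_ui_pc and read_queue.
from multiprocessing import Manager, Pool

def produce(lo, hi, q):
    q.put(sum(range(lo, hi)))   # stand-in for the per-slice computation

def collect(q, out_q, n_producers):
    total = 0
    for _ in range(n_producers):
        total += q.get()        # blocking get, one result per producer
    out_q.put(total)

if __name__ == '__main__':
    queue, q_result = Manager().Queue(), Manager().Queue()
    task_list, core_num = [0, 25, 50, 75, 100], 4
    p = Pool(core_num + 1)      # one extra slot for the collector
    for i in range(core_num):
        p.apply_async(produce, args=(task_list[i], task_list[i + 1], queue))
    p.apply_async(collect, args=(queue, q_result, core_num))
    p.close()
    p.join()
    print(q_result.get(True))   # same blocking get(True) as the example above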
Example 6: do_git_srpm_import
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def do_git_srpm_import(opts, src_filepath, task, tmp_dir):
# should be run in the forked process, see:
# - https://bugzilla.redhat.com/show_bug.cgi?id=1253335
# - https://github.com/gitpython-developers/GitPython/issues/304
result_dict = Manager().dict()
proc = Process(target=actual_do_git_srpm_import, args=(opts, src_filepath, task, tmp_dir, result_dict))
proc.start()
proc.join()
if result_dict.get("hash") is None:
raise PackageImportException("Failed to import the source rpm: {}".format(src_filepath))
return str(result_dict["hash"])
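Example 6 isolates the import in a forked Process (because of the linked GitPython issues) and communicates back through a Manager().dict(); result_dict.get("hash") returning None is the failure signal. A minimal sketch of the shape, with the actual import replaced by a stub:
from multiprocessing import Manager, Process

def do_import(src, result):
    # The real code would run the git/srpm import here; record a hash on success.
    result['hash'] = 'abc123'

if __name__ == '__main__':
    result_dict = Manager().dict()
    proc = Process(target=do_import, args=('pkg.src.rpm', result_dict))
    proc.start()
    proc.join()
    if result_dict.get('hash') is None:  # .get() returns None if the child never set it
        raise RuntimeError('import failed')
    print(result_dict['hash'])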
Example 7: run
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def run(self, show_errors):
tests_path = "%s/%s" % (self.test_folder, self.test_subfolder)
test_total = 0
test_fails = 0
print("Running test suite: '%s'" % self._name)
print("==================================================")
q = Manager().Queue()
p_list = []
pool = Pool(processes=num_cores())
start_time = time.time()
# Loop through test cases (files)
for test_name in os.listdir(tests_path):
if not test_name.startswith("J"):
continue
test_path = os.path.join(tests_path, test_name)
test_total += 1
# Run joosc (i.e., run the test).
p = pool.apply_async(func=run_joosc, args=(self._joosc_options, test_path, q, ))
p_list.append(p)
for p in p_list:
ret = q.get(5)
if ret[0] < 0:
print("#\nUNEXPECTED ERROR: %s" % os.path.split(ret[1])[1])
elif self.is_correct_result(ret[0], os.path.split(ret[1])[1]) == False:
test_fails += 1
print("#\nTEST FAIL %d: %s" % (test_fails, os.path.split(ret[1])[1]))
if self.verbose:
print("OUTPUT:")
print("==================================================")
print(ret[2])
print("==================================================")
else:
sys.stdout.write('.')
sys.stdout.flush()
# Done tests
print("\n==================================================")
print("Test run successful. %s seconds" % (time.time() - start_time))
print("{} test(s) ran. {} test(s) failed.".format(test_total, test_fails))
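One detail worth noting in Example 7: the first positional argument of Queue.get() is block, not a timeout, so q.get(5) is just a blocking get (5 is truthy). To wait at most a fixed number of seconds you would write q.get(True, 5) or q.get(timeout=5), which raises queue.Empty if nothing arrives. A small sketch of the difference:
import queue
from multiprocessing import Manager

if __name__ == '__main__':
    q = Manager().Queue()
    q.put(('ok', 'J1_test'))
    print(q.get(5))          # equivalent to q.get(block=True); no timeout is applied
    try:
        q.get(timeout=1)     # a real timeout on the now-empty queue
    except queue.Empty:
        print('no result within 1 second')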
Example 8: remote_call
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def remote_call(self, rpc_code, *args, **kwargs):
"""
Make synchronous remote procedure calls on the Orchestrator.
:param rpc_code: RPC code.
:type rpc_code: int
:returns: Depends on the call.
:rtype: \\*
"""
# Create the response queue.
try:
response_queue = Manager().Queue()
# If the above fails we can assume the parent process is dead.
except:
exit(1)
# Send the RPC message.
self.send_msg(message_type = MessageType.MSG_TYPE_RPC,
message_code = rpc_code,
message_info = (response_queue, args, kwargs),
priority = MessagePriority.MSG_PRIORITY_HIGH)
# Get the response.
try:
raw_response = response_queue.get() # blocking call
# If the above fails we can assume the parent process is dead.
except:
exit(1)
# Return the response, or raise an exception on error.
success, response = raw_response
if not success:
exc_type, exc_value, tb_list = response
try:
sys.stderr.writelines(
format_exception_only(exc_type, exc_value) )
sys.stderr.writelines(
format_list(tb_list) )
except Exception:
pass
raise response[0], response[1]
return response
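In Example 8 the Manager().Queue() acts as a per-call response channel: queue proxies are picklable, so one can be embedded in the RPC message and the Orchestrator can put the reply on it. (Note also that raise response[0], response[1] is Python 2 syntax; Python 3 would re-raise the exception object directly.) Below is a sketch of that request/response shape with a toy handler; orchestrator and the message layout are illustrative, not the original messaging layer.
from multiprocessing import Manager, Process

def orchestrator(inbox):
    # Serve one RPC: the message carries (response_queue, args, kwargs).
    response_queue, args, kwargs = inbox.get()
    response_queue.put((True, sum(args)))

if __name__ == '__main__':
    manager = Manager()
    inbox = manager.Queue()
    server = Process(target=orchestrator, args=(inbox,))
    server.start()
    response_queue = manager.Queue()           # per-call response channel
    inbox.put((response_queue, (1, 2, 3), {}))
    success, response = response_queue.get()   # blocking call, like remote_call
    server.join()
    print(success, response)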
Example 9: upload_begin
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def upload_begin(self):
plist = []
q = Manager().Queue()
with open(self.list_path, 'r') as fp:
for i in fp:
if not i:
break
md5_crc32 = i.strip()[:41]
if md5_crc32 not in self.tmp_list and len(md5_crc32) == 41:
self.tmp_list.append(md5_crc32)
self.upload_num += 1
print self.upload_num
for md5_crc32_list in self.chunks(self.tmp_list, self.work_count):
proc = Process(target=self.upload_file, args=(q, md5_crc32_list,))
plist.append(proc)
for proc in plist:
proc.start()
for proc in plist:
proc.join()
while True:
if q.empty():
break
else:
r = q.get()
if r == 0:
self.success += 1
elif r == 1:
self.fail += 1
elif r == 2:
self.download_fail += 1
else:
pass
use_time = time.time() - self.start_time
table = PrettyTable(["key", "value"])
table.add_row(["Upload Count", len(set(self.tmp_list))])
table.add_row(["Success count", self.success])
table.add_row(["Fail count", self.fail])
table.add_row(["Download Fail", self.download_fail])
table.add_row(["Use time (s)", "%.2f" % use_time])
print table
Example 10: startServer
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
def startServer(host, port, options):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.bind((host, port))
s.listen(0)
queue = Manager().Queue()
while True:
print "main: waiting for connection"
conn, addr = s.accept()
print 'main: Connected by', addr
data = conn.recv(1024)
print 'received port request'
p = Process(target = serverNewClient, args = (queue, options, ))
p.start()
while queue.empty():
time.sleep(0.05)
print "queue is still empty"
port = queue.get()
conn.sendall(str(port) + '\r\n')
print "assigned port %d to new client" % port
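Example 10 polls queue.empty() in a sleep loop before calling get(); a blocking queue.get() alone would do the same job without the busy-wait. A minimal sketch, with the child reduced to a stub that reports a hypothetical port number:
from multiprocessing import Manager, Process

def serve_new_client(q):
    port = 56001          # a real server would bind a socket and report its port
    q.put(port)

if __name__ == '__main__':
    queue = Manager().Queue()
    p = Process(target=serve_new_client, args=(queue,))
    p.start()
    port = queue.get()    # blocks until the child has reported; no polling needed
    print("assigned port %d to new client" % port)
    p.join()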
Example 11: TestVariant
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
class TestVariant(object):
def __init__(self, name, compile_flags=[], variant_not_tags=[]):
self.name = name
self.compile_flags = \
['-WERExceptionSupport', '-ExtendedErrorStackForTestHost',
'-BaselineMode'] + compile_flags
self._compile_flags_has_expansion = self._has_expansion(compile_flags)
self.tags = tags.copy()
self.not_tags = not_tags.union(variant_not_tags).union(
['{}_{}'.format(x, name) for x in ('fails','exclude')])
self.msg_queue = Manager().Queue() # messages from multi processes
self.test_result = TestResult()
self.test_count = 0
self._print_lines = [] # _print lines buffer
self._last_len = 0
@staticmethod
def _has_expansion(flags):
return any(re.match('.*\${.*}', f) for f in flags)
@staticmethod
def _expand(flag, test):
return re.sub('\${id}', str(test.id), flag)
def _expand_compile_flags(self, test):
if self._compile_flags_has_expansion:
return [self._expand(flag, test) for flag in self.compile_flags]
return self.compile_flags
# check if this test variant should run a given test
def _should_test(self, test):
tags = split_tags(test.get('tags'))
if not tags.isdisjoint(self.not_tags):
return False
if self.tags and not self.tags.issubset(tags):
return False
if not_compile_flags: # exclude unsupported compile-flags if any
flags = test.get('compile-flags')
if flags and \
not not_compile_flags.isdisjoint(flags.lower().split()):
return False
return True
# print output from multi-process run, to be sent with result message
def _print(self, line):
self._print_lines.append(str(line))
# queue a test result from multi-process runs
def _log_result(self, test, fail):
output = '\n'.join(self._print_lines) # collect buffered _print output
self._print_lines = []
self.msg_queue.put((test.filename, fail, test.elapsed_time, output))
# (on main process) process one queued message
def _process_msg(self, msg):
filename, fail, elapsed_time, output = msg
self.test_result.log(filename, fail=fail)
line = '[{}/{} {:4.2f}] {} -> {}'.format(
self.test_result.total_count(),
self.test_count,
elapsed_time,
'Failed' if fail else 'Passed',
self._short_name(filename))
padding = self._last_len - len(line)
print(line + ' ' * padding, end='\n' if fail else '\r')
log_message(line)
self._last_len = len(line) if not fail else 0
if len(output) > 0:
print_and_log(output)
# get a shorter test file path for display only
def _short_name(self, filename):
folder = os.path.basename(os.path.dirname(filename))
return os.path.join(folder, os.path.basename(filename))
# (on main process) wait and process one queued message
def _process_one_msg(self):
self._process_msg(self.msg_queue.get())
# log a failed test with details
def _show_failed(self, test, flags, exit_code, output,
expected_output=None, timedout=False):
if timedout:
if warn_on_timeout:
self._print('WARNING: Test timed out!')
else:
self._print('ERROR: Test timed out!')
self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
if expected_output == None or timedout:
self._print("\nOutput:")
self._print("----------------------------")
self._print(output.decode('utf-8'))
self._print("----------------------------")
else:
lst_output = output.split(b'\n')
lst_expected = expected_output.split(b'\n')
ln = min(len(lst_output), len(lst_expected))
for i in range(0, ln):
if lst_output[i] != lst_expected[i]:
#......... part of the code omitted here .........
Example 12: while
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
#######################################
# Playback
#######################################
# Index of the utterance currently being played
nowplaying = -1
# Process of the aplay command currently playing
playing = None
# Read the speech aloud while checking synthesis progress
while ( not r.ready() ) or ( nowplaying != len(arg) ) or ( playing is not None ):
time.sleep(0.5)
# Check the queue for reports that speech synthesis has finished.
for _ in range(queue.qsize()):
compiled_index = queue.get()
l[compiled_index] = 1
# Try to play if anything is ready
if nowplaying < len(arg):
if playing is None:
if l[nowplaying + 1] == 1:
# When several WAV files are ready, play them together
listindex = list()
while l[nowplaying + 1] == 1:
nowplaying += 1
listindex.append(nowplaying)
_print( "DEBUG: speaking! [%s]" % str(listindex) )
playing = play_wav(listindex)
elif l[nowplaying + 1] == 0:
_print( "DEBUG: waiting for speech synthesis to finish!" )
else:
Example 13: MPResult
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
class MPResult(object):
"""
Sync result between processes
"""
MATCH = {} # id -> instance
def __init__(self, result):
from multiprocessing import Manager
# Test result instance
self.result = result
# Result queue
self.queue = Manager().Queue()
def __getattr__(self, item):
return getattr(self.result, item)
@staticmethod
def pack_result_storage(storage):
"""
Pack result from storage
"""
return [(get_master_id(s[0]), s[1]) for s in storage]
def unpack_result_storage(self, storage):
"""
Unpack result from storage
"""
unpack_storage = []
for master_id, message in storage:
unpack_storage.append(
(self.MATCH[master_id], message),
)
return unpack_storage
def match(self, suite):
"""
Match id of master process to instance
"""
self.MATCH[get_suite_master_id(suite)] = suite
def match(s):
for o in s:
if isinstance(o, BaseSuite):
self.MATCH[get_suite_master_id(o)] = o
match(o)
else:
self.MATCH[get_case_master_id(o)] = o
match(suite)
def save_result(self):
"""
Save result in queue
"""
self.queue.put(
(
(
self.pack_result_storage(self.result.errors),
self.pack_result_storage(self.result.skipped),
self.pack_result_storage(self.result.failures),
),
self.result.testsRun,
),
)
def make_result(self):
"""
Merge result from queue to result instance
"""
while not self.queue.empty():
(errors, skipped, failures), run_tests = self.queue.get()
self.result.errors.extend(self.unpack_result_storage(errors))
self.result.skipped.extend(self.unpack_result_storage(skipped))
self.result.failures.extend(self.unpack_result_storage(failures))
self.result.testsRun += run_tests
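The flow in Example 13 is that each worker process calls save_result() to push its packed result tuple onto the Manager().Queue(), and the main process drains the queue in make_result() once the workers are done. A toy version of just that merge step, with the packed payload simplified to (failures, tests_run):
from multiprocessing import Manager, Process

def run_tests_in_worker(q):
    # Each worker pushes its packed counters; here just (failures, tests_run).
    q.put((['test_x failed'], 3))

if __name__ == '__main__':
    q = Manager().Queue()
    workers = [Process(target=run_tests_in_worker, args=(q,)) for _ in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    failures, tests_run = [], 0
    while not q.empty():   # safe here: every producer has already exited
        f, n = q.get()
        failures.extend(f)
        tests_run += n
    print(failures, tests_run)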
Example 14: AmazonSpider
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
'''
main, start here
'''
now = time.time()
amazon = AmazonSpider()
html = amazon.getHTML(amazon.url)
time.sleep(5)
amazon.parseBase(html)
pool = Pool(5)
q = Manager().Queue()
for color_id in amazon.color_list:
pool.apply_async(start, args = (color_id, amazon.size_list, amazon.url, q, ))
pool.close()
pool.join()
while q.qsize() != 0:
print q.get(False)
end = time.time()
print (end-now)
amazon.driver.quit()
# for color_id in amazon.color_list:
# xpath = "//li[@id='"+color_id+"']"
# amazon.driver.find_element_by_xpath(xpath).click()
# time.sleep(2)
# for size in amazon.size_list:
# if size == 'native_size_name_-1':
# continue
# if 'U' in amazon.driver.find_element_by_xpath("//option[@id='"+size+"']").get_attribute('class'):
# continue
# else:
# amazon.driver.find_element_by_xpath("//option[@id='"+size+"']").click()
# time.sleep(2)
Example 15: TestVariant
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import get [as alias]
class TestVariant(object):
def __init__(self, name, compile_flags=[]):
self.name = name
self.compile_flags = \
['-WERExceptionSupport', '-ExtendedErrorStackForTestHost',
'-BaselineMode'] + compile_flags
self.tags = tags.copy()
self.not_tags = not_tags.union(
['{}_{}'.format(x, name) for x in ('fails','exclude')])
self.msg_queue = Manager().Queue() # messages from multi processes
self.test_result = TestResult()
self._print_lines = [] # _print lines buffer
self._last_len = 0
# check if this test variant should run a given test
def _should_test(self, test):
tags = split_tags(test.get('tags'))
if not tags.isdisjoint(self.not_tags):
return False
if self.tags and not self.tags.issubset(tags):
return False
if not_compile_flags: # exclude unsupported compile-flags if any
flags = test.get('compile-flags')
if flags and \
not not_compile_flags.isdisjoint(flags.lower().split()):
return False
return True
# print output from multi-process run, to be sent with result message
def _print(self, line):
self._print_lines.append(str(line))
# queue a test result from multi-process runs
def _log_result(self, test, fail):
output = '\n'.join(self._print_lines) # collect buffered _print output
self._print_lines = []
self.msg_queue.put((test.filename, fail, test.elapsed_time, output))
# (on main process) process one queued message
def _process_msg(self, msg):
filename, fail, elapsed_time, output = msg
self.test_result.log(filename, fail=fail)
line = '[{}/{} {:4.2f}] {} -> {}'.format(
self.test_result.total_count(),
self.test_count,
elapsed_time,
'Failed' if fail else 'Passed',
self._short_name(filename))
padding = self._last_len - len(line)
print(line + ' ' * padding, end='\n' if fail else '\r')
log_message(line)
self._last_len = len(line) if not fail else 0
if len(output) > 0:
print_and_log(output)
# get a shorter test file path for display only
def _short_name(self, filename):
folder = os.path.basename(os.path.dirname(filename))
return os.path.join(folder, os.path.basename(filename))
# (on main process) wait and process one queued message
def _process_one_msg(self):
self._process_msg(self.msg_queue.get())
# log a failed test with details
def _show_failed(self, test, flags, exit_code, output,
expected_output=None, timedout=False):
if timedout:
self._print('ERROR: Test timed out!')
self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
if expected_output == None or timedout:
self._print("\nOutput:")
self._print("----------------------------")
self._print(output)
self._print("----------------------------")
else:
lst_output = output.split(b'\n')
lst_expected = expected_output.split(b'\n')
ln = min(len(lst_output), len(lst_expected))
for i in range(0, ln):
if lst_output[i] != lst_expected[i]:
self._print("Output: (at line " + str(i) + ")")
self._print("----------------------------")
self._print(lst_output[i])
self._print("----------------------------")
self._print("Expected Output:")
self._print("----------------------------")
self._print(lst_expected[i])
self._print("----------------------------")
break
self._print("exit code: {}".format(exit_code))
self._log_result(test, fail=True)
# temp: try find real file name on hard drive if case mismatch
def _check_file(self, folder, filename):
path = os.path.join(folder, filename)
if os.path.isfile(path):
return path # file exists on disk
#......... part of the code omitted here .........