This article collects typical usage examples of the Python method multiprocessing.Manager.keys. If you have been wondering what Manager.keys is for, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, multiprocessing.Manager.
The following presents 6 code examples of the Manager.keys method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
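Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: a Manager dict proxy is handed to worker processes, and the parent iterates its keys() once the workers finish. The worker function and key names here are illustrative only, not taken from any example below.

from multiprocessing import Manager, Process

def worker(shared, idx):
    # Each worker records its result under its own key.
    shared["worker-%d" % idx] = idx * idx

if __name__ == "__main__":
    manager = Manager()
    shared = manager.dict()   # proxy to a dict living in the manager process
    procs = [Process(target=worker, args=(shared, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    for key in shared.keys():  # Manager dict .keys(), as used in the examples below
        print(key, shared[key])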
Example 1: _test_bmuf_distributed
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import keys [as alias]
# Assumes test-module imports: tempfile, shutil, numpy as np,
# and from multiprocessing import Manager, Process.
def _test_bmuf_distributed(self, cpu_device=False, nesterov=False):
    processes = []
    filestore_dir = tempfile.mkdtemp()
    results = Manager().dict()
    for idx in range(0, 2):
        process = Process(
            target=bmuf_process,
            args=(filestore_dir, idx, results, cpu_device, nesterov)
        )
        processes.append(process)
        process.start()
    while len(processes) > 0:
        process = processes.pop()
        process.join()
    shutil.rmtree(filestore_dir)
    if len(results) == 0:
        return
    w_0 = results[0]['w_0']
    w_1 = results[0]['w_1']
    b_0 = results[0]['b_0']
    b_1 = results[0]['b_1']
    # Check parameters are in sync across both processes.
    np.testing.assert_equal(w_0, w_1)
    np.testing.assert_equal(w_0, results[1]['w_0'])
    np.testing.assert_equal(w_0, results[1]['w_1'])
    np.testing.assert_equal(b_0, b_1)
    np.testing.assert_equal(b_0, results[1]['b_0'])
    np.testing.assert_equal(b_0, results[1]['b_1'])
    w_g_ = results[0]['w_g_']
    b_g_ = results[0]['b_g_']
    g_b = (results[0]['b_0_'] + results[1]['b_0_'] + results[0]['b_1_'] +
           results[1]['b_1_']) / 4 - b_g_
    g_w = (results[0]['w_0_'] + results[1]['w_0_'] + results[0]['w_1_'] +
           results[1]['w_1_']) / 4 - w_g_
    v_b_ = results[0]['v_b_']
    v_b = results[0]['v_b']
    v_w_ = results[0]['v_w_']
    v_w = results[0]['v_w']
    for pid in results.keys():
        for k in results[pid].keys():
            if k.startswith("sync_num"):
                self.assertEqual(2603, results[pid][k])
    # Check block gradients are correct.
    np.testing.assert_almost_equal(v_b, 0.75 * v_b_ + g_b)
    np.testing.assert_almost_equal(v_w, 0.75 * v_w_ + g_w)
    # Check params update step.
    if nesterov:
        np.testing.assert_equal(w_0, w_g_ + v_w - 0.75 * (v_w - v_w_))
        np.testing.assert_equal(b_0, b_g_ + v_b - 0.75 * (v_b - v_b_))
    else:
        np.testing.assert_equal(w_0, w_g_ + v_w)
        np.testing.assert_equal(b_0, b_g_ + v_b)
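The assertions above encode a BMUF (blockwise model update filtering) step: the block gradient g is the average of the workers' local parameters minus the previous global parameters, the block momentum is v = 0.75 * v_prev + g, and the global parameters advance by v (with a correction when Nesterov is enabled). A minimal numpy sketch of that update, using illustrative values rather than anything from the test:

import numpy as np

def bmuf_update(w_global, local_params, v_prev, block_momentum=0.75, nesterov=False):
    # Block gradient: average of the workers' local params minus current global params.
    g = np.mean(local_params, axis=0) - w_global
    # Block momentum update, matching the assert_almost_equal checks above.
    v = block_momentum * v_prev + g
    if nesterov:
        # Nesterov variant: back off part of the momentum change.
        return w_global + v - block_momentum * (v - v_prev), v
    return w_global + v, v

w = np.zeros(3)
locals_ = [np.ones(3), 2 * np.ones(3)]   # illustrative worker results
w, v = bmuf_update(w, locals_, v_prev=np.zeros(3))
print(w)   # [1.5 1.5 1.5]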
Example 2: __init__
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import keys [as alias]
class InMemoryStorage:
    def __init__(self):
        self.storage = Manager().dict()

    def keys(self):
        return self.storage.keys()

    def set_val(self, key, val):
        self.storage[key] = val

    def get_val(self, key):
        return self.storage[key]

    def append_val(self, key, val):
        # self.storage.setdefault(key, []).append(val)  # does not work with Manager():
        # t = self.storage.setdefault(key, [])  # !!!
        # t.append(val)
        # self.storage[key] = t
        # The proxy hands back a copy of the list, so in-place appends are lost;
        # the value must be reassigned through the proxy instead.
        if key in self.storage:
            self.storage[key] += [val]
        else:
            self.storage[key] = [val]

    def get_list(self, key):
        return self.storage.get(key, [])
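The commented-out lines in append_val point at a real pitfall: a Manager dict proxy returns copies of its values, so mutating a retrieved list in place never reaches the manager process. A minimal sketch demonstrating the difference (the key name is illustrative):

from multiprocessing import Manager

if __name__ == "__main__":
    manager = Manager()
    d = manager.dict()
    d["k"] = []
    d["k"].append(1)       # mutates a local copy fetched from the manager
    print(d["k"])          # [] -- the append never reached the shared dict
    d["k"] = d["k"] + [1]  # read-modify-write: reassign through the proxy
    print(d["k"])          # [1]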
Example 3: __init__
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import keys [as alias]
# Assumes module-level imports: itertools and from couchbase.bucket import Bucket.
class Datefacet:
    def __init__(self):
        from couchbase.n1ql import N1QLQuery
        from multiprocessing import Manager, Lock
        self.cb = Bucket('couchbase://172.23.123.38/bucket-1')
        self.row_iter = self.cb.n1ql_query(N1QLQuery('select meta().id from `bucket-1`'))
        self.lock = Lock()
        self.dsize = 1000000
        self.dateiter = Manager().dict({key: None for key in ['2013-10-17', '2013-11-17', '2014-02-09', '2015-11-26']})
        self.dateiter['2013-10-17'] = .65 * self.dsize
        self.dateiter['2013-11-17'] = .2 * self.dsize
        self.dateiter['2014-02-09'] = .1 * self.dsize
        self.dateiter['2015-11-26'] = .05 * self.dsize
        self.cycledates = itertools.cycle(self.dateiter.keys())

    def createdateset(self):
        for resultid in self.row_iter:
            '''
            Day 1 should have approximately 65% of the documents
            Day 2 should have approximately 20% of the documents
            Day 3 should have approximately 10% of the documents
            Day 4 should have approximately 5% of the documents
            format like this 2010-07-27
            '''
            val = self.cb.get(resultid["id"]).value
            self.lock.acquire()
            tmpdate = next(self.cycledates)  # was self.cycledates.next(), Python 2 only
            val["date"] = tmpdate
            self.cb.set(resultid["id"], val)
            '''
            Critical section
            '''
            self.dateiter[tmpdate] -= 1
            if self.dateiter[tmpdate] == 0:
                self.dateiter.pop(tmpdate, None)
                self.cycledates = itertools.cycle(self.dateiter.keys())
            self.lock.release()
        print(self.dateiter)

    def run(self):
        import concurrent.futures
        with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
            executor.submit(self.createdateset)  # submit the callable, not its result
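One fix worth calling out in run(): executor.submit expects a callable plus its arguments, so the original submit(self.createdateset()) ran the method synchronously in the parent and submitted its return value (None). The same fix applies in Example 4's run() below. A short sketch of the intended pattern, with an illustrative worker function:

import concurrent.futures

def work(n):
    return n * n

if __name__ == "__main__":
    with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
        future = executor.submit(work, 7)  # pass the callable and args separately
        print(future.result())             # 49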
Example 4: __init__
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import keys [as alias]
# Assumes module-level imports: itertools, concurrent.futures,
# and from couchbase.bucket import Bucket.
class Datefacet:
    def __init__(self):
        from multiprocessing import Manager, Lock
        self.cb = Bucket('couchbase://172.23.99.211/bucket-1', password="password")
        self.lock = Lock()
        self.dsize = 1000000
        self.dateiter = Manager().dict({key: None for key in ['2013-10-17', '2013-11-17', '2014-02-09', '2015-11-26']})
        self.dateiter['2013-10-17'] = .65 * self.dsize
        self.dateiter['2013-11-17'] = .2 * self.dsize
        self.dateiter['2014-02-09'] = .1 * self.dsize
        self.dateiter['2015-11-26'] = .05 * self.dsize
        self.cycledates = itertools.cycle(self.dateiter.keys())

    def createdateset(self):
        for resultid in range(0, self.dsize):
            key = hex(resultid)[2:]
            '''
            Day 1 should have approximately 65% of the documents
            Day 2 should have approximately 20% of the documents
            Day 3 should have approximately 10% of the documents
            Day 4 should have approximately 5% of the documents
            format like this 2010-07-27
            '''
            val = self.cb.get(key).value
            self.lock.acquire()
            tmpdate = next(self.cycledates)
            val["date"] = tmpdate
            self.cb.set(key, val)
            self.dateiter[tmpdate] -= 1
            if self.dateiter[tmpdate] == 0:
                self.dateiter.pop(tmpdate, None)
                self.cycledates = itertools.cycle(self.dateiter.keys())
            self.lock.release()

    def run(self):
        with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
            executor.submit(self.createdateset)  # submit the callable, not its result
Example 5: search
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import keys [as alias]
# Assumes module-level imports: os, pickle, nltk,
# and from multiprocessing import Manager, JoinableQueue.
def search(output_dict, rules_file):
    rules = [rule.split(' | ') for rule in pickle.load(open(rules_file, 'rb'))]
    file_list = JoinableQueue()
    word_dict = Manager().dict()
    for root, subFolders, files in os.walk(os.path.join(os.path.dirname(__file__), 'corpus', 'tagged')):
        for current_file in files:
            if current_file.endswith(".pickle"):
                file_list.put(os.path.join(root, current_file))
                #break  # TODO remove (only for testing with one file)
    file_count = file_list.qsize()

    def worker():
        def rule_parser(tagged_data):
            parser = nltk.RegexpParser('''
                NP: {<NN|NNS|NNP|NNPS|NE>}
                NPs: {<NP> (<,|CC> <NP>)+}
                ''')
            return parser.parse(tagged_data)

        def get_nltk_word(data):
            # Return the raw token whether data is a Tree over (word, tag)
            # tuples, a Tree over strings, or a plain (word, tag) pair.
            if isinstance(data, nltk.tree.Tree):
                if isinstance(data[0], tuple):
                    return data[0][0]
                else:
                    return data[0]
            else:
                return data[0]

        def add_to_dict(hypernym, hyponym):
            if not hyponym in word_dict.keys():
                old_list = word_dict.get(hypernym)
                if not old_list:
                    old_list = [hyponym]
                else:
                    if not hyponym in old_list:
                        old_list.append(hyponym)
                # Reassign through the Manager proxy; in-place mutation is lost.
                word_dict[hypernym] = old_list

        def apply_rules(data, position):
            for rule in rules:
                # search right side
                if rule[0] == 'HYPERNYM':
                    possible_hypernym = get_nltk_word(data[position])
                    error = False
                    word_count = 1
                    for word in rule[1:-1]:
                        try:
                            if word != get_nltk_word(data[position + word_count]):
                                error = True
                            word_count += 1
                        except IndexError:
                            pass
                    try:
                        if not error:
                            if isinstance(data[position + word_count], nltk.tree.Tree):
                                # .node is the NLTK 2 API; NLTK 3 renamed it to .label()
                                if data[position + word_count].node == 'NP' and rule[-1] == 'NP':
                                    add_to_dict(possible_hypernym, data[position + word_count][0][0])
                                    break
                                elif data[position + word_count].node == 'NPs' and rule[-1] == 'NPs':
                                    for node in data[position + word_count]:
                                        if isinstance(node, nltk.tree.Tree):
                                            add_to_dict(possible_hypernym, node[0][0])
                                    break
                    except IndexError:
                        pass
                # search left side
                elif rule[-1] == 'HYPERNYM':
                    possible_hypernym = get_nltk_word(data[position])
                    error = False
                    word_count = -1
                    nrule = list(rule)
                    nrule.reverse()
                    for word in nrule[1:-1]:
                        try:
                            if word != get_nltk_word(data[position + word_count]):
                                error = True  # original had "error = False", which made the check a no-op
                            word_count -= 1
                        except IndexError:
                            pass
                    try:
                        if not error:
                            if isinstance(data[position + word_count], nltk.tree.Tree):
                                # rule[0] holds the phrase type in this branch (the original
                                # tested rule[-1], which is always 'HYPERNYM' here)
                                if data[position + word_count].node == 'NP' and rule[0] == 'NP':
                                    add_to_dict(possible_hypernym, data[position + word_count][0][0])
                                    break
                                elif data[position + word_count].node == 'NPs' and rule[0] == 'NPs':
                                    for node in data[position + word_count]:
                                        if isinstance(node, nltk.tree.Tree):
#......... some code omitted here .........
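The listing breaks off before the body of worker's main loop, but the setup (a JoinableQueue filled with file paths, a file_count, and a worker closure) points at the standard queue-consumer pattern. A minimal sketch of that pattern under that assumption; the consume function and paths are illustrative, not the original code:

from multiprocessing import JoinableQueue, Process

def consume(queue):
    while True:
        path = queue.get()         # blocks until a task is available
        try:
            print("processing", path)
        finally:
            queue.task_done()      # mark the task finished for queue.join()

if __name__ == "__main__":
    queue = JoinableQueue()
    for path in ["a.pickle", "b.pickle"]:
        queue.put(path)
    worker = Process(target=consume, args=(queue,), daemon=True)
    worker.start()
    queue.join()                   # wait until every task_done() has been called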
Example 6: Manager
# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import keys [as alias]
# Assumes earlier in the script: logging/proc/mail/settingfile setup
# and an empty list: processList = [].
dicTable = Manager().dict()
# Launch a worker process for each database.
logging.info("Start threads")
for setting in settingfile.settingList:
    process = Process(target=proc.getBillingStat, args=(dicTable, setting, settingfile.wsSettingList))
    processList.append(process)
    process.start()
for process in processList:
    process.join()
logging.info("Finish threads")
logging.info("Start building text mail")
# Assemble the email body from the per-database results.
emailText = """<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>ЕССДЗ биллинг статистика</title>
<style type="text/css">
table {border-collapse: collapse;}
th {background: #ccc; text-align: center;}
td.number {text-align: right;}
td, th {border: 1px solid #800; padding: 4px;}
</style></head><body>"""
keys = dicTable.keys()
for key in keys:
    if dicTable[key] is not None:
        emailText = emailText + dicTable[key]
emailText += "</body></html>"
logging.info("Finish building text mail")
if len(dicTable) != 0:
    logging.info("Start sending mail")
    mail.sent_mail(text=emailText, to=settingfile.to, subj=settingfile.subject, toView=settingfile.toView)
    logging.info("Finish sending mail")