This article collects typical usage examples of the Array.raw attribute from Python's multiprocessing module. If you have been wondering what Array.raw is for, or how to use it in practice, the curated examples below may help. You can also read further about the class it belongs to, multiprocessing.Array.
Two code examples of Array.raw are shown below, sorted by popularity by default.
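Before the full examples, here is a minimal self-contained sketch (my addition, not taken from the examples below) of what the raw attribute does: for an Array created with type code 'c', raw exposes the whole underlying shared-memory segment as one byte string that can be read or overwritten in a single assignment.

from multiprocessing import Array

buf = Array('c', 16)   # 16-byte shared character buffer
buf.raw = b'hello'     # write bytes starting at the beginning of the segment
print(buf.raw)         # the full 16-byte segment: b'hello' plus trailing null bytes

Assigning more bytes than the segment holds raises an error, which is why Example 1 sizes its Array to exactly len(blob).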
Example 1: run
# Required import: from multiprocessing import Array [as alias]
# Or: from multiprocessing.Array import raw [as alias]
# (the snippet additionally assumes: import os, pickle, multiprocessing, gym;
# from multiprocessing import Process, Queue; from gym.spaces import Box, Discrete;
# create_model, runner and trainer are defined elsewhere in the project)
def run(args):
    # create dummy environment to be able to create model
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # create main model
    model = create_model(env, args)
    model.summary()
    env.close()

    # for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # force runner processes to use cpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # create fifos and processes for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner, args=(shared_buffer, fifo, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)
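The runner function itself is not part of this example; a plausible counterpart (a hypothetical sketch, since the original is not shown here) would read the weights back out of the same shared buffer:

def runner(shared_buffer, fifo, args):
    # deserialize the weight blob the trainer published into shared memory;
    # .raw returns the entire segment, which here is exactly the pickled payload
    weights = pickle.loads(shared_buffer.raw)
    # a real runner would then build a local model, call model.set_weights(weights),
    # interact with its environment, and push experience batches into fifo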
Example 2: main
# Required import: from multiprocessing import Array [as alias]
# Or: from multiprocessing.Array import raw [as alias]
# (the snippet additionally assumes: from multiprocessing import Array, Value, Pool;
# HASHFILE, WORDLIST, SHARED_MEM_SIZE, NUM_PROCESSES and the worker function
# entry() are defined at module level elsewhere in the project)
def main():
    global WORDLIST, HASHFILE, words, result, curr, total, num_words, curr_words
    #
    # process files
    #
    print("[*] reading hashes...")
    hashes = open(HASHFILE, 'r')
    hashlist = []
    for line in hashes:
        data = line.split(":")
        if len(data) > 1:
            hashv = data[0].strip()
            salt = data[1].strip()
            hashlist.append((hashv, salt))
    hashes.close()

    print("[*] parsing wordlist...")
    words = Array('c', SHARED_MEM_SIZE, lock=False)  # allocate shared memory segment

    # get line count
    wordlist_file = open(WORDLIST, 'r')
    lines = 0
    for line in wordlist_file:
        lines += 1
    total = lines * len(hashlist)
    curr = Value('i', 0)
    curr_words = Value('i', 0)
    wordlist_file.seek(0)  # get back to beginning

    #
    # crack
    #
    print("[*] beginning cracking")
    pool = Pool(processes=NUM_PROCESSES)
    results = []
    current_char_count = 0
    words_raw = ""
    for line in wordlist_file:
        length = len(line)
        if length + current_char_count < SHARED_MEM_SIZE:
            # keep filling the current chunk
            words_raw += line
            current_char_count += length
        else:
            print("[*] next round")
            curr_words.value = len(words_raw.split("\n"))
            # publish the chunk; pad the rest of the segment so stale data is overwritten
            words.raw = words_raw + (SHARED_MEM_SIZE - len(words_raw)) * '0'
            words_raw = line
            current_char_count = length
            # let workers do work!
            results.extend(pool.map(entry, hashlist))
            # remove cracked hashes
            # TODO
    print("[*] final round")
    curr_words.value = len(words_raw.split("\n"))
    words.raw = words_raw + (SHARED_MEM_SIZE - len(words_raw)) * '0'
    results.extend(pool.map(entry, hashlist))
    print("[*] done")
    for result in results:
        if result is not None:
            print("%s:%s" % result)