本文整理匯總了Python中sgf_wrapper.replay_sgf_file方法的典型用法代碼示例。如果您正苦於以下問題:Python sgf_wrapper.replay_sgf_file方法的具體用法?Python sgf_wrapper.replay_sgf_file怎麽用?Python sgf_wrapper.replay_sgf_file使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sgf_wrapper
的用法示例。
在下文中一共展示了sgf_wrapper.replay_sgf_file方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: make_dataset_from_sgf
# 需要導入模塊: import sgf_wrapper [as 別名]
# 或者: from sgf_wrapper import replay_sgf_file [as 別名]
def make_dataset_from_sgf(sgf_filename, tf_record):
    """Convert a single SGF game file into a tf.Example record file.

    Replays the game in *sgf_filename* and writes one tf.Example per
    position into *tf_record*.
    """
    position_contexts = sgf_wrapper.replay_sgf_file(sgf_filename)
    examples = (_make_tf_example_from_pwc(pwc) for pwc in position_contexts)
    write_tf_examples(tf_record, examples)
示例2: make_dataset_from_sgf
# 需要導入模塊: import sgf_wrapper [as 別名]
# 或者: from sgf_wrapper import replay_sgf_file [as 別名]
def make_dataset_from_sgf(board_size, sgf_filename, tf_record):
    """Convert one SGF game on a *board_size* board into tf.Examples.

    Replays the game in *sgf_filename* and writes one tf.Example per
    position into *tf_record*.
    """
    position_contexts = sgf_wrapper.replay_sgf_file(board_size, sgf_filename)
    examples = (_make_tf_example_from_pwc(board_size, pwc)
                for pwc in position_contexts)
    write_tf_examples(tf_record, examples)
示例3: final_position_sgf
# 需要導入模塊: import sgf_wrapper [as 別名]
# 或者: from sgf_wrapper import replay_sgf_file [as 別名]
def final_position_sgf(sgf_path):
    """Return the final board position of the game in *sgf_path*.

    Replays every position; the last PositionWithContext's pending move is
    applied so the returned position includes the game's final move.

    Raises:
        ValueError: if the SGF file yields no positions.  (Previously this
        crashed with an opaque UnboundLocalError on an empty game.)
    """
    last_pwc = None
    for pwc in sgf_wrapper.replay_sgf_file(sgf_path):
        last_pwc = pwc
    if last_pwc is None:
        raise ValueError('no positions found in %s' % sgf_path)
    return last_pwc.position.play_move(last_pwc.next_move)
示例4: parse_sgf_to_examples
# 需要導入模塊: import sgf_wrapper [as 別名]
# 或者: from sgf_wrapper import replay_sgf_file [as 別名]
def parse_sgf_to_examples(sgf_path):
    """Return supervised-learning columns (positions, moves, results).

    NOTE: the final move is never included, because the last position has
    no following p.next_move.
    """
    rows = [(step.position, step.next_move, step.result)
            for step in sgf_wrapper.replay_sgf_file(sgf_path)]
    # Transpose row-wise triples into three parallel columns.
    return zip(*rows)
示例5: main
# 需要導入模塊: import sgf_wrapper [as 別名]
# 或者: from sgf_wrapper import replay_sgf_file [as 別名]
def main(argv):
    """Extract per-position embeddings from SGF games and pickle them.

    Runs the dual network's shared-trunk output over selected positions
    of each SGF file returned by get_files() (selection controlled by
    FLAGS.first / FLAGS.last / FLAGS.every) and writes
    [metadata, embeddings] to FLAGS.embedding_file.
    """
    features, labels = dual_net.get_inference_input()
    tf_tensors = dual_net.model_inference_fn(features, False)
    # A stock model_inference_fn returns fewer outputs; this tool needs
    # the shared trunk exposed as a fourth tensor.
    if len(tf_tensors) != 4:
        print("oneoffs/embeddings.py requires you modify")
        print("dual_net.model_inference_fn and add a fourth param")
        sys.exit(1)
    p_out, v_out, logits, shared = tf_tensors
    predictions = {'shared': shared}

    sess = tf.Session()
    tf.train.Saver().restore(sess, FLAGS.model)

    try:
        progress = tqdm(get_files())
        embeddings = []
        metadata = []
        for f in progress:
            # Shorten the filename for the progress-bar description only.
            short_f = os.path.basename(f)
            short_f = short_f.replace('-minigo-cc-evaluator', '-')
            short_f = short_f.replace('-000', '-')
            progress.set_description('Processing %s' % short_f)

            processed = []
            for idx, p in enumerate(sgf_wrapper.replay_sgf_file(f)):
                # Keep only positions in [first, last] sampled every `every`.
                if idx < FLAGS.first:
                    continue
                if idx > FLAGS.last:
                    break
                if idx % FLAGS.every != 0:
                    continue
                processed.append(features_lib.extract_features(p.position))
                metadata.append((f, idx))

            # If len(processed) gets too large may have to chunk.
            if processed:
                res = sess.run(predictions, feed_dict={features: processed})
                for r in res['shared']:
                    embeddings.append(r.flatten())
    finally:
        # Persist whatever was computed, even on interruption; any pending
        # exception still propagates after this block runs, so the former
        # bare `except: raise` was redundant and has been removed.
        with open(FLAGS.embedding_file, 'wb') as pickle_file:
            pickle.dump([metadata, np.array(embeddings)], pickle_file)