本文整理汇总了Python中sgf_wrapper.replay_sgf_file方法的典型用法代码示例。如果您正苦于以下问题：Python sgf_wrapper.replay_sgf_file方法的具体用法？Python sgf_wrapper.replay_sgf_file怎么用？Python sgf_wrapper.replay_sgf_file使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 sgf_wrapper 的用法示例。
在下文中一共展示了sgf_wrapper.replay_sgf_file方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_dataset_from_sgf
# 需要导入模块: import sgf_wrapper [as 别名]
# 或者: from sgf_wrapper import replay_sgf_file [as 别名]
def make_dataset_from_sgf(sgf_filename, tf_record):
    """Convert one SGF game record into a tf.Example dataset.

    Replays every (position, next_move, result) tuple from `sgf_filename`
    and writes the resulting tf.Examples to the `tf_record` file.
    """
    position_contexts = sgf_wrapper.replay_sgf_file(sgf_filename)
    # Lazily transform each replayed position into a tf.Example.
    examples = (_make_tf_example_from_pwc(pwc) for pwc in position_contexts)
    write_tf_examples(tf_record, examples)
示例2: make_dataset_from_sgf
# 需要导入模块: import sgf_wrapper [as 别名]
# 或者: from sgf_wrapper import replay_sgf_file [as 别名]
def make_dataset_from_sgf(board_size, sgf_filename, tf_record):
    """Convert one SGF game record into a tf.Example dataset.

    Same as the single-argument variant, but threads `board_size` through
    both the SGF replay and the per-position example construction.
    """
    # Generator keeps the pipeline lazy: nothing is materialized until
    # write_tf_examples consumes it.
    examples = (
        _make_tf_example_from_pwc(board_size, pwc)
        for pwc in sgf_wrapper.replay_sgf_file(board_size, sgf_filename)
    )
    write_tf_examples(tf_record, examples)
示例3: final_position_sgf
# 需要导入模块: import sgf_wrapper [as 别名]
# 或者: from sgf_wrapper import replay_sgf_file [as 别名]
def final_position_sgf(sgf_path):
    """Return the final board position of the game stored in `sgf_path`.

    Replays the whole game and plays the last recorded move on the last
    replayed position (replay_sgf_file yields positions *before* their
    next_move is applied, so the final move must be played explicitly).

    Raises:
        ValueError: if the SGF file contains no positions at all.
            (Previously this case crashed with an opaque UnboundLocalError
            because `pwc` was never assigned.)
    """
    pwc = None
    for pwc in sgf_wrapper.replay_sgf_file(sgf_path):
        pass  # We only want the last yielded PositionWithContext.
    if pwc is None:
        raise ValueError('No positions found in SGF file: %s' % sgf_path)
    return pwc.position.play_move(pwc.next_move)
示例4: parse_sgf_to_examples
# 需要导入模块: import sgf_wrapper [as 别名]
# 或者: from sgf_wrapper import replay_sgf_file [as 别名]
def parse_sgf_to_examples(sgf_path):
    """Return supervised examples (positions, moves, results) from an SGF.

    NOTE: last move is not played because no p.next_move after.
    """
    # Collect one (position, next_move, result) triple per replayed position,
    # then transpose into three parallel sequences with star-unpacked zip.
    triples = [
        (pwc.position, pwc.next_move, pwc.result)
        for pwc in sgf_wrapper.replay_sgf_file(sgf_path)
    ]
    return zip(*triples)
示例5: main
# 需要导入模块: import sgf_wrapper [as 别名]
# 或者: from sgf_wrapper import replay_sgf_file [as 别名]
def main(argv):
    """Compute shared-trunk embeddings for sampled positions of SGF games.

    Restores the dual network from FLAGS.model, replays each SGF returned by
    get_files(), extracts features for a FLAGS.first/last/every-sampled subset
    of positions, runs the network's shared trunk over them, and pickles
    [metadata, embeddings] to FLAGS.embedding_file — even on error, via the
    finally block.
    """
    features, labels = dual_net.get_inference_input()
    tf_tensors = dual_net.model_inference_fn(features, False)
    # This one-off relies on a locally patched model_inference_fn that also
    # returns the shared trunk as a fourth output; bail out if it doesn't.
    if len(tf_tensors) != 4:
        print("oneoffs/embeddings.py requires you modify")
        print("dual_net.model_inference_fn and add a fourth param")
        sys.exit(1)
    p_out, v_out, logits, shared = tf_tensors
    # Only the shared-trunk activation is fetched; policy/value are unused.
    predictions = {'shared': shared}
    sess = tf.Session()
    tf.train.Saver().restore(sess, FLAGS.model)
    try:
        # NOTE(review): if get_files() or tqdm raises here, `metadata` and
        # `embeddings` are still unbound when the finally block runs — the
        # pickle.dump would then fail with NameError. Confirm intended.
        progress = tqdm(get_files())
        embeddings = []
        metadata = []
        for i, f in enumerate(progress):
            # Shorten the filename purely for the progress-bar description.
            short_f = os.path.basename(f)
            short_f = short_f.replace('-minigo-cc-evaluator', '-')
            short_f = short_f.replace('-000', '-')
            progress.set_description('Processing %s' % short_f)
            processed = []
            for idx, p in enumerate(sgf_wrapper.replay_sgf_file(f)):
                # Sample positions: skip before FLAGS.first, stop after
                # FLAGS.last, and keep only every FLAGS.every-th index.
                if idx < FLAGS.first: continue
                if idx > FLAGS.last: break
                if idx % FLAGS.every != 0: continue
                processed.append(features_lib.extract_features(p.position))
                metadata.append((f, idx))
            if len(processed) > 0:
                # If len(processed) gets too large may have to chunk.
                res = sess.run(predictions, feed_dict={features: processed})
                for r in res['shared']:
                    # Flatten each position's trunk activation to a 1-D vector.
                    embeddings.append(r.flatten())
    except:
        # Raise shows us the error but only after the finally block executes.
        raise
    finally:
        # Always persist whatever was collected so partial runs aren't lost.
        with open(FLAGS.embedding_file, 'wb') as pickle_file:
            pickle.dump([metadata, np.array(embeddings)], pickle_file)