This page collects typical usage examples of Python's model.Graph. If you have been wondering what model.Graph is for, how to use it, or what calling it looks like in practice, the curated code examples here may help. You can also explore further usage examples from model, the module in which Graph is defined.
Three code examples of model.Graph are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
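All three examples follow the same construction pattern: build the model parameters, instantiate model.Graph with a utility object, the batch size, the maximum number of passes, and a mode string, then call create_graph to wire up the TensorFlow ops. As a quick orientation, here is a minimal sketch of just that pattern; it assumes the model and parameters modules from the same codebase as the examples and a utility object with populated FLAGS, so it is not runnable on its own.

# Minimal sketch of the model.Graph construction pattern shared by the
# examples below. `model`, `parameters`, and `utility` come from the
# surrounding codebase and are assumed to be available.
import tensorflow as tf
import model
import parameters

def build_graph(utility, mode="train"):
  batch_size = utility.FLAGS.batch_size
  # Build all model parameters plus the global step and the init op.
  param_class = parameters.Parameters(utility)
  params, global_step, init = param_class.parameters(utility)
  # Construct the graph object and create its TensorFlow ops.
  graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=mode)
  graph.create_graph(params, global_step)
  return graph, params, global_step, init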
Example 1: master
# Required import: import model [as alias]
# Or alternatively: from model import Graph [as alias]
def master(train_data, dev_data, utility):
  # Creates the TF graph and calls the trainer or the evaluator.
  # Assumes module-level imports (tensorflow as tf, model, parameters) and the
  # FLAGS object, evaluate(), and Train() helpers from the surrounding file.
  batch_size = utility.FLAGS.batch_size
  model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
  # Create all parameters of the model.
  param_class = parameters.Parameters(utility)
  params, global_step, init = param_class.parameters(utility)
  key = "test" if (FLAGS.evaluator_job) else "train"
  graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
  graph.create_graph(params, global_step)
  prev_dev_error = 0.0
  final_loss = 0.0
  final_accuracy = 0.0
  # Start the session.
  with tf.Session() as sess:
    sess.run(init.name)
    sess.run(graph.init_op.name)
    to_save = params.copy()
    saver = tf.train.Saver(to_save, max_to_keep=500)
    if (FLAGS.evaluator_job):
      # Evaluator: repeatedly scan model_dir, collect saved checkpoints by
      # step number, and evaluate all but the newest one on the dev set.
      while True:
        selected_models = {}
        file_list = tf.gfile.ListDirectory(model_dir)
        for model_file in file_list:
          if ("checkpoint" in model_file or "index" in model_file or
              "meta" in model_file):
            continue
          if ("data" in model_file):
            model_file = model_file.split(".")[0]
            model_step = int(
                model_file.split("_")[len(model_file.split("_")) - 1])
            selected_models[model_step] = model_file
        file_list = sorted(selected_models.items(), key=lambda x: x[0])
        if (len(file_list) > 0):
          file_list = file_list[0:len(file_list) - 1]
        print "list of models: ", file_list
        for model_file in file_list:
          model_file = model_file[1]
          print "restoring: ", model_file
          saver.restore(sess, model_dir + "/" + model_file)
          model_step = int(
              model_file.split("_")[len(model_file.split("_")) - 1])
          print "evaluating on dev ", model_file, model_step
          evaluate(sess, dev_data, batch_size, graph, model_step)
    else:
      # Trainer: create the output directories if needed, then train.
      ckpt = tf.train.get_checkpoint_state(model_dir)
      print "model dir: ", model_dir
      if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
        print "create dir: ", utility.FLAGS.output_dir
        tf.gfile.MkDir(utility.FLAGS.output_dir)
      if (not (tf.gfile.IsDirectory(model_dir))):
        print "create dir: ", model_dir
        tf.gfile.MkDir(model_dir)
      Train(graph, utility, batch_size, train_data, sess, model_dir,
            saver)
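The evaluator branch above recovers the training step from each saver data-shard filename by stripping the extension and taking the last underscore-separated token. Below is a small standalone sketch of just that parsing step; the filename used here is only a hypothetical example of what the saver might write.

# Standalone sketch of the step-parsing logic from the evaluator loop above.
# The filename is a hypothetical example of a tf.train.Saver data shard.
def step_from_checkpoint_file(model_file):
  base = model_file.split(".")[0]   # "model_4200.data-00000-of-00001" -> "model_4200"
  return int(base.split("_")[-1])   # "model_4200" -> 4200

print(step_from_checkpoint_file("model_4200.data-00000-of-00001"))  # prints 4200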
Example 2: master
# Required import: import model [as alias]
# Or alternatively: from model import Graph [as alias]
def master(train_data, dev_data, utility):
  # Creates the TF graph and calls the trainer or the evaluator.
  batch_size = utility.FLAGS.batch_size
  model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
  # Create all parameters of the model.
  param_class = parameters.Parameters(utility)
  params, global_step, init = param_class.parameters(utility)
  key = "test" if (FLAGS.evaluator_job) else "train"
  graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
  graph.create_graph(params, global_step)
  prev_dev_error = 0.0
  final_loss = 0.0
  final_accuracy = 0.0
  # Start the session.
  with tf.Session() as sess:
    sess.run(init.name)
    sess.run(graph.init_op.name)
    to_save = params.copy()
    saver = tf.train.Saver(to_save, max_to_keep=500)
    if (FLAGS.evaluator_job):
      # Evaluator: evaluate every saved checkpoint except the newest one.
      while True:
        selected_models = {}
        file_list = tf.gfile.ListDirectory(model_dir)
        for model_file in file_list:
          if ("checkpoint" in model_file or "index" in model_file or
              "meta" in model_file):
            continue
          if ("data" in model_file):
            model_file = model_file.split(".")[0]
            model_step = int(
                model_file.split("_")[len(model_file.split("_")) - 1])
            selected_models[model_step] = model_file
        file_list = sorted(selected_models.items(), key=lambda x: x[0])
        if (len(file_list) > 0):
          file_list = file_list[0:len(file_list) - 1]
        print("list of models: ", file_list)
        for model_file in file_list:
          model_file = model_file[1]
          print("restoring: ", model_file)
          saver.restore(sess, model_dir + "/" + model_file)
          model_step = int(
              model_file.split("_")[len(model_file.split("_")) - 1])
          print("evaluating on dev ", model_file, model_step)
          evaluate(sess, dev_data, batch_size, graph, model_step)
    else:
      # Trainer: create the output directories if needed, then train.
      ckpt = tf.train.get_checkpoint_state(model_dir)
      print("model dir: ", model_dir)
      if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
        print("create dir: ", utility.FLAGS.output_dir)
        tf.gfile.MkDir(utility.FLAGS.output_dir)
      if (not (tf.gfile.IsDirectory(model_dir))):
        print("create dir: ", model_dir)
        tf.gfile.MkDir(model_dir)
      Train(graph, utility, batch_size, train_data, sess, model_dir,
            saver)
Example 3: master
# Required import: import model [as alias]
# Or alternatively: from model import Graph [as alias]
def master(train_data, dev_data, utility):
  # Creates the TF graph and calls the trainer or the evaluator.
  batch_size = utility.FLAGS.batch_size
  model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
  # Create all parameters of the model.
  param_class = parameters.Parameters(utility)
  params, global_step, init = param_class.parameters(utility)
  key = "test" if (FLAGS.evaluator_job) else "train"
  graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
  graph.create_graph(params, global_step)
  prev_dev_error = 0.0
  final_loss = 0.0
  final_accuracy = 0.0
  # Start the session.
  with tf.Session() as sess:
    sess.run(init.name)
    sess.run(graph.init_op.name)
    to_save = params.copy()
    saver = tf.train.Saver(to_save, max_to_keep=500)
    if (FLAGS.evaluator_job):
      # Evaluator: evaluate every saved checkpoint except the newest one.
      while True:
        selected_models = {}
        file_list = tf.gfile.ListDirectory(model_dir)
        for model_file in file_list:
          if ("checkpoint" in model_file or "index" in model_file or
              "meta" in model_file):
            continue
          if ("data" in model_file):
            model_file = model_file.split(".")[0]
            model_step = int(
                model_file.split("_")[len(model_file.split("_")) - 1])
            selected_models[model_step] = model_file
        file_list = sorted(selected_models.items(), key=lambda x: x[0])
        if (len(file_list) > 0):
          file_list = file_list[0:len(file_list) - 1]
        print "list of models: ", file_list
        for model_file in file_list:
          model_file = model_file[1]
          print "restoring: ", model_file
          saver.restore(sess, model_dir + "/" + model_file)
          model_step = int(
              model_file.split("_")[len(model_file.split("_")) - 1])
          print "evaluating on dev ", model_file, model_step
          evaluate(sess, dev_data, batch_size, graph, model_step)
    else:
      # Trainer: create the model directory if needed, then train.
      ckpt = tf.train.get_checkpoint_state(model_dir)
      print "model dir: ", model_dir
      if (not (tf.gfile.IsDirectory(model_dir))):
        print "create dir: ", model_dir
        tf.gfile.MkDir(model_dir)
      Train(graph, utility, batch_size, train_data, sess, model_dir,
            saver)
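In all three variants, tf.train.Saver is given an explicit variable map (a copy of the params dictionary) with max_to_keep=500, so checkpoints contain only the model parameters and a long history is kept for the evaluator to sweep; the saving itself presumably happens inside Train. The sketch below pairs both halves of that save/restore pattern under TF 1.x, using placeholder variables and paths rather than the original model.

# Hedged TF 1.x sketch of the save/restore pattern used above: a Saver built
# from an explicit variable map, with the checkpoint named so the step can be
# recovered from the filename. Variables and paths here are placeholders.
import tensorflow as tf

params = {"embedding": tf.Variable(tf.zeros([10, 4]), name="embedding")}
saver = tf.train.Saver(params.copy(), max_to_keep=500)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  tf.gfile.MakeDirs("/tmp/example_model_dir")  # Saver.save needs the directory to exist.
  ckpt_prefix = saver.save(sess, "/tmp/example_model_dir/model_100")
  saver.restore(sess, ckpt_prefix)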