This article collects typical usage examples of the Python class inception.imagenet_data.ImagenetData. If you are unsure how to use imagenet_data.ImagenetData, or what it can do, the curated code samples below should help. You can also explore further usage examples from the module it belongs to, inception.imagenet_data.
Listed below are 9 code examples of imagenet_data.ImagenetData, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
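Before the examples, here is a minimal sketch of how ImagenetData is typically constructed and queried. It assumes the Dataset interface of the TensorFlow inception model (data_files(), num_classes(), num_examples_per_epoch()); the subset value and the printed metadata are illustrative assumptions, not output from the original scripts.
from inception.imagenet_data import ImagenetData

# Build a dataset handle for the chosen ImageNet split.
dataset = ImagenetData(subset='train')

# data_files() returns the list of preprocessed TFRecord shards; an empty
# result usually means the data directory flag does not point at the
# converted ImageNet data.
assert dataset.data_files()

# Basic metadata exposed by the Dataset interface.
print(dataset.num_classes())             # number of ImageNet labels
print(dataset.num_examples_per_epoch())  # examples in the chosen subset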
Example 1: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(_):
  # Build the ImageNet dataset for the requested subset and make sure the
  # preprocessed data files are present.
  dataset = ImagenetData(subset=FLAGS.subset)
  assert dataset.data_files()
  # Start training from scratch: wipe any previous train_dir.
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  inception_train.train(dataset)
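Example 1 relies on command-line flags (FLAGS.subset, FLAGS.train_dir) defined elsewhere in the training script. A hedged sketch of how such flags are typically declared with the TF 1.x flags API is shown below; the defaults are placeholders for illustration, not values taken from the original script.
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

# Flag definitions assumed by main(); defaults are illustrative only.
tf.app.flags.DEFINE_string('subset', 'train',
                           'ImageNet subset to use: "train" or "validation".')
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
                           'Directory for checkpoints and event logs.')

if __name__ == '__main__':
  tf.app.run()  # parses the flags, then calls main(_)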
Example 2: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(unused_args):
  assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker'

  # Extract all the hostnames for the ps and worker jobs to construct the
  # cluster spec.
  ps_hosts = FLAGS.ps_hosts.split(',')
  worker_hosts = FLAGS.worker_hosts.split(',')
  tf.logging.info('PS hosts are: %s' % ps_hosts)
  tf.logging.info('Worker hosts are: %s' % worker_hosts)

  cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,
                                       'worker': worker_hosts})
  server = tf.train.Server(
      {'ps': ps_hosts,
       'worker': worker_hosts},
      job_name=FLAGS.job_name,
      task_index=FLAGS.task_id,
      protocol=FLAGS.protocol)

  if FLAGS.job_name == 'ps':
    # `ps` jobs wait for incoming connections from the workers.
    server.join()
  else:
    # `worker` jobs will actually do the work.
    dataset = ImagenetData(subset=FLAGS.subset)
    assert dataset.data_files()
    # Only the chief checks for or creates train_dir.
    if FLAGS.task_id == 0:
      if not tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.MakeDirs(FLAGS.train_dir)
    inception_distributed_train.train(server.target, dataset, cluster_spec)
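The distributed variant additionally expects cluster-description flags (FLAGS.ps_hosts, FLAGS.worker_hosts, FLAGS.job_name, FLAGS.task_id, and here FLAGS.protocol). A rough sketch of how these might be declared, and how separate processes could be launched, is given below; the host addresses, defaults, and the invocation lines are placeholders, not values from the original script.
import tensorflow as tf

# Cluster flags assumed by main(); values below are placeholders.
tf.app.flags.DEFINE_string('ps_hosts', 'ps0.example.com:2222',
                           'Comma-separated list of parameter server host:port pairs.')
tf.app.flags.DEFINE_string('worker_hosts',
                           'worker0.example.com:2222,worker1.example.com:2222',
                           'Comma-separated list of worker host:port pairs.')
tf.app.flags.DEFINE_string('job_name', 'worker', 'Either "ps" or "worker".')
tf.app.flags.DEFINE_integer('task_id', 0, 'Index of this task within its job.')
tf.app.flags.DEFINE_string('protocol', 'grpc', 'Server protocol, e.g. "grpc".')

# Each process in the cluster runs the same script with a different
# --job_name / --task_id combination, for example:
#   python imagenet_distributed_train.py --job_name=ps --task_id=0 ...
#   python imagenet_distributed_train.py --job_name=worker --task_id=0 ...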
Example 3: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(unused_argv=None):
  dataset = ImagenetData(subset=FLAGS.subset)
  assert dataset.data_files()
  # Start evaluation with a clean eval_dir for summaries.
  if tf.gfile.Exists(FLAGS.eval_dir):
    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  inception_eval.evaluate(dataset)
Example 4: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(unused_args):
  assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker'

  # Extract all the hostnames for the ps and worker jobs to construct the
  # cluster spec.
  ps_hosts = FLAGS.ps_hosts.split(',')
  worker_hosts = FLAGS.worker_hosts.split(',')
  tf.logging.info('PS hosts are: %s' % ps_hosts)
  tf.logging.info('Worker hosts are: %s' % worker_hosts)

  cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,
                                       'worker': worker_hosts})
  server = tf.train.Server(
      {'ps': ps_hosts,
       'worker': worker_hosts},
      job_name=FLAGS.job_name,
      task_index=FLAGS.task_id)

  if FLAGS.job_name == 'ps':
    # `ps` jobs wait for incoming connections from the workers.
    server.join()
  else:
    # `worker` jobs will actually do the work.
    dataset = ImagenetData(subset=FLAGS.subset)
    assert dataset.data_files()
    # Only the chief checks for or creates train_dir.
    if FLAGS.task_id == 0:
      if not tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.MakeDirs(FLAGS.train_dir)
    inception_distributed_train.train(server.target, dataset, cluster_spec)
Example 5: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(_):
  dataset = ImagenetData(subset=FLAGS.subset)
  # Note: this variant skips the data-files check.
  # assert dataset.data_files()
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  inception_train.train(dataset)
Example 6: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(unused_args):
  assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker'

  # Extract all the hostnames for the ps and worker jobs to construct the
  # cluster spec.
  ps_hosts = FLAGS.ps_hosts.split(',')
  worker_hosts = FLAGS.worker_hosts.split(',')
  tf.logging.info('PS hosts are: %s' % ps_hosts)
  tf.logging.info('Worker hosts are: %s' % worker_hosts)

  cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,
                                       'worker': worker_hosts})
  server = tf.train.Server(
      {'ps': ps_hosts,
       'worker': worker_hosts},
      job_name=FLAGS.job_name,
      task_index=FLAGS.task_id)

  if FLAGS.job_name == 'ps':
    # `ps` jobs wait for incoming connections from the workers.
    server.join()
  else:
    # `worker` jobs will actually do the work.
    dataset = ImagenetData(subset=FLAGS.subset)
    # Note: this variant skips the data-files check.
    # assert dataset.data_files()
    # Only the chief checks for or creates train_dir.
    if FLAGS.task_id == 0:
      if not tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.MakeDirs(FLAGS.train_dir)
    inception_distributed_train.train(server.target, dataset, cluster_spec)
Example 7: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(_):
  dataset = ImagenetData(subset=FLAGS.subset)
  assert dataset.data_files()
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  # Record which dataset is being trained on before starting.
  FLAGS.dataset_name = 'imagenet'
  inception_train.train(dataset)
Example 8: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(unused_args):
  FLAGS.dataset_name = 'imagenet'

  assert FLAGS.job_name in ['ps', 'worker'], 'job_name must be ps or worker'

  # Extract all the hostnames for the ps and worker jobs to construct the
  # cluster spec.
  ps_hosts = FLAGS.ps_hosts.split(',')
  worker_hosts = FLAGS.worker_hosts.split(',')
  tf.logging.info('PS hosts are: %s' % ps_hosts)
  tf.logging.info('Worker hosts are: %s' % worker_hosts)

  cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,
                                       'worker': worker_hosts})
  # Let the in-process server grow GPU memory usage on demand instead of
  # grabbing all device memory up front.
  sess_config = tf.ConfigProto()
  sess_config.gpu_options.allow_growth = True
  server = tf.train.Server(
      {'ps': ps_hosts,
       'worker': worker_hosts},
      job_name=FLAGS.job_name,
      task_index=FLAGS.task_id,
      config=sess_config)

  if FLAGS.job_name == 'ps':
    # `ps` jobs wait for incoming connections from the workers.
    server.join()
  else:
    # `worker` jobs will actually do the work.
    dataset = ImagenetData(subset=FLAGS.subset)
    assert dataset.data_files()
    # Only the chief checks for or creates train_dir.
    if FLAGS.task_id == 0:
      if not tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.MakeDirs(FLAGS.train_dir)
    inception_distributed_train.train(server.target, dataset, cluster_spec)
Example 9: main
# Required import: from inception import imagenet_data [as alias]
# Or: from inception.imagenet_data import ImagenetData [as alias]
def main(unused_argv=None):
  dataset = ImagenetData(subset=FLAGS.subset)
  assert dataset.data_files()
  if tf.gfile.Exists(FLAGS.eval_dir):
    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  FLAGS.dataset_name = 'imagenet'
  # Evaluate over every example in the chosen subset.
  FLAGS.num_examples = dataset.num_examples_per_epoch()
  inception_eval.evaluate(dataset)