This article collects typical usage examples of the tensorflow.write_file method in Python. If you are wondering how to call tensorflow.write_file, or are looking for concrete examples of it in use, the curated code samples below may help. You can also explore further usage examples of the tensorflow module in which the method is defined.
Below are 14 code examples of tensorflow.write_file, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
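As a quick orientation before the longer examples, here is a minimal, hedged sketch of the op itself (TensorFlow 1.x graph/session style; the file name output.txt and the byte string are illustrative only, not taken from any example below). tf.write_file(filename, contents) builds an op that writes a scalar string tensor to disk when the op is executed; in TensorFlow 2.x the same op is exposed as tf.io.write_file.

import tensorflow as tf

# Illustrative sketch: build a graph that writes a byte string to 'output.txt'.
contents = tf.constant(b'hello, tf.write_file')   # scalar string (bytes) tensor
write_op = tf.write_file('output.txt', contents)  # an op; nothing is written yet

with tf.Session() as sess:
  sess.run(write_op)  # the file is created only when the op actually runs

Note that tf.write_file produces no return value of interest; it is run purely for its side effect, which is why the larger examples below pass it to a session or an evaluation loop as an op to execute.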
Example 1: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def __init__(self):
  # image writing graph
  self.tf_graph = tf.Graph()
  with self.tf_graph.as_default():
    self.tf_image = tf.placeholder(tf.uint8, [None, None, 3])
    self.tf_image_path = tf.placeholder(tf.string, [])

    tf_image = tf.image.encode_png(self.tf_image)
    tf_write_op = tf.write_file(self.tf_image_path, tf_image)
    self.tf_write_op = tf_write_op

    init = tf.global_variables_initializer()

    self.tf_session = tf.Session(config=tf.ConfigProto(
        device_count={'GPU': 0}
    ))
    self.tf_session.run(init)
Example 2: decode
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def decode(self, ids, strip_extraneous=False):
  """Transform a sequence of int ids into an image file.

  Args:
    ids: list of integers to be converted.
    strip_extraneous: unused

  Returns:
    Path to the temporary file where the image was saved.

  Raises:
    ValueError: if the ids are not of the appropriate size.
  """
  del strip_extraneous
  _, tmp_file_path = tempfile.mkstemp("_decode.png")
  if self._height is None or self._width is None:
    size = int(math.sqrt(len(ids) / self._channels))
    length = size * size * self._channels
  else:
    size = None
    length = self._height * self._width * self._channels
  if len(ids) != length:
    raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
                     "channels (%d); %d != %d.\n Ids: %s"
                     % (len(ids), self._height, self._width, self._channels,
                        len(ids), length, " ".join([str(i) for i in ids])))
  with tf.Graph().as_default():
    raw = tf.constant(ids, dtype=tf.uint8)
    if size is None:
      img = tf.reshape(raw, [self._height, self._width, self._channels])
    else:
      img = tf.reshape(raw, [size, size, self._channels])
    png = tf.image.encode_png(img)
    op = tf.write_file(tmp_file_path, png)
    with tf.Session() as sess:
      sess.run(op)
  return tmp_file_path
Example 3: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(argv=None):
  """Run a Tensorflow model on the Criteo dataset."""
  env = json.loads(os.environ.get('TF_CONFIG', '{}'))
  # First find out if there's a task value on the environment variable.
  # If there is none or it is empty define a default one.
  task_data = env.get('task') or {'type': 'master', 'index': 0}

  argv = sys.argv if argv is None else argv
  args = create_parser().parse_args(args=argv[1:])

  trial = task_data.get('trial')
  if trial is not None:
    output_dir = os.path.join(args.output_path, trial)
  else:
    output_dir = args.output_path

  # Do only evaluation if instructed so, or call Experiment's run.
  if args.eval_only_summary_filename:
    experiment = get_experiment_fn(args)(output_dir)
    # Note that evaluation here will appear as 'one_pass' in tensorboard.
    results = experiment.evaluate(delay_secs=0)
    # Converts numpy types to native types for json dumps.
    json_out = json.dumps(
        {key: value.tolist() for key, value in results.iteritems()})
    with tf.Session():
      tf.write_file(args.eval_only_summary_filename, json_out).run()
  else:
    learn_runner.run(experiment_fn=get_experiment_fn(args),
                     output_dir=output_dir)
Example 4: _get_write_image_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def _get_write_image_ops(eval_dir, filename, images):
  """Create Ops that write images to disk."""
  return tf.write_file(
      '%s/%s' % (eval_dir, filename),
      tf.image.encode_png(data_provider.float_image_to_uint8(images)))
Example 5: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(_, run_eval_loop=True):
  # Fetch real images.
  with tf.name_scope('inputs'):
    real_images, _, _ = data_provider.provide_data(
        'train', FLAGS.num_images_generated, FLAGS.dataset_dir)

  image_write_ops = None
  if FLAGS.eval_real_images:
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(real_images, FLAGS.classifier_filename))
  else:
    # In order for variables to load, use the same variable scope as in the
    # train job.
    with tf.variable_scope('Generator'):
      images = networks.unconditional_generator(
          tf.random_normal([FLAGS.num_images_generated, FLAGS.noise_dims]))
    tf.summary.scalar('MNIST_Frechet_distance',
                      util.mnist_frechet_distance(
                          real_images, images, FLAGS.classifier_filename))
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(images, FLAGS.classifier_filename))
    if FLAGS.num_images_generated >= 100:
      reshaped_images = tfgan.eval.image_reshaper(
          images[:100, ...], num_cols=10)
      uint8_images = data_provider.float_image_to_uint8(reshaped_images)
      image_write_ops = tf.write_file(
          '%s/%s' % (FLAGS.eval_dir, 'unconditional_gan.png'),
          tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return

  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Example 6: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(_, run_eval_loop=True):
  with tf.name_scope('inputs'):
    noise, one_hot_labels = _get_generator_inputs(
        FLAGS.num_images_per_class, NUM_CLASSES, FLAGS.noise_dims)

  # Generate images.
  with tf.variable_scope('Generator'):  # Same scope as in train job.
    images = networks.conditional_generator((noise, one_hot_labels))

  # Visualize images.
  reshaped_img = tfgan.eval.image_reshaper(
      images, num_cols=FLAGS.num_images_per_class)
  tf.summary.image('generated_images', reshaped_img, max_outputs=1)

  # Calculate evaluation metrics.
  tf.summary.scalar('MNIST_Classifier_score',
                    util.mnist_score(images, FLAGS.classifier_filename))
  tf.summary.scalar('MNIST_Cross_entropy',
                    util.mnist_cross_entropy(
                        images, one_hot_labels, FLAGS.classifier_filename))

  # Write images to disk.
  image_write_ops = tf.write_file(
      '%s/%s' % (FLAGS.eval_dir, 'conditional_gan.png'),
      tf.image.encode_png(data_provider.float_image_to_uint8(reshaped_img[0])))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return

  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Example 7: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(_, run_eval_loop=True):
  with tf.name_scope('inputs'):
    images = data_provider.provide_data(
        'validation', FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir,
        patch_size=FLAGS.patch_size)

  # In order for variables to load, use the same variable scope as in the
  # train job.
  with tf.variable_scope('generator'):
    reconstructions, _, prebinary = networks.compression_model(
        images,
        num_bits=FLAGS.bits_per_patch,
        depth=FLAGS.model_depth,
        is_training=False)
  summaries.add_reconstruction_summaries(images, reconstructions, prebinary)

  # Visualize losses.
  pixel_loss_per_example = tf.reduce_mean(
      tf.abs(images - reconstructions), axis=[1, 2, 3])
  pixel_loss = tf.reduce_mean(pixel_loss_per_example)
  tf.summary.histogram('pixel_l1_loss_hist', pixel_loss_per_example)
  tf.summary.scalar('pixel_l1_loss', pixel_loss)

  # Create ops to write images to disk.
  uint8_images = data_provider.float_image_to_uint8(images)
  uint8_reconstructions = data_provider.float_image_to_uint8(reconstructions)
  uint8_reshaped = summaries.stack_images(uint8_images, uint8_reconstructions)
  image_write_ops = tf.write_file(
      '%s/%s' % (FLAGS.eval_dir, 'compression.png'),
      tf.image.encode_png(uint8_reshaped[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return

  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      master=FLAGS.master,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Example 8: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(_, run_eval_loop=True):
  # Fetch real images.
  with tf.name_scope('inputs'):
    real_images, _, _ = data_provider.provide_data(
        'train', FLAGS.num_images_generated, FLAGS.dataset_dir)

  image_write_ops = None
  if FLAGS.eval_real_images:
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(real_images, FLAGS.classifier_filename))
  else:
    # In order for variables to load, use the same variable scope as in the
    # train job.
    with tf.variable_scope('Generator'):
      images = networks.unconditional_generator(
          tf.random_normal([FLAGS.num_images_generated, FLAGS.noise_dims]),
          is_training=False)
    tf.summary.scalar('MNIST_Frechet_distance',
                      util.mnist_frechet_distance(
                          real_images, images, FLAGS.classifier_filename))
    tf.summary.scalar('MNIST_Classifier_score',
                      util.mnist_score(images, FLAGS.classifier_filename))
    if FLAGS.num_images_generated >= 100 and FLAGS.write_to_disk:
      reshaped_images = tfgan.eval.image_reshaper(
          images[:100, ...], num_cols=10)
      uint8_images = data_provider.float_image_to_uint8(reshaped_images)
      image_write_ops = tf.write_file(
          '%s/%s' % (FLAGS.eval_dir, 'unconditional_gan.png'),
          tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return

  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Example 9: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(_, run_eval_loop=True):
  with tf.name_scope('inputs'):
    noise, one_hot_labels = _get_generator_inputs(
        FLAGS.num_images_per_class, NUM_CLASSES, FLAGS.noise_dims)

  # Generate images.
  with tf.variable_scope('Generator'):  # Same scope as in train job.
    images = networks.conditional_generator(
        (noise, one_hot_labels), is_training=False)

  # Visualize images.
  reshaped_img = tfgan.eval.image_reshaper(
      images, num_cols=FLAGS.num_images_per_class)
  tf.summary.image('generated_images', reshaped_img, max_outputs=1)

  # Calculate evaluation metrics.
  tf.summary.scalar('MNIST_Classifier_score',
                    util.mnist_score(images, FLAGS.classifier_filename))
  tf.summary.scalar('MNIST_Cross_entropy',
                    util.mnist_cross_entropy(
                        images, one_hot_labels, FLAGS.classifier_filename))

  # Write images to disk.
  image_write_ops = None
  if FLAGS.write_to_disk:
    image_write_ops = tf.write_file(
        '%s/%s' % (FLAGS.eval_dir, 'conditional_gan.png'),
        tf.image.encode_png(data_provider.float_image_to_uint8(
            reshaped_img[0])))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return

  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Example 10: testWriteFile
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def testWriteFile(self):
  cases = ['', 'Some contents']
  for contents in cases:
    contents = tf.compat.as_bytes(contents)
    temp = tempfile.NamedTemporaryFile(
        prefix='WriteFileTest', dir=self.get_temp_dir())
    with self.test_session() as sess:
      w = tf.write_file(temp.name, contents)
      sess.run(w)
      file_contents = open(temp.name, 'rb').read()
      self.assertEqual(file_contents, contents)
Example 11: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(_, run_eval_loop=True):
  # Fetch and generate images to run through Inception.
  with tf.name_scope('inputs'):
    real_data, num_classes = _get_real_data(
        FLAGS.num_images_generated, FLAGS.dataset_dir)
    generated_data = _get_generated_data(
        FLAGS.num_images_generated, FLAGS.conditional_eval, num_classes)

  # Compute Frechet Inception Distance.
  if FLAGS.eval_frechet_inception_distance:
    fid = util.get_frechet_inception_distance(
        real_data, generated_data, FLAGS.num_images_generated,
        FLAGS.num_inception_images)
    tf.summary.scalar('frechet_inception_distance', fid)

  # Compute normal Inception scores.
  if FLAGS.eval_real_images:
    inc_score = util.get_inception_scores(
        real_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  else:
    inc_score = util.get_inception_scores(
        generated_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  tf.summary.scalar('inception_score', inc_score)

  # If conditional, display an image grid of different classes.
  if FLAGS.conditional_eval and not FLAGS.eval_real_images:
    reshaped_imgs = util.get_image_grid(
        generated_data, FLAGS.num_images_generated, num_classes,
        FLAGS.num_images_per_class)
    tf.summary.image('generated_data', reshaped_imgs, max_outputs=1)

  # Create ops that write images to disk.
  image_write_ops = None
  if FLAGS.conditional_eval:
    reshaped_imgs = util.get_image_grid(
        generated_data, FLAGS.num_images_generated, num_classes,
        FLAGS.num_images_per_class)
    uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
    image_write_ops = tf.write_file(
        '%s/%s' % (FLAGS.eval_dir, 'conditional_cifar10.png'),
        tf.image.encode_png(uint8_images[0]))
  else:
    if FLAGS.num_images_generated >= 100:
      reshaped_imgs = tfgan.eval.image_reshaper(
          generated_data[:100], num_cols=FLAGS.num_images_per_class)
      uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
      image_write_ops = tf.write_file(
          '%s/%s' % (FLAGS.eval_dir, 'unconditional_cifar10.png'),
          tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return

  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      master=FLAGS.master,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Example 12: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(_, run_eval_loop=True):
  # Fetch and generate images to run through Inception.
  with tf.name_scope('inputs'):
    real_data, num_classes = _get_real_data(
        FLAGS.num_images_generated, FLAGS.dataset_dir)
    generated_data = _get_generated_data(
        FLAGS.num_images_generated, FLAGS.conditional_eval, num_classes)

  # Compute Frechet Inception Distance.
  if FLAGS.eval_frechet_inception_distance:
    fid = util.get_frechet_inception_distance(
        real_data, generated_data, FLAGS.num_images_generated,
        FLAGS.num_inception_images)
    tf.summary.scalar('frechet_inception_distance', fid)

  # Compute normal Inception scores.
  if FLAGS.eval_real_images:
    inc_score = util.get_inception_scores(
        real_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  else:
    inc_score = util.get_inception_scores(
        generated_data, FLAGS.num_images_generated, FLAGS.num_inception_images)
  tf.summary.scalar('inception_score', inc_score)

  # If conditional, display an image grid of different classes.
  if FLAGS.conditional_eval and not FLAGS.eval_real_images:
    reshaped_imgs = util.get_image_grid(
        generated_data, FLAGS.num_images_generated, num_classes,
        FLAGS.num_images_per_class)
    tf.summary.image('generated_data', reshaped_imgs, max_outputs=1)

  # Create ops that write images to disk.
  image_write_ops = None
  if FLAGS.conditional_eval and FLAGS.write_to_disk:
    reshaped_imgs = util.get_image_grid(
        generated_data, FLAGS.num_images_generated, num_classes,
        FLAGS.num_images_per_class)
    uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
    image_write_ops = tf.write_file(
        '%s/%s' % (FLAGS.eval_dir, 'conditional_cifar10.png'),
        tf.image.encode_png(uint8_images[0]))
  else:
    if FLAGS.num_images_generated >= 100 and FLAGS.write_to_disk:
      reshaped_imgs = tfgan.eval.image_reshaper(
          generated_data[:100], num_cols=FLAGS.num_images_per_class)
      uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
      image_write_ops = tf.write_file(
          '%s/%s' % (FLAGS.eval_dir, 'unconditional_cifar10.png'),
          tf.image.encode_png(uint8_images[0]))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return

  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      master=FLAGS.master,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)
Example 13: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main(unused_argv):
  # initialize
  tf.logging.set_verbosity(tf.logging.INFO)

  # downscaling session
  tf_downscale_graph = tf.Graph()
  with tf_downscale_graph.as_default():
    tf_input_path = tf.placeholder(tf.string, [])
    tf_output_path = tf.placeholder(tf.string, [])
    tf_scale = tf.placeholder(tf.int32, [])

    tf_image = tf.read_file(tf_input_path)
    tf_image = tf.image.decode_png(tf_image, channels=3, dtype=tf.uint8)
    tf_image = tf.image.resize_bicubic(
        [tf_image],
        size=[tf.shape(tf_image)[0] // tf_scale, tf.shape(tf_image)[1] // tf_scale],
        align_corners=True)[0]
    tf_image = tf.cast(tf.clip_by_value(tf_image, 0.0, 255.0), tf.uint8)
    tf_image = tf.image.encode_png(tf_image)
    tf_downscale_op = tf.write_file(tf_output_path, tf_image)

    tf_downscale_init = tf.global_variables_initializer()

    tf_downscale_session = tf.Session(config=tf.ConfigProto(
        device_count={'GPU': 0}
    ))
    tf_downscale_session.run(tf_downscale_init)

  # retrieve image name list
  image_name_list = [f for f in os.listdir(FLAGS.input_path) if f.lower().endswith('.png')]
  tf.logging.info('data: %d images are prepared' % (len(image_name_list)))

  # downscale
  for (i, image_name) in enumerate(image_name_list):
    input_path = os.path.join(FLAGS.input_path, image_name)
    output_path = os.path.join(FLAGS.output_path, image_name)

    feed_dict = {
        tf_input_path: input_path,
        tf_output_path: output_path,
        tf_scale: FLAGS.scale
    }

    tf.logging.info('%d/%d, %s' % ((i + 1), len(image_name_list), image_name))
    tf_downscale_session.run(tf_downscale_op, feed_dict=feed_dict)

  # finalize
  tf.logging.info('finished')
Example 14: main
# Required import: import tensorflow [as alias]
# Or: from tensorflow import write_file [as alias]
def main():
  if not args.use_gpu:
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

  # load and build graph
  with tf.Graph().as_default():
    model_input_path = tf.placeholder(tf.string, [])
    model_output_path = tf.placeholder(tf.string, [])

    image = tf.read_file(model_input_path)
    image = [tf.image.decode_png(image, channels=3, dtype=tf.uint8)]
    image = tf.cast(image, tf.float32)

    with tf.gfile.GFile(args.model_name, 'rb') as f:
      model_graph_def = tf.GraphDef()
      model_graph_def.ParseFromString(f.read())

    model_output = tf.import_graph_def(
        model_graph_def, name='model',
        input_map={'sr_input:0': image},
        return_elements=['sr_output:0'])[0]

    model_output = model_output[0, :, :, :]
    model_output = tf.round(model_output)
    model_output = tf.clip_by_value(model_output, 0, 255)
    model_output = tf.cast(model_output, tf.uint8)

    image = tf.image.encode_png(model_output)
    write_op = tf.write_file(model_output_path, image)

    init = tf.global_variables_initializer()

    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=False,
        allow_soft_placement=True
    ))
    sess.run(init)

    # get image path list
    image_path_list = []
    for root, subdirs, files in os.walk(args.input_path):
      for filename in files:
        if filename.lower().endswith('.png'):
          input_path = os.path.join(args.input_path, filename)
          output_path = os.path.join(args.output_path, filename)
          image_path_list.append([input_path, output_path])
    print('Found %d images' % (len(image_path_list)))

    # iterate
    for input_path, output_path in image_path_list:
      print('- %s -> %s' % (input_path, output_path))
      sess.run([write_op], feed_dict={model_input_path: input_path, model_output_path: output_path})

    print('Done')