This article collects typical usage examples of the Python method TensorflowUtils.get_tensor_size. If you are wondering what exactly TensorflowUtils.get_tensor_size does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also look further into other usage examples of the TensorflowUtils module this method belongs to.
The following shows 6 code examples of the TensorflowUtils.get_tensor_size method, sorted by popularity by default.
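None of the examples below show the body of get_tensor_size itself; they only rely on it returning the total number of elements of a tensor, either to normalize an L2 loss or to turn a percentage k into an element count. A minimal sketch consistent with that usage, assuming the TensorFlow 0.x/1.x static-shape API the examples are written against, could look like the following (the actual implementation in TensorflowUtils may differ):

from functools import reduce  # built in on Python 2; imported explicitly so the sketch also runs on Python 3
from operator import mul

def get_tensor_size(tensor):
    # Product of the static dimensions, i.e. the total number of elements.
    # Assumes every dimension is known at graph-construction time, as in the examples below.
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)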
Example 1: main
# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import get_tensor_size [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, MODEL_URL)
    utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
    model_data = get_model_data()
    model_params = {}
    mean = model_data['normalization'][0][0][0]
    model_params['mean_pixel'] = np.mean(mean, axis=(0, 1))
    model_params['weights'] = np.squeeze(model_data['layers'])

    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, model_params['mean_pixel']).astype(np.float32)
    style_net = vgg_net(model_params['weights'], processed_style)
    tf.image_summary("Style_Image", style_image)

    with tf.Session() as sess:
        print "Evaluating style features..."
        style_features = {}
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size
            style_features[layer] = style_gram

        print "Reading image inputs"
        input_image, input_content = read_input(model_params)

        print "Setting up inference"
        output_image = 255 * inference_strided(input_image)

        print "Creating saver.."
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(FLAGS.log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print "Model restored..."

        if FLAGS.mode == "test":
            test(sess, output_image, model_params['mean_pixel'])
            return

        print "Calculating content loss..."
        image_net = vgg_net(model_params['weights'], output_image)
        content_loss = CONTENT_WEIGHT * tf.nn.l2_loss(image_net[CONTENT_LAYER] - input_content) / utils.get_tensor_size(
            input_content)
        print content_loss.get_shape()
        tf.scalar_summary("Content_loss", content_loss)

        print "Calculating style loss..."
        style_losses = []
        for layer in STYLE_LAYERS:
            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5 * tf.nn.l2_loss(image_gram - style_features[layer]))
        style_loss = STYLE_WEIGHT * reduce(tf.add, style_losses)
        print style_loss.get_shape()
        tf.scalar_summary("Style_loss", style_loss)

        print "Calculating variational loss..."
        tv_y_size = utils.get_tensor_size(output_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(output_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(output_image[:, 1:, :, :] - output_image[:, :IMAGE_SIZE - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(output_image[:, :, 1:, :] - output_image[:, :, :IMAGE_SIZE - 1, :]) /
             tv_x_size))
        print tv_loss.get_shape()
        tf.scalar_summary("Variation_loss", tv_loss)

        loss = content_loss + style_loss + tv_loss
        tf.scalar_summary("Total_loss", loss)

        print "Setting up train operation..."
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        print "Setting up summary write"
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph_def)
        summary_op = tf.merge_all_summaries()

        print "initializing all variables"
        sess.run(tf.initialize_all_variables())
        tf.train.start_queue_runners(sess=sess)

        print "Running training..."
        for step in range(MAX_ITERATIONS):
            if step % 10 == 0:
                this_loss, summary_str = sess.run([loss, summary_op])
                summary_writer.add_summary(summary_str, global_step=step)

                print('%s : Step %d' % (datetime.now(), step)),
                print('total loss: %g' % this_loss)

            if step % 100 == 0:
                print ("Step %d" % step),
# ......... part of the code is omitted here .........
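In Example 1, get_tensor_size supplies the normalizers tv_y_size and tv_x_size for the total-variation term, so the smoothness penalty is averaged over the number of pixel differences instead of growing with the output resolution. Below is a standalone NumPy sketch of the same arithmetic; the image shape and weight are invented for illustration and are not the repository's values:

import numpy as np

VARIATION_WEIGHT = 1e-2              # illustrative value only
image = np.random.rand(1, 8, 8, 3)   # dummy NHWC batch standing in for output_image

def l2_loss(x):
    # same definition as tf.nn.l2_loss: sum(x**2) / 2
    return np.sum(x ** 2) / 2.0

tv_y_size = image[:, 1:, :, :].size  # what utils.get_tensor_size(...) returns in the example
tv_x_size = image[:, :, 1:, :].size

tv_loss = VARIATION_WEIGHT * (
    l2_loss(image[:, 1:, :, :] - image[:, :-1, :, :]) / tv_y_size +
    l2_loss(image[:, :, 1:, :] - image[:, :, :-1, :]) / tv_x_size)
print(tv_loss)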
Example 2: scatter_subtract
# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import get_tensor_size [as alias]
def scatter_subtract(variables1, variables2):
    shape = utils.get_tensor_size(variables2)
    # top_k of the negated tensor picks out the k% smallest entries of variables1;
    # `values` holds their negated values and `indices` their positions
    values, indices = tf.nn.top_k(-1 * variables1, tf.cast(k * shape / 100, tf.int32))
    return tf.scatter_sub(variables2, indices, values)
Example 3: scatter_add
# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import get_tensor_size [as alias]
def scatter_add(variables):
    shape = utils.get_tensor_size(variables)
    # indices (and negated values) of the k% smallest entries of `variables`
    values, indices = tf.nn.top_k(-1 * variables, tf.cast(k * shape / 100, tf.int32))
    # adding the negated values back drives exactly those entries to zero
    return tf.scatter_add(variables, indices, values)
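Examples 2 and 3 share one idiom: get_tensor_size returns the number of entries, k * shape / 100 converts a module-level percentage k (defined elsewhere in the original file) into a count, and tf.nn.top_k applied to the negated tensor yields the k% smallest entries together with their indices; because the scatter ops index the first dimension, this only composes cleanly for 1-D variables. The selection step can be mimicked in NumPy with an invented k:

import numpy as np

k = 25  # illustrative percentage; the original module defines its own k
variables = np.array([0.9, -0.1, 0.4, 0.05, -0.7, 0.3, 0.2, 0.8])

shape = variables.size               # utils.get_tensor_size(variables)
count = int(k * shape / 100)         # k% of the entries -> 2 here

indices = np.argsort(variables)[:count]  # positions of the k% smallest entries
values = -variables[indices]             # what tf.nn.top_k(-1 * variables, count) returns as `values`
print(indices, values)                   # [4 1] [0.7 0.1]

Because `values` is the negation of the selected entries, Example 3's tf.scatter_add(variables, indices, values) adds each entry's own negation back and therefore zeroes exactly those k% smallest weights.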
Example 4: main
# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import get_tensor_size [as alias]
def main(argv=None):
    utils.maybe_download_and_extract(FLAGS.model_dir, DATA_URL)
    model_data = get_model_data()
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])

    content_image = get_image(FLAGS.content_path)
    print content_image.shape
    processed_content = utils.process_image(content_image, mean_pixel).astype(np.float32)
    style_image = get_image(FLAGS.style_path)
    processed_style = utils.process_image(style_image, mean_pixel).astype(np.float32)

    content_net = vgg_net(weights, processed_content)
    style_net = vgg_net(weights, processed_style)

    dummy_image = utils.weight_variable(content_image.shape, stddev=np.std(content_image) * 0.1)
    image_net = vgg_net(weights, dummy_image)

    with tf.Session() as sess:
        content_losses = []
        for layer in CONTENT_LAYERS:
            feature = content_net[layer].eval()
            content_losses.append(tf.nn.l2_loss(image_net[layer] - feature))
        content_loss = CONTENT_WEIGHT * reduce(tf.add, content_losses)

        style_losses = []
        for layer in STYLE_LAYERS:
            features = style_net[layer].eval()
            features = np.reshape(features, (-1, features.shape[3]))
            style_gram = np.matmul(features.T, features) / features.size

            image_layer = image_net[layer]
            _, height, width, number = map(lambda i: i.value, image_layer.get_shape())
            size = height * width * number
            feats = tf.reshape(image_layer, (-1, number))
            image_gram = tf.matmul(tf.transpose(feats), feats) / size
            style_losses.append(0.5 * tf.nn.l2_loss(image_gram - style_gram))
        style_loss = STYLE_WEIGHT * reduce(tf.add, style_losses)

        tv_y_size = utils.get_tensor_size(dummy_image[:, 1:, :, :])
        tv_x_size = utils.get_tensor_size(dummy_image[:, :, 1:, :])
        tv_loss = VARIATION_WEIGHT * (
            (tf.nn.l2_loss(dummy_image[:, 1:, :, :] - dummy_image[:, :content_image.shape[1] - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(dummy_image[:, :, 1:, :] - dummy_image[:, :, :content_image.shape[2] - 1, :]) /
             tv_x_size))

        loss = content_loss + style_loss + tv_loss
        train_step = tf.train.MomentumOptimizer(LEARNING_RATE, MOMENTUM).minimize(loss)

        best_loss = float('inf')
        best = None
        sess.run(tf.initialize_all_variables())

        for i in range(1, MAX_ITERATIONS):
            train_step.run()

            if i % 10 == 0 or i == MAX_ITERATIONS - 1:
                this_loss = loss.eval()
                print('Step %d' % (i)),
                print(' total loss: %g' % this_loss)

                if this_loss < best_loss:
                    best_loss = this_loss
                    best = dummy_image.eval()
                    output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
                    scipy.misc.imsave("output_check.png", output)

            if i % 100 == 0 or i == MAX_ITERATIONS - 1:
                print(' content loss: %g' % content_loss.eval()),
                print(' style loss: %g' % style_loss.eval()),
                print(' tv loss: %g' % tv_loss.eval())

        output = utils.unprocess_image(best.reshape(content_image.shape[1:]), mean_pixel)
        scipy.misc.imsave("output.png", output)
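The style term in both main functions is built from Gram matrices: each layer's activations are reshaped to (pixels, channels), the channel-by-channel inner products are taken, and the result is divided by the number of activations. A per-layer NumPy sketch, with invented shapes, makes the bookkeeping explicit:

import numpy as np

# activations of one VGG layer for the style image and for the generated image (shapes invented)
style_features = np.random.rand(1, 16, 16, 64)
image_features = np.random.rand(1, 16, 16, 64)

def gram(features):
    # (1, H, W, C) -> (H*W, C), then channel correlations normalized by H*W*C
    flat = features.reshape(-1, features.shape[3])
    return np.matmul(flat.T, flat) / flat.size

def l2_loss(x):
    return np.sum(x ** 2) / 2.0  # matches tf.nn.l2_loss

style_layer_loss = 0.5 * l2_loss(gram(image_features) - gram(style_features))
print(style_layer_loss)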
Example 5: scatter_restore
# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import get_tensor_size [as alias]
def scatter_restore(saliency, variables1, variables2):
    shape = utils.get_tensor_size(variables2)
    # positions of the k% lowest-saliency entries
    values, indices = tf.nn.top_k(-1 * saliency, tf.cast(k * shape / 100, tf.int32))
    # overwrite those positions in variables2 with the saved values from variables1
    values = tf.gather(variables1, indices)
    return tf.scatter_update(variables2, indices, values)
Example 6: scatter_update
# Required module: import TensorflowUtils [as alias]
# Or: from TensorflowUtils import get_tensor_size [as alias]
def scatter_update(saliency, variables):
    shape = utils.get_tensor_size(variables)
    # print(utils.get_tensor_size(saliency))
    # print(shape)
    # zero out the k% of entries with the lowest saliency
    values, indices = tf.nn.top_k(-1 * saliency, tf.cast(k * shape / 100, tf.int32))
    return tf.scatter_update(variables, indices, tf.zeros_like(values))
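Examples 5 and 6 form a pair: scatter_update zeroes the k% of weights with the lowest saliency, and scatter_restore later writes the original values for those same positions back from an untouched copy. The round trip, mimicked in NumPy with made-up data and k (the originals act on 1-D tf.Variable tensors):

import numpy as np

k = 50                                       # illustrative percentage
saliency = np.array([0.9, 0.1, 0.5, 0.05])   # importance score per weight
weights  = np.array([1.0, 2.0, 3.0, 4.0])    # live weights (variables / variables2)
backup   = weights.copy()                    # saved copy (variables1 in scatter_restore)

count = int(k * weights.size / 100)          # utils.get_tensor_size(...) -> .size here
indices = np.argsort(saliency)[:count]       # k% lowest-saliency positions (top_k of -saliency)

weights[indices] = 0.0                       # Example 6: scatter_update with tf.zeros_like(values)
print(weights)                               # [1. 0. 3. 0.]

weights[indices] = backup[indices]           # Example 5: tf.gather + tf.scatter_update
print(weights)                               # [1. 2. 3. 4.]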