This article collects typical usage examples of the tensorflow.pack method in Python. If you are wrestling with questions like: what exactly does tensorflow.pack do, how is it used, and what does it look like in real code, then the curated examples below may help. You can also explore further usage examples from the tensorflow module that the method belongs to.
The following presents 15 code examples of tensorflow.pack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
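As a quick orientation before the examples: tf.pack stacks a list of rank-R tensors into a single rank-(R+1) tensor along a chosen axis; it was renamed tf.stack in TensorFlow 1.0, which is why some of the tests below exercise both names. A minimal sketch, assuming a pre-1.0 TensorFlow (e.g. 0.12) where tf.pack still exists:

import tensorflow as tf

x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
packed = tf.pack([x, y, z])                # shape [3, 2], like np.stack
packed_axis1 = tf.pack([x, y, z], axis=1)  # shape [2, 3]

with tf.Session() as sess:
    print(sess.run(packed))        # [[1 4] [2 5] [3 6]]
    print(sess.run(packed_axis1))  # [[1 2 3] [4 5 6]]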
Example 1: lp_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def lp_loss(gen_frames, gt_frames, l_num):
    """
    Calculates the sum of lp losses between the predicted and ground truth frames.

    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale.
    @param l_num: 1 or 2 for l1 and l2 loss, respectively.
    @return: The lp loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(gen_frames)):
        scale_losses.append(tf.reduce_sum(tf.abs(gen_frames[i] - gt_frames[i]) ** l_num))
    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses))
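A hedged usage sketch for the function above (the two-scale setup and shapes are illustrative assumptions, not the original project's configuration):

# Illustrative: two scales of predicted vs. ground-truth frames, batch of 4.
gen_frames = [tf.ones([4, 32, 32, 3]), tf.ones([4, 64, 64, 3])]
gt_frames = [tf.zeros([4, 32, 32, 3]), tf.zeros([4, 64, 64, 3])]
loss_op = lp_loss(gen_frames, gt_frames, l_num=2)  # mean over per-scale L2 sums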
Example 2: adv_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def adv_loss(preds, labels):
    """
    Calculates the sum of BCE losses between the predicted classifications and true labels.

    @param preds: The predicted classifications at each scale.
    @param labels: The true labels. (Same for every scale.)
    @return: The adversarial loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(preds)):
        loss = bce_loss(preds[i], labels)
        scale_losses.append(loss)
    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses))
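bce_loss is defined elsewhere in that project; a plausible minimal sketch of such a helper (an assumption for illustration, not the project's exact code):

def bce_loss(preds, targets):
    # Hypothetical helper: elementwise binary cross-entropy, averaged.
    # Assumes preds are sigmoid outputs in (0, 1); clipping avoids log(0).
    preds = tf.clip_by_value(preds, 1e-7, 1.0 - 1e-7)
    return tf.reduce_mean(-(targets * tf.log(preds) +
                            (1.0 - targets) * tf.log(1.0 - preds)))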
Example 3: __call__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def __call__(self, input_layer, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d"):
    output_shape[0] = input_layer.shape[0]
    ts_output_shape = tf.pack(output_shape)
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = self.variable('w', [k_h, k_w, output_shape[-1], input_layer.shape[-1]],
                          init=tf.random_normal_initializer(stddev=stddev))
        try:
            deconv = tf.nn.conv2d_transpose(input_layer, w,
                                            output_shape=ts_output_shape,
                                            strides=[1, d_h, d_w, 1])
        # Support for versions of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_layer, w, output_shape=ts_output_shape,
                                    strides=[1, d_h, d_w, 1])
        # biases = self.variable('biases', [output_shape[-1]], init=tf.constant_initializer(0.0))
        # deconv = tf.reshape(tf.nn.bias_add(deconv, biases), [-1] + output_shape[1:])
        deconv = tf.reshape(deconv, [-1] + output_shape[1:])
        return deconv
Example 4: loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def loss(self, img_batch, label_batch):
    """Create the network, run inference on the input batch and compute loss.

    Args:
      img_batch: batch of pre-processed images.
      label_batch: batch of ground-truth labels.

    Returns:
      Pixel-wise softmax loss.
    """
    raw_output = self._create_network(tf.cast(img_batch, tf.float32), keep_prob=tf.constant(0.5))
    prediction = tf.reshape(raw_output, [-1, n_classes])
    # Need to resize labels and convert using one-hot encoding.
    label_batch = self.prepare_label(label_batch, tf.pack(raw_output.get_shape()[1:3]))
    gt = tf.reshape(label_batch, [-1, n_classes])
    # Pixel-wise softmax loss.
    loss = tf.nn.softmax_cross_entropy_with_logits(prediction, gt)
    reduced_loss = tf.reduce_mean(loss)
    return reduced_loss
Example 5: one_hot_encoding
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def one_hot_encoding(labels, num_classes, scope=None):
    """Transform numeric labels into onehot_labels.

    Args:
      labels: [batch_size] target labels.
      num_classes: total number of classes.
      scope: Optional scope for op_scope.

    Returns:
      one hot encoding of the labels.
    """
    with tf.op_scope([labels], scope, 'OneHotEncoding'):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels
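For comparison, later TensorFlow versions ship tf.one_hot, which collapses this expand_dims/concat/sparse_to_dense pattern into a single call (a sketch, assuming labels is an integer tensor of shape [batch_size]):

onehot_labels = tf.one_hot(labels, depth=num_classes, on_value=1.0, off_value=0.0)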
Example 6: process_image
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def process_image(img, scale, isotropic, crop, mean):
    '''Crops, scales, and normalizes the given image.

    scale: The image will first be scaled to this size.
           If isotropic is true, the smaller side is rescaled to this,
           preserving the aspect ratio.
    crop:  After scaling, a central crop of this size is taken.
    mean:  Subtracted from the image.
    '''
    # Rescale
    if isotropic:
        img_shape = tf.to_float(tf.shape(img)[:2])
        min_length = tf.minimum(img_shape[0], img_shape[1])
        new_shape = tf.to_int32((scale / min_length) * img_shape)
    else:
        new_shape = tf.pack([scale, scale])
    img = tf.image.resize_images(img, new_shape[0], new_shape[1])
    # Center crop
    # Use the slice workaround until crop_to_bounding_box supports deferred tensor shapes
    # See: https://github.com/tensorflow/tensorflow/issues/521
    offset = (new_shape - crop) / 2
    img = tf.slice(img, begin=tf.pack([offset[0], offset[1], 0]), size=tf.pack([crop, crop, -1]))
    # Mean subtraction
    return tf.to_float(img) - mean
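A call sketch (the file name and mean values are illustrative assumptions; the mean shown is the common Caffe-style channel mean):

raw = tf.image.decode_jpeg(tf.read_file('example.jpg'), channels=3)
processed = process_image(raw, scale=256, isotropic=True, crop=224,
                          mean=tf.constant([104., 117., 124.]))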
Example 7: one_hot_encoding
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def one_hot_encoding(labels, num_classes, scope=None):
    """Transform numeric labels into onehot_labels.

    Args:
      labels: [batch_size] target labels.
      num_classes: total number of classes.
      scope: Optional scope for name_scope.

    Returns:
      one hot encoding of the labels.
    """
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat([indices, labels], 1)
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels
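A side-by-side note: Example 7 differs from Example 5 only in its scoping call (tf.name_scope replacing the since-deprecated tf.op_scope, with the arguments rearranged) and in tf.concat, whose argument order flipped to (values, axis) in TensorFlow 1.0.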
Example 8: tf_ms_ssim
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
    weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
    mssim = []
    mcs = []
    for l in range(level):
        ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
        mssim.append(tf.reduce_mean(ssim_map))
        mcs.append(tf.reduce_mean(cs_map))
        # Downsample by 2 before computing the next scale.
        filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        img1 = filtered_im1
        img2 = filtered_im2
    # list to tensor of dim D+1
    mssim = tf.pack(mssim, axis=0)
    mcs = tf.pack(mcs, axis=0)
    value = (tf.reduce_prod(mcs[0:level - 1] ** weight[0:level - 1]) *
             (mssim[level - 1] ** weight[level - 1]))
    if mean_metric:
        value = tf.reduce_mean(value)
    return value
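Usage sketch: tf_ms_ssim expects 4-D NHWC batches whose spatial size survives level-1 halvings, and it relies on a tf_ssim helper defined in the same file (the inputs here are illustrative):

img_a = tf.random_uniform([1, 256, 256, 1])
img_b = tf.random_uniform([1, 256, 256, 1])
msssim_op = tf_ms_ssim(img_a, img_b, mean_metric=True, level=5)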
Example 9: build_skip_conn_attn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def build_skip_conn_attn(cnn_channels, h_cnn_time, x_time, timespan):
    """Build skip connection for attention based model."""
    skip = [None]
    skip_ch = [0]
    nlayers = len(h_cnn_time[0])
    timespan = len(h_cnn_time)
    for jj in range(nlayers):
        lidx = nlayers - jj - 2
        if lidx >= 0:
            ll = [h_cnn_time[tt][lidx] for tt in range(timespan)]
        else:
            ll = x_time
        layer = tf.concat(1, [tf.expand_dims(l, 1) for l in ll])
        ss = tf.shape(layer)
        layer = tf.reshape(layer, tf.pack([-1, ss[2], ss[3], ss[4]]))
        skip.append(layer)
        ch_idx = lidx + 1
        skip_ch.append(cnn_channels[ch_idx])
    return skip, skip_ch
Example 10: _build_global_context
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def _build_global_context(
        net,
        is_training=False,
        bayesian=False,
        dropout_keep_prob=0.8):
    with tf.variable_scope('GlobalContext'):
        # Reduce feature dimension before LSTM to reduce param count
        net = slim.conv2d(net, 1024, 1, padding='VALID', scope='conv_reduce_1x1')
        #net = slim.dropout(net, dropout_keep_prob, is_training=bayesian or is_training, scope='Dropout')
        rows = tf.unpack(net, axis=1)
        net = tf.pack(
            [lstm.bidir_lstm(r, 512, scope='row%d' % i) for i, r in enumerate(rows)],
            axis=1)
        print('Horizontal LSTM', net.get_shape())
        cols = tf.unpack(net, axis=2)
        net = tf.pack(
            [lstm.bidir_lstm(c, 512, scope='col%d' % i) for i, c in enumerate(cols)],
            axis=2)
        print('Vertical LSTM', net.get_shape())
        return net
Example 11: _merge_outputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def _merge_outputs(outputs, weights):
    assert outputs
    merged = defaultdict(list)
    weights_tensor = tf.pack(weights)
    print('weights ', weights_tensor.get_shape())
    # Recombine multiple model outputs by dict key or list position
    # under an output-name-based dict.
    if isinstance(outputs[0], dict):
        for o in outputs:
            for name, tensor in o.items():
                merged['output_%s' % name].append(tensor)
    elif isinstance(outputs[0], list):
        for o in outputs:
            for index, tensor in enumerate(o):
                merged['output_%d' % index].append(tensor)
    else:
        merged['output'] = outputs
    reduced = {name: _weighted_mean(value_list, weights_tensor) for name, value_list in merged.items()}
    for k, v in reduced.items():
        print(k, v, v.get_shape())
    return reduced
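_weighted_mean is another helper from that codebase; a sketch consistent with how it is called here (an assumption, not the original implementation):

def _weighted_mean(tensor_list, weights_tensor):
    # Hypothetical: pack the per-model outputs along a new leading axis,
    # weight each model's output, and normalize by the total weight.
    # Assumes the static rank of the packed tensor is known.
    packed = tf.pack(tensor_list)  # [num_models, ...]
    ndims = packed.get_shape().ndims
    weights = tf.reshape(weights_tensor, [-1] + [1] * (ndims - 1))
    return tf.reduce_sum(packed * weights, 0) / tf.reduce_sum(weights_tensor)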
Example 12: testGradientsAxis1
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def testGradientsAxis1(self):
    np.random.seed(7)
    for shape in (2, 3), (3, 2), (4, 3, 2):
        data = np.random.randn(*shape)
        shapes = [shape[1:]] * shape[0]
        out_shape = list(shape[1:])
        out_shape.insert(1, shape[0])
        with self.test_session(use_gpu=True):
            # TODO(irving): Remove list() once we handle maps correctly
            xs = list(map(tf.constant, data))
            c = tf.pack(xs, axis=1)
            err = tf.test.compute_gradient_error(xs, shapes, c, out_shape)
            self.assertLess(err, 1e-6)
            # The renamed op tf.stack must produce identical gradients.
            c = tf.stack(xs, axis=1)
            err = tf.test.compute_gradient_error(xs, shapes, c, out_shape)
            self.assertLess(err, 1e-6)
Example 13: testAgainstNumpy
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def testAgainstNumpy(self):
    # For 1 to 5 dimensions.
    for i in range(1, 6):
        expected = np.random.random(np.random.permutation(i) + 1)
        # For all the possible axes to split it, including negative indices.
        for j in range(-i, i):
            test_arrays = np_split_squeeze(expected, j)
            with self.test_session(use_gpu=True):
                actual_pack = tf.pack(test_arrays, axis=j)
                self.assertEqual(expected.shape, actual_pack.get_shape())
                actual_pack = actual_pack.eval()
                # The second case should exercise the renamed op, tf.stack.
                actual_stack = tf.stack(test_arrays, axis=j)
                self.assertEqual(expected.shape, actual_stack.get_shape())
                actual_stack = actual_stack.eval()
            self.assertNDArrayNear(expected, actual_pack, 1e-6)
            self.assertNDArrayNear(expected, actual_stack, 1e-6)
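np_split_squeeze is a small NumPy helper defined alongside the test; a sketch consistent with its use here (an assumption):

def np_split_squeeze(array, axis):
    # Hypothetical: split `array` into size-1 slices along `axis`,
    # then squeeze that axis away, yielding a list of subarrays.
    axis_len = array.shape[axis]
    return [np.squeeze(arr, axis=(axis,))
            for arr in np.split(array, axis_len, axis=axis)]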
Example 14: test_5th_order_polynomial
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def test_5th_order_polynomial(self):
    # this should be an exact fit
    f = lambda x: x ** 4 + x ** 3 - 2 * x ** 2 + 4 * x + 5
    f_prime = lambda x: 4 * x ** 3 + 3 * x ** 2 - 4 * x + 4
    coeffs = odes._interp_fit(
        f(0.0), f(10.0), f(5.0), f_prime(0.0), f_prime(10.0), 10.0)
    times = np.linspace(0, 10, dtype=np.float32)
    y_fit = tf.pack([odes._interp_evaluate(coeffs, 0.0, 10.0, t)
                     for t in times])
    y_expected = f(times)
    with self.test_session() as sess:
        y_actual = sess.run(y_fit)
        self.assertAllClose(y_expected, y_actual)
    # attempt interpolation outside bounds
    y_invalid = odes._interp_evaluate(coeffs, 0.0, 10.0, 100.0)
    with self.test_session() as sess:
        with self.assertRaises(tf.errors.InvalidArgumentError):
            sess.run(y_invalid)
Example 15: deconv2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import pack [as alias]
def deconv2d(x, out_shape, name, filter_size=(3, 3), stride=(1, 1), pad="SAME",
             dtype=tf.float32, collections=None, prevNumFeat=None):
    with tf.variable_scope(name):
        num_filters = out_shape[-1]
        prevNumFeat = int(x.get_shape()[3]) if prevNumFeat is None else prevNumFeat
        stride_shape = [1, stride[0], stride[1], 1]
        # transpose_filter : [height, width, out_channels, in_channels]
        filter_shape = [filter_size[0], filter_size[1], num_filters, prevNumFeat]
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:2]) * prevNumFeat
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width"
        fan_out = np.prod(filter_shape[:3])
        # initialize weights with random weights (Glorot-style uniform bound)
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        deconv2d = tf.nn.conv2d_transpose(x, w, tf.pack(out_shape), stride_shape, pad)
        # deconv2d = tf.reshape(tf.nn.bias_add(deconv2d, b), deconv2d.get_shape())
        return deconv2d
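A call sketch for deconv2d (shapes are illustrative assumptions): doubling the spatial size of a [16, 8, 8, 64] feature map requires stride (2, 2) and an out_shape whose batch dimension is fixed, since tf.pack(out_shape) turns the Python list into a tensor:

x = tf.placeholder(tf.float32, [16, 8, 8, 64])
y = deconv2d(x, out_shape=[16, 16, 16, 32], name='up1',
             filter_size=(4, 4), stride=(2, 2))  # -> [16, 16, 16, 32]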