This article collects typical usage examples of the tensorflow.reshape function in Python. If you have been asking yourself what tf.reshape does, how to call it, or what it looks like in real code, the curated examples below should help.
15 code examples of reshape are shown, ordered by popularity by default.
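As a quick orientation before the examples, here is a minimal sketch of tf.reshape itself (written for eager execution under TF 2.x for brevity; the examples below are mostly TF 1.x graph code). The total element count must be preserved, and a single -1 entry lets TensorFlow infer that dimension:

import tensorflow as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])   # shape (2, 3)
flat = tf.reshape(x, [-1])                # shape (6,): -1 infers the size
back = tf.reshape(flat, [3, 2])           # shape (3, 2): element count unchanged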
Example 1: project_bilstm_layer
def project_bilstm_layer(self, lstm_outputs, name=None):
    """
    Hidden layer between the LSTM layer and the logits.
    :param lstm_outputs: [batch_size, num_steps, emb_size]
    :return: [batch_size, num_steps, num_tags]
    """
    with tf.variable_scope("project" if not name else name):
        with tf.variable_scope("hidden"):
            W = tf.get_variable("W", shape=[self.hidden_unit * 2, self.hidden_unit],
                                dtype=tf.float32, initializer=self.initializers.xavier_initializer())
            b = tf.get_variable("b", shape=[self.hidden_unit], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            output = tf.reshape(lstm_outputs, shape=[-1, self.hidden_unit * 2])
            hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))
        # project to score of tags
        with tf.variable_scope("logits"):
            W = tf.get_variable("W", shape=[self.hidden_unit, self.num_labels],
                                dtype=tf.float32, initializer=self.initializers.xavier_initializer())
            b = tf.get_variable("b", shape=[self.num_labels], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            pred = tf.nn.xw_plus_b(hidden, W, b)
        return tf.reshape(pred, [-1, self.seq_length, self.num_labels])
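The two reshapes above implement a common token-classification pattern: collapse [batch, time, features] to [batch*time, features], apply a dense projection, then restore the time axis. A self-contained sketch of the same idea with made-up sizes (eager TF 2.x):

import tensorflow as tf

batch, steps, feat, tags = 2, 5, 8, 3
lstm_out = tf.random.normal([batch, steps, feat])
W = tf.random.normal([feat, tags])
flat = tf.reshape(lstm_out, [-1, feat])                        # (batch*steps, feat)
logits = tf.reshape(tf.matmul(flat, W), [batch, steps, tags])  # time axis restored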
Example 2: iris_input_fn
def iris_input_fn(num_epochs=None):
    iris = tf.contrib.learn.datasets.load_iris()
    features = tf.reshape(tf.constant(iris.data), [-1, 4])
    if num_epochs:
        features = tf.train.limit_epochs(features, num_epochs=num_epochs)
    target = tf.reshape(tf.constant(iris.target), [-1])
    return features, target
Example 3: make_net
def make_net(self, input_images, input_measurements, input_actions, input_objectives, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    self.fc_val_params = np.copy(self.fc_joint_params)
    self.fc_val_params['out_dims'][-1] = self.target_dim
    self.fc_adv_params = np.copy(self.fc_joint_params)
    self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
    p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
    p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
    p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
    if isinstance(self.fc_obj_params, np.ndarray):
        p_obj_fc = my_ops.fc_net(input_objectives, self.fc_obj_params, 'p_obj_fc', msra_coeff=0.9)
        p_concat_fc = tf.concat([p_img_fc, p_meas_fc, p_obj_fc], 1)
    else:
        p_concat_fc = tf.concat([p_img_fc, p_meas_fc], 1)
        if self.random_objective_coeffs:
            raise Exception('Need fc_obj_params with randomized objectives')
    p_val_fc = my_ops.fc_net(p_concat_fc, self.fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
    p_adv_fc = my_ops.fc_net(p_concat_fc, self.fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
    adv_reshape = tf.reshape(p_adv_fc, [-1, len(self.net_discrete_actions), self.target_dim])
    pred_all_nomean = adv_reshape - tf.reduce_mean(adv_reshape, reduction_indices=1, keep_dims=True)
    pred_all = pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.target_dim])
    pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))
    return pred_all, pred_relevant
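Here reshape drives a dueling-style decomposition: the advantage head is reshaped to [batch, actions, targets] so its per-action mean can be subtracted, and the value head is added back through a broadcastable [batch, 1, targets] reshape. The combination step in isolation (hypothetical sizes, eager TF 2.x):

import tensorflow as tf

batch, n_actions, target_dim = 4, 3, 2
adv = tf.random.normal([batch, n_actions * target_dim])
val = tf.random.normal([batch, target_dim])
adv = tf.reshape(adv, [-1, n_actions, target_dim])
adv_centered = adv - tf.reduce_mean(adv, axis=1, keepdims=True)   # center over actions
pred_all = adv_centered + tf.reshape(val, [-1, 1, target_dim])    # broadcasts over actions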
Example 4: one_minus_pseudo_unitcell_transfer_op
def one_minus_pseudo_unitcell_transfer_op(direction, mps, left_dominant,
                                          right_dominant, vector):
    """
    Calculates the action of (1 - Transfer-Operator) + |r)(l|.

    Parameters:
    ---------------------------
    direction: int or str
        if (1, 'l', 'left'):   do left multiplication
        if (-1, 'r', 'right'): do right multiplication
    mps: InfiniteMPSCentralGauge object
        an infinite mps
    left_dominant: tf.Tensor of shape (mps.D[0], mps.D[0])
        left dominant eigenvector of the unit-cell transfer operator of mps
    right_dominant: tf.Tensor of shape (mps.D[-1], mps.D[-1])
        right dominant eigenvector of the unit-cell transfer operator of mps
    vector: tf.Tensor of shape (mps.D[0] * mps.D[0]) or (mps.D[-1] * mps.D[-1])
        the input vector

    Returns
    ---------------------------
    np.ndarray of shape (mps.D[0] * mps.D[0]) or (mps.D[-1] * mps.D[-1])
    """
    if direction in (1, 'l', 'left'):
        x = tf.reshape(tf.convert_to_tensor(vector), (mps.D[0], mps.D[0]))
        temp = x - mps.unitcell_transfer_op('left', x) + ncon(
            [x, right_dominant], [[1, 2], [1, 2]]) * left_dominant
        return tf.reshape(temp, [mps.D[-1] * mps.D[-1]]).numpy()
    if direction in (-1, 'r', 'right'):
        x = tf.reshape(tf.convert_to_tensor(vector), [mps.D[-1], mps.D[-1]])
        temp = x - mps.unitcell_transfer_op('right', x) + ncon(
            [left_dominant, x], [[1, 2], [1, 2]]) * right_dominant
        return tf.reshape(temp, [mps.D[0] * mps.D[0]]).numpy()
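In this example reshape only converts between the flat vector that an external eigensolver hands in and the (D, D) matrix the transfer operator acts on. The round trip by itself, with a stand-in for the operator (hypothetical D, eager TF 2.x):

import tensorflow as tf

D = 4
vector = tf.random.normal([D * D])   # flat form from the solver
x = tf.reshape(vector, (D, D))       # matrix form for the operator
y = x - 0.5 * x                      # stand-in for the operator action
out = tf.reshape(y, [D * D])         # flat form handed back to the solver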
Example 5: create_output
def create_output(decoder_output, rows, cols, targets, hparams):
    """Creates output from decoder output and vars.

    Args:
      decoder_output: Tensor of shape [batch, ...], where ... can be any rank
        such that the number of elements is batch * rows * cols *
        hparams.hidden_size.
      rows: Integer representing number of rows in a 2-D data point.
      cols: Integer representing number of columns in a 2-D data point.
      targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
        hparams.num_channels].
      hparams: tf.contrib.training.HParams set.

    Returns:
      Tensor of shape [batch, hparams.img_len, hparams.img_len,
      hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
      [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
      In the special case of predict mode, it is a Tensor of rank 5.
    """
    decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
    depth = common_layers.shape_list(decoded_image)[-1]
    batch, height, width, channels = common_layers.shape_list(targets)
    likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
        y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
        output = y[:, :height, :, :, :]
    elif likelihood == DistributionType.CAT:
        # Unpack the cols dimension of the Categorical.
        output = tf.reshape(decoded_image,
                            [batch, height, width, channels, depth])
    else:
        output = decoded_image
    return output
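The CAT branch's reshape gives the categorical logits their own trailing axis, one 256-way distribution per pixel per channel. The same unpacking with made-up sizes (eager TF 2.x; the relation between the packed and unpacked shapes is my assumption here):

import tensorflow as tf

batch, height, width, channels, depth = 1, 4, 4, 3, 256
decoded = tf.random.normal([batch, height, width, channels * depth])
output = tf.reshape(decoded, [batch, height, width, channels, depth])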
Example 6: SoftThreshold
def SoftThreshold(t, threshold_ratio, name=None):
    """Soft-threshold a tensor by the mean value.

    Soft-thresholds each dimension-0 vector (for a matrix, each column) by the
    mean of its absolute values multiplied by the threshold_ratio factor. Each
    column is thresholded separately because each corresponds to a unit in a
    layer.

    Args:
      t: the input tensor.
      threshold_ratio: the threshold ratio.
      name: the optional name for the returned tensor.

    Returns:
      the thresholded tensor, where each entry is soft-thresholded by
      threshold_ratio times the mean of the absolute values of its column.
    """
    assert threshold_ratio >= 0
    with tf.op_scope([t, threshold_ratio], name, "soft_thresholding") as name:
        saved_shape = tf.shape(t)
        # Collapse all trailing axes; -1 must be wrapped as [-1] so it is
        # rank-1 and can be concatenated with the leading dimension.
        t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), [-1]]))
        t_abs = tf.abs(t2)
        t_x = tf.sign(t2) * tf.nn.relu(
            t_abs - tf.reduce_mean(t_abs, [0], keep_dims=True) * threshold_ratio)
        return tf.reshape(t_x, saved_shape, name=name)
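The opening reshape collapses all trailing axes so every "column" is thresholded uniformly, and the closing reshape restores the saved shape. A simplified eager reformulation of the column-wise soft threshold (my own sketch, not the library's API):

import tensorflow as tf

t = tf.constant([[1.0, -2.0], [0.1, 0.3], [-0.5, 4.0]])
ratio = 0.5
col_mean = tf.reduce_mean(tf.abs(t), axis=0, keepdims=True)   # per-column mean |t|
out = tf.sign(t) * tf.nn.relu(tf.abs(t) - ratio * col_mean)   # shrink toward zero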
Example 7: forward_propagation
def forward_propagation(images):
    with tf.variable_scope('conv1') as scope:
        W_conv1 = weight_variable([5, 5, 3, 32])
        b_conv1 = bias_variable([32])
        image_matrix = tf.reshape(images, [-1, 1750, 1750, 3])
        h_conv1 = tf.nn.sigmoid(conv2d(image_matrix, W_conv1) + b_conv1)
        _activation_summary(h_conv1)
        h_pool1 = max_pool_5x5(h_conv1)
    with tf.variable_scope('conv2') as scope:
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
        _activation_summary(h_conv2)
        h_pool2 = max_pool_5x5(h_conv2)
    with tf.variable_scope('conv3') as scope:
        W_conv3 = weight_variable([5, 5, 64, 128])
        b_conv3 = bias_variable([128])
        h_conv3 = tf.nn.sigmoid(conv2d(h_pool2, W_conv3) + b_conv3)
        _activation_summary(h_conv3)
        h_pool3 = max_pool_5x5(h_conv3)
    with tf.variable_scope('local3') as scope:
        W_fc1 = weight_variable([14 * 14 * 128, 256])
        b_fc1 = bias_variable([256])
        h_pool3_flat = tf.reshape(h_pool3, [-1, 14 * 14 * 128])
        h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
        _activation_summary(h_fc1)
    keep_prob = tf.Variable(1.0)
    W_fc2 = weight_variable([256, 4])
    b_fc2 = bias_variable([4])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
    _activation_summary(y_conv)
    return y_conv
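The first reshape in forward_propagation turns a flat pixel buffer into the NHWC layout that conv2d expects, with -1 keeping the batch size dynamic. Minimal version with toy 4x4 RGB images (eager TF 2.x):

import tensorflow as tf

flat = tf.random.normal([2, 4 * 4 * 3])    # two flattened 4x4 RGB images
images = tf.reshape(flat, [-1, 4, 4, 3])   # NHWC; -1 infers the batch size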
Example 8: buildSpImageConverter
def buildSpImageConverter(channelOrder, img_dtype):
    """
    Convert an imageIO byte-encoded image into an image tensor suitable as input to ConvNets.
    The name of the input must be a subset of those specified in `image.imageIO.imageSchema`.
    :param img_dtype: the type of data the underlying image bytes represent
    """
    with IsolatedSession() as issn:
        # Flat image data -> image dimensions
        # This has to conform to `imageIO.imageSchema`
        height = tf.placeholder(tf.int32, [], name="height")
        width = tf.placeholder(tf.int32, [], name="width")
        num_channels = tf.placeholder(tf.int32, [], name="nChannels")
        image_buffer = tf.placeholder(tf.string, [], name="data")

        # The image is packed into bytes with height as the leading dimension,
        # which is the default behavior of the Python Imaging Library.
        shape = tf.reshape(tf.stack([height, width, num_channels], axis=0),
                           shape=(3,), name='shape')
        if img_dtype == 'uint8':
            image_uint8 = tf.decode_raw(image_buffer, tf.uint8, name="decode_raw")
            image_float = tf.to_float(image_uint8)
        elif img_dtype == 'float32':
            image_float = tf.decode_raw(image_buffer, tf.float32, name="decode_raw")
        else:
            raise ValueError('''unsupported image data type "%s", currently only know how to
            handle uint8 and float32''' % img_dtype)
        image_reshaped = tf.reshape(image_float, shape, name="reshaped")
        image_reshaped = imageIO.fixColorChannelOrdering(channelOrder, image_reshaped)
        image_input = tf.expand_dims(image_reshaped, 0, name="image_input")
        gfn = issn.asGraphFunction([height, width, image_buffer, num_channels], [image_input])
    return gfn
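Worth noting: the shape argument to tf.reshape can itself be a tensor, which is how this example assembles the target shape from scalar height/width/channel inputs at graph-construction time. Eagerly:

import tensorflow as tf

h, w, c = tf.constant(2), tf.constant(3), tf.constant(1)
buf = tf.range(6, dtype=tf.float32)
shape = tf.stack([h, w, c])    # a rank-1 int tensor, not a Python list
img = tf.reshape(buf, shape)   # shape (2, 3, 1)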
Example 9: read_data
def read_data(self, filename_queue, has_3d=False):
    with tf.name_scope(None, 'read_data', [filename_queue]):
        reader = tf.TFRecordReader()
        _, example_serialized = reader.read(filename_queue)
        if has_3d:
            image, image_size, label, center, fname, pose, shape, gt3d, has_smpl3d = data_utils.parse_example_proto(
                example_serialized, has_3d=has_3d)
            # Need to send pose because the image can get flipped.
            image, label, pose, gt3d = self.image_preprocessing(
                image, image_size, label, center, pose=pose, gt3d=gt3d)
            # Convert pose to rotation. Do not ignore the global rotation!
            rotations = batch_rodrigues(tf.reshape(pose, [-1, 3]))
            gt3d_flat = tf.reshape(gt3d, [-1])
            # label3d is [rotations, shape-beta, 3D joints]:
            # [216 = 24*3*3, 10, 42 = 14*3]
            label3d = tf.concat(
                [tf.reshape(rotations, [-1]), shape, gt3d_flat], 0)
        else:
            image, image_size, label, center, fname = data_utils.parse_example_proto(
                example_serialized)
            image, label = self.image_preprocessing(
                image, image_size, label, center)
        # label should be K x 3
        label = tf.transpose(label)
        if has_3d:
            return image, label, label3d, has_smpl3d
        else:
            return image, label
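The reshape to [-1, 3] regroups the flat pose vector into one axis-angle triple per joint for the Rodrigues conversion, and reshape([-1]) flattens results again for concatenation. In isolation (24 joints, as in the comment above):

import tensorflow as tf

pose = tf.random.normal([72])              # 24 joints x 3 axis-angle params
per_joint = tf.reshape(pose, [-1, 3])      # (24, 3): one row per joint
flat_again = tf.reshape(per_joint, [-1])   # back to (72,) for tf.concat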
Example 10: conv_net
def conv_net(_X, _weights, _biases, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
    # Convolution layer
    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
    # Max pooling (down-sampling)
    conv1 = max_pool(conv1, k=2)
    # Apply dropout
    conv1 = tf.nn.dropout(conv1, _dropout)
    # Convolution layer
    conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])
    # Max pooling (down-sampling)
    conv2 = max_pool(conv2, k=2)
    # Apply dropout
    conv2 = tf.nn.dropout(conv2, _dropout)
    # Fully connected layer: reshape conv2 output to fit the dense layer input
    dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1']))  # ReLU activation
    dense1 = tf.nn.dropout(dense1, _dropout)  # Apply dropout
    # Output, class prediction
    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
    return out
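The dense-layer reshape reads its target width off the weight matrix itself, so the flattening always matches however 'wd1' was declared. The same trick standalone (toy sizes, eager TF 2.x):

import tensorflow as tf

conv_out = tf.random.normal([2, 7, 7, 64])
wd1 = tf.random.normal([7 * 7 * 64, 128])
flat = tf.reshape(conv_out, [-1, wd1.get_shape().as_list()[0]])  # (2, 3136)
dense = tf.matmul(flat, wd1)                                     # (2, 128)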
Example 11: knn_point
def knn_point(k, xyz1, xyz2):
    '''
    Input:
        k: int32, number of neighbors in the k-nn search
        xyz1: (batch_size, ndataset, c) float32 array, input points
        xyz2: (batch_size, npoint, c) float32 array, query points
    Output:
        val: (batch_size, npoint, k) float32 array, L2 distances
        idx: (batch_size, npoint, k) int32 array, indices into the input points
    '''
    b = xyz1.get_shape()[0].value
    n = xyz1.get_shape()[1].value
    c = xyz1.get_shape()[2].value
    m = xyz2.get_shape()[1].value
    print(b, n, c, m)
    print(xyz1, (b, 1, n, c))
    xyz1 = tf.tile(tf.reshape(xyz1, (b, 1, n, c)), [1, m, 1, 1])
    xyz2 = tf.tile(tf.reshape(xyz2, (b, m, 1, c)), [1, 1, n, 1])
    dist = tf.reduce_sum((xyz1 - xyz2) ** 2, -1)
    print(dist, k)
    outi, out = select_top_k(k, dist)
    idx = tf.slice(outi, [0, 0, 0], [-1, -1, k])
    val = tf.slice(out, [0, 0, 0], [-1, -1, k])
    print(idx, val)
    # val, idx = tf.nn.top_k(-dist, k=k)  # CPU-only alternative
    return val, idx
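The reshapes in knn_point insert singleton axes so tiling enumerates every (query, input) pair. With broadcasting, the tiles are not even necessary; a sketch of the pairwise squared distance alone (eager TF 2.x):

import tensorflow as tf

xyz1 = tf.random.normal([2, 5, 3])        # (batch, ndataset, c)
xyz2 = tf.random.normal([2, 4, 3])        # (batch, npoint, c)
a = tf.reshape(xyz1, [2, 1, 5, 3])        # (b, 1, n, c)
b = tf.reshape(xyz2, [2, 4, 1, 3])        # (b, m, 1, c)
dist = tf.reduce_sum((a - b) ** 2, -1)    # (b, m, n), no tf.tile needed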
Example 12: din_fcn_shine
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of a Bi-RNN, concatenate the forward and backward RNN outputs.
        facts = tf.concat(facts, 2)
    if time_major:
        # (T, B, D) => (B, T, D)
        facts = tf.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    # D value - hidden size of the RNN layer
    facts_size = facts.get_shape().as_list()[-1]
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(
        query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat(
        [queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(
        din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(
        d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
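The tile-plus-reshape pair copies one query vector per timestep so queries and facts line up elementwise; tf.shape(facts) keeps this working even when the sequence length is dynamic. In isolation (eager TF 2.x):

import tensorflow as tf

facts = tf.random.normal([2, 6, 8])                 # (B, T, D)
query = tf.random.normal([2, 8])                    # (B, D)
queries = tf.tile(query, [1, tf.shape(facts)[1]])   # (B, T*D)
queries = tf.reshape(queries, tf.shape(facts))      # (B, T, D)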
Example 13: accumulate_privacy_spending
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume Gaussian noise
    on a randomly sampled batch, which gives better composition: 1. the
    per-batch privacy is computed using the privacy-amplification-via-sampling
    bound; 2. the composition is done using the composition for Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.

    Returns:
      a TensorFlow operation for updating the privacy spending.
    """
    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
        amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                          self._total_examples)
        # Use the privacy amplification via sampling bound.
        # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
        # TODO(liqzhang) Add a link to a document with formal statement
        # and proof.
        amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
            tf.exp(eps) - 1.0)), [1])
        amortize_delta = tf.reshape(amortize_ratio * delta, [1])
        return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                        tf.square(amortize_eps)),
                          tf.assign_add(self._delta_sum, amortize_delta)])
Example 14: add_logits_op
def add_logits_op(self):
    """
    Adds logits to self.
    """
    with tf.variable_scope("bi-lstm"):
        lstm_fwrd_cell = tf.contrib.rnn.LSTMCell(self.hidden_size)
        lstm_back_cell = tf.contrib.rnn.LSTMCell(self.hidden_size)
        (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(lstm_fwrd_cell,
                                                                    lstm_back_cell,
                                                                    self.word_embeddings,
                                                                    sequence_length=self.sequence_lengths,
                                                                    dtype=tf.float32)
        output = tf.concat([output_fw, output_bw], axis=-1)
        output = tf.nn.dropout(output, self.dropout)

    with tf.variable_scope("proj"):
        W = tf.get_variable("W", shape=[2 * self.hidden_size, self.ntags],
                            dtype=tf.float32)
        b = tf.get_variable("b", shape=[self.ntags], dtype=tf.float32,
                            initializer=tf.zeros_initializer())
        ntime_steps = tf.shape(output)[1]
        output = tf.reshape(output, [-1, 2 * self.hidden_size])
        pred = tf.matmul(output, W) + b
        self.logits = tf.reshape(pred, [-1, ntime_steps, self.ntags])
Example 15: tf_random_modifiers
def tf_random_modifiers(flat_img, window_dims, name=None):
    float_img = tf.cast(flat_img, tf.float32)
    w, h = window_dims
    mod_image = tf.reshape(float_img, (h, w, 3))

    # # Define the modifier ops:
    # brightness_mod = lambda x: tf.image.random_brightness(x, max_delta=0.3)
    # contrast_mod = lambda x: tf.image.random_contrast(x, lower=0.2, upper=1.8)
    # saturation_mod = lambda x: tf.image.random_saturation(x, lower=0.2, upper=1.8)
    # hue_mod = lambda x: tf.image.random_hue(x, max_delta=0.025)
    # modifier_ops = [brightness_mod, contrast_mod, saturation_mod, hue_mod]
    # # Choose a random order for the modifiers:
    # perm = np.arange(len(modifier_ops))
    # np.random.shuffle(perm)
    # # Apply the modifiers in a random order:
    # for i in perm:
    #     mod_op = modifier_ops[i]
    #     mod_image = mod_op(mod_image)

    mod_image = tf.image.random_brightness(mod_image, max_delta=0.3)
    mod_image = tf.image.random_contrast(mod_image, lower=0.2, upper=1.8)
    mod_image = tf.image.random_saturation(mod_image, lower=0.2, upper=1.8)
    mod_image = tf.image.random_hue(mod_image, max_delta=0.025)
    # Subtract off the mean and divide by the variance of the pixels.
    final_image = tf.image.per_image_whitening(mod_image)
    final_flat_image = tf.reshape(final_image, (w * h * 3,), name=name)
    print('final_flat_image.get_shape()', final_flat_image.get_shape())
    return final_flat_image
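The surrounding reshapes just undo and redo the flattening around the image-space augmentations. A minimal round trip (eager TF 2.x; per_image_whitening is named per_image_standardization in current TensorFlow):

import tensorflow as tf

w, h = 4, 4
flat = tf.random.uniform([h * w * 3])
img = tf.reshape(flat, (h, w, 3))                 # HWC layout for image ops
img = tf.image.random_brightness(img, max_delta=0.3)
img = tf.image.per_image_standardization(img)     # zero mean, unit variance
flat_out = tf.reshape(img, (h * w * 3,))          # flatten again for the pipeline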