This article collects typical usage examples of Python's tensorflow.unstack function. If you are unsure what unstack does, how to call it, or what it looks like in real code, the curated examples below should help.
Fifteen code examples of the unstack function are shown, ordered by popularity.
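Before diving into the examples, a quick orientation: tf.unstack splits a rank-R tensor into a Python list of rank-(R-1) tensors along a given axis, and is the inverse of tf.stack. The following minimal sketch is ours, not one of the collected examples, and assumes the TF 1.x graph API used throughout this page:

import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
rows = tf.unstack(t, axis=0)  # two tensors of shape (3,)
cols = tf.unstack(t, axis=1)  # three tensors of shape (2,)

with tf.Session() as sess:
    print(sess.run(rows))  # [array([1, 2, 3]), array([4, 5, 6])]
    print(sess.run(cols))  # [array([1, 4]), array([2, 5]), array([3, 6])]

Note that the length of the returned list must be known at graph-construction time, either from the static shape or from the num argument (Examples 13 and 14 below test exactly this failure mode).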
Example 1: _init_decoder_train_connectors
def _init_decoder_train_connectors(self):
    with tf.name_scope('DecoderTrainFeeds'):
        sequence_size, batch_size = tf.unstack(tf.shape(self.decoder_targets))

        self.EOS_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.EOS
        self.PAD_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.PAD

        self.decoder_train_inputs = tf.concat([self.EOS_SLICE, self.decoder_targets], axis=0)
        self.decoder_train_length = self.decoder_targets_length + 1

        decoder_train_targets = tf.concat([self.decoder_targets, self.PAD_SLICE], axis=0)
        self.decoder_train_targets_seq_len, _ = tf.unstack(tf.shape(decoder_train_targets))
        decoder_train_targets_eos_mask = tf.one_hot(self.decoder_train_length - 1,
                                                    self.decoder_train_targets_seq_len,
                                                    on_value=self.EOS, off_value=self.PAD,
                                                    dtype=tf.int32)
        self.decoder_train_targets_eos_mask = tf.transpose(decoder_train_targets_eos_mask, [1, 0])
        self.temp_decoder_train_targets = decoder_train_targets
        # hacky way: use one_hot to put the EOS symbol at the end of each target sequence
        decoder_train_targets = tf.add(decoder_train_targets,
                                       self.decoder_train_targets_eos_mask)
        self.decoder_train_targets = decoder_train_targets

        self.loss_weights = tf.ones([
            batch_size,
            tf.reduce_max(self.decoder_train_length)
        ], dtype=tf.float32, name="loss_weights")
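To see why the one_hot addition places EOS correctly, here is a standalone sketch with toy values (ours, not from the original; EOS=1, PAD=0 — the trick relies on PAD being 0 so the addition leaves all other positions untouched):

import tensorflow as tf

EOS, PAD = 1, 0
# Time-major targets for a batch of two sequences with true lengths 2 and 3:
targets = tf.constant([[5, 7],
                       [6, 8],
                       [0, 9]], dtype=tf.int32)                      # [max_len, batch]
lengths = tf.constant([2, 3], dtype=tf.int32)
train_targets = tf.concat([targets, tf.fill([1, 2], PAD)], axis=0)  # append a PAD row
eos_mask = tf.one_hot(lengths, 4, on_value=EOS, off_value=PAD, dtype=tf.int32)
train_targets += tf.transpose(eos_mask)  # EOS lands just past each sequence

with tf.Session() as sess:
    print(sess.run(train_targets))
    # [[5 7]
    #  [6 8]
    #  [1 9]
    #  [0 1]]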
Example 2: buildModel
def buildModel(self, lstm_layer, is_dynamic_rnn, is_train):
    # Weights and biases for the output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # Input image placeholder, shaped [batch_size, time_steps, num_inputs].
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    # For dynamic_rnn, train with dynamic_rnn and run inference with static_rnn.
    if is_dynamic_rnn:
        if is_train:
            lstm_input = x
            outputs, _ = tf.nn.dynamic_rnn(lstm_layer, lstm_input, dtype="float32")
            outputs = tf.unstack(outputs, axis=1)
        else:
            lstm_input = tf.unstack(x, self.time_steps, 1)
            outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
    else:
        lstm_input = tf.unstack(x, self.time_steps, 1)
        outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")

    # Compute logits by multiplying outputs[-1] of shape [batch_size, num_units]
    # by the softmax layer's out_weights of shape [num_units, n_classes],
    # plus out_bias.
    prediction = tf.matmul(outputs[-1], out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
    return x, prediction, output_class
Example 3: merge
def merge(inputs, targets):
    """Split inputs and targets along the time axis and merge into one frame list."""
    inputs = tf.unstack(inputs, axis=1)
    targets = tf.unstack(targets, axis=1)
    assert len(inputs) == hparams.video_num_input_frames
    assert len(targets) == hparams.video_num_target_frames
    return inputs + targets
Example 4: gather_neighbors
def gather_neighbors(X, nbr_indices, B, N, M, d):
    """Gathers the neighbor subsets of the atoms in X.

    B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features

    Parameters
    ----------
    X: tf.Tensor of shape (B, N, d)
        Coordinates/features tensor.
    nbr_indices: tf.Tensor of shape (B, M)
        Neighbor list for a single atom.

    Returns
    -------
    neighbors: tf.Tensor of shape (B, M, d)
        Neighbor coordinates/features tensor for a single atom.
    """
    example_tensors = tf.unstack(X, axis=0)
    example_nbrs = tf.unstack(nbr_indices, axis=0)
    all_nbr_coords = []
    for example_tensor, example_nbr in zip(example_tensors, example_nbrs):
        nbr_coords = tf.gather(example_tensor, example_nbr)
        all_nbr_coords.append(nbr_coords)
    neighbors = tf.stack(all_nbr_coords)
    return neighbors
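A side note, not part of the original example: the Python-level loop above requires a statically known batch size and creates B separate gather ops. On newer TF 1.x releases the same result can be computed in a single op; a hedged sketch, assuming tf.batch_gather is available (TF >= 1.11):

# X: (B, N, d), nbr_indices: (B, M)  ->  neighbors: (B, M, d)
neighbors = tf.batch_gather(X, nbr_indices)
# or, on TF >= 1.14: neighbors = tf.gather(X, nbr_indices, batch_dims=1)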
Example 5: __call__
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
    outputs = [tf.transpose(inputs, [1, 0, 2])]  # to time-major: [time, batch, dim]
    for layer in range(self.num_layers):
        gru_fw, gru_bw = self.grus[layer]
        init_fw, init_bw = self.inits[layer]
        mask_fw, mask_bw = self.dropout_mask[layer]
        with tf.variable_scope('fw_{}'.format(layer), reuse=tf.AUTO_REUSE):
            with tf.variable_scope('cudnn_gru', reuse=tf.AUTO_REUSE):
                out_fw, _ = tf.nn.dynamic_rnn(
                    cell=gru_fw, inputs=outputs[-1] * mask_fw, time_major=True,
                    initial_state=tuple(tf.unstack(init_fw, axis=0)))
        with tf.variable_scope('bw_{}'.format(layer), reuse=tf.AUTO_REUSE):
            with tf.variable_scope('cudnn_gru', reuse=tf.AUTO_REUSE):
                inputs_bw = tf.reverse_sequence(
                    outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
                out_bw, _ = tf.nn.dynamic_rnn(
                    cell=gru_bw, inputs=inputs_bw, time_major=True,
                    initial_state=tuple(tf.unstack(init_bw, axis=0)))
                out_bw = tf.reverse_sequence(
                    out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
        outputs.append(tf.concat([out_fw, out_bw], axis=2))
    if concat_layers:
        res = tf.concat(outputs[1:], axis=2)
    else:
        res = outputs[-1]
    res = tf.transpose(res, [1, 0, 2])  # back to batch-major
    return res
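The backward direction above works because tf.reverse_sequence reverses only the valid prefix of each sequence, leaving the padding in place. A toy illustration (ours, not from the original):

import tensorflow as tf

x = tf.constant([[[1.], [4.]],
                 [[2.], [5.]],
                 [[3.], [6.]]])                  # time-major: [time=3, batch=2, dim=1]
lens = tf.constant([2, 3])
rev = tf.reverse_sequence(x, seq_lengths=lens, seq_dim=0, batch_dim=1)
# batch item 0 (length 2): [1, 2, 3] -> [2, 1, 3]  (position 2 is padding, untouched)
# batch item 1 (length 3): [4, 5, 6] -> [6, 5, 4]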
Example 6: _decode_and_random_crop
def _decode_and_random_crop(image_buffer, bbox, image_size):
    """Randomly crops an image, then scales it to the target size."""
    with tf.name_scope('distorted_bounding_box_crop',
                       values=[image_buffer, bbox]):
        sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
            tf.image.extract_jpeg_shape(image_buffer),
            bounding_boxes=bbox,
            min_object_covered=0.1,
            aspect_ratio_range=[0.75, 1.33],
            area_range=[0.08, 1.0],
            max_attempts=10,
            use_image_if_no_bounding_boxes=True)
        bbox_begin, bbox_size, _ = sample_distorted_bounding_box

        # Crop the image to the sampled bounding box.
        offset_y, offset_x, _ = tf.unstack(bbox_begin)
        target_height, target_width, _ = tf.unstack(bbox_size)
        crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
        image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3)
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = tf.image.resize_bicubic([image], [image_size, image_size])[0]
    return image
Example 7: vec2mtrxBatch
def vec2mtrxBatch(pBatch, opt):
    with tf.name_scope("vec2mtrx"):
        batchSize = tf.shape(pBatch)[0]
        O = tf.zeros([batchSize])
        I = tf.ones([batchSize])
        if opt.warpType == "translation":
            tx, ty = tf.unstack(pBatch, axis=1)
            pMtrxBatch = tf.transpose(tf.stack([[I, O, tx],
                                                [O, I, ty],
                                                [O, O, I]]), perm=[2, 0, 1])
        elif opt.warpType == "similarity":
            pc, ps, tx, ty = tf.unstack(pBatch, axis=1)
            pMtrxBatch = tf.transpose(tf.stack([[I + pc, -ps, tx],
                                                [ps, I + pc, ty],
                                                [O, O, I]]), perm=[2, 0, 1])
        elif opt.warpType == "affine":
            p1, p2, p3, p4, p5, p6 = tf.unstack(pBatch, axis=1)
            pMtrxBatch = tf.transpose(tf.stack([[I + p1, p2, p3],
                                                [p4, I + p5, p6],
                                                [O, O, I]]), perm=[2, 0, 1])
        elif opt.warpType == "homography":
            p1, p2, p3, p4, p5, p6, p7, p8 = tf.unstack(pBatch, axis=1)
            pMtrxBatch = tf.transpose(tf.stack([[I + p1, p2, p3],
                                                [p4, I + p5, p6],
                                                [p7, p8, I]]), perm=[2, 0, 1])
    return pMtrxBatch
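A quick usage sketch with toy values (ours; opt stands in for whatever options object the surrounding code uses, faked here with a namedtuple):

import collections
import tensorflow as tf

Opt = collections.namedtuple('Opt', ['warpType'])
p = tf.constant([[0.5, -1.0]])  # one translation vector (tx, ty)
pMtrx = vec2mtrxBatch(p, Opt(warpType='translation'))

with tf.Session() as sess:
    print(sess.run(pMtrx))
    # [[[ 1.   0.   0.5]
    #   [ 0.   1.  -1. ]
    #   [ 0.   0.   1. ]]]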
Example 8: buildModel
def buildModel(self, fw_lstm_layer, bw_lstm_layer, is_dynamic_rnn):
    # Weights and biases for the output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # Input image placeholder.
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    if is_dynamic_rnn:
        lstm_inputs = tf.transpose(x, [1, 0, 2])  # to time-major
        outputs, _ = bidirectional_dynamic_rnn(
            fw_lstm_layer,
            bw_lstm_layer,
            lstm_inputs,
            dtype="float32",
            time_major=True)
        fw_outputs, bw_outputs = outputs
        output = tf.concat([fw_outputs, bw_outputs], 2)
        output = tf.unstack(output, axis=0)
        output = output[-1]  # last time step
    else:
        lstm_input = tf.unstack(x, self.time_steps, 1)
        outputs, _, _ = tf.nn.static_bidirectional_rnn(
            fw_lstm_layer, bw_lstm_layer, lstm_input, dtype="float32")
        output = outputs[-1]

    # Compute logits by multiplying output of shape [batch_size, num_units * 2]
    # by the softmax layer's out_weights of shape [num_units * 2, n_classes],
    # plus out_bias.
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
    return x, prediction, output_class
Example 9: lstm_word_embedding_from_chars
def lstm_word_embedding_from_chars(chars, lengths, embed_dim, scope='lstm-word-embed', reuse=False):
    """Word embeddings via LSTM encoding of character sequences.

    Args:
        chars: Tensor of shape [batch_size, word sequence length, char sequence length, num characters].
        lengths: Tensor of shape [batch_size, word sequence length].
        embed_dim: Dimension of word embeddings. Integer.

    Returns:
        Sequence of embedding vectors. Tensor of shape [batch_size, word sequence length, embed_dim].
    """
    chars = tf.cast(chars, tf.float32)

    # This is super inefficient: it builds one encoder subgraph per example.
    chars = tf.unstack(chars, axis=0)
    lengths = tf.unstack(lengths, axis=0)
    lstm_word_embeddings = []
    for i, (char, length) in enumerate(zip(chars, lengths)):
        temp_reuse = i != 0 or reuse
        embedding = lstm_encoder(char, length, embed_dim, 1.0, scope=scope, reuse=temp_reuse)
        lstm_word_embeddings.append(embedding)
    lstm_word_embeddings = tf.stack(lstm_word_embeddings, axis=0)
    return lstm_word_embeddings
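As the comment admits, unstacking the batch builds one encoder subgraph per example. The usual fix is to fold the batch and word dimensions together, encode once, and unfold; a sketch under the same assumptions (lstm_encoder with the signature used above, not verified against its internals):

# chars: [B, W, C, num_chars], lengths: [B, W]
B, W = tf.shape(chars)[0], tf.shape(chars)[1]
flat_chars = tf.reshape(chars, tf.concat([[-1], tf.shape(chars)[2:]], axis=0))   # [B*W, C, num_chars]
flat_lengths = tf.reshape(lengths, [-1])                                         # [B*W]
flat_embeddings = lstm_encoder(flat_chars, flat_lengths, embed_dim, 1.0,
                               scope=scope, reuse=reuse)                         # [B*W, embed_dim]
lstm_word_embeddings = tf.reshape(flat_embeddings, tf.stack([B, W, embed_dim]))  # [B, W, embed_dim]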
Example 10: input_fn
def input_fn(data_dir, subset, num_shards, batch_size):
    """Create input graph for model.

    Args:
        data_dir: Directory where TFRecords representing the dataset are located.
        subset: one of 'train', 'validate' and 'eval'.
        num_shards: number of towers participating in data-parallel training.
        batch_size: total batch size for training, to be divided among the shards.

    Returns:
        two lists of tensors for features and labels, each of num_shards length.
    """
    with tf.device('/cpu:0'):
        dataset = mlp_data.MlpDataSet(data_dir, subset)
        image_batch, label_batch = dataset.make_batch(batch_size)
        if num_shards <= 1:
            # No GPU available or only 1 GPU.
            return [image_batch], [label_batch]

        # Note that passing num=batch_size is safe here, even though
        # dataset.batch(batch_size) can, in some cases, return fewer than
        # batch_size examples. It does so only when repeating for a limited
        # number of epochs, but this dataset repeats forever.
        image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
        label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
        feature_shards = [[] for i in range(num_shards)]
        label_shards = [[] for i in range(num_shards)]
        # Round-robin the examples across the shards.
        for i in range(batch_size):
            idx = i % num_shards
            feature_shards[idx].append(image_batch[i])
            label_shards[idx].append(label_batch[i])
        feature_shards = [tf.parallel_stack(x) for x in feature_shards]
        label_shards = [tf.parallel_stack(x) for x in label_shards]
        return feature_shards, label_shards
Example 11: wasserstein_disagreement_map
def wasserstein_disagreement_map(
        prediction, ground_truth, weight_map=None, M=None):
    """
    Function to calculate the pixel-wise Wasserstein distance between the
    flattened prediction and the flattened labels (ground_truth) with respect
    to the distance matrix on the label space M.

    :param prediction: the logits after softmax
    :param ground_truth: segmentation ground_truth
    :param weight_map: optional pixel-wise weight map (currently unused)
    :param M: distance matrix on the label space
    :return: the pixelwise distance map (wass_dis_map)
    """
    if weight_map is not None:
        tf.logging.warning('Weight map specified but not used.')
    assert M is not None, "Distance matrix M is required."

    # Pixel-wise Wasserstein distance (W) between flat_pred_proba and
    # flat_labels w.r.t. the distance matrix on the label space M.
    n_classes = prediction.shape[1].value
    unstack_labels = tf.unstack(ground_truth, axis=-1)
    unstack_labels = tf.cast(unstack_labels, dtype=tf.float64)
    unstack_pred = tf.unstack(prediction, axis=-1)
    unstack_pred = tf.cast(unstack_pred, dtype=tf.float64)

    # W is a weighted sum of all pairwise correlations (pred_ci x labels_cj).
    pairwise_correlations = []
    for i in range(n_classes):
        for j in range(n_classes):
            pairwise_correlations.append(
                M[i, j] * tf.multiply(unstack_pred[i], unstack_labels[j]))
    wass_dis_map = tf.add_n(pairwise_correlations)
    return wass_dis_map
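An aside, not in the original: the n_classes**2 Python loop computes sum_ij M[i, j] * pred[:, i] * labels[:, j], which can be collapsed into one matmul. A sketch assuming prediction and ground_truth are both [n_voxels, n_classes] and M is [n_classes, n_classes]:

pred64 = tf.cast(prediction, tf.float64)
labels64 = tf.cast(ground_truth, tf.float64)
M64 = tf.cast(M, tf.float64)
# (labels64 @ M64^T)[n, i] = sum_j labels[n, j] * M[i, j]; weight by pred and sum over i:
wass_dis_map = tf.reduce_sum(pred64 * tf.matmul(labels64, M64, transpose_b=True), axis=1)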
Example 12: hard_negative_mining
def hard_negative_mining():
    bboxes_per_batch = tf.unstack(bboxes)
    classification_loss_per_batch = tf.unstack(classification_loss)
    num_positives_per_batch = tf.unstack(tf.reduce_sum(positives, axis=-1))
    neg_class_loss_per_batch = tf.unstack(neg_class_loss_all)

    neg_class_losses = []
    total_negatives = []
    for bboxes_per_image, classification_loss_per_image, num_positives_per_image, neg_class_loss_per_image in \
            zip(bboxes_per_batch, classification_loss_per_batch, num_positives_per_batch, neg_class_loss_per_batch):
        min_negatives_keep = tf.maximum(self.neg_pos_ratio * num_positives_per_image, 3)
        num_negatives_keep = tf.minimum(min_negatives_keep,
                                        tf.count_nonzero(neg_class_loss_per_image, dtype=tf.float32))

        # NMS with the classification loss as the score picks the hardest
        # negatives while suppressing near-duplicate boxes.
        indices = tf.image.non_max_suppression(bboxes_per_image, classification_loss_per_image,
                                               tf.to_int32(num_negatives_keep), iou_threshold=0.99)
        num_negatives = tf.size(indices)
        total_negatives.append(num_negatives)

        expanded_indexes = tf.expand_dims(indices, axis=1)  # shape: (num_negatives, 1)
        negatives_keep = tf.scatter_nd(expanded_indexes, updates=tf.ones_like(indices, dtype=tf.int32),
                                       shape=tf.shape(classification_loss_per_image))  # shape: (num_priors,)
        negatives_keep = tf.to_float(tf.reshape(negatives_keep, [num_priors]))  # shape: (num_priors,)
        neg_class_losses.append(tf.reduce_sum(classification_loss_per_image * negatives_keep, axis=-1))  # scalar
    return tf.stack(neg_class_losses), tf.reduce_sum(tf.stack(total_negatives))
Example 13: testCannotInferNumFromNoneShape
def testCannotInferNumFromNoneShape(self):
    # tf.unpack is the pre-TF-1.0 name for tf.unstack; the test covers both.
    x = tf.placeholder(np.float32, shape=(None,))
    with self.assertRaisesRegexp(ValueError,
                                 r'Cannot infer num from shape \(\?,\)'):
        tf.unpack(x)
    with self.assertRaisesRegexp(ValueError,
                                 r'Cannot infer num from shape \(\?,\)'):
        tf.unstack(x)
Example 14: testCannotInferNumFromUnknownShape
def testCannotInferNumFromUnknownShape(self):
    x = tf.placeholder(np.float32)
    with self.assertRaisesRegexp(
            ValueError, r'Cannot infer num from shape <unknown>'):
        tf.unpack(x)
    with self.assertRaisesRegexp(
            ValueError, r'Cannot infer num from shape <unknown>'):
        tf.unstack(x)
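Both tests exercise the case where the size along axis 0 cannot be inferred statically; supplying num explicitly sidesteps the error (Example 10 above uses the same idiom with num=batch_size). A minimal sketch:

x = tf.placeholder(np.float32, shape=(None,))
pieces = tf.unstack(x, num=4)  # fine at graph-build time; must match the actual size at run time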
Example 15: sample
def sample(self, b_enc, b_dec):
    """Generate samples for the batch from the NADE.

    Args:
        b_enc: External encoder bias terms (`b` in [1]), sized
            `[batch_size, num_hidden]`.
        b_dec: External decoder bias terms (`c` in [1]), sized
            `[batch_size, num_dims]`.

    Returns:
        sample: The generated samples, sized `[batch_size, num_dims]`.
        log_prob: The log probabilities of each observation in the batch, sized
            `[batch_size, 1]`.
    """
    batch_size = tf.shape(b_enc)[0]

    a_0 = b_enc
    sample_0 = []
    log_p_0 = tf.zeros([batch_size, 1])

    w_enc_arr = tf.unstack(self.w_enc)
    w_dec_arr = tf.unstack(self.w_dec_t)
    b_dec_arr = tf.unstack(
        tf.reshape(tf.transpose(b_dec), [self.num_dims, batch_size, 1]))

    def loop_body(i, a, sample, log_p):
        """Accumulate hidden state, sample, and log probability for index i."""
        # Get weights and bias for this time step.
        w_enc_i = w_enc_arr[i]
        w_dec_i = w_dec_arr[i]
        b_dec_i = b_dec_arr[i]

        cond_p_i = self._cond_prob(a, w_dec_i, b_dec_i)

        bernoulli = tf.contrib.distributions.Bernoulli(probs=cond_p_i,
                                                       dtype=tf.float32)
        v_i = bernoulli.sample()

        # Accumulate sampled values.
        sample_new = sample + [v_i]

        # Get log probability for this value. Log space avoids numerical issues.
        log_p_i = v_i * safe_log(cond_p_i) + (1 - v_i) * safe_log(1 - cond_p_i)

        # Accumulate log probability.
        log_p_new = log_p + log_p_i

        # Encode the value and add it to the hidden units.
        a_new = a + tf.matmul(v_i, w_enc_i)

        return a_new, sample_new, log_p_new

    # Build the actual (unrolled) loop.
    a, sample, log_p = a_0, sample_0, log_p_0
    for i in range(self.num_dims):
        a, sample, log_p = loop_body(i, a, sample, log_p)

    return tf.transpose(tf.squeeze(tf.stack(sample), [2])), log_p