This article collects typical usage examples of the Python method tensorflow.compat.v1.shape. If you have been wondering what exactly the v1.shape method does and how to use it, the curated examples below may help. You can also explore the other methods of the tensorflow.compat.v1 module.
The following shows 15 code examples of the v1.shape method, sorted by popularity by default.
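Before diving into the examples, here is a minimal sketch (an illustration assuming TensorFlow 1.x graph mode via tf.compat.v1) contrasting the dynamic tf.shape op with the static Tensor.get_shape():

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph mode, so shapes can be partially unknown

# A placeholder whose batch and spatial dimensions are unknown at build time.
x = tf.placeholder(tf.float32, shape=[None, None, None, 3])
static_shape = x.get_shape().as_list()  # [None, None, None, 3], known statically
dynamic_shape = tf.shape(x)             # int32 Tensor, resolved at run time

with tf.Session() as sess:
  print(static_shape)
  print(sess.run(dynamic_shape, {x: np.zeros((2, 8, 8, 3))}))  # [2 8 8 3]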
Example 1: global_pool
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
  """Applies avg pool to produce 1x1 output.

  NOTE: This function is functionally equivalent to reduce_mean, but it uses
  a baked-in average pool, which has better support across hardware.

  Args:
    input_tensor: input tensor
    pool_op: pooling op (avg pool is default)
  Returns:
    a tensor batch_size x 1 x 1 x depth.
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size = tf.convert_to_tensor(
        [1, tf.shape(input_tensor)[1],
         tf.shape(input_tensor)[2], 1])
  else:
    kernel_size = [1, shape[1], shape[2], 1]
  output = pool_op(
      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover output shape, for unknown shape.
  output.set_shape([None, 1, 1, None])
  return output
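A quick usage sketch for the function above (hypothetical placeholder input; global_pool and tf.compat.v1 as tf are assumed to be in scope):

# Height and width are unknown at graph-construction time, so global_pool
# falls back to the dynamic tf.shape kernel size.
images = tf.placeholder(tf.float32, [None, None, None, 32])
pooled = global_pool(images)                      # static shape [None, 1, 1, 32]
pooled_max = global_pool(images, tf.nn.max_pool)  # max pool variant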
Example 2: _reduce_prev_layer
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def _reduce_prev_layer(self, prev_layer, curr_layer):
  """Matches dimension of prev_layer to the curr_layer."""
  # Set the prev layer to the current layer if it is None.
  if prev_layer is None:
    return curr_layer
  curr_num_filters = self._filter_size
  prev_num_filters = get_channel_dim(prev_layer.shape)
  curr_filter_shape = int(curr_layer.shape[2])
  prev_filter_shape = int(prev_layer.shape[2])
  if curr_filter_shape != prev_filter_shape:
    # Spatial sizes differ: downsample the previous layer by a factor of 2.
    prev_layer = tf.nn.relu(prev_layer)
    prev_layer = factorized_reduction(prev_layer, curr_num_filters, stride=2)
  elif curr_num_filters != prev_num_filters:
    # Channel counts differ: project with a 1x1 convolution.
    prev_layer = tf.nn.relu(prev_layer)
    prev_layer = slim.conv2d(
        prev_layer, curr_num_filters, 1, scope='prev_1x1')
    prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
  return prev_layer
Example 3: _cell_base
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def _cell_base(self, net, prev_layer):
  """Runs the beginning of the conv cell before the predicted ops are run."""
  num_filters = self._filter_size
  # Check to be sure prev layer stuff is set up correctly.
  prev_layer = self._reduce_prev_layer(prev_layer, net)
  net = tf.nn.relu(net)
  net = slim.conv2d(net, num_filters, 1, scope='1x1')
  net = slim.batch_norm(net, scope='beginning_bn')
  split_axis = get_channel_index()
  net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
  for split in net:
    # Note: the original had a misplaced parenthesis that turned this assert
    # into int(<bool>), which is always truthy. Fixed here.
    assert int(split.shape[split_axis]) == int(
        self._num_conv_filters * self._filter_scaling)
  net.append(prev_layer)
  return net
Example 4: decode_jpeg
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def decode_jpeg(image_buffer, scope=None):
  """Decodes a JPEG string into one 3-D uint8 image Tensor.

  Args:
    image_buffer: scalar string Tensor.
    scope: Optional scope for op_scope.
  Returns:
    3-D uint8 Tensor with unknown height and width.
  """
  with tf.name_scope(scope or 'decode_jpeg'):
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of the image are unknown at compile time.
    image = tf.image.decode_jpeg(image_buffer, channels=3,
                                 fancy_upscaling=False,
                                 dct_method='INTEGER_FAST')
    return image
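A minimal sketch of feeding the decoder, assuming '/tmp/example.jpg' is a stand-in path to a JPEG on disk:

jpeg_bytes = tf.read_file('/tmp/example.jpg')  # scalar string Tensor
image = decode_jpeg(jpeg_bytes)
print(image.get_shape().as_list())  # [None, None, 3]; H and W are dynamic
with tf.Session() as sess:
  print(sess.run(tf.shape(image)))  # e.g. [480 640 3]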
Example 5: _ensure_keep_mask
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def _ensure_keep_mask(self, x):
  if self._keep_mask is None or not self._share_mask:
    shape = tf.shape(x)
    k = shape[1]
    # To make this class a drop-in replacement for bernoulli dropout we
    # parameterize it with keep_prob. Set alpha of the dirichlet so that the
    # variance is equal to the variance of the bernoulli with p=keep_prob
    # divided by keep_prob.
    # Now the variance of the dirichlet with k equal alphas is
    # (k-1)/(k^2*(k*alpha+1)). Solve that for alpha.
    kf = tf.cast(k, tf.float32)
    alpha = self._keep_prob * (kf - 1.0) / ((1 - self._keep_prob) * kf) - 1.0 / kf
    dist = tfp.distributions.Dirichlet(tf.ones(shape=k) * alpha)
    assert (dist.reparameterization_type ==
            tfp.distributions.FULLY_REPARAMETERIZED)
    # E[dir(alpha)] = 1/k for all elements, but we want the expectation to
    # be keep_prob, hence the multiplication.
    self._keep_mask = kf * dist.sample(shape[0])
    self._keep_mask.set_shape(x.get_shape())
  return self._keep_mask
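The alpha in the comment is chosen so that the scaled mask k*d_i has the same per-element variance as an inverted-dropout Bernoulli mask B/keep_prob, namely (1 - keep_prob)/keep_prob. A quick NumPy sanity check of that algebra (a standalone sketch, not part of the original class):

import numpy as np

p, k = 0.8, 512
alpha = p * (k - 1.0) / ((1.0 - p) * k) - 1.0 / k
samples = k * np.random.dirichlet(np.full(k, alpha), size=100000)
print(samples.var())   # ~0.25, i.e. (1 - p) / p
print((1.0 - p) / p)   # 0.25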
Example 6: _build
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def _build(self, x, state):
  prev_keep_mask = state
  shape = tf.shape(x)
  noise = tf.random_uniform(shape, dtype=x.dtype)
  other_mask = tf.floor(self._keep_prob + noise)
  choice_noise = tf.random_uniform(shape, dtype=x.dtype)
  choice = tf.less(choice_noise, self._flip_prob)
  # KLUDGE(melisgl): The client has to pass the last keep_mask from
  # a batch to the next so the mask may end up next to some
  # recurrent cell state. This state is often zero at the beginning
  # and may be periodically zeroed (per example) during training.
  # While zeroing LSTM state is okay, zeroing the dropout mask is
  # not. So instead of forcing every client to deal with this common
  # (?) case, if an all-zero mask is detected, then regenerate a
  # fresh mask. This is of course a major hack and won't help with
  # learnt initial states, for example.
  sum_ = tf.reduce_sum(prev_keep_mask, 1, keepdims=True)
  is_initializing = tf.equal(sum_, 0.0)
  self._keep_mask = tf.where(tf.logical_or(choice, is_initializing),
                             other_mask,
                             prev_keep_mask)
  self._time_step += 1
  return x * self._keep_mask / self._keep_prob * self._scaler
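The masks above rely on the standard trick that tf.floor(keep_prob + uniform_noise) is 1 with probability keep_prob and 0 otherwise; a standalone sketch:

keep_prob = 0.9
noise = tf.random_uniform([1000, 100])
mask = tf.floor(keep_prob + noise)  # Bernoulli(keep_prob) sample of 0s and 1s
with tf.Session() as sess:
  print(sess.run(tf.reduce_mean(mask)))  # ~0.9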
Example 7: layer_norm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def layer_norm(x, reduction_indices, epsilon=1e-9, gain=None, bias=None,
               per_element=True, scope=None):
  """Layer normalization of x over the given reduction indices."""
  reduction_indices = ensure_list(reduction_indices)
  mean = tf.reduce_mean(x, reduction_indices, keep_dims=True)
  variance = tf.reduce_mean(tf.squared_difference(x, mean),
                            reduction_indices, keep_dims=True)
  normalized = (x - mean) / tf.sqrt(variance + epsilon)
  dtype = x.dtype
  shape = x.get_shape().as_list()
  # Gain and bias are broadcast against x: non-reduced axes (and, unless
  # per_element, all axes) are collapsed to size 1.
  for i in six.moves.range(len(shape)):
    if i not in reduction_indices or not per_element:
      shape[i] = 1
  with tf.variable_scope(scope or 'layer_norm'):
    if gain is None:
      gain = tf.get_variable('gain', shape=shape, dtype=dtype,
                             initializer=tf.ones_initializer())
    if bias is None:
      bias = tf.get_variable('bias', shape=shape, dtype=dtype,
                             initializer=tf.zeros_initializer())
    return gain * normalized + bias
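A usage sketch for layer_norm (assuming ensure_list simply wraps a scalar into a list, as its use above suggests), normalizing a [batch, features] activation over the feature axis:

x = tf.placeholder(tf.float32, [None, 128])
y = layer_norm(x, reduction_indices=1, scope='ln')
# With per_element=True, 'gain' and 'bias' have shape [1, 128];
# with per_element=False they collapse to shape [1, 1].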
Example 8: sparse_random_indices
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def sparse_random_indices(ratio, shape):
  """Returns a random subset of ratio*num_elements indices into shape."""
  assert 0 < ratio <= 1.0
  n = round_to_int(tf.TensorShape(shape).num_elements() * ratio)
  # There are two implementations. The first generates random indices
  # and wastes computation due to collisions, and the second wastes
  # memory.
  if ratio < 0.25:
    indices = {}
    if isinstance(shape, tf.TensorShape):
      shape = shape.as_list()
    while len(indices) < n:
      index = _random_index(shape)
      indices[index] = True
    return indices.keys()
  else:
    indices = _all_indices(shape)
    random.shuffle(indices)
    return indices[:n]
Example 9: log_trainables
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def log_trainables(scopes=('',)):
  """Logs the number of trainable parameters for each scope in `scopes`.

  Args:
    scopes: A sequence of scope names.
  Returns:
    The total number of trainable parameters over all scopes in
    `scopes`, possibly counting some parameters multiple times if the
    scopes are nested.
  """
  total = 0
  for scope in scopes:
    logging.info('Trainables in scope "%s":', scope)
    n = 0
    for var in trainable_vars_in_scope(scope):
      shape = var.get_shape()
      logging.info('trainable: %s shape %r (%r)', var.name, shape.as_list(),
                   shape.num_elements())
      n += shape.num_elements()
    logging.info('Number of parameters in scope "%s": %r', scope, n)
    total += n
  return total
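A usage sketch (assuming trainable_vars_in_scope returns the trainable variables whose names start with the given scope, and that absl logging is configured):

with tf.variable_scope('encoder'):
  _ = tf.get_variable('w', shape=[128, 256])
total = log_trainables(scopes=('encoder',))  # logs and returns 128*256 = 32768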
Example 10: padded_accuracy_topk
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions match labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
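Fixed-k variants are easy to derive with functools.partial; a sketch of a top-5 accuracy metric (tensor2tensor ships similar wrappers; logits and targets are hypothetical tensors here):

import functools

padded_accuracy_top5 = functools.partial(padded_accuracy_topk, k=5)
same_top5, weights = padded_accuracy_top5(logits, targets)
accuracy = tf.reduce_sum(same_top5 * weights) / tf.reduce_sum(weights)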
Example 11: two_class_log_likelihood
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two-class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`]. Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`]. Each component
      should either be 0 or 1.
    weights_fn: unused.
  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0)
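A worked check: with predictions [0.8, 0.3] and labels [1, 0], the chosen probabilities are 0.8 and 0.7, so the average log-likelihood is (ln 0.8 + ln 0.7) / 2 ≈ -0.29:

preds = tf.constant([0.8, 0.3])
labs = tf.constant([1, 0])
log_likelihood, _ = two_class_log_likelihood(preds, labs)
with tf.Session() as sess:
  print(sess.run(log_likelihood))  # ~-0.2899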
Example 12: set_precision
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.
  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example 13: set_recall
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.
  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example 14: image_summary
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def image_summary(predictions, targets, hparams):
  """Reshapes predictions and passes them to tensorboard.

  Args:
    predictions: The predicted image (logits).
    targets: The ground truth.
    hparams: model hparams.
  Returns:
    summary_proto: containing the summary images.
    weights: A Tensor of zeros of the same shape as predictions.
  """
  del hparams
  results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
  gold = tf.cast(targets, tf.uint8)
  summary1 = tf.summary.image("prediction", results, max_outputs=2)
  summary2 = tf.summary.image("data", gold, max_outputs=2)
  summary = tf.summary.merge([summary1, summary2])
  return summary, tf.zeros_like(predictions)
Example 15: sigmoid_precision_one_hot
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import shape [as alias]
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
  """Calculates precision for a set, given one-hot labels and logits.

  Predictions are converted to one-hot,
  as predictions[example][arg-max(example)] = 1.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    precision (scalar), weights
  """
  with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, precision = tf.metrics.precision(labels=labels, predictions=predictions)
    return precision, tf.constant(1.0)
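Because tf.metrics.precision keeps its running counts in local variables, they must be initialized before evaluation; a minimal sketch with hypothetical random inputs:

logits = tf.random_normal([8, 1, 1, 10])
labels = tf.one_hot(tf.random_uniform([8, 1, 1], maxval=10, dtype=tf.int32), 10)
precision, _ = sigmoid_precision_one_hot(logits, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # the metric's accumulators
  print(sess.run(precision))                  # updates and returns the metric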