This article collects typical usage examples of the Python method tensorflow.size. If you are unsure what tensorflow.size does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the tensorflow module this method belongs to.
A total of 15 code examples of tensorflow.size are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
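Before the examples, a minimal sketch of tf.size itself: it returns the total number of elements in a tensor as a scalar int32 Tensor (or int64 via out_type). The sketch assumes eager execution (TF 2.x); under TF 1.x graph mode the values would need a session run:

import tensorflow as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])  # shape [2, 3]
n = tf.size(x)                           # scalar 6, dtype tf.int32
n64 = tf.size(x, out_type=tf.int64)      # same count, as int64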
Example 1: build_cross_entropy_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def build_cross_entropy_loss(logits, gold):
    """Constructs a cross entropy loss from logits and integer gold labels.

    Supports skipping rows where the gold label is the magic -1 value.

    Args:
      logits: float Tensor of scores.
      gold: int Tensor of gold class labels.

    Returns:
      cost, correct, total: the total cost, the total number of correctly
          predicted labels, and the total number of valid labels.
    """
    valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])
    gold = tf.gather(gold, valid)
    logits = tf.gather(logits, valid)
    correct = tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
    total = tf.size(gold)
    cost = tf.reduce_sum(
        tf.contrib.nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits(
            logits, tf.cast(gold, tf.int64))) / tf.cast(total, tf.float32)
    return cost, correct, total
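The tf.contrib op above is specific to TF 1.x, but the masking idiom around tf.size is easy to isolate. A minimal sketch using only core ops, showing how rows with the magic -1 label are dropped before counting:

import tensorflow as tf

gold = tf.constant([2, -1, 0, -1, 1])
valid = tf.reshape(tf.where(tf.greater(gold, -1)), [-1])  # indices [0, 2, 4]
kept = tf.gather(gold, valid)                             # [2, 0, 1]
total = tf.size(kept)                                     # scalar 3: number of valid labels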
Example 2: update_tensor_arrays

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def update_tensor_arrays(network_tensors, arrays):
    """Updates a list of tensor arrays from the network's output tensors.

    Arguments:
      network_tensors: Output tensors from the underlying NN unit.
      arrays: TensorArrays to be updated.

    Returns:
      New list of TensorArrays after writing activations.
    """
    # TODO(googleuser): Only store activations that will be used later in
    # linked feature specifications.
    next_arrays = []
    for index, network_tensor in enumerate(network_tensors):
        array = arrays[index]
        size = array.size()
        array = array.write(size, network_tensor)
        next_arrays.append(array)
    return next_arrays
Example 3: expanded_shape

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def expanded_shape(orig_shape, start_dim, num_dims):
    """Inserts multiple ones into a shape vector.

    Inserts an all-1 vector of length num_dims at position start_dim into a
    shape. Can be combined with tf.reshape to generalize tf.expand_dims.

    Args:
      orig_shape: the shape into which the all-1 vector is added (int32 vector)
      start_dim: insertion position (int scalar)
      num_dims: length of the inserted all-1 vector (int scalar)

    Returns:
      An int32 vector of length tf.size(orig_shape) + num_dims.
    """
    with tf.name_scope('ExpandedShape'):
        start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
        before = tf.slice(orig_shape, [0], start_dim)
        add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
        after = tf.slice(orig_shape, start_dim, [-1])
        new_shape = tf.concat([before, add_shape, after], 0)
        return new_shape
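Combined with tf.reshape, expanded_shape generalizes tf.expand_dims to insert several singleton dimensions at once. A quick sketch, assuming the function above is in scope:

x = tf.ones([4, 5])
new_shape = expanded_shape(tf.shape(x), 1, 2)  # [4, 1, 1, 5]
y = tf.reshape(x, new_shape)                   # same as two expand_dims at axis 1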
Example 4: _padded_batched_proposals_indicator

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def _padded_batched_proposals_indicator(self,
                                        num_proposals,
                                        max_num_proposals):
    """Creates an indicator matrix of non-pad elements of padded batch proposals.

    Args:
      num_proposals: Tensor of type tf.int32 with shape [batch_size].
      max_num_proposals: Maximum number of proposals per image (integer).

    Returns:
      A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
    """
    batch_size = tf.size(num_proposals)
    tiled_num_proposals = tf.tile(
        tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
    tiled_proposal_index = tf.tile(
        tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
    return tf.greater(tiled_num_proposals, tiled_proposal_index)
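For example, with num_proposals = [1, 3] and max_num_proposals = 4, the comparison yields one boolean row per image. A standalone sketch of the same computation:

num_proposals = tf.constant([1, 3])
max_num_proposals = 4
batch_size = tf.size(num_proposals)  # 2
tiled_num = tf.tile(tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_idx = tf.tile(tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
mask = tf.greater(tiled_num, tiled_idx)
# [[True, False, False, False],
#  [True, True,  True,  False]]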
Example 5: _grad_sparsity

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as the sparse minibatch gradient
    # norm * sqrt(sparsity).
    # An extension might correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity])
    with tf.control_dependencies([avg_op]):
        self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op
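The sparsity ratio itself is just the nonzero count over tf.size, summed across all gradient tensors. A minimal sketch with toy gradients:

grads = [tf.constant([0., 1., 0., 2.]), tf.constant([0., 0.])]
non_zero = tf.add_n([tf.count_nonzero(g) for g in grads])  # 2 (int64)
total = tf.add_n([tf.size(g) for g in grads])              # 6 (int32)
sparsity = tf.cast(non_zero, tf.float32) / tf.cast(total, tf.float32)  # 2/6 = 0.33...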
Example 6: add_positional_embedding

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def add_positional_embedding(x, max_length, name, positions=None):
    """Adds a positional embedding.

    Args:
      x: a Tensor with shape [batch, length, depth]
      max_length: an integer. Static maximum size of any dimension.
      name: a name for this layer.
      positions: an optional tensor with shape [batch, length]

    Returns:
      a Tensor the same shape as x.
    """
    _, length, depth = common_layers.shape_list(x)
    var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
    if positions is None:
        sliced = tf.cond(
            tf.less(length, max_length),
            lambda: tf.slice(var, [0, 0], [length, -1]),
            lambda: tf.pad(var, [[0, length - max_length], [0, 0]]))
        return x + tf.expand_dims(sliced, 0)
    else:
        return x + tf.gather(var, tf.to_int32(positions))
Example 7: coordinate_tensor

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def coordinate_tensor(shape, axis):
    """Returns a tensor of the given shape containing coordinates along an axis.

    Args:
      shape: a Tensor representing the shape of the output Tensor
      axis: an integer

    Returns:
      A tensor with shape shape and type tf.int32, where each element is its
      coordinate along the given axis.
    """
    if axis < 0:
        axis = tf.size(shape) + axis  # Convert to positive for the one_hot index
    r = tf.range(shape[axis])
    r_shape = tf.one_hot(
        axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32)
    return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape)
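For shape [2, 3] and axis 1, every element holds its column index. A quick sketch, assuming coordinate_tensor is in scope:

coords = coordinate_tensor(tf.constant([2, 3]), 1)
# [[0, 1, 2],
#  [0, 1, 2]]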
Example 8: select_dim_value

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def select_dim_value(x, indices, name=None):
    with tf.name_scope(name, "select-dim-value", values=[x, indices]):
        # x.shape = (rest..., dims)
        rest = tf.shape(x)[:-1]
        dims = tf.shape(x)[-1]
        size = tf.size(indices, out_type=indices.dtype)

        # reshape to (size, dims)
        t = tf.reshape(x, shape=[-1, dims])
        # then index as ([0, 1, ..., size-1], indices.ravel())
        nd_indices = tf.stack([
            tf.range(0, size, dtype=indices.dtype),
            tf.reshape(indices, shape=[-1])
        ], axis=1)
        t = tf.gather_nd(t, indices=nd_indices)

        # reshape back to (rest...)
        t = tf.reshape(t, rest)
        t.set_shape(x.get_shape()[:-1])
        return t
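A typical use is picking one value per row along the last dimension, e.g. the logit of a chosen class. A sketch, assuming the TF 1.x-style function above is in scope:

x = tf.constant([[10., 20., 30.],
                 [40., 50., 60.]])
idx = tf.constant([2, 0])
out = select_dim_value(x, idx)  # [30., 40.]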
Example 9: fixed_padding

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def fixed_padding(inputs, kernel_size, rate=1):
    """Pads the input along the spatial dimensions independently of input size.

    Args:
      inputs: A tensor of size [batch, height_in, width_in, channels].
      kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
          Should be a positive integer.
      rate: An integer, rate for atrous convolution.

    Returns:
      output: A tensor of size [batch, height_out, width_out, channels] with
          the input, either intact (if kernel_size == 1) or padded
          (if kernel_size > 1).
    """
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
                                    [pad_beg, pad_end], [0, 0]])
    return padded_inputs
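With kernel_size=3 the total padding is 2, split as 1 before and 1 after each spatial dimension; atrous rates inflate the effective kernel first. A quick sketch:

inputs = tf.ones([1, 5, 5, 3])
padded = fixed_padding(inputs, kernel_size=3)          # shape [1, 7, 7, 3]
atrous = fixed_padding(inputs, kernel_size=3, rate=2)  # effective kernel 5 -> [1, 9, 9, 3]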
Example 10: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def __init__(self,
             is_training,
             first_stage_features_stride,
             batch_norm_trainable=False,
             reuse_weights=None,
             weight_decay=0.0):
    """Constructor.

    Args:
      is_training: A boolean indicating whether the training version of the
          computation graph should be constructed.
      first_stage_features_stride: Output stride of extracted RPN feature map.
      batch_norm_trainable: Whether to update batch norm parameters during
          training or not. When training with a relatively large batch size
          (e.g. 8), it can be desirable to enable batch norm updates.
      reuse_weights: Whether to reuse variables. Default is None.
      weight_decay: float weight decay for feature extractor (default: 0.0).
    """
    self._is_training = is_training
    self._first_stage_features_stride = first_stage_features_stride
    self._train_batch_norm = (batch_norm_trainable and is_training)
    self._reuse_weights = reuse_weights
    self._weight_decay = weight_decay
Example 11: get_anchors

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def get_anchors(self, image_shape):
    """Returns the anchor pyramid for the given image size."""
    backbone_shapes = compute_backbone_shapes(self.config, image_shape)
    # Cache anchors and reuse if the image shape is the same
    if not hasattr(self, "_anchor_cache"):
        self._anchor_cache = {}
    if not tuple(image_shape) in self._anchor_cache:
        # Generate anchors
        a = utils.generate_pyramid_anchors(
            self.config.RPN_ANCHOR_SCALES,
            self.config.RPN_ANCHOR_RATIOS,
            backbone_shapes,
            self.config.BACKBONE_STRIDES,
            self.config.RPN_ANCHOR_STRIDE)
        # Keep a copy of the latest anchors in pixel coordinates because
        # they are used in the inspect_model notebooks.
        # TODO: Remove this after the notebooks are refactored to not use it
        self.anchors = a
        # Normalize coordinates
        self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(
            a, image_shape[:2])
    return self._anchor_cache[tuple(image_shape)]
Example 12: compose_image_meta

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def compose_image_meta(image_id, original_image_shape, image_shape,
                       window, scale, active_class_ids):
    """Takes attributes of an image and puts them in one 1D array.

    image_id: An int ID of the image. Useful for debugging.
    original_image_shape: [H, W, C] before resizing or padding.
    image_shape: [H, W, C] after resizing and padding.
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
        image is (excluding the padding).
    scale: The scaling factor applied to the original image (float32).
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    meta = np.array(
        [image_id] +                  # size=1
        list(original_image_shape) +  # size=3
        list(image_shape) +           # size=3
        list(window) +                # size=4 (y1, x1, y2, x2) in image coordinates
        [scale] +                     # size=1
        list(active_class_ids)        # size=num_classes
    )
    return meta
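With 4 classes, for instance, the resulting vector has length 1 + 3 + 3 + 4 + 1 + 4 = 16. A toy call with made-up values, assuming numpy is imported as np:

meta = compose_image_meta(
    image_id=7,
    original_image_shape=(480, 640, 3),
    image_shape=(512, 512, 3),
    window=(16, 0, 496, 512),
    scale=0.8,
    active_class_ids=[1, 1, 0, 1])
# meta.shape == (16,)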
Example 13: fetch_differentiable_fixed_embeddings

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def fetch_differentiable_fixed_embeddings(comp, state, stride):
    """Looks up fixed features with a separate, differentiable embedding lookup.

    Args:
      comp: Component whose fixed features we wish to look up.
      state: Live MasterState object for the component.
      stride: Tensor containing current batch * beam size.

    Returns:
      state handle: updated state handle to be used after this call
      fixed_embeddings: list of NamedTensor objects
    """
    _validate_embedded_fixed_features(comp)
    num_channels = len(comp.spec.fixed_feature)
    if not num_channels:
        return state.handle, []

    state.handle, indices, ids, weights, num_steps = (
        dragnn_ops.bulk_fixed_features(
            state.handle, component=comp.name, num_channels=num_channels))
    fixed_embeddings = []
    for channel, feature_spec in enumerate(comp.spec.fixed_feature):
        differentiable_or_constant = ('constant' if feature_spec.is_constant
                                      else 'differentiable')
        tf.logging.info('[%s] Adding %s fixed feature "%s"', comp.name,
                        differentiable_or_constant, feature_spec.name)
        size = stride * num_steps * feature_spec.size
        fixed_embedding = network_units.embedding_lookup(
            comp.get_variable(network_units.fixed_embeddings_name(channel)),
            indices[channel], ids[channel], weights[channel], size)
        if feature_spec.is_constant:
            fixed_embedding = tf.stop_gradient(fixed_embedding)
        fixed_embeddings.append(
            network_units.NamedTensor(fixed_embedding, feature_spec.name))
    return state.handle, fixed_embeddings
Example 14: extract_fixed_feature_ids

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def extract_fixed_feature_ids(comp, state, stride):
    """Extracts fixed feature IDs.

    Args:
      comp: Component whose fixed feature IDs we wish to extract.
      state: Live MasterState object for the component.
      stride: Tensor containing current batch * beam size.

    Returns:
      state handle: Updated state handle to be used after this call.
      ids: List of [stride * num_steps, 1] feature IDs per channel. Missing IDs
          (e.g., due to batch padding) are set to -1.
    """
    num_channels = len(comp.spec.fixed_feature)
    if not num_channels:
        return state.handle, []

    for feature_spec in comp.spec.fixed_feature:
        check.Eq(feature_spec.size, 1, 'All features must have size=1')
        check.Lt(feature_spec.embedding_dim, 0,
                 'All features must be non-embedded')

    state.handle, indices, ids, _, num_steps = dragnn_ops.bulk_fixed_features(
        state.handle, component=comp.name, num_channels=num_channels)
    size = stride * num_steps

    fixed_ids = []
    for channel, feature_spec in enumerate(comp.spec.fixed_feature):
        tf.logging.info('[%s] Adding fixed feature IDs "%s"', comp.name,
                        feature_spec.name)

        # The +1 and -1 increments ensure that missing IDs default to -1.
        #
        # TODO(googleuser): This formula breaks if multiple IDs are extracted
        # at some step. Try using tf.unique() to enforce the unique-IDs
        # precondition.
        sums = tf.unsorted_segment_sum(
            ids[channel] + 1, indices[channel], size) - 1
        sums = tf.expand_dims(sums, axis=1)
        fixed_ids.append(
            network_units.NamedTensor(sums, feature_spec.name, dim=1))
    return state.handle, fixed_ids
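The +1/-1 trick in Example 14 is worth seeing in isolation: segments that receive no ID sum to 0, which the final -1 maps back to the missing-ID marker. A standalone sketch with toy values:

ids = tf.constant([5, 3], dtype=tf.int64)
indices = tf.constant([0, 2], dtype=tf.int64)  # segments 1 and 3 receive no ID
size = 4
sums = tf.unsorted_segment_sum(ids + 1, indices, size) - 1
# [5, -1, 3, -1]: positions without an ID default to -1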
Example 15: embedding_size

# Required module: import tensorflow [as alias]
# Or: from tensorflow import size [as alias]
def embedding_size(self):
    size = 0
    for i in range(self._feature_size):
        size += self._num_features[i] * self._embedding_sizes[i]
    return size