This article collects and summarizes typical usage examples of the Python function tensorflow.python.framework.tensor_shape.dimension_value. If you are wondering exactly what dimension_value does and how to use it, the curated code examples below may help.
The sections below present 15 code examples of dimension_value, ordered by popularity.
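As a quick orientation before the examples, here is a minimal sketch (not taken from the examples below) of what dimension_value returns: a plain Python int when a dimension is statically known, and None when it is not. It also passes None through unchanged, which is why it works under both the V1 (Dimension object) and V2 (int-or-None) TensorShape behaviors.

from tensorflow.python.framework import tensor_shape

shape = tensor_shape.TensorShape([3, None])
print(tensor_shape.dimension_value(shape[0]))  # 3 (a plain Python int)
print(tensor_shape.dimension_value(shape[1]))  # None (statically unknown)
print(tensor_shape.dimension_value(None))      # None is passed through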
Example 1: build
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
last_dim = tensor_shape.dimension_value(input_shape[-1])
self.input_spec = InputSpec(min_ndim=2,
axes={-1: last_dim})
self.kernel = self.add_weight(
'kernel',
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
Example 2: maybe_check_quadrature_param
def maybe_check_quadrature_param(param, name, validate_args):
"""Helper which checks validity of `loc` and `scale` init args."""
with ops.name_scope(name="check_" + name, values=[param]):
assertions = []
if param.shape.ndims is not None:
if param.shape.ndims == 0:
raise ValueError("Mixing params must be a (batch of) vector; "
"{}.rank={} is not at least one.".format(
name, param.shape.ndims))
elif validate_args:
assertions.append(check_ops.assert_rank_at_least(
param, 1,
message=("Mixing params must be a (batch of) vector; "
"{}.rank is not at least one.".format(
name))))
# TODO(jvdillon): Remove once we support k-mixtures.
    if tensor_shape.dimension_value(
        param.shape.with_rank_at_least(1)[-1]) is not None:
if tensor_shape.dimension_value(param.shape[-1]) != 1:
raise NotImplementedError("Currently only bimixtures are supported; "
"{}.shape[-1]={} is not 1.".format(
name,
tensor_shape.dimension_value(
param.shape[-1])))
elif validate_args:
assertions.append(check_ops.assert_equal(
array_ops.shape(param)[-1], 1,
message=("Currently only bimixtures are supported; "
"{}.shape[-1] is not 1.".format(name))))
if assertions:
return control_flow_ops.with_dependencies(assertions, param)
return param
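A hypothetical call sketch for Example 2 (names and values invented, assuming the module imports used in the example are in scope): a scalar mixing parameter is rejected outright by the static rank check, and a vector whose last dimension is not 1 trips the bimixture restriction.

import tensorflow as tf

loc = tf.constant([[0.0]])  # shape [1, 1]: passes both static checks
loc = maybe_check_quadrature_param(loc, "loc", validate_args=True)

# maybe_check_quadrature_param(tf.constant(0.), "loc", True)  # ValueError: rank 0
# maybe_check_quadrature_param(tf.ones([3]), "loc", True)     # NotImplementedError: shape[-1] != 1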
Example 3: build
def build(self, input_shape):
dtype = dtypes.as_dtype(self.dtype or K.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError('Unable to build `Dense` layer with non-floating point '
'dtype %s' % (dtype,))
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
last_dim = tensor_shape.dimension_value(input_shape[-1])
self.input_spec = InputSpec(min_ndim=2,
axes={-1: last_dim})
self.kernel = self.add_weight(
'kernel',
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
Example 4: _min_matrix_dim
def _min_matrix_dim(self):
"""Minimum of domain/range dimension, if statically available, else None."""
domain_dim = tensor_shape.dimension_value(self.domain_dimension)
range_dim = tensor_shape.dimension_value(self.range_dimension)
if domain_dim is None or range_dim is None:
return None
return min(domain_dim, range_dim)
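For intuition, a small illustration using a concrete LinearOperator (the helper above is private, so this only mirrors its logic rather than calling it):

import tensorflow as tf

op = tf.linalg.LinearOperatorFullMatrix(tf.ones([2, 3]))     # acts like a 2x3 matrix
range_dim = tf.compat.dimension_value(op.range_dimension)    # 2 (rows)
domain_dim = tf.compat.dimension_value(op.domain_dimension)  # 3 (columns)
print(min(domain_dim, range_dim))  # 2, matching what _min_matrix_dim would return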
Example 5: convert_legacy_structure
def convert_legacy_structure(output_types, output_shapes, output_classes):
"""Returns a `Structure` that represents the given legacy structure.
This method provides a way to convert from the existing `Dataset` and
`Iterator` structure-related properties to a `Structure` object. A "legacy"
structure is represented by the `tf.data.Dataset.output_types`,
`tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes`
properties.
TODO(b/110122868): Remove this function once `Structure` is used throughout
`tf.data`.
Args:
output_types: A nested structure of `tf.DType` objects corresponding to
each component of a structured value.
output_shapes: A nested structure of `tf.TensorShape` objects
corresponding to each component a structured value.
output_classes: A nested structure of Python `type` objects corresponding
to each component of a structured value.
Returns:
A `Structure`.
Raises:
TypeError: If a structure cannot be built from the arguments, because one of
the component classes in `output_classes` is not supported.
"""
flat_types = nest.flatten(output_types)
flat_shapes = nest.flatten(output_shapes)
flat_classes = nest.flatten(output_classes)
flat_ret = []
for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes,
flat_classes):
if isinstance(flat_class, Structure):
flat_ret.append(flat_class)
elif issubclass(flat_class, sparse_tensor_lib.SparseTensor):
flat_ret.append(SparseTensorStructure(flat_type, flat_shape))
elif issubclass(flat_class, ops.Tensor):
flat_ret.append(TensorStructure(flat_type, flat_shape))
elif issubclass(flat_class, tensor_array_ops.TensorArray):
# We sneaked the dynamic_size and infer_shape into the legacy shape.
flat_ret.append(
TensorArrayStructure(
flat_type, flat_shape[2:],
dynamic_size=tensor_shape.dimension_value(flat_shape[0]),
infer_shape=tensor_shape.dimension_value(flat_shape[1])))
else:
# NOTE(mrry): Since legacy structures produced by iterators only
# comprise Tensors, SparseTensors, and nests, we do not need to
# support all structure types here.
raise TypeError(
"Could not build a structure for output class %r" % (flat_class,))
ret = nest.pack_sequence_as(output_classes, flat_ret)
if isinstance(ret, Structure):
return ret
else:
return NestedStructure(ret)
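A hedged usage sketch for Example 5 (assuming the tf.data-internal `Structure` classes imported by this module are available): converting a simple legacy triple of types, shapes, and classes yields one `TensorStructure` per component.

import tensorflow as tf

structure = convert_legacy_structure(
    output_types=(tf.int32, tf.string),
    output_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
    output_classes=(tf.Tensor, tf.Tensor))
# Each flat component becomes a TensorStructure; the tuple as a whole
# is wrapped in a NestedStructure.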
Example 6: _inverse_event_shape
def _inverse_event_shape(self, output_shape):
batch_shape, n1, n2 = (output_shape[:-2],
tensor_shape.dimension_value(output_shape[-2]),
tensor_shape.dimension_value(output_shape[-1]))
if n1 is None or n2 is None:
m = None
elif n1 != n2:
raise ValueError("Matrix must be square. (saw [{}, {}])".format(n1, n2))
else:
    m = n1 * (n1 + 1) // 2  # integer division so the dimension stays an int under Python 3
return batch_shape.concatenate([m])
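A quick numeric check of the `m = n1 * (n1 + 1) // 2` count computed above, under the usual triangular-matrix interpretation: a square n x n triangular matrix has n(n+1)/2 free entries.

n = 4
assert n * (n + 1) // 2 == 10  # a 4x4 triangular matrix has 10 free entries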
Example 7: _forward
def _forward(self, x):
if self._unroll_loop:
event_size = tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1])
if event_size is None:
raise ValueError(
"The final dimension of `x` must be known at graph construction "
"time if `unroll_loop=True`. `x.shape: %r`" % x.shape)
y = array_ops.zeros_like(x, name="y0")
for _ in range(event_size):
shift, log_scale = self._shift_and_log_scale_fn(y)
# next_y = scale * x + shift
next_y = x
if log_scale is not None:
next_y *= math_ops.exp(log_scale)
if shift is not None:
next_y += shift
y = next_y
return y
event_size = array_ops.shape(x)[-1]
# If the event size is available at graph construction time, we can inform
# the graph compiler of the maximum number of steps. If not,
# static_event_size will be None, and the maximum_iterations argument will
# have no effect.
static_event_size = tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1])
y0 = array_ops.zeros_like(x, name="y0")
# call the template once to ensure creation
_ = self._shift_and_log_scale_fn(y0)
def _loop_body(index, y0):
"""While-loop body for autoregression calculation."""
# Set caching device to avoid re-getting the tf.Variable for every while
# loop iteration.
with variable_scope_lib.variable_scope(
variable_scope_lib.get_variable_scope()) as vs:
if vs.caching_device is None:
vs.set_caching_device(lambda op: op.device)
shift, log_scale = self._shift_and_log_scale_fn(y0)
y = x
if log_scale is not None:
y *= math_ops.exp(log_scale)
if shift is not None:
y += shift
return index + 1, y
_, y = control_flow_ops.while_loop(
cond=lambda index, _: index < event_size,
body=_loop_body,
loop_vars=(0, y0),
maximum_iterations=static_event_size)
return y
Example 8: build
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = 1 if self.data_format == 'channels_first' else -1
if tensor_shape.dimension_value(input_shape[channel_axis]) is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = tensor_shape.dimension_value(input_shape[channel_axis])
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.mask = self.add_variable(
name='mask',
shape=kernel_shape,
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=self.dtype)
self.kernel = self.add_variable(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
self.threshold = self.add_variable(
name='threshold',
shape=[],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=self.dtype)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
self.masked_kernel = math_ops.multiply(self.mask, self.kernel,
MASKED_WEIGHT_NAME)
ops.add_to_collection(MASK_COLLECTION, self.mask)
ops.add_to_collection(MASKED_WEIGHT_COLLECTION, self.masked_kernel)
ops.add_to_collection(THRESHOLD_COLLECTION, self.threshold)
ops.add_to_collection(WEIGHT_COLLECTION, self.kernel)
if self.use_bias:
self.bias = self.add_variable(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = base.InputSpec(
ndim=self.rank + 2, axes={channel_axis: input_dim})
self.built = True
Example 9: recalculate_output_shapes
def recalculate_output_shapes(output_shapes):
"""Recalculates the output_shapes after dividing it by num_workers."""
if len(output_shapes) < 1:
raise ValueError("Input shape should have at least one dimension.")
if (tensor_shape.dimension_value(output_shapes[0]) and
tensor_shape.dimension_value(output_shapes[0]) % num_workers != 0):
raise errors.InvalidArgumentError(
None, None,
"First dim of input shape: %d is not divisible by num_workers: %d" %
(output_shapes[0], num_workers))
output_dims = [d for d in output_shapes.dims]
output_dims[0] = output_dims[0] // num_workers
return tensor_shape.TensorShape(output_dims)
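A standalone sketch of the same arithmetic (here `num_workers` is a local variable; in Example 9 it is captured from the enclosing scope), showing the first dimension being divided evenly across workers:

from tensorflow.python.framework import tensor_shape

num_workers = 4
output_shapes = tensor_shape.TensorShape([8, 224, 224, 3])
output_dims = [d for d in output_shapes.dims]
output_dims[0] = output_dims[0] // num_workers
print(tensor_shape.TensorShape(output_dims))  # (2, 224, 224, 3)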
Example 10: _set_diag_operators
def _set_diag_operators(self, diag_update, is_diag_update_positive):
"""Set attributes self._diag_update and self._diag_operator."""
if diag_update is not None:
self._diag_operator = linear_operator_diag.LinearOperatorDiag(
self._diag_update, is_positive_definite=is_diag_update_positive)
self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(
1. / self._diag_update, is_positive_definite=is_diag_update_positive)
else:
if tensor_shape.dimension_value(self.u.shape[-1]) is not None:
r = tensor_shape.dimension_value(self.u.shape[-1])
else:
r = array_ops.shape(self.u)[-1]
self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
num_rows=r, dtype=self.dtype)
self._diag_inv_operator = self._diag_operator
Example 11: crf_log_likelihood
def crf_log_likelihood(inputs,
tag_indices,
sequence_lengths,
transition_params=None):
"""Computes the log-likelihood of tag sequences in a CRF.
Args:
inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
to use as input to the CRF layer.
tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which we
compute the log-likelihood.
sequence_lengths: A [batch_size] vector of true sequence lengths.
transition_params: A [num_tags, num_tags] transition matrix, if available.
Returns:
log_likelihood: A [batch_size] `Tensor` containing the log-likelihood of
each example, given the sequence of tag indices.
transition_params: A [num_tags, num_tags] transition matrix. This is either
provided by the caller or created in this function.
"""
# Get shape information.
num_tags = tensor_shape.dimension_value(inputs.shape[2])
# Get the transition matrix if not provided.
if transition_params is None:
transition_params = vs.get_variable("transitions", [num_tags, num_tags])
sequence_scores = crf_sequence_score(inputs, tag_indices, sequence_lengths,
transition_params)
log_norm = crf_log_norm(inputs, sequence_lengths, transition_params)
# Normalize the scores to get the log-likelihood per example.
log_likelihood = sequence_scores - log_norm
return log_likelihood, transition_params
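A hypothetical usage sketch for Example 11, with invented shapes (batch_size=2, max_seq_len=5, num_tags=3). Since the function may create a `transitions` variable internally via `vs.get_variable`, this assumes TF1-style graph mode:

import numpy as np
import tensorflow as tf

inputs = tf.constant(np.random.rand(2, 5, 3), dtype=tf.float32)
tag_indices = tf.constant([[0, 1, 2, 1, 0], [2, 2, 1, 0, 0]], dtype=tf.int32)
sequence_lengths = tf.constant([5, 3], dtype=tf.int32)

log_likelihood, transition_params = crf_log_likelihood(
    inputs, tag_indices, sequence_lengths)
# log_likelihood: shape [2]; transition_params: shape [3, 3]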
Example 12: testUnknownIndices
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_nd_t = array_ops.gather_nd(params, indices)
shape = gather_nd_t.get_shape()
self.assertEqual(None, shape.ndims)
self.assertEqual(None, tensor_shape.dimension_value(shape[0]))
Example 13: rank
def rank(self):
"""The number of dimensions in this shape, or None if unknown."""
inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])
if inner_ndims is None:
return None
else:
return len(self._partitioned_dim_sizes) + inner_ndims
Example 14: _create_vars
def _create_vars(self, var_list, state):
# Construct ordered dictionary for variable dimensions, sorted by name.
shape_dict = {}
for v in var_list:
shape_dict[v.name] = tensor_shape.dimension_value(np.prod(v.get_shape()))
self.shape_dict = collections.OrderedDict(
sorted(shape_dict.items(), key=lambda t: t[0]))
# Assign each variable its location in flat_grad. The locations are based on
# the order of sorted names.
idx = 0
for v_name, v_dim in self.shape_dict.items():
self.index_dict[v_name] = idx
idx += v_dim
state.create_non_slot(
initial_value=math_ops.cast(0., dtype=var_list[0].dtype.base_dtype),
name="global_step")
# Buffer for keeping past gradients.
window = state.get_hyper("window")
grad_buffer_init = array_ops.zeros(
[window, idx], dtype=var_list[0].dtype.base_dtype)
state.create_non_slot(initial_value=grad_buffer_init, name="grad_buffer")
state.create_non_slot(
initial_value=array_ops.zeros(
(idx,), dtype=var_list[0].dtype.base_dtype),
name="moment1")
# Flattened gradient that contains gradients for all variables in the model.
state.create_non_slot(
initial_value=array_ops.zeros(
(idx,), dtype=var_list[0].dtype.base_dtype),
name="flat_grad")
Example 15: row_splits_to_segment_ids
def row_splits_to_segment_ids(splits, name=None):
"""Generates the segmentation corresponding to a RaggedTensor `row_splits`.
Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if
`splits[j] <= i < splits[j+1]`. Example:
```python
>>> ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9]).eval()
[ 0 0 0 2 2 3 4 4 4 ]
```
Args:
splits: A sorted 1-D int64 Tensor. `splits[0]` must be zero.
name: A name prefix for the returned tensor (optional).
Returns:
A sorted 1-D int64 Tensor, with `shape=[splits[-1]]`
Raises:
ValueError: If `splits` is invalid.
"""
with ops.name_scope(name, "RaggedSplitsToSegmentIds", [splits]) as name:
splits = ops.convert_to_tensor(splits, dtype=dtypes.int64, name="splits")
splits.shape.assert_has_rank(1)
if tensor_shape.dimension_value(splits.shape[0]) == 0:
raise ValueError("Invalid row_splits: []")
row_lengths = splits[1:] - splits[:-1]
nrows = array_ops.shape(splits, out_type=dtypes.int64)[-1] - 1
indices = math_ops.range(nrows)
return ragged_util.repeat(indices, repeats=row_lengths, axis=0)