This page collects typical usage examples of the Python function tensorflow.python.framework.ops.name_scope. If you have been wondering what name_scope does, how to call it, or what it looks like in real code, the curated examples below should help.
The 15 code examples of the name_scope function shown below are ordered by popularity by default.
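Before the examples, here is a minimal sketch (assuming TensorFlow 1.x graph mode, where this internal API lives) of what ops.name_scope does: it pushes a prefix onto the names of all ops created inside the context, and nested scopes compose with "/".

import tensorflow as tf
from tensorflow.python.framework import ops

g = tf.Graph()
with g.as_default():
  with ops.name_scope("outer"):
    with ops.name_scope("inner"):
      c = tf.constant(1.0, name="c")
print(c.name)  # "outer/inner/c:0"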
Example 1: __init__
def __init__(self,
             mu,
             cov,
             validate_args=True,
             allow_nan_stats=False,
             name="MultivariateNormalCov"):
  """Multivariate Normal distributions on `R^k`.

  User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
  which determines the covariance.

  Args:
    mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
    cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
      `[N1,...,Nb, k, k]`.
    validate_args: Whether to validate input with asserts. If `validate_args`
      is `False`, and the inputs are invalid, correct behavior is not
      guaranteed.
    allow_nan_stats: `Boolean`, default `False`. If `False`, raise an
      exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading to
      undefined statistics will return `NaN` for this statistic.
    name: The name to give Ops created by the initializer.

  Raises:
    TypeError: If `mu` and `cov` are different dtypes.
  """
  self._allow_nan_stats = allow_nan_stats
  self._validate_args = validate_args
  with ops.name_scope(name):
    with ops.name_scope("init", values=[mu] + cov.inputs):
      self._cov = cov
      self._mu = self._check_mu(mu)
      self._name = name
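The two-level pattern above (an outer scope named after the object, an inner "init" scope whose `values` list declares the tensors being consumed) is a recurring TensorFlow idiom. A minimal sketch of the resulting op names, assuming TF 1.x, with a constant standing in for `mu` and a hypothetical `checked_mu` identity op:

import tensorflow as tf
from tensorflow.python.framework import ops

mu = tf.constant([0.0, 0.0], name="mu")
with ops.name_scope("MultivariateNormalCov"):
  with ops.name_scope("init", values=[mu]):
    checked = tf.identity(mu, name="checked_mu")
print(checked.name)  # "MultivariateNormalCov/init/checked_mu:0"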
Example 2: log_prob
def log_prob(self, x, name="log_prob"):
  """`Log(P[x])`, computed for every batch member.

  Args:
    x: Non-negative floating point tensor whose shape can
      be broadcast with `self.a` and `self.b`. For fixed leading
      dimensions, the last dimension represents counts for the corresponding
      Beta distribution in `self.a` and `self.b`. `x` is only legal if
      `0 < x < 1`.
    name: Name to give this Op, defaults to "log_prob".

  Returns:
    Log probabilities for each record, shape `[N1,...,Nm]`.
  """
  a = self._a
  b = self._b
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[a, x]):
      x = self._check_x(x)
      unnorm_pdf = (a - 1) * math_ops.log(x) + (
          b - 1) * math_ops.log(1 - x)
      normalization_factor = -(math_ops.lgamma(a) + math_ops.lgamma(b)
                               - math_ops.lgamma(a + b))
      log_prob = unnorm_pdf + normalization_factor
      return log_prob
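The two terms computed above are just the Beta log-density, (a-1)*log(x) + (b-1)*log(1-x) - log B(a, b). A quick numerical cross-check, assuming SciPy is available:

import numpy as np
from scipy.special import gammaln
from scipy.stats import beta as beta_dist

a, b, x = 2.0, 3.0, 0.25
unnorm_pdf = (a - 1) * np.log(x) + (b - 1) * np.log(1 - x)
normalization = -(gammaln(a) + gammaln(b) - gammaln(a + b))
print(np.isclose(unnorm_pdf + normalization, beta_dist.logpdf(x, a, b)))  # True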
Example 3: __init__
def __init__(self, example_indices, feature_indices, feature_values):
  """Creates a `SparseFeatureColumn` representation.

  Args:
    example_indices: A 1-D int64 tensor of shape `[N]`. Also accepts
      python lists, or numpy arrays.
    feature_indices: A 1-D int64 tensor of shape `[N]`. Also accepts
      python lists, or numpy arrays.
    feature_values: An optional 1-D float tensor of shape `[N]`. Also
      accepts python lists, or numpy arrays.

  Returns:
    A `SparseFeatureColumn`
  """
  with name_scope(None, 'SparseFeatureColumn',
                  [example_indices, feature_indices]):
    self._example_indices = convert_to_tensor(example_indices,
                                              name='example_indices',
                                              dtype=dtypes.int64)
    self._feature_indices = convert_to_tensor(feature_indices,
                                              name='feature_indices',
                                              dtype=dtypes.int64)
  self._feature_values = None
  if feature_values is not None:
    with name_scope(None, 'SparseFeatureColumn', [feature_values]):
      self._feature_values = convert_to_tensor(feature_values,
                                               name='feature_values',
                                               dtype=dtypes.float32)
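Note the `name_scope(None, 'SparseFeatureColumn', values)` form: the first argument is the user-supplied name (none here), the second a default that the graph uniquifies on reuse. A sketch of that fallback behavior, assuming TF 1.x:

from tensorflow.python.framework import ops

with ops.name_scope(None, "SparseFeatureColumn", []) as scope:
  print(scope)  # "SparseFeatureColumn/"
with ops.name_scope(None, "SparseFeatureColumn", []) as scope:
  print(scope)  # "SparseFeatureColumn_1/"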
Example 4: __init__
def __init__(self, shape, dtype, scale=None,
             verify_pd=True, name="OperatorPDIdentity"):
  """Initialize an `OperatorPDIdentity`.

  Args:
    shape: `int32` rank 1 `Tensor` of length at least 2, and with the last
      two entries equal (since this is a square matrix).
    dtype: Data type of the matrix that this operator represents.
    scale: floating point rank 0 `Tensor` representing a scalar to
      multiply the identity matrix by. This will default to a scale of 1.
      This will be converted to the dtype `dtype`.
    verify_pd: `Boolean`, if `True`, asserts are added to the initialization
      args to ensure they define this operator as a square (batch) matrix.
    name: Name to prepend to `Ops`.
  """
  # Grab static shape if available now.
  with ops.name_scope(name):
    with ops.name_scope("init", values=[shape, scale]):
      self._dtype = dtypes.as_dtype(dtype)
      self._verify_pd = verify_pd
      self._name = name

      # Store the static shape (if possible) right now before adding the
      # asserts, since the asserts prevent .constant_value from working.
      shape = ops.convert_to_tensor(shape, name="shape")
      self._get_shape = tensor_shape.TensorShape(
          tensor_util.constant_value(shape))
      self._shape_arg = self._check_shape(shape)
      self._scale = self._check_scale(scale, self._dtype)
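The inline comment explains the ordering: `tensor_util.constant_value` can fold a constant `shape` tensor into a concrete numpy array, but not a value only known at run time. A small sketch of both outcomes, assuming TF 1.x (the placeholder is purely for illustration):

import tensorflow as tf
from tensorflow.python.framework import ops, tensor_shape, tensor_util

static = ops.convert_to_tensor([2, 3, 3], name="shape")
print(tensor_shape.TensorShape(tensor_util.constant_value(static)))   # (2, 3, 3)

dynamic = tf.placeholder(tf.int32, shape=[3])
print(tensor_shape.TensorShape(tensor_util.constant_value(dynamic)))  # <unknown>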
Example 5: _concat
def _concat(self):
  """Returns the overall concatenated value as a `Tensor`.

  This is different from using the partitioned variable directly as a tensor
  (through tensor conversion and `as_tensor`) in that it creates a new set of
  operations that keeps the control dependencies from its scope.

  Returns:
    `Tensor` containing the concatenated value.
  """
  if len(self._variable_list) == 1:
    with ops.name_scope(None):
      return array_ops.identity(self._variable_list[0], name=self._name)

  partition_axes = self._partition_axes()
  if len(partition_axes) > 1:
    raise NotImplementedError(
        "Cannot concatenate along more than one dimension: %s. "
        "Multi-axis partition concat is not supported" % str(partition_axes))
  partition_ix = partition_axes[0]

  with ops.name_scope(self._name + "/ConcatPartitions/"):
    concatenated = array_ops.concat(partition_ix, self._variable_list)

  with ops.name_scope(None):
    return array_ops.identity(concatenated, name=self._name)
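Two scope tricks are at work here: `ops.name_scope(None)` resets to the top-level scope so the final identity gets the bare variable name, and a scope string ending in "/" (as in ".../ConcatPartitions/") is treated as an absolute, already-valid scope rather than being uniquified. A sketch of the reset, assuming TF 1.x:

import tensorflow as tf
from tensorflow.python.framework import ops

with ops.name_scope("outer"):
  inner = tf.constant(1.0, name="c")    # "outer/c:0"
  with ops.name_scope(None):            # back to the root scope
    top = tf.identity(inner, name="c")  # "c:0"
print(inner.name, top.name)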
Example 6: create_estimator_spec
def create_estimator_spec(
    self, features, mode, logits, labels=None, train_op_fn=None):
  """See `Head`."""
  with ops.name_scope('head'):
    logits = head_lib._check_logits(logits, self.logits_dimension)  # pylint:disable=protected-access

    # Predict.
    pred_keys = prediction_keys.PredictionKeys
    with ops.name_scope(None, 'predictions', (logits,)):
      probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
      predictions = {
          pred_keys.LOGITS: logits,
          pred_keys.PROBABILITIES: probabilities,
      }
    if mode == model_fn.ModeKeys.PREDICT:
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=predictions,
          export_outputs={
              '': export_output.ClassificationOutput(scores=probabilities)
          })

    # Eval.
    unweighted_loss, processed_labels = self.create_loss(
        features=features, mode=mode, logits=logits, labels=labels)
    # Averages loss over classes.
    per_example_loss = math_ops.reduce_mean(
        unweighted_loss, axis=-1, keep_dims=True)
    weights = head_lib._weights(features, self._weight_column)  # pylint:disable=protected-access
    training_loss = losses.compute_weighted_loss(
        per_example_loss, weights=weights, reduction=losses.Reduction.SUM)
    if mode == model_fn.ModeKeys.EVAL:
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          predictions=predictions,
          loss=training_loss,
          eval_metric_ops=self._eval_metric_ops(
              labels=processed_labels,
              probabilities=probabilities,
              weights=weights,
              per_example_loss=per_example_loss))

    # Train.
    if train_op_fn is None:
      raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
          training_loss)
      summary.scalar(
          head_lib._summary_key(  # pylint:disable=protected-access
              self._name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
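The `ops.name_scope('')` in the train branch behaves like `name_scope(None)` in the previous example: an empty name resets to the top-level scope, so the loss summaries are recorded under bare keys instead of being nested under 'head/'. A tiny sketch, assuming TF 1.x:

import tensorflow as tf
from tensorflow.python.framework import ops

with ops.name_scope("head"):
  with ops.name_scope(""):  # reset to top level
    loss = tf.constant(0.0, name="loss")
print(loss.name)  # "loss:0"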
Example 7: apply_gradients
def apply_gradients(self, grads_and_vars, name=None):
  """Apply gradients to variables.

  This is the second part of `minimize()`. It returns an `Operation` that
  applies gradients.

  Args:
    grads_and_vars: List of (gradient, variable) pairs as returned by
      `compute_gradients()`.
    name: Optional name for the returned operation. Defaults to the name
      passed to the `Optimizer` constructor.

  Returns:
    An `Operation` that applies the specified gradients. If `global_step`
    was not None, that operation also increments `global_step`.

  Raises:
    TypeError: If `grads_and_vars` is malformed.
    ValueError: If none of the variables have gradients.
  """
  grads_and_vars = _filter_grads(grads_and_vars)
  var_list = [v for (_, v) in grads_and_vars]
  if distribution_strategy_context.has_distribution_strategy():
    reduced_grads = merge_grads(grads_and_vars)
    grads_and_vars = zip(reduced_grads, var_list)

  with ops.init_scope():
    self._prepare()
    self._create_slots(var_list)
  update_ops = []

  def update_grad_to_var(grad, var):
    """Apply gradient to variable."""
    if isinstance(var, ops.Tensor):
      raise NotImplementedError("Trying to update a Tensor ", var)
    if isinstance(grad, ops.IndexedSlices):
      if var.constraint is not None:
        raise RuntimeError(
            "Cannot use a constraint function on a sparse variable.")
      return self._resource_apply_sparse_duplicate_indices(
          grad.values, var, grad.indices)
    update_op = self._resource_apply_dense(grad, var)
    if var.constraint is not None:
      with ops.control_dependencies([update_op]):
        return var.assign(var.constraint(var))
    else:
      return update_op

  with ops.name_scope(name, self._name) as name:
    for grad, var in grads_and_vars:
      scope_name = ("" if ops.executing_eagerly_outside_functions() else
                    "_" + var.op.name)
      with ops.name_scope("update" + scope_name):
        update_ops.append(update_grad_to_var(grad, var))
    # Control dependencies do not work in per-replica mode; please change
    # this once b/118841692 is fixed.
    # with ops.control_dependencies(update_ops):
    #   apply_updates = self._iterations.assign_add(1).op
    apply_updates = merge_update_step(update_ops, self.iterations)
    return apply_updates
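The `name_scope(name, self._name) as name` line shows the two-argument fallback: the optimizer's own name is used when the caller does not pass one, and the resolved scope string is rebound to `name`. A sketch assuming TF 1.x ("MyOptimizer" is a made-up default for illustration):

from tensorflow.python.framework import ops

def resolved_scope(name=None):
  with ops.name_scope(name, "MyOptimizer") as scope:
    return scope

print(resolved_scope())        # "MyOptimizer/"
print(resolved_scope("adam"))  # "adam/"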
Example 8: log_prob
def log_prob(self, event, name="log_prob"):
  """Log of the probability mass function.

  Args:
    event: `int32` or `int64` binary Tensor.
    name: A name for this operation (optional).

  Returns:
    The log-probabilities of the events.
  """
  # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
  # inconsistent behavior for logits = inf/-inf.
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self.logits, event]):
      event = ops.convert_to_tensor(event, name="event")
      event = math_ops.cast(event, self.logits.dtype)
      logits = self.logits
      # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
      # so we do this here.
      # TODO(b/30637701): Check dynamic shape, and don't broadcast if the
      # dynamic shapes are the same.
      if (not event.get_shape().is_fully_defined() or
          not logits.get_shape().is_fully_defined() or
          event.get_shape() != logits.get_shape()):
        logits = array_ops.ones_like(event) * logits
        event = array_ops.ones_like(logits) * event
      return -nn.sigmoid_cross_entropy_with_logits(logits, event)
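The `ones_like` multiplication is a manual broadcast: each operand is expanded to the common shape before calling an op that does not broadcast by itself. The same trick in plain numpy:

import numpy as np

logits = np.zeros((1, 3))
event = np.ones((2, 1))
logits_b = np.ones_like(event) * logits   # (2, 1) * (1, 3) -> (2, 3)
event_b = np.ones_like(logits_b) * event  # (2, 3) * (2, 1) -> (2, 3)
print(logits_b.shape, event_b.shape)      # (2, 3) (2, 3)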
Example 9: mode
def mode(self, name="mode"):
  """Mode of each batch member.

  The mode of a gamma distribution is `(alpha - 1) / beta` when `alpha > 1`,
  and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception
  will be raised rather than returning `NaN`.

  Args:
    name: A name to give this op.

  Returns:
    The mode for every batch member, a `Tensor` with same `dtype` as self.
  """
  alpha = self._alpha
  beta = self._beta
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[alpha, beta]):
      mode_if_defined = (alpha - 1.0) / beta
      if self.allow_nan_stats:
        alpha_ge_1 = alpha >= 1.0
        nan = np.nan * self._ones()
        return math_ops.select(alpha_ge_1, mode_if_defined, nan)
      else:
        one = constant_op.constant(1.0, dtype=self.dtype)
        return control_flow_ops.with_dependencies(
            [check_ops.assert_less(
                one, alpha,
                message="mode not defined for components of alpha <= 1")],
            mode_if_defined)
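A quick numerical check that `(alpha - 1) / beta` is indeed where the Gamma density peaks (assuming SciPy; note scipy parameterizes Gamma by `scale = 1 / beta`):

import numpy as np
from scipy.stats import gamma

alpha, beta = 3.0, 2.0
xs = np.linspace(0.01, 5.0, 2000)
pdf = gamma.pdf(xs, a=alpha, scale=1.0 / beta)
print(xs[np.argmax(pdf)], (alpha - 1.0) / beta)  # both close to 1.0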
Example 10: decorated
def decorated(*args):
  with ops.name_scope("batch") as name:
    for a in args:
      if not isinstance(a, ops.Tensor):
        raise ValueError("All arguments to functions decorated with "
                         "`batch_function` are supposed to be Tensors; "
                         "found %s" % repr(a))
    batched_tensors, batch_index, id_t = gen_batch_ops.batch(
        args,
        num_batch_threads=num_batch_threads,
        max_batch_size=max_batch_size,
        batch_timeout_micros=batch_timeout_micros,
        max_enqueued_batches=max_enqueued_batches,
        allowed_batch_sizes=allowed_batch_sizes,
        grad_timeout_micros=grad_timeout_micros,
        shared_name=name)
    outputs = f(*batched_tensors)
    if isinstance(outputs, ops.Tensor):
      outputs_list = [outputs]
    else:
      outputs_list = outputs
    with ops.name_scope("unbatch") as unbatch_name:
      unbatched = [
          gen_batch_ops.unbatch(t, batch_index, id_t,
                                timeout_micros=unbatch_timeout_micros,
                                shared_name=unbatch_name + "/" + t.name)
          for t in outputs_list]
    if isinstance(outputs, ops.Tensor):
      return unbatched[0]
    return unbatched
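Binding the scope with `as name` captures the uniquified scope string ("batch/", "batch_1/", ...), which the decorator reuses as `shared_name` so each decorated function gets its own batching queue while its batch and unbatch ops share one. The capture itself, assuming TF 1.x:

from tensorflow.python.framework import ops

with ops.name_scope("batch") as first:
  pass
with ops.name_scope("batch") as second:
  pass
print(first, second)  # "batch/" "batch_1/"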
Example 11: variance
def variance(self, name="variance"):
  """Variance of the Wishart distribution.

  This function should not be confused with the covariance of the Wishart,
  which is a matrix of shape `q x q` where
  `q = dimension * (dimension+1) / 2`,
  with elements corresponding to some mapping from a lower-triangular
  matrix to a vector space.

  This function returns the diagonal of the covariance matrix, but shaped
  as a `dimension x dimension` matrix.

  Args:
    name: The name of this op.

  Returns:
    variance: `Tensor` of dtype `self.dtype`.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=list(self.inputs.values())):
      x = math_ops.sqrt(self.df) * self.scale_operator_pd.to_dense()
      d = array_ops.expand_dims(array_ops.batch_matrix_diag_part(x), -1)
      v = math_ops.square(x) + math_ops.batch_matmul(d, d, adj_y=True)
      if self.cholesky_input_output_matrices:
        return linalg_ops.batch_cholesky(v)
      else:
        return v
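Elementwise, the computation above is Var(W_ij) = df * (S_ij^2 + S_ii * S_jj) for a Wishart with `df` degrees of freedom and scale `S`. A Monte Carlo sanity check, assuming SciPy (prints True with high probability):

import numpy as np
from scipy.stats import wishart

df, scale = 10, np.array([[2.0, 0.5], [0.5, 1.0]])
samples = wishart.rvs(df=df, scale=scale, size=200000)
empirical = samples.var(axis=0)
closed_form = df * (scale**2 + np.outer(np.diag(scale), np.diag(scale)))
print(np.allclose(empirical, closed_form, rtol=0.1))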
Example 12: sample_n
def sample_n(self, n, seed=None, name="sample_n"):
  """Sample `n` observations from the Normal Distributions.

  Args:
    n: `Scalar`, type int32, the number of observations to sample.
    seed: Python integer, the random seed.
    name: The name to give this op.

  Returns:
    samples: `[n, ...]`, a `Tensor` of `n` samples for each
      of the distributions determined by broadcasting the hyperparameters.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self._mu, self._sigma, n]):
      broadcast_shape = common_shapes.broadcast_shape(
          self._mu.get_shape(), self._sigma.get_shape())
      n = ops.convert_to_tensor(n)
      shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
      sampled = random_ops.random_normal(
          shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)

      # Provide some hints to shape inference.
      n_val = tensor_util.constant_value(n)
      final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
      sampled.set_shape(final_shape)

      return sampled * self._sigma + self._mu
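The last line is the location-scale transform: standard normal draws are scaled by `sigma` and shifted by `mu`, with broadcasting handling the batch dimensions. The same idea in standalone numpy:

import numpy as np

rng = np.random.default_rng(0)
mu, sigma = np.array([0.0, 5.0]), np.array([1.0, 0.5])
n = 100000
sampled = rng.standard_normal((n,) + mu.shape)
samples = sampled * sigma + mu
print(samples.mean(axis=0).round(2))  # approx [0.0, 5.0]
print(samples.std(axis=0).round(2))   # approx [1.0, 0.5]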
Example 13: sqrt_matmul
def sqrt_matmul(self, x, transpose_x=False, name="sqrt_matmul"):
  """Left (batch) matmul `x` by a sqrt of this matrix: `Sx` where `A = S S^T`.

  `x` is a batch matrix with compatible shape if

  ```
  self.shape = [N1,...,Nn] + [k, k]
  x.shape = [N1,...,Nn] + [k, r]
  ```

  Args:
    x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
      this `Operator`.
    transpose_x: If `True`, `x` is transposed before multiplication.
    name: A name scope to use for ops added by this method.

  Returns:
    A result equivalent to `tf.matmul(self.sqrt_to_dense(), x)`.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[x] + self.inputs):
      x = ops.convert_to_tensor(x, name="x")
      return self._dispatch_based_on_batch(
          self._batch_sqrt_matmul, self._sqrt_matmul, x=x,
          transpose_x=transpose_x)
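The contract `A = S S^T` makes `sqrt_matmul` a building block for whitening-style products: applying `S^T` and then `S` recovers multiplication by `A`. A numpy sketch of that identity:

import numpy as np

rng = np.random.default_rng(1)
s = np.tril(rng.random((3, 3))) + 3.0 * np.eye(3)  # a square-root factor S
a = s @ s.T                                        # A = S S^T
x = rng.random((3, 2))
print(np.allclose(a @ x, s @ (s.T @ x)))           # True: A x = S (S^T x)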
Example 14: log_prob
def log_prob(self, counts, name="log_prob"):
  """`Log(P[counts])`, computed for every batch member.

  For each batch member of counts `k`, `P[counts]` is the probability that
  after sampling `n` draws from this Binomial distribution, the number of
  successes is `k`. Note that different sequences of draws can result in the
  same counts, thus the probability includes a combinatorial coefficient.

  Args:
    counts: Non-negative tensor with dtype `dtype` and whose shape can be
      broadcast with `self.p` and `self.n`. `counts` is only legal if it is
      less than or equal to `n` and its components are equal to integer
      values.
    name: Name to give this Op, defaults to "log_prob".

  Returns:
    Log probabilities for each record, shape `[N1,...,Nm]`.
  """
  n = self._n
  p = self._p
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self._n, self._p, counts]):
      counts = self._check_counts(counts)
      prob_prob = counts * math_ops.log(p) + (
          n - counts) * math_ops.log(1 - p)
      combinations = math_ops.lgamma(n + 1) - math_ops.lgamma(
          counts + 1) - math_ops.lgamma(n - counts + 1)
      log_prob = prob_prob + combinations
      return log_prob
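The `lgamma` expression is the log binomial coefficient log C(n, k), so the total is the familiar binomial log-pmf. A cross-check, assuming SciPy is available:

import numpy as np
from scipy.special import gammaln
from scipy.stats import binom

n, p, k = 10.0, 0.3, 4.0
prob_prob = k * np.log(p) + (n - k) * np.log(1 - p)
combinations = gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)
print(np.isclose(prob_prob + combinations, binom.logpmf(k, int(n), p)))  # True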
Example 15: testImportGraphWithFunctionTwice
def testImportGraphWithFunctionTwice(self):
  g = ops.Graph()
  with g.as_default():
    @function.Defun()
    def Add2(x, y):
      return math_ops.add(x, y)

    x = array_ops.placeholder(dtype=dtypes.float32, name="x")
    y = array_ops.placeholder(dtype=dtypes.float32, name="y")
    _ = Add2(x, y, name="z")  # pylint: disable=unexpected-keyword-arg

  gdef = g.as_graph_def()

  x = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  y = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  input_map = {"x:0": x, "y:0": y}

  with ops.name_scope("first"):
    z1 = importer.import_graph_def(gdef, return_elements=["z:0"],
                                   input_map=input_map)[0]

  with ops.name_scope("second"):
    z2 = importer.import_graph_def(gdef, return_elements=["z:0"],
                                   input_map=input_map)[0]

  with self.test_session() as sess:
    z1_val, z2_val = sess.run((z1, z2))
    self.assertAllEqual(z1_val, z2_val)