This article collects typical usage examples of the Python function tensorflow.python.ops.variable_scope.variable_op_scope. If you have been wondering what exactly variable_op_scope does, how to use it, or where to find working examples, the curated code samples below should help.
A total of 15 code examples of variable_op_scope are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
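Before the examples, a minimal sketch (not taken from any of the snippets below) of the call pattern they all share may help: variable_op_scope(values, name_or_scope, default_name) opens a variable scope whose name is uniquified from default_name when no explicit name is supplied. The helper below is hypothetical and assumes a TF 0.x-era build in which variable_op_scope still exists; later releases folded this behavior into tf.variable_scope(..., default_name=...).

import tensorflow as tf
from tensorflow.python.ops import variable_scope

def linear(x, output_dim, scope=None):
  # Opens a uniquified scope ("Linear", "Linear_1", ...) whenever no explicit
  # scope name is passed in, mirroring the library code in the examples below.
  with variable_scope.variable_op_scope([x], scope, "Linear"):
    w = variable_scope.get_variable(
        "w", [x.get_shape()[-1].value, output_dim],
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    b = variable_scope.get_variable(
        "b", [output_dim], initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, w) + b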
Example 1: testVarOpScope
def testVarOpScope(self):
  with self.test_session():
    with tf.name_scope("scope1"):
      with variable_scope.variable_op_scope([], "tower", "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope1/tower/scope2/")
      with variable_scope.variable_op_scope([], "tower", "default"):
        with self.assertRaises(ValueError):
          variable_scope.get_variable("w", [])
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope1/tower_1/scope2/")

    with tf.name_scope("scope2"):
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope2/default/scope2/")
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "default_1/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope2/default_1/scope2/")
Example 2: testVarOpScopeReuse
def testVarOpScopeReuse(self):
  with self.test_session():
    with tf.variable_scope("outer") as outer:
      with variable_scope.variable_op_scope([], "tower", "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/tower/scope2/")
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/default/scope2/")

    with tf.variable_scope(outer, reuse=True) as outer:
      with variable_scope.variable_op_scope([], "tower", "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/tower/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/tower/scope2/")
      with variable_scope.variable_op_scope([], None, "default"):
        self.assertEqual(variable_scope.get_variable("w", []).name,
                         "outer/default/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
Example 3: build_model
def build_model(self, features, feature_columns, is_training):
  """See base class."""
  self._feature_columns = feature_columns

  input_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=self._num_ps_replicas,
          min_slice_size=64 << 20))
  with variable_scope.variable_op_scope(
      features.values(),
      "input_from_feature_columns",
      partitioner=input_layer_partitioner) as scope:
    net = layers.input_from_feature_columns(
        features,
        self._get_feature_columns(),
        weight_collections=[self._weight_collection_name],
        scope=scope)

  hidden_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=self._num_ps_replicas))
  for layer_id, num_hidden_units in enumerate(self._hidden_units):
    with variable_scope.variable_op_scope(
        [net], "hiddenlayer_%d" % layer_id,
        partitioner=hidden_layer_partitioner) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=self._activation_fn,
          variables_collections=[self._weight_collection_name],
          scope=scope)
      if self._dropout is not None and is_training:
        net = layers.dropout(
            net,
            keep_prob=(1.0 - self._dropout))
    self._add_hidden_layer_summary(net, scope.name)

  with variable_scope.variable_op_scope(
      [net], "dnn_logits",
      partitioner=hidden_layer_partitioner) as scope:
    logits = layers.fully_connected(
        net,
        self._num_label_columns,
        activation_fn=None,
        variables_collections=[self._weight_collection_name],
        scope=scope)
  self._add_hidden_layer_summary(logits, "dnn_logits")
  return logits
Example 4: stack
def stack(inputs, layer, stack_args, **kwargs):
  """Builds a stack of layers by applying layer repeatedly using stack_args.

  `stack` allows you to repeatedly apply the same operation with different
  arguments `stack_args[i]`. For each application of the layer, `stack` creates
  a new scope appended with an increasing number. For example:

  ```python
    stack(x, fully_connected, [32, 64, 128], scope='fc')
    # It is equivalent to:
    x = fully_connected(x, 32, scope='fc/fc_1')
    x = fully_connected(x, 64, scope='fc/fc_2')
    x = fully_connected(x, 128, scope='fc/fc_3')
  ```

  Args:
    inputs: A `Tensor` suitable for layer.
    layer: A layer(inputs, *args, **kwargs)
    stack_args: A list/tuple of parameters for each call of layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    a `Tensor` result of applying the stacked layers.

  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  if not isinstance(stack_args, (list, tuple)):
    raise ValueError('stack_args need to be a list or tuple')
  with variable_scope.variable_op_scope([inputs], scope, 'Stack'):
    outputs = inputs
    scope = scope or layer.__name__
    for i in range(len(stack_args)):
      kwargs['scope'] = scope + '_' + str(i+1)
      layer_args = stack_args[i]
      if not isinstance(layer_args, (list, tuple)):
        layer_args = [layer_args]
      outputs = layer(outputs, *layer_args, **kwargs)
    return outputs
Example 5: __init__
def __init__(self, name, func, create_scope_now=False):
  """Creates a template for the given function.

  Args:
    name: A name for the scope created by this template. The
      name will be made unique by appending `_N` to it (see how
      `tf.variable_op_scope` treats the `default_name` for details).
    func: The function to apply each time.
    create_scope_now: Whether to create the scope at Template construction
      time, rather than first call. Defaults to false. Creating the scope at
      construction time may be more convenient if the template is to be passed
      through much lower level code, and you want to be sure of the scope
      name without knowing exactly where it will be first called. If set to
      True, the scope will be created in the constructor, and all subsequent
      times in __call__, leading to a trailing numeral being added to the
      names of all created Tensors. If set to False, the scope will be created
      at the first call location.

  Raises:
    ValueError: if the name is None.
  """
  self._func = func
  self._stacktrace = traceback.format_stack()[:-2]
  self._name = name
  if name is None:
    raise ValueError("name cannot be None.")
  if create_scope_now:
    with variable_scope.variable_op_scope([], None, self._name) as vs:
      self._var_scope = vs
  else:
    self._var_scope = None
  # This variable keeps track of whether the template has been called yet,
  # which is not the same as whether the scope has been created.
  self._variables_created = False
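For context (not part of the snippet above): a Template is normally created through tf.make_template rather than by instantiating the class directly, and every call after the first re-enters the scope that variable_op_scope created. A hypothetical sketch, assuming the same TF 0.x-era API:

import tensorflow as tf

def scale_by_y(x):
  # The variable is created on the first call and reused on every later call.
  y = tf.get_variable("y", [], initializer=tf.constant_initializer(2.0))
  return x * y

scale = tf.make_template("scale_by_y", scale_by_y)
a = scale(tf.constant(1.0))   # first call creates scale_by_y/y
b = scale(tf.constant(3.0))   # later calls reuse the same variable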
Example 6: dnn_autoencoder
def dnn_autoencoder(
    tensor_in, hidden_units, activation=nn.relu, add_noise=None, dropout=None,
    scope=None):
  """Creates fully connected autoencoder subgraph.

  Args:
    tensor_in: tensor or placeholder for input features.
    hidden_units: list of counts of hidden units in each layer.
    activation: activation function used to map inner latent layer onto
      reconstruction layer.
    add_noise: a function that adds noise to tensor_in,
      e.g. def add_noise(x):
               return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
    dropout: if not None, will add a dropout layer with given probability.
    scope: the variable scope for this op.

  Returns:
    Tensors for encoder and decoder.
  """
  with vs.variable_op_scope([tensor_in], scope, "autoencoder"):
    if add_noise is not None:
      tensor_in = add_noise(tensor_in)
    with vs.variable_scope("encoder"):
      # build DNN encoder
      encoder = dnn_ops.dnn(
          tensor_in, hidden_units, activation=activation, dropout=dropout)
    with vs.variable_scope("decoder"):
      # reverse hidden_units and build DNN decoder
      decoder = dnn_ops.dnn(
          encoder, hidden_units[::-1], activation=activation, dropout=dropout)
    return encoder, decoder
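A hypothetical usage sketch for the function above (the placeholder shape, hidden-unit sizes, and loss are illustrative; the skflow-era modules vs and dnn_ops are assumed to be importable as in the snippet):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])        # e.g. flattened 28x28 images
encoder, decoder = dnn_autoencoder(x, [256, 64])   # 64-dimensional latent code
loss = tf.reduce_mean(tf.square(decoder - x))      # simple reconstruction loss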
Example 7: _auc_hist_accumulate
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
  """Accumulate histograms in new variables."""
  with variable_scope.variable_op_scope(
      [hist_true, hist_false], None, 'hist_accumulate'):
    # Holds running total histogram of scores for records labeled True.
    hist_true_acc = variable_scope.get_variable(
        'hist_true_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_true.dtype),
        collections=collections,
        trainable=False)
    # Holds running total histogram of scores for records labeled False.
    hist_false_acc = variable_scope.get_variable(
        'hist_false_acc',
        initializer=array_ops.zeros_initializer(
            [nbins],
            dtype=hist_false.dtype),
        collections=collections,
        trainable=False)

    update_op = control_flow_ops.group(
        hist_true_acc.assign_add(hist_true),
        hist_false_acc.assign_add(hist_false),
        name='update_op')

    return hist_true_acc, hist_false_acc, update_op
Example 8: weighted_moving_average
def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            collections=None,
                            name=None):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    `moving_average(value * weight) / moving_average(weight)`,
  where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`
  Internally, this Op keeps moving average variables of both `value * weight`
  and `weight`.

  Args:
    value: A numeric `Tensor`.
    decay: A float `Tensor` or float value. The moving average decay.
    weight: `Tensor` that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
      floating point division. If `False`, use division implied by dtypes.
    collections: List of graph collections keys to add the internal variables
      `value * weight` and `weight` to. Defaults to `[GraphKeys.VARIABLES]`.
    name: Optional name of the returned operation.
      Defaults to "WeightedMovingAvg".

  Returns:
    An Operation that updates and returns the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which are
  # moving averages of the updates. Thus, the signature of this function is
  # quite different than assign_moving_average.
  if collections is None:
    collections = [ops.GraphKeys.VARIABLES]
  with variable_scope.variable_op_scope(
      [value, weight, decay], name, "WeightedMovingAvg") as scope:
    value_x_weight_var = variable_scope.get_variable(
        "value_x_weight",
        initializer=init_ops.zeros_initializer(value.get_shape(),
                                               dtype=value.dtype),
        trainable=False,
        collections=collections)
    weight_var = variable_scope.get_variable(
        "weight",
        initializer=init_ops.zeros_initializer(weight.get_shape(),
                                               dtype=weight.dtype),
        trainable=False,
        collections=collections)
    numerator = assign_moving_average(value_x_weight_var, value * weight, decay)
    denominator = assign_moving_average(weight_var, weight, decay)
    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
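A hypothetical usage sketch (values and shapes are illustrative; it assumes the TF 0.x-era session and initializer APIs used throughout this page):

import tensorflow as tf

value = tf.placeholder(tf.float32, [])    # e.g. a per-batch loss
weight = tf.placeholder(tf.float32, [])   # e.g. the batch size
wma = weighted_moving_average(value, decay=0.99, weight=weight)

with tf.Session() as sess:
  sess.run(tf.initialize_all_variables())
  for batch_loss, batch_size in [(0.9, 32.0), (0.7, 32.0), (0.5, 16.0)]:
    # Each fetch updates the internal moving averages and returns their ratio.
    print(sess.run(wma, {value: batch_loss, weight: batch_size}))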
Example 9: auc_using_histogram
def auc_using_histogram(boolean_labels,
                        scores,
                        score_range,
                        nbins=100,
                        collections=None,
                        check_shape=True,
                        name=None):
  """AUC computed by maintaining histograms.

  Rather than computing AUC directly, this Op maintains Variables containing
  histograms of the scores associated with `True` and `False` labels. By
  comparing these the AUC is generated, with some discretization error.
  See: "Efficient AUC Learning Curve Calculation" by Bouckaert.

  This AUC Op updates in `O(batch_size + nbins)` time and works well even with
  large class imbalance. The accuracy is limited by discretization error due
  to finite number of bins. If scores are concentrated in fewer bins,
  accuracy is lower. If this is a concern, we recommend trying different
  numbers of bins and comparing results.

  Args:
    boolean_labels: 1-D boolean `Tensor`. Entry is `True` if the corresponding
      record is in class.
    scores: 1-D numeric `Tensor`, same shape as boolean_labels.
    score_range: `Tensor` of shape `[2]`, same dtype as `scores`. The min/max
      values of score that we expect. Scores outside range will be clipped.
    nbins: Integer number of bins to use. Accuracy strictly increases as the
      number of bins increases.
    collections: List of graph collections keys. Internal histogram Variables
      are added to these collections. Defaults to `[GraphKeys.LOCAL_VARIABLES]`.
    check_shape: Boolean. If `True`, do a runtime shape check on the scores
      and labels.
    name: A name for this Op. Defaults to "auc_using_histogram".

  Returns:
    auc: `float32` scalar `Tensor`. Fetching this converts internal histograms
      to auc value.
    update_op: `Op`, when run, updates internal histograms.
  """
  if collections is None:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  with variable_scope.variable_op_scope(
      [boolean_labels, scores, score_range], name, 'auc_using_histogram'):
    scores, boolean_labels = metric_ops.remove_squeezable_dimensions(
        scores, boolean_labels)
    score_range = ops.convert_to_tensor(score_range, name='score_range')
    boolean_labels, scores = _check_labels_and_scores(
        boolean_labels, scores, check_shape)
    hist_true, hist_false = _make_auc_histograms(boolean_labels, scores,
                                                 score_range, nbins)
    hist_true_acc, hist_false_acc, update_op = _auc_hist_accumulate(hist_true,
                                                                    hist_false,
                                                                    nbins,
                                                                    collections)
    auc = _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins)
    return auc, update_op
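A hypothetical usage sketch (the batch source and shapes are illustrative): run update_op once per evaluation batch, then fetch auc to convert the accumulated histograms into a value. The internal variables live in GraphKeys.LOCAL_VARIABLES, so they need the local-variable initializer.

import tensorflow as tf

labels = tf.placeholder(tf.bool, [None])
scores = tf.placeholder(tf.float32, [None])
auc, update_op = auc_using_histogram(labels, scores, score_range=[0.0, 1.0])

with tf.Session() as sess:
  sess.run(tf.initialize_local_variables())
  for batch_labels, batch_scores in eval_batches:   # eval_batches: your data source
    sess.run(update_op, {labels: batch_labels, scores: batch_scores})
  print(sess.run(auc))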
Example 10: __call__
def __call__(self, *args, **kwargs):
  # Capture the name of the variable_scope here because if we capture at
  # construction, then name_scopes would have a '_N+1' suffix.
  if self._var_scope:
    with variable_scope.variable_scope(self._var_scope, reuse=True):
      return self._call_func(args, kwargs, check_for_new_variables=True)
  else:
    with variable_scope.variable_op_scope([], None, self._name) as vs:
      self._var_scope = vs
      return self._call_func(args, kwargs, check_for_new_variables=False)
Example 11: stack
def stack(inputs, layer, stack_args, **kwargs):
  """Builds a stack of layers by applying layer repeatedly using stack_args.

  `stack` allows you to repeatedly apply the same operation with different
  arguments `stack_args[i]`. For each application of the layer, `stack` creates
  a new scope appended with an increasing number. For example:

  ```python
    y = stack(x, fully_connected, [32, 64, 128], scope='fc')
    # It is equivalent to:
    x = fully_connected(x, 32, scope='fc/fc_1')
    x = fully_connected(x, 64, scope='fc/fc_2')
    y = fully_connected(x, 128, scope='fc/fc_3')
  ```

  If the `scope` argument is not given in `kwargs`, it is set to
  `layer.__name__`, or `layer.func.__name__` (for `functools.partial`
  objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='stack'`.

  Args:
    inputs: A `Tensor` suitable for layer.
    layer: A layer with arguments `(inputs, *args, **kwargs)`
    stack_args: A list/tuple of parameters for each call of layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    a `Tensor` result of applying the stacked layers.

  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  if not isinstance(stack_args, (list, tuple)):
    raise ValueError('stack_args need to be a list or tuple')
  with variable_scope.variable_op_scope([inputs], scope, 'Stack'):
    inputs = ops.convert_to_tensor(inputs)
    if scope is None:
      if hasattr(layer, '__name__'):
        scope = layer.__name__
      elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
        scope = layer.func.__name__  # In case layer is a functools.partial.
      else:
        scope = 'stack'
    outputs = inputs
    for i in range(len(stack_args)):
      kwargs['scope'] = scope + '_' + str(i+1)
      layer_args = stack_args[i]
      if not isinstance(layer_args, (list, tuple)):
        layer_args = [layer_args]
      outputs = layer(outputs, *layer_args, **kwargs)
    return outputs
Example 12: bias_add
def bias_add(inputs,
             activation_fn=None,
             initializer=init_ops.zeros_initializer,
             regularizer=None,
             reuse=None,
             variables_collections=None,
             outputs_collections=None,
             trainable=True,
             scope=None):
  """Adds a bias to the inputs.

  Can be used as a normalizer function for conv2d and fully_connected.

  Args:
    inputs: a tensor with at least rank 2 and a known value for the last
      dimension, e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
    activation_fn: Optional activation function.
    initializer: An initializer for the bias, defaults to 0.
    regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_op_scope.

  Returns:
    a tensor representing the result of adding biases to the inputs.
  """
  with variable_scope.variable_op_scope([inputs],
                                        scope, 'BiasAdd', reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
    biases_collections = utils.get_variable_collections(variables_collections,
                                                        'biases')
    biases = variables.model_variable('biases',
                                      shape=[num_features,],
                                      dtype=dtype,
                                      initializer=initializer,
                                      regularizer=regularizer,
                                      collections=biases_collections,
                                      trainable=trainable)
    outputs = nn.bias_add(inputs, biases)
    if activation_fn:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
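A hypothetical usage sketch, following the docstring above: bias_add can be called as a standalone layer or passed as the normalizer_fn of another contrib layer (the input shape and layer sizes are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
net = layers.conv2d(images, 32, [3, 3], normalizer_fn=bias_add)  # bias as the normalizer
net = bias_add(layers.flatten(net), activation_fn=tf.nn.relu)    # standalone layer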
Example 13: _dnn_logits
def _dnn_logits(self, features, is_training=False):
  net = layers.input_from_feature_columns(
      features, self._get_dnn_feature_columns(), weight_collections=[self._dnn_weight_collection]
  )
  for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
    with variable_scope.variable_op_scope(
        [net],
        "hiddenlayer_%d" % layer_id,
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._config.num_ps_replicas
        ),
    ) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=self._dnn_activation_fn,
          variables_collections=[self._dnn_weight_collection],
          scope=scope,
      )
      if self._dnn_dropout is not None and is_training:
        net = layers.dropout(net, keep_prob=(1.0 - self._dnn_dropout))
    self._add_hidden_layer_summary(net, scope.name)
  with variable_scope.variable_op_scope(
      [net],
      "dnn_logit",
      partitioner=partitioned_variables.min_max_variable_partitioner(max_partitions=self._config.num_ps_replicas),
  ) as scope:
    logit = layers.fully_connected(
        net,
        self._target_column.num_label_columns,
        activation_fn=None,
        variables_collections=[self._dnn_weight_collection],
        scope=scope,
    )
  self._add_hidden_layer_summary(logit, "dnn_logit")
  return logit
Example 14: __init__
def __init__(self, value, decay,
             truediv=True,
             collections=None,
             reduction_indices=None,
             name=None):
  self.value = value
  self.reduction_indices = reduction_indices or [0]
  eps = 1e-8
  if truediv:
    div = math_ops.truediv
  else:
    div = math_ops.div
  if collections is None:
    collections = [ops.GraphKeys.VARIABLES]

  value_shape = value.get_shape().as_list()
  shape = []
  for dim in range(len(value_shape)):
    if dim in self.reduction_indices:
      shape.append(1)
    else:
      shape.append(value_shape[dim])

  with variable_scope.variable_op_scope(
      [value, decay], name, "MomentTracker") as scope:

    mean_x_weight_var = variable_scope.get_variable(
        "mean_x_weight", trainable=False, collections=collections,
        initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))

    variance_x_weight_var = variable_scope.get_variable(
        "variance_x_weight", trainable=False,
        collections=collections,
        initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))

    weight_var = variable_scope.get_variable(
        "weight", trainable=False, collections=collections,
        initializer=init_ops.zeros_initializer([1], dtype=tf.float32))

    self.tracked_mean = div(mean_x_weight_var, weight_var + eps)
    self.tracked_variance = div(variance_x_weight_var, weight_var + eps)

    self.batch_mean, self.batch_variance = tf.nn.moments(
        self.value, axes=self.reduction_indices,
        shift=self.tracked_mean, keep_dims=True)

    mean_numerator = assign_moving_average(mean_x_weight_var, self.batch_mean, decay)
    variance_numerator = assign_moving_average(variance_x_weight_var, self.batch_variance, decay)
    denominator = assign_moving_average(weight_var, 1.0, decay)

    self.update_mean = div(mean_numerator, denominator + eps, name=scope.name)
    self.update_variance = div(variance_numerator, denominator + eps, name=scope.name)
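A hypothetical usage sketch for the tracker above (this class comes from third-party code built on the same TF 0.x APIs; the activation shape and decay are illustrative): construct it once, run its update ops alongside the training step, and read tracked_mean / tracked_variance at evaluation time.

import tensorflow as tf

activations = tf.placeholder(tf.float32, [None, 128])
tracker = MomentTracker(activations, decay=0.99, reduction_indices=[0])
moment_updates = [tracker.update_mean, tracker.update_variance]
# During training: sess.run([train_op] + moment_updates, feed_dict=...)
# During inference: use tracker.tracked_mean and tracker.tracked_variance.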
Example 15: weighted_moving_average
def weighted_moving_average(
    value, decay, weight, truediv=True, name="WeightedMovingAvg"):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    moving_average(value * weight) / moving_average(weight),
  where a moving average updates by the rule
    new_value = decay * old_value + (1 - decay) * update

  Args:
    value: A tensor.
    decay: A float Tensor or float value. The moving average decay.
    weight: A tensor that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv: Boolean, if True, dividing by moving_average(weight) is floating
      point division. If False, use division implied by dtypes.
    name: Optional name of the returned operation.

  Returns:
    An Operation that updates the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which are
  # moving averages of the updates. Thus, the signature of this function is
  # quite different than assign_moving_average.
  with variable_scope.variable_op_scope(
      [value, weight, decay], name, name) as scope:
    value_variable = variable_scope.get_variable(
        "value",
        initializer=array_ops.zeros_initializer(
            value.get_shape(), dtype=value.dtype),
        trainable=False
    )
    weight_variable = variable_scope.get_variable(
        "weight",
        initializer=array_ops.zeros_initializer(
            weight.get_shape(), dtype=weight.dtype),
        trainable=False
    )
    numerator = assign_moving_average(value_variable, value * weight, decay)
    denominator = assign_moving_average(weight_variable, weight, decay)
    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)