This article collects and summarizes typical usage examples of the Python method tensorflow.python.ops.variable_scope.get_variable_scope. If you are unsure exactly what variable_scope.get_variable_scope does or how to use it, the curated examples below should help. You can also explore further usage examples of the containing module, tensorflow.python.ops.variable_scope.
Fifteen code examples of the variable_scope.get_variable_scope method are shown below, sorted by popularity.
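Before diving into the examples, here is a minimal sketch of the most common pattern: fetching the current scope with get_variable_scope() and flipping it into reuse mode so two network instantiations share weights. It assumes TensorFlow 1.x graph mode; the linear helper is illustrative, not taken from any of the projects below.

import tensorflow as tf
from tensorflow.python.ops import variable_scope

def linear(x):
  # get_variable participates in the enclosing variable scope, so once
  # reuse is enabled the same "model/w" variable is returned again.
  w = variable_scope.get_variable("w", shape=[4, 4])
  return tf.matmul(x, w)

x = tf.placeholder(tf.float32, [None, 4])
with variable_scope.variable_scope("model"):
  train_out = linear(x)
  # Flip the *current* scope to reuse; the second call shares weights.
  variable_scope.get_variable_scope().reuse_variables()
  eval_out = linear(x)

Examples 4 through 6 and 9 through 10 below use exactly this reuse trick to run a training graph and an evaluation graph over shared parameters.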
Example 1: call
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def call(self, inputs, forward=True):
  vs = variable_scope.get_variable_scope()
  vars_before = vs.global_variables()

  if forward:
    x1, x2 = inputs
    out = self._forward(x1, x2)
  else:
    y1, y2 = inputs
    out = self._backward(y1, y2)

  # Add any created variables to the Layer's variable stores
  new_vars = vs.global_variables()[len(vars_before):]
  train_vars = vs.trainable_variables()
  for new_var in new_vars:
    if new_var in train_vars:
      self._trainable_weights.append(new_var)
    else:
      self._non_trainable_weights.append(new_var)

  return out
Example 2: call
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def call(self, inputs, state):
  """Run the cell on embedded inputs."""
  with ops.device("/cpu:0"):
    if self._initializer:
      initializer = self._initializer
    elif vs.get_variable_scope().initializer:
      initializer = vs.get_variable_scope().initializer
    else:
      # Default initializer for embeddings should have variance=1.
      sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
      initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

    if isinstance(state, tuple):
      data_type = state[0].dtype
    else:
      data_type = state.dtype

    embedding = vs.get_variable(
        "embedding", [self._embedding_classes, self._embedding_size],
        initializer=initializer,
        dtype=data_type)
    embedded = embedding_ops.embedding_lookup(embedding,
                                              array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
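The elif branch above falls back to the initializer attached to the current scope, so the wrapper honors any scope-level default. A minimal standalone sketch of that inheritance (assuming TensorFlow 1.x; the scope and variable names are illustrative):

import tensorflow as tf
from tensorflow.python.ops import variable_scope

with variable_scope.variable_scope(
    "emb", initializer=tf.constant_initializer(0.5)):
  # get_variable_scope() exposes the scope-level default initializer.
  inherited = variable_scope.get_variable_scope().initializer
  v = variable_scope.get_variable("w", shape=[2])  # picks up the 0.5 default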
Example 3: _get_concat_variable
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(sharded_variable) == 1:
    return sharded_variable[0]

  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value

  concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable
Example 4: testAtrousFullyConvolutionalValues
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def testAtrousFullyConvolutionalValues(self):
  """Verify dense feature extraction with atrous convolution."""
  nominal_stride = 32
  for output_stride in [4, 8, 16, 32, None]:
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      with ops.Graph().as_default():
        with self.test_session() as sess:
          random_seed.set_random_seed(0)
          inputs = create_test_input(2, 81, 81, 3)
          # Dense feature extraction followed by subsampling.
          output, _ = self._resnet_small(
              inputs, None, global_pool=False, output_stride=output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          # Make the two networks use the same weights.
          variable_scope.get_variable_scope().reuse_variables()
          # Feature extraction at the nominal network rate.
          expected, _ = self._resnet_small(inputs, None, global_pool=False)
          sess.run(variables.global_variables_initializer())
          self.assertAllClose(
              output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
Example 5: testTrainEvalWithReuse
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 300, 400
  num_classes = 1000
  with self.test_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = alexnet.alexnet_v2(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = alexnet.alexnet_v2(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 4, 7, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example 6: testTrainEvalWithReuse
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_16(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_16(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example 7: _get_concat_variable
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(sharded_variable) == 1:
    return sharded_variable[0]

  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value

  # array_ops.concat takes (values, axis) in TF >= 1.0.
  concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable
Example 8: __call__
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def __call__(self, inputs, state, scope=None):
  """Run the cell on embedded inputs."""
  with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
      if isinstance(state, tuple):
        data_type = state[0].dtype
      else:
        data_type = state.dtype
      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(
          embedding, array_ops.reshape(inputs, [-1]))
      return self._cell(embedded, state)
Example 9: testTrainEvalWithReuse
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 231, 231
  eval_height, eval_width = 281, 281
  num_classes = 1000
  with self.cached_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = overfeat.overfeat(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = overfeat.overfeat(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example 10: testTrainEvalWithReuse
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.cached_session():
    train_inputs = random_ops.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_a(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    variable_scope.get_variable_scope().reuse_variables()
    eval_inputs = random_ops.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_a(
        eval_inputs, is_training=False, spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = math_ops.reduce_mean(logits, [1, 2])
    predictions = math_ops.argmax(logits, 1)
    self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Example 11: __call__
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def __call__(self, inputs, state, scope=None):
  """Run this RNN cell on inputs, starting from the given state.

  Args:
    inputs: `2-D` tensor with shape `[batch_size x input_size]`.
    state: if `self.state_size` is an integer, this should be a `2-D Tensor`
      with shape `[batch_size x self.state_size]`. Otherwise, if
      `self.state_size` is a tuple of integers, this should be a tuple
      with shapes `[batch_size x s] for s in self.state_size`.
    scope: VariableScope for the created subgraph; defaults to class name.

  Returns:
    A pair containing:
    - Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
    - New state: Either a single `2-D` tensor, or a tuple of tensors matching
      the arity and shapes of `state`.
  """
  if scope is not None:
    with vs.variable_scope(scope,
                           custom_getter=self._rnn_get_variable) as scope:
      return super(RNNCell, self).__call__(inputs, state, scope=scope)
  else:
    with vs.variable_scope(vs.get_variable_scope(),
                           custom_getter=self._rnn_get_variable):
      return super(RNNCell, self).__call__(inputs, state)
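Re-entering the current scope, as the else branch above does, installs a custom getter without opening a new name scope, so variable names are unchanged. A sketch of the same pattern outside RNNCell (assuming TensorFlow 1.x; _passthrough_getter is a hypothetical stand-in for _rnn_get_variable):

from tensorflow.python.ops import variable_scope as vs

def _passthrough_getter(getter, name, *args, **kwargs):
  # A real getter might cast dtypes or record the variables it creates.
  return getter(name, *args, **kwargs)

with vs.variable_scope(vs.get_variable_scope(),
                       custom_getter=_passthrough_getter):
  w = vs.get_variable("w", shape=[2, 2])  # created via the custom getter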
Example 12: _create_slot_var
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def _create_slot_var(primary, val, scope, validate_shape, shape, dtype):
  """Helper function for creating a slot variable."""
  # TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
  # scope.
  current_partitioner = variable_scope.get_variable_scope().partitioner
  variable_scope.get_variable_scope().set_partitioner(None)
  slot = variable_scope.get_variable(
      scope, initializer=val, trainable=False,
      use_resource=_is_resource(primary),
      shape=shape, dtype=dtype,
      validate_shape=validate_shape)
  variable_scope.get_variable_scope().set_partitioner(current_partitioner)

  # pylint: disable=protected-access
  if isinstance(primary, variables.Variable) and primary._save_slice_info:
    # Primary is a partitioned variable, so we need to also indicate that
    # the slot is a partitioned variable. Slots have the same partitioning
    # as their primaries.
    # For example, when using AdamOptimizer in a linear model, slot.name
    # here can be "linear//weights/Adam:0", while primary.op.name is
    # "linear//weight". We want to get 'Adam' as real_slot_name, so we
    # remove "'linear//weight' + '/'" and ':0'.
    real_slot_name = slot.name[len(primary.op.name + "/"):-2]
    slice_info = primary._save_slice_info
    slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
        slice_info.full_name + "/" + real_slot_name,
        slice_info.full_shape[:],
        slice_info.var_offset[:],
        slice_info.var_shape[:]))
  # pylint: enable=protected-access
  return slot
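The save/clear/restore dance around the partitioner keeps slot variables unpartitioned even when the surrounding scope is partitioned. A minimal sketch of just that dance (assuming TensorFlow 1.x; the slot name and shape are illustrative):

from tensorflow.python.ops import variable_scope

scope = variable_scope.get_variable_scope()
saved_partitioner = scope.partitioner  # may be None
scope.set_partitioner(None)            # create the slot unpartitioned
try:
  slot = variable_scope.get_variable("slot", shape=[4], trainable=False)
finally:
  scope.set_partitioner(saved_partitioner)  # restore the outer setting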
Example 13: __init__
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def __init__(self, *args, **kwargs):
  super(_FuncGraph, self).__init__(*args, **kwargs)
  self._building_function = True
  self._outer_graph = ops.get_default_graph()
  self._vscope = vs.get_variable_scope()
  self._old_custom_getter = self._vscope.custom_getter
  self._captured = {}
  self.extra_inputs = []
  self.extra_args = []
  self.extra_vars = []
Example 14: ndlstm_base_unrolled
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def ndlstm_base_unrolled(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using unrolling and the TensorFlow
  LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstmUnrolled", [inputs]):
    length, batch_size, _ = _shape(inputs)
    lstm_cell = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    output_u = []
    inputs_u = array_ops.unstack(inputs)
    if reverse:
      inputs_u = list(reversed(inputs_u))
    for i in xrange(length):
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = lstm_cell(inputs_u[i], state)
      output_u += [output]
    if reverse:
      output_u = list(reversed(output_u))
    outputs = array_ops.stack(output_u)
    return outputs
Example 15: sequence_to_final
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable_scope [as alias]
def sequence_to_final(inputs, noutput, scope=None, name=None, reverse=False):
  """Run an LSTM across all steps and return only the final state.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: size of output vector
    scope: optional scope name
    name: optional name for output tensor
    reverse: run in reverse

  Returns:
    Batch of size (batch_size, noutput).
  """
  with variable_scope.variable_scope(scope, "SequenceToFinal", [inputs]):
    length, batch_size, _ = _shape(inputs)
    lstm = rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm.state_size])
    inputs_u = array_ops.unstack(inputs)
    if reverse:
      inputs_u = list(reversed(inputs_u))
    for i in xrange(length):
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = lstm(inputs_u[i], state)
    outputs = array_ops.reshape(output, [batch_size, noutput], name=name)
    return outputs