This article summarizes typical usage of the tensorflow.get_variable_scope method in Python. If you are wondering what tensorflow.get_variable_scope does or how to call it, the curated examples below should help; you can also explore other methods of the tensorflow module.
The following presents 15 code examples of tensorflow.get_variable_scope, drawn from open-source projects and sorted by popularity by default.
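Before diving in, here is the pattern nearly all of these examples rely on. In TensorFlow 1.x, tf.get_variable_scope() returns the variable scope currently in effect; calling reuse_variables() on it flips that scope into reuse mode, so subsequent tf.get_variable calls return the variables already created under the scope instead of raising an error. The sketch below is minimal and assumes TensorFlow 1.x graph mode; the helper name build_layer is hypothetical, used only for illustration.

import tensorflow as tf  # TensorFlow 1.x graph-mode API

def build_layer(x):
    # tf.get_variable creates "w" on first call; in reuse mode it returns the existing one.
    w = tf.get_variable("w", shape=[int(x.get_shape()[1]), 10])
    return tf.matmul(x, w)

with tf.variable_scope("model"):
    train_out = build_layer(tf.ones([4, 8]))   # creates model/w
    tf.get_variable_scope().reuse_variables()  # switch the current scope to reuse mode
    eval_out = build_layer(tf.ones([2, 8]))    # reuses model/w rather than creating a new variable

This is exactly the move the train/eval test cases below make to share weights between a training graph and an evaluation graph.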
Example 1: testTrainEvalWithReuse
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 231, 231
  eval_height, eval_width = 281, 281
  num_classes = 1000
  with self.test_session():
    train_inputs = tf.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = overfeat.overfeat(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    # Reuse the variables created by the training graph for the eval graph.
    tf.get_variable_scope().reuse_variables()
    eval_inputs = tf.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
                                  spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = tf.reduce_mean(logits, [1, 2])
    predictions = tf.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example 2: testTrainEvalWithReuse
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
  train_batch_size = 2
  eval_batch_size = 1
  train_height, train_width = 224, 224
  eval_height, eval_width = 256, 256
  num_classes = 1000
  with self.test_session():
    train_inputs = tf.random_uniform(
        (train_batch_size, train_height, train_width, 3))
    logits, _ = vgg.vgg_a(train_inputs)
    self.assertListEqual(logits.get_shape().as_list(),
                         [train_batch_size, num_classes])
    tf.get_variable_scope().reuse_variables()
    eval_inputs = tf.random_uniform(
        (eval_batch_size, eval_height, eval_width, 3))
    logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                          spatial_squeeze=False)
    self.assertListEqual(logits.get_shape().as_list(),
                         [eval_batch_size, 2, 2, num_classes])
    logits = tf.reduce_mean(logits, [1, 2])
    predictions = tf.argmax(logits, 1)
    self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example 3: conv_tower_fn
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def conv_tower_fn(self, images, is_training=True, reuse=None):
  """Computes convolutional features using the InceptionV3 model.

  Args:
    images: A tensor of shape [batch_size, height, width, channels].
    is_training: whether is training or not.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse 'scope' must be given.

  Returns:
    A tensor of shape [batch_size, OH, OW, N], where OWxOH is resolution of
    output feature map and N is number of output features (depends on the
    network architecture).
  """
  mparams = self._mparams['conv_tower_fn']
  logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
  with tf.variable_scope('conv_tower_fn/INCE'):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      net, _ = inception.inception_v3_base(
          images, final_endpoint=mparams.final_endpoint)
    return net
Example 4: testTrainEvalWithReuse
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def testTrainEvalWithReuse(self):
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000
  with self.test_session() as sess:
    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v3(train_inputs, num_classes)
    tf.get_variable_scope().reuse_variables()
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                       is_training=False)
    predictions = tf.argmax(logits, 1)
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEquals(output.shape, (eval_batch_size,))
Example 5: call
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def call(self, inputs, **kwargs):
  del kwargs
  features = inputs
  set_custom_getter_compose(self._custom_getter)
  tf.get_variable_scope().set_initializer(
      optimize.get_variable_initializer(self.hparams))
  with self._eager_var_store.as_default():
    self._fill_problem_hparams_features(features)
    sharded_features = self._shard_features(features)
    sharded_logits, losses = self.model_fn_sharded(sharded_features)
    if isinstance(sharded_logits, dict):
      concat_logits = {}
      for k, v in six.iteritems(sharded_logits):
        concat_logits[k] = tf.concat(v, 0)
      return concat_logits, losses
    else:
      return tf.concat(sharded_logits, 0), losses
Example 6: top
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def top(self, body_output, _):
  # TODO(lukaszkaiser): is this a universal enough way to get channels?
  num_channels = self._model_hparams.problem.num_channels
  with tf.variable_scope("rgb_softmax"):
    body_output_shape = common_layers.shape_list(body_output)
    reshape_shape = body_output_shape[:3]
    reshape_shape.extend([num_channels, self.top_dimensionality])
    res = tf.layers.dense(body_output, self.top_dimensionality * num_channels)
    res = tf.reshape(res, reshape_shape)
    if not tf.get_variable_scope().reuse:
      res_argmax = tf.argmax(res, axis=-1)
      tf.summary.image(
          "result",
          common_layers.tpu_safe_image_summary(res_argmax),
          max_outputs=1)
    return res
Example 7: dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        assert (len(tf.get_variable_scope().name.split('/')) == 2)
        w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
        b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
        weight_decay_fc = 3e-4

        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            if weight_loss_dict is not None:
                weight_loss_dict[w] = weight_decay_fc
                weight_loss_dict[b] = 0.0

            tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)

        return tf.nn.bias_add(tf.matmul(x, w), b)
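A hypothetical call sketch for the dense helper above (the scope name pi and layer name fc1 are made up for illustration). The assert requires dense to be invoked inside exactly one enclosing variable scope, so that the full scope name has exactly two components, and the weight-decay term then lands in a collection named after that outer scope:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32])
losses = {}
with tf.variable_scope("pi"):
    # Inside dense, tf.get_variable_scope().name == "pi/fc1" (two components).
    h = dense(x, 64, "fc1",
              weight_init=tf.random_normal_initializer(stddev=0.01),
              weight_loss_dict=losses)
# The decay term was added to the "pi_losses" collection.
decay_losses = tf.get_collection("pi_losses")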
Example 8: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def __init__(self, name, window_size, obs_stack, output_size, num_support):
    self.window_size = window_size
    self.obs_stack = obs_stack
    self.output_size = output_size
    self.num_support = num_support

    with tf.variable_scope(name):
        self.input = tf.placeholder(tf.float32, shape=[None, self.window_size, self.window_size, self.obs_stack])
        self.conv1 = tf.layers.conv2d(inputs=self.input, filters=32, kernel_size=[8, 8], strides=[4, 4], padding='VALID', activation=tf.nn.relu)
        self.conv2 = tf.layers.conv2d(inputs=self.conv1, filters=64, kernel_size=[4, 4], strides=[2, 2], padding='VALID', activation=tf.nn.relu)
        self.conv3 = tf.layers.conv2d(inputs=self.conv2, filters=64, kernel_size=[3, 3], strides=[1, 1], padding='VALID', activation=tf.nn.relu)
        self.reshape = tf.reshape(self.conv3, [-1, 7 * 7 * 64])
        self.l1 = tf.layers.dense(inputs=self.reshape, units=512, activation=tf.nn.relu)
        self.l2 = tf.layers.dense(inputs=self.l1, units=self.output_size * self.num_support, activation=None)
        self.net = tf.reshape(self.l2, [-1, self.output_size, self.num_support])
        self.scope = tf.get_variable_scope().name
Example 9: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def __init__(self, name, window_size, obs_stack):
    self.window_size = window_size
    self.obs_stack = obs_stack

    with tf.variable_scope(name):
        self.input = tf.placeholder(dtype=tf.float32, shape=[None, window_size, window_size, obs_stack])
        self.conv1 = tf.layers.conv2d(inputs=self.input, filters=32, kernel_size=[8, 8], strides=[4, 4], padding='VALID', activation=tf.nn.relu)
        self.conv2 = tf.layers.conv2d(inputs=self.conv1, filters=64, kernel_size=[4, 4], strides=[2, 2], padding='VALID', activation=tf.nn.relu)
        self.conv3 = tf.layers.conv2d(inputs=self.conv2, filters=64, kernel_size=[3, 3], strides=[1, 1], padding='VALID', activation=tf.nn.relu)
        self.reshape = tf.reshape(self.conv3, [-1, 7 * 7 * 64])
        self.dense_3 = tf.layers.dense(inputs=self.reshape, units=512, activation=tf.nn.relu)
        self.critic = tf.layers.dense(inputs=self.dense_3, units=1, activation=None)
        self.scope = tf.get_variable_scope().name
Example 10: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def __init__(self, hparams=None):
    EncoderBase.__init__(self, hparams)
    use_bias = self._hparams.use_bias

    with tf.variable_scope(self.variable_scope):
        if self._hparams.initializer:
            tf.get_variable_scope().set_initializer(
                layers.get_initializer(self._hparams.initializer))
        self.Q_dense = tf.layers.Dense(self._hparams.num_units,
                                       use_bias=use_bias,
                                       name='query')
        self.K_dense = tf.layers.Dense(self._hparams.num_units,
                                       use_bias=use_bias,
                                       name='key')
        self.V_dense = tf.layers.Dense(self._hparams.num_units,
                                       use_bias=use_bias,
                                       name='value')
        self.O_dense = tf.layers.Dense(self._hparams.output_dim,
                                       use_bias=use_bias,
                                       name='output')
Example 11: assert_rank
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  if name is None:
    name = tensor.name

  expected_rank_dict = {}
  if isinstance(expected_rank, six.integer_types):
    expected_rank_dict[expected_rank] = True
  else:
    for x in expected_rank:
      expected_rank_dict[x] = True

  actual_rank = tensor.shape.ndims
  if actual_rank not in expected_rank_dict:
    scope_name = tf.get_variable_scope().name
    raise ValueError(
        "For the tensor `%s` in scope `%s`, the actual rank "
        "`%d` (shape = %s) is not equal to the expected rank `%s`" %
        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
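A minimal usage sketch, assuming the assert_rank above is in scope and TensorFlow 1.x graph mode; the placeholder x is hypothetical. Note how the error message uses tf.get_variable_scope().name to say where in the graph the bad tensor was checked:

import six
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 128], name="x")
assert_rank(x, 2)       # passes: x has rank 2
assert_rank(x, [2, 3])  # passes: rank 2 is one of the allowed ranks
# assert_rank(x, 3)     # would raise ValueError naming the tensor and the current variable scope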
Example 12: _init_graph
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def _init_graph(self):
    # Collect inputs.
    self.input_names = []
    for param in inspect.signature(self._build_func).parameters.values():
        if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
            self.input_names.append(param.name)
    self.num_inputs = len(self.input_names)
    assert self.num_inputs >= 1

    # Choose name and scope.
    if self.name is None:
        self.name = self._build_func_name
    self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)

    # Build template graph.
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        assert tf.get_variable_scope().name == self.scope
        with absolute_name_scope(self.scope):  # ignore surrounding name_scope
            with tf.control_dependencies(None):  # ignore surrounding control_dependencies
                self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)

    # Collect outputs.
    assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
    self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
    self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
    self.num_outputs = len(self.output_templates)
    assert self.num_outputs >= 1

    # Populate remaining fields.
    self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates]
    self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates]
    self.input_shape = self.input_shapes[0]
    self.output_shape = self.output_shapes[0]
    self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
    self.trainables = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])

    # Run initializers for all variables defined by this network.
Example 13: get_output_for
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs):
    assert len(in_expr) == self.num_inputs
    all_kwargs = dict(self.static_kwargs)
    all_kwargs.update(dynamic_kwargs)
    with tf.variable_scope(self.scope, reuse=True):
        assert tf.get_variable_scope().name == self.scope
        named_inputs = [tf.identity(expr, name=name) for expr, name in zip(in_expr, self.input_names)]
        out_expr = self._build_func(*named_inputs, **all_kwargs)
    assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
    if return_as_list:
        out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
    return out_expr
Example 14: main
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def main():
    train_x = tf.placeholder(tf.float32)
    train_label = tf.placeholder(tf.float32)
    test_x = tf.placeholder(tf.float32)
    test_label = tf.placeholder(tf.float32)

    with tf.variable_scope("inference"):
        train_y = inference(train_x)
        # Share the weights created by inference() between the train and test graphs.
        tf.get_variable_scope().reuse_variables()
        test_y = inference(test_x)

    train_loss = tf.square(train_y - train_label)
    test_loss = tf.square(test_y - test_label)
    opt = tf.train.GradientDescentOptimizer(0.002)
    train_op = opt.minimize(train_loss)

    init = tf.global_variables_initializer()
    train_data_x, train_data_label = get_data(1000)
    test_data_x, test_data_label = get_data(1)

    with tf.Session() as sess:
        sess.run(init)
        for i in range(1000):
            sess.run(train_op, feed_dict={train_x: train_data_x[i],
                                          train_label: train_data_label[i]})
            if i % 10 == 0:
                test_loss_value = sess.run(test_loss, feed_dict={test_x: test_data_x[0],
                                                                 test_label: test_data_label[0]})
                print("step %d eval loss is %.3f" % (i, test_loss_value))
Example 15: decode
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_variable_scope [as alias]
def decode(self, prev_state, prev_input, timestep):
    with tf.variable_scope("loop"):
        if timestep > 0:
            tf.get_variable_scope().reuse_variables()
        # Run the cell on a combination of the previous input and state
        output, state = self.cell(prev_input, prev_state)
        # Attention mechanism
        masked_scores = self.attention(self.encoder_output, output)
        # Multinomial distribution
        prob = distr.Categorical(masked_scores)
        # Sample from distribution
        position = prob.sample()
        position = tf.cast(position, tf.int32)
        self.positions.append(position)
        # Store log_prob for backprop
        self.log_softmax.append(prob.log_prob(position))
        # Update current city and mask
        self.current_city = tf.one_hot(position, self.seq_length)
        self.mask = self.mask + self.current_city
        # Retrieve decoder's new input
        new_decoder_input = tf.gather(self.h, position)[0]
        return state, new_decoder_input