This article collects typical usage examples of the Python function tensorflow.python.ops.variable_scope.variable_scope. If you are unsure what variable_scope does, how to call it, or what real code using it looks like, the curated examples below should help.
Fifteen code examples of variable_scope are shown, ordered roughly by popularity.
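As a quick orientation before the examples: variable_scope works together with get_variable to namespace variables and to share them across calls. A minimal sketch of that interaction (TF 1.x graph mode; the scope and variable names here are illustrative, not taken from the examples below):

import tensorflow as tf

with tf.variable_scope("my_scope"):
  w = tf.get_variable("w", shape=[3, 4])        # creates variable "my_scope/w"

with tf.variable_scope("my_scope", reuse=True):
  w_again = tf.get_variable("w", shape=[3, 4])  # returns the existing variable

assert w is w_again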
Example 1: __call__
def __call__(self, inputs, state, scope=None):
  """Gated recurrent unit (GRU) with nunits cells."""
  with vs.variable_scope(scope or type(self).__name__):
    if self._dropMaskInput.get_shape()[1:] != inputs.get_shape()[1:]:
      print("error: " + str(self._dropMaskInput.get_shape()[1:]) + " != " + str(inputs.get_shape()[1:]))
      assert(False)
    if self._dropMaskState.get_shape()[1:] != state.get_shape()[1:]:
      print("error: " + str(self._dropMaskState.get_shape()[1:]) + " != " + str(state.get_shape()[1:]))
      assert(False)
    dropin = tf.mul(self._dropMaskInput, inputs)
    dropst = tf.mul(self._dropMaskState, state)
    with vs.variable_scope("Gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      concat = rnn_cell._linear([dropin, dropst], 2 * self._num_units, True, 1.0)
      r, u = tf.split(1, 2, concat)
      r, u = tf.sigmoid(r), tf.sigmoid(u)
    with vs.variable_scope("Candidate"):
      htilda = self._activation(rnn_cell._linear([dropin, r * dropst], self._num_units, True))
    new_h = u * dropst + (1 - u) * htilda
    return new_h, new_h
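The nested "Gates" and "Candidate" scopes above keep the two weight matrices under distinct name prefixes inside the outer cell scope. A minimal sketch of that naming behavior (TF 1.x; the scope and variable names are illustrative):

import tensorflow as tf

with tf.variable_scope("GRUCell"):
  with tf.variable_scope("Gates"):
    gate_w = tf.get_variable("Matrix", shape=[4, 2])
  with tf.variable_scope("Candidate"):
    cand_w = tf.get_variable("Matrix", shape=[4, 2])

print(gate_w.name)  # GRUCell/Gates/Matrix:0
print(cand_w.name)  # GRUCell/Candidate/Matrix:0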
Example 2: __call__
def __call__(self, inputs, state, scope=None):
  """Long short-term memory cell with attention (LSTMA)."""
  with vs.variable_scope(scope or type(self).__name__):
    if self._state_is_tuple:
      state, attns, attn_states = state
    else:
      states = state
      state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
      attns = array_ops.slice(
          states, [0, self._cell.state_size], [-1, self._attn_size])
      attn_states = array_ops.slice(
          states, [0, self._cell.state_size + self._attn_size],
          [-1, self._attn_size * self._attn_length])
    attn_states = array_ops.reshape(attn_states,
                                    [-1, self._attn_length, self._attn_size])
    input_size = self._input_size
    if input_size is None:
      input_size = inputs.get_shape().as_list()[1]
    inputs = _linear([inputs, attns], input_size, True)
    lstm_output, new_state = self._cell(inputs, state)
    if self._state_is_tuple:
      new_state_cat = array_ops.concat(1, _unpacked_state(new_state))
    else:
      new_state_cat = new_state
    new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
    with vs.variable_scope("AttnOutputProjection"):
      output = _linear([lstm_output, new_attns], self._attn_size, True)
    new_attn_states = array_ops.concat(1, [new_attn_states,
                                           array_ops.expand_dims(output, 1)])
    new_attn_states = array_ops.reshape(
        new_attn_states, [-1, self._attn_length * self._attn_size])
    new_state = (new_state, new_attns, new_attn_states)
    if not self._state_is_tuple:
      new_state = array_ops.concat(1, list(new_state))
    return output, new_state
Example 3: testInitFromCheckpoint
def testInitFromCheckpoint(self):
  checkpoint_dir = self.get_temp_dir()
  with self.test_session() as session:
    v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable("my1", [1, 10])
        with variable_scope.variable_scope("some_other_scope"):
          my2 = variable_scope.get_variable("my2", [10, 10])
          with variable_scope.variable_scope("other_useful_scope"):
            my4 = variable_scope.get_variable("var4", [9, 9])
      my3 = variable_scope.get_variable("my3", [100, 100])

      checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
          "var1": "some_scope/my1",
          "useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
      })
      checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
          "var2": "some_scope/some_other_scope/my2",
          "var3": my3,
      })

      session.run(variables.global_variables_initializer())
      self.assertAllEqual(my1.eval(session), v1)
      self.assertAllEqual(my2.eval(session), v2)
      self.assertAllEqual(my3.eval(session), v3)
      self.assertAllEqual(my4.eval(session), v4)

      # Check that tensors are not explicitly in the graph.
      self.assertLess(len(str(session.graph.as_graph_def())), 29000)
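The assignment maps above rename checkpoint entries (or whole checkpoint scopes, via the trailing slash) onto variables created under the new graph's variable scopes. When building such a map it helps to inspect what the checkpoint actually contains; a small sketch, assuming a TF 1.x environment and an existing checkpoint_dir:

import tensorflow as tf

# Print (name, shape) for every variable stored in the checkpoint, to decide
# how each entry should be mapped onto the current graph's variable scopes.
for name, shape in tf.train.list_variables(checkpoint_dir):
  print(name, shape)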
Example 4: call
def call(self, inputs, state, att_score=None):
  """Gated recurrent unit (GRU) with nunits cells."""
  if self._gate_linear is None:
    bias_ones = self._bias_initializer
    if self._bias_initializer is None:
      bias_ones = init_ops.constant_initializer(
          1.0, dtype=inputs.dtype)
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      self._gate_linear = _Linear(
          [inputs, state],
          2 * self._num_units,
          True,
          bias_initializer=bias_ones,
          kernel_initializer=self._kernel_initializer)

  value = math_ops.sigmoid(self._gate_linear([inputs, state]))
  r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

  r_state = r * state
  if self._candidate_linear is None:
    with vs.variable_scope("candidate"):
      self._candidate_linear = _Linear(
          [inputs, r_state],
          self._num_units,
          True,
          bias_initializer=self._bias_initializer,
          kernel_initializer=self._kernel_initializer)
  c = self._activation(self._candidate_linear([inputs, r_state]))
  u = (1.0 - att_score) * u
  new_h = u * state + (1 - u) * c
  return new_h, new_h
Example 5: testReuse
def testReuse(self):

  def f(x):
    return core_layers.dense(x, self.CHANNELS // 2)

  def g(x):
    return core_layers.dense(x, self.CHANNELS // 2)

  x = random_ops.random_uniform(
      [self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
  x1, x2 = array_ops.split(x, 2, axis=-1)

  with variable_scope.variable_scope("test"):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

  num_vars_before = len(variables.global_variables())

  with variable_scope.variable_scope("test", reuse=True):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

  num_vars_after = len(variables.global_variables())
  self.assertEqual(num_vars_before, num_vars_after)

  loss = math_ops.reduce_mean(y1 + y2)
  _ = gradients_impl.gradients(loss,
                               [x] + variables.trainable_variables())

  with variable_scope.variable_scope("test", reuse=True):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

  num_vars_after = len(variables.global_variables())
  self.assertEqual(num_vars_before, num_vars_after)
Example 6: __call__
def __call__(self, *args, **kwargs):
  if self._variable_scope:
    if self._variables_created:
      # This is not the first visit to __call__, so variables have already
      # been created, and we want to reuse them.
      with variable_scope.variable_scope(self._variable_scope,
                                         reuse=variable_scope.AUTO_REUSE):
        with self._eager_variable_store.as_default():
          return self._call_func(args, kwargs, check_for_new_variables=True)
    else:
      # This is the first visit to __call__, but the scope has already been
      # created in the constructor. Set _variables_created after the inner
      # function is successfully called so that subsequent calls take the if
      # branch above.
      with variable_scope.variable_scope(self._variable_scope,
                                         reuse=variable_scope.AUTO_REUSE):
        with self._eager_variable_store.as_default():
          result = self._call_func(args, kwargs,
                                   check_for_new_variables=False)
      self._variables_created = True
      return result
  else:
    # The scope was not created at construction time, so create it here.
    # Subsequent calls should reuse variables.
    with variable_scope.variable_scope(
        self._unique_name, self._name,
        custom_getter=self._custom_getter) as vs:
      self._variable_scope = vs
      with self._eager_variable_store.as_default():
        result = self._call_func(args, kwargs,
                                 check_for_new_variables=False)
    self._variables_created = True
    return result
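This __call__ appears to belong to TensorFlow's template machinery, whose user-facing entry point is tf.make_template: it wraps a function so that the first call creates variables and later calls re-enter the same scope with AUTO_REUSE. A minimal usage sketch (TF 1.x; scale_fn and the variable name are illustrative):

import tensorflow as tf

def scale_fn(x):
  # Created on the first call of the template, reused on every later call.
  s = tf.get_variable("scale", shape=[], initializer=tf.ones_initializer())
  return x * s

scale = tf.make_template("scale_layer", scale_fn)
y1 = scale(tf.constant(2.0))
y2 = scale(tf.constant(3.0))  # shares the same "scale" variable as y1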
Example 7: separable_lstm
def separable_lstm(images, num_filters_out,
                   kernel_size=None, nhidden=None, scope=None):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the pooling. Can be an int if both values are the same. Set to None
      to not use blocks.
    nhidden: hidden layer depth
    scope: optional scope name

  Returns:
    (num_images, height/kernel_height, width/kernel_width,
    num_filters_out) tensor
  """
  with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
    if nhidden is None:
      nhidden = num_filters_out
    if kernel_size is not None:
      images = get_blocks(images, kernel_size)
    hidden = horizontal_lstm(images, nhidden)
    with variable_scope.variable_scope("vertical"):
      transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
      output_transposed = horizontal_lstm(transposed, num_filters_out)
    output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
    return output
Example 8: __call__
def __call__(self, *args, **kwargs):
  # In both branches below, the template store is installed as default after
  # the variable scope is opened in order to ensure that templates nested at
  # the same level correctly uniquify lower variable scope names.
  if self._variable_scope:
    # Create a cache for the variable scope context manager the first time
    # around so that we don't have to keep recreating it.
    if not self._variable_scope_context_manager:
      self._variable_scope_context_manager = variable_scope.variable_scope(
          self._variable_scope, reuse=variable_scope.AUTO_REUSE)
    with self._variable_scope_context_manager:
      with self._template_store.as_default():
        result = self._call_func(args, kwargs)
    return result
  else:
    # The scope was not created at construction time, so create it here.
    # Subsequent calls should reuse variables.
    with variable_scope.variable_scope(
        self._unique_name, self._name,
        custom_getter=self._custom_getter) as vs:
      self._variable_scope = vs
      # Because the scope was not created at construction time, the template
      # store's variable scope name is unset; set it here.
      self._template_store.set_variable_scope_name(vs.name)
      with self._template_store.as_default():
        result = self._call_func(args, kwargs)
    return result
Example 9: sequence_softmax
def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
  """Run a softmax layer over all the time steps of an input sequence.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: output depth
    scope: optional scope name
    name: optional name for output tensor
    linear_name: name for linear (pre-softmax) output

  Returns:
    A tensor of size (length, batch_size, noutput).
  """
  length, _, ninputs = _shape(inputs)
  inputs_u = array_ops.unstack(inputs)
  output_u = []
  with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
    initial_w = random_ops.truncated_normal([0 + ninputs, noutput], stddev=0.1)
    initial_b = constant_op.constant(0.1, shape=[noutput])
    w = variables.model_variable("weights", initializer=initial_w)
    b = variables.model_variable("biases", initializer=initial_b)
    for i in xrange(length):
      with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
                                         [inputs_u[i]]):
        # TODO(tmb) consider using slim.fully_connected(...,
        # activation_fn=tf.nn.softmax)
        linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
        output = nn_ops.softmax(linear)
        output_u += [output]
    outputs = array_ops.stack(output_u, name=name)
    return outputs
Example 10: dnn_logits_fn
def dnn_logits_fn():
  """Builds the logits from the input layer."""
  previous_layer = input_layer
  for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
    with variable_scope.variable_scope(
        "hiddenlayer_%d" % layer_id,
        values=(previous_layer,)) as hidden_layer_scope:
      net = layers.fully_connected(
          previous_layer,
          num_hidden_units,
          activation_fn=dnn_activation_fn,
          variables_collections=[dnn_parent_scope],
          scope=hidden_layer_scope)
      if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
        net = layers.dropout(net, keep_prob=(1.0 - dnn_dropout))
    _add_hidden_layer_summary(net, hidden_layer_scope.name)
    previous_layer = net
  with variable_scope.variable_scope(
      "logits", values=(previous_layer,)) as logits_scope:
    dnn_logits = layers.fully_connected(
        previous_layer,
        head.logits_dimension,
        activation_fn=None,
        variables_collections=[dnn_parent_scope],
        scope=logits_scope)
  _add_hidden_layer_summary(dnn_logits, logits_scope.name)
  return dnn_logits
Example 11: call
def call(self, inputs, state):
  """Run one step of the LSTM cell with a factorized input projection."""
  (c_prev, m_prev) = state

  self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
  scope = vs.get_variable_scope()
  with vs.variable_scope(scope, initializer=self._initializer):
    x = array_ops.concat([inputs, m_prev], axis=1)
    with vs.variable_scope("first_gemm"):
      if self._linear1 is None:
        # no bias for bottleneck
        self._linear1 = _Linear(x, self._fact_size, False)
      R_fact = self._linear1(x)
    with vs.variable_scope("second_gemm"):
      if self._linear2 is None:
        self._linear2 = _Linear(R_fact, 4 * self._num_units, True)
      R = self._linear2(R_fact)
    i, j, f, o = array_ops.split(R, 4, 1)

    c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
         math_ops.sigmoid(i) * math_ops.tanh(j))
    m = math_ops.sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      with vs.variable_scope("projection"):
        if self._linear3 is None:
          self._linear3 = _Linear(m, self._num_proj, False)
        m = self._linear3(m)

  new_state = rnn_cell_impl.LSTMStateTuple(c, m)
  return m, new_state
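Note how the cell re-enters the current scope with a default initializer; every get_variable call made inside (here through the _Linear helpers) inherits that initializer unless it sets its own. A minimal sketch of the mechanism (TF 1.x; the scope and variable names are illustrative):

import tensorflow as tf

with tf.variable_scope("cell", initializer=tf.constant_initializer(0.5)):
  w = tf.get_variable("w", shape=[2, 2])  # initialized to 0.5 via the scope default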
Example 12: __call__
def __call__(self, inputs, state, scope=None):
  """Run this multi-layer cell on inputs, starting from state."""
  with vs.variable_scope(scope or "multi_rnn_cell"):
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    outputs = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("cell_%d" % i):
        if self._state_is_tuple:
          if not nest.is_sequence(state):
            raise ValueError(
                "Expected state to be a tuple of length %d, but received: %s"
                % (len(self.state_size), state))
          cur_state = state[i]
        else:
          cur_state = array_ops.slice(
              state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
        cur_inp, new_state = cell(cur_inp, cur_state)
        outputs.append(cur_inp)
        new_states.append(new_state)
    new_states = (tuple(new_states) if self._state_is_tuple else
                  array_ops.concat_v2(new_states, 1))
    return tuple(outputs), new_states
Example 13: testIndyGRUCell
def testIndyGRUCell(self):
  with self.test_session() as sess:
    with variable_scope.variable_scope(
        "root", initializer=init_ops.constant_initializer(0.5)):
      x = array_ops.zeros([1, 2])
      m = array_ops.zeros([1, 2])
      g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
      sess.run([variables_lib.global_variables_initializer()])
      res = sess.run([g], {
          x.name: np.array([[1., 1.]]),
          m.name: np.array([[0.1, 0.1]])
      })
      # Smoke test
      self.assertAllClose(res[0], [[0.185265, 0.17704]])
    with variable_scope.variable_scope(
        "other", initializer=init_ops.constant_initializer(0.5)):
      # Test IndyGRUCell with input_size != num_units.
      x = array_ops.zeros([1, 3])
      m = array_ops.zeros([1, 2])
      g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
      sess.run([variables_lib.global_variables_initializer()])
      res = sess.run([g], {
          x.name: np.array([[1., 1., 1.]]),
          m.name: np.array([[0.1, 0.1]])
      })
      # Smoke test
      self.assertAllClose(res[0], [[0.155127, 0.157328]])
Example 14: dnn
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
  """Creates a fully connected deep neural network subgraph.

  Args:
    tensor_in: tensor or placeholder for input features.
    hidden_units: list of counts of hidden units in each layer.
    activation: activation function between layers. Can be None.
    dropout: if not None, will add a dropout layer with given probability.

  Returns:
    A tensor holding the output of the deep neural network.
  """
  with vs.variable_scope('dnn'):
    for i, n_units in enumerate(hidden_units):
      with vs.variable_scope('layer%d' % i):
        # Weight initializer was set to None to replicate the behavior of
        # rnn_cell.linear. Using fully_connected's default initializer gets
        # slightly worse quality results on unit tests.
        tensor_in = layers.legacy_fully_connected(
            tensor_in,
            n_units,
            weight_init=None,
            weight_collections=['dnn_weights'],
            bias_collections=['dnn_biases'])
        if activation is not None:
          tensor_in = activation(tensor_in)
        if dropout is not None:
          is_training = array_ops_.squeeze(ops.get_collection('IS_TRAINING'))
          tensor_in = control_flow_ops.cond(
              is_training,
              lambda: dropout_ops.dropout(tensor_in, prob=(1.0 - dropout)),
              lambda: tensor_in)
    return tensor_in
Example 15: _serving_ops
def _serving_ops(self, features):
  """Add ops for serving to the graph."""
  with variable_scope.variable_scope("model", use_resource=True):
    prediction_outputs = self.model.predict(features=features)
  with variable_scope.variable_scope("model", reuse=True):
    filtering_outputs = self.create_loss(
        features, estimator_lib.ModeKeys.EVAL)
  with variable_scope.variable_scope("model", reuse=True):
    no_state_features = {
        k: v for k, v in features.items()
        if not k.startswith(feature_keys.State.STATE_PREFIX)}
    # Ignore any state management when cold-starting. The model's default
    # start state is replicated across the batch.
    cold_filtering_outputs = self.model.define_loss(
        features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
  return estimator_lib.EstimatorSpec(
      mode=estimator_lib.ModeKeys.PREDICT,
      export_outputs={
          feature_keys.SavedModelLabels.PREDICT:
              export_lib.PredictOutput(prediction_outputs),
          feature_keys.SavedModelLabels.FILTER:
              export_lib.PredictOutput(
                  state_to_dictionary(filtering_outputs.end_state)),
          feature_keys.SavedModelLabels.COLD_START_FILTER:
              _NoStatePredictOutput(
                  state_to_dictionary(cold_filtering_outputs.end_state))
      },
      # Likely unused, but it is necessary to return `predictions` to satisfy
      # the Estimator's error checking.
      predictions={})