This article collects typical usage examples of the Python function tensorflow.python.ops.variable_scope.get_variable. If you are wondering what get_variable does, how to call it, or what real-world uses look like, the curated examples below may help.
The following 15 code examples of get_variable are drawn from open-source projects and are sorted by popularity by default.
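Before the examples, here is a minimal sketch of the core pattern, assuming the TensorFlow 1.x graph-mode API, where tf.get_variable is the public alias of variable_scope.get_variable: the function creates a variable on first use, and returns the existing variable instead of creating a duplicate when the enclosing scope has reuse enabled.

import tensorflow as tf

with tf.variable_scope("model"):
    w = tf.get_variable("w", shape=[4, 2],
                        initializer=tf.zeros_initializer())
with tf.variable_scope("model", reuse=True):
    w_again = tf.get_variable("w")  # returns the existing "model/w"
assert w is w_again

This create-or-reuse behavior is what most of the examples below rely on.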
Example 1: register_option2quants
def register_option2quants(self, damping):
self.register_cov_dt1()
if damping not in self._option2quants_by_damping:
      # It's questionable whether we should initialize with values like these
      # at all. Ideally they should never be used before being updated at
      # least once.
damping_string = scalar_or_tensor_to_string(damping)
with variable_scope.variable_scope(self._var_scope):
Pmat = variable_scope.get_variable( # pylint: disable=invalid-name
"Lmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
Kmat = variable_scope.get_variable( # pylint: disable=invalid-name
"Kmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype)
mu = variable_scope.get_variable(
"mu_damp{}".format(damping_string),
initializer=init_ops.ones_initializer,
shape=self._vec_shape,
trainable=False,
dtype=self._dtype)
self._option2quants_by_damping[damping] = (Pmat, Kmat, mu)
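All three variables above are created with trainable=False, so they are excluded from the TRAINABLE_VARIABLES collection and optimizers will not update them; only the factor's own update ops touch them. A quick sketch of that effect, assuming the TF 1.x API:

import tensorflow as tf

v = tf.get_variable("not_learned", shape=[3], trainable=False,
                    initializer=tf.zeros_initializer())
assert v not in tf.trainable_variables()  # optimizers skip it
assert v in tf.global_variables()         # still initialized and saved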
Example 2: __call__
def __call__(self, x, states_prev, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or self._names["scope"]):
x_shape = x.get_shape().with_rank(2)
if not x_shape[1]:
raise ValueError("Expecting x_shape[1] to be sets: %s" % str(x_shape))
if len(states_prev) != 2:
raise ValueError("Expecting states_prev to be a tuple with length 2.")
input_size = x_shape[1]
w = vs.get_variable(self._names["W"], [input_size + self._num_units,
self._num_units * 4])
b = vs.get_variable(
self._names["b"], [w.get_shape().with_rank(2)[1]],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
wci = vs.get_variable(self._names["wci"], [self._num_units])
wco = vs.get_variable(self._names["wco"], [self._num_units])
wcf = vs.get_variable(self._names["wcf"], [self._num_units])
else:
wci = wco = wcf = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = states_prev
(_, cs, _, _, _, _, h) = _lstm_block_cell(
x,
cs_prev,
h_prev,
w,
b,
wci=wci,
wco=wco,
wcf=wcf,
forget_bias=self._forget_bias,
use_peephole=self._use_peephole)
return (h, (cs, h))
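This __call__ belongs to a fused LSTM block cell. A hedged usage sketch, assuming the cell is the one exposed as tf.contrib.rnn.LSTMBlockCell in TF 1.x contrib; the get_variable calls ensure the weights are created once and shared across all time steps:

import tensorflow as tf

cell = tf.contrib.rnn.LSTMBlockCell(num_units=64, use_peephole=True)
inputs = tf.random_uniform([8, 20, 32])  # [batch, time, features]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)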
Example 3: _auc_hist_accumulate
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
"""Accumulate histograms in new variables."""
with variable_scope.variable_op_scope(
[hist_true, hist_false], None, 'hist_accumulate'):
# Holds running total histogram of scores for records labeled True.
hist_true_acc = variable_scope.get_variable(
'hist_true_acc',
initializer=array_ops.zeros_initializer(
[nbins],
dtype=hist_true.dtype),
collections=collections,
trainable=False)
# Holds running total histogram of scores for records labeled False.
hist_false_acc = variable_scope.get_variable(
'hist_false_acc',
initializer=array_ops.zeros_initializer(
[nbins],
dtype=hist_false.dtype),
collections=collections,
trainable=False)
update_op = control_flow_ops.group(
hist_true_acc.assign_add(hist_true),
hist_false_acc.assign_add(hist_false),
name='update_op')
return hist_true_acc, hist_false_acc, update_op
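The returned triple follows the usual TF 1.x streaming-metric contract: run update_op once per batch, then read the accumulator variables. A hypothetical driver loop (the histogram inputs here are invented for illustration):

import tensorflow as tf

scores = tf.random_uniform([128])
labels = tf.random_uniform([128]) > 0.5
hist_true = tf.histogram_fixed_width(
    tf.boolean_mask(scores, labels), [0.0, 1.0], nbins=10)
hist_false = tf.histogram_fixed_width(
    tf.boolean_mask(scores, tf.logical_not(labels)), [0.0, 1.0], nbins=10)
true_acc, false_acc, update_op = _auc_hist_accumulate(
    hist_true, hist_false, nbins=10,
    collections=[tf.GraphKeys.LOCAL_VARIABLES])

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  for _ in range(100):
    sess.run(update_op)  # accumulate one batch per step
  totals = sess.run([true_acc, false_acc])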
Example 4: build
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape.ndims is None:
raise ValueError('Inputs to `Dense` should have known rank.')
if len(input_shape) < 2:
raise ValueError('Inputs to `Dense` should have rank >= 2.')
if input_shape[-1].value is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
# Note that we set `trainable=True` because this is a trainable
# weight of the layer. If the layer is not trainable
# (self.trainable = False), the variable will not be added to
# tf.trainable_variables(), and self.trainable_weights will be empty.
self.kernel = vs.get_variable('kernel',
shape=[input_shape[-1].value, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = vs.get_variable('bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
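build is invoked automatically the first time the layer is called on an input, which is when these get_variable calls run. A sketch assuming the public wrapper around this class (tf.layers.Dense in TF 1.4+):

import tensorflow as tf

layer = tf.layers.Dense(units=10)
x = tf.placeholder(tf.float32, [None, 4])
y = layer(x)                     # first call triggers build()
print(layer.kernel.get_shape())  # (4, 10)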
Example 5: testErrorConditions
def testErrorConditions(self):
self.assertRaises(ValueError, ws_util._WarmStartSettings, None)
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
# List of PartitionedVariable is invalid type when warmstarting with vocab.
self.assertRaises(TypeError, ws_util._warmstart_var_with_vocab, [x], "/tmp",
5, "/tmp", "/tmp")
# Keys of type other than FeatureColumn.
self.assertRaises(TypeError, ws_util._warmstart,
{"StringType": x}, ws_util._WarmStartSettings("/tmp"))
    # Unused variable names raise ValueError.
with ops.Graph().as_default():
with self.test_session() as sess:
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
self._write_checkpoint(sess)
self.assertRaises(ValueError, ws_util._warmstart,
ws_util._WarmStartSettings(
self.get_temp_dir(),
var_name_to_vocab_info={
"y": ws_util._VocabInfo("", 1, 0, "")
}))
self.assertRaises(ValueError, ws_util._warmstart,
ws_util._WarmStartSettings(
self.get_temp_dir(),
var_name_to_prev_var_name={"y": "y2"}))
Example 6: testOptimizerInit
def testOptimizerInit(self):
with ops.Graph().as_default():
layer_collection = lc.LayerCollection()
inputs = array_ops.ones((2, 1)) * 2
weights_val = np.ones((1, 1), dtype=np.float32) * 3.
weights = variable_scope.get_variable(
'w', initializer=array_ops.constant(weights_val))
bias = variable_scope.get_variable(
'b', initializer=init_ops.zeros_initializer(), shape=(1, 1))
output = math_ops.matmul(inputs, weights) + bias
layer_collection.register_fully_connected((weights, bias), inputs, output)
logits = math_ops.tanh(output)
targets = array_ops.constant([[0.], [1.]])
output = math_ops.reduce_mean(
nn.softmax_cross_entropy_with_logits(logits=logits, labels=targets))
layer_collection.register_categorical_predictive_distribution(logits)
optimizer.KfacOptimizer(
0.1,
0.2,
0.3,
layer_collection,
momentum=0.5,
momentum_type='regular')
Example 7: build
def build(self, input_shape):
if len(input_shape) != self.rank + 2:
      raise ValueError('Inputs should have rank ' + str(self.rank + 2) +
                       '. Received input shape: ' + str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = vs.get_variable('kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = vs.get_variable('bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
Example 8: testWarmStartMoreSettingsNoPartitioning
def testWarmStartMoreSettingsNoPartitioning(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
prev_keys_val = sess.run(sc_keys_weights)
# New graph, new session with warmstarting.
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols,
partitioner=None)
vocab_info = ws_util._VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path
)
ws_settings = ws_util._WarmStartSettings(
self.get_temp_dir(),
vars_to_warmstart=".*(sc_keys|sc_vocab).*",
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
ws_util._warmstart(ws_settings)
sess.run(variables.global_variables_initializer())
        # Verify the weights were correctly warm-started. The var for sc_hash
        # should not be warm-started; the var for sc_vocab should be
        # warm-started after vocab remapping.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys: [prev_keys_val],
sc_hash: [np.zeros([15, 1])],
sc_vocab: [np.array([[3.], [2.], [1.], [0.5], [0.], [0.]])]
}, sess)
Example 9: batch_normalize
def batch_normalize(tensor_in,
                    epsilon=1e-5,
                    convnet=False,
                    decay=0.9,
                    scale_after_normalization=True):
  """Batch normalization.
  Args:
    tensor_in: Input `Tensor`, 4D shape: [batch, in_height, in_width, in_depth].
    epsilon: A small float added to the variance to avoid dividing by zero.
    convnet: Whether this is for convolutional net use. If `True`, moments
      are computed across axes [0, 1, 2]; otherwise only across [0].
    decay: Decay rate for the exponential moving average.
    scale_after_normalization: Whether to scale after normalization.
  """
shape = tensor_in.get_shape().as_list()
with vs.variable_scope("batch_norm"):
gamma = vs.get_variable("gamma", [shape[-1]], initializer=init_ops.random_normal_initializer(1.0, 0.02))
beta = vs.get_variable("beta", [shape[-1]], initializer=init_ops.constant_initializer(0.0))
ema = moving_averages.ExponentialMovingAverage(decay=decay)
if convnet:
assign_mean, assign_var = nn.moments(tensor_in, [0, 1, 2])
else:
assign_mean, assign_var = nn.moments(tensor_in, [0])
ema_assign_op = ema.apply([assign_mean, assign_var])
ema_mean, ema_var = ema.average(assign_mean), ema.average(assign_var)
def update_mean_var():
"""Internal function that updates mean and variance during training"""
with ops.control_dependencies([ema_assign_op]):
return array_ops_.identity(assign_mean), array_ops_.identity(assign_var)
is_training = array_ops_.squeeze(ops.get_collection("IS_TRAINING"))
mean, variance = control_flow_ops.cond(is_training, update_mean_var, lambda: (ema_mean, ema_var))
return nn.batch_norm_with_global_normalization(
tensor_in, mean, variance, beta, gamma, epsilon, scale_after_normalization=scale_after_normalization
)
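The tf.cond at the end is the standard TF 1.x switch between batch statistics (which also update the EMA during training) and the accumulated averages at inference. A standalone sketch of just that switch, using a boolean placeholder instead of the IS_TRAINING collection (an assumption made for this sketch):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
is_training = tf.placeholder(tf.bool, [])
mean, variance = tf.nn.moments(x, axes=[0])
ema = tf.train.ExponentialMovingAverage(decay=0.9)
ema_op = ema.apply([mean, variance])  # creates shadow variables

def _train_stats():
  # Force the EMA update, then pass through the fresh batch statistics.
  with tf.control_dependencies([ema_op]):
    return tf.identity(mean), tf.identity(variance)

use_mean, use_var = tf.cond(
    is_training, _train_stats,
    lambda: (ema.average(mean), ema.average(variance)))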
Example 10: testRestoreOnAssign
def testRestoreOnAssign(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(save_graph):
first = checkpointable.Checkpointable()
first.var1 = variable_scope.get_variable(
name="outside_var", initializer=0.)
first.var2 = variable_scope.get_variable(
name="blah", initializer=0.)
self.evaluate(first.var1.assign(4.))
self.evaluate(first.var2.assign(8.))
save_path = checkpointable_utils.CheckpointableSaver(first).save(
checkpoint_prefix)
restore_graph = ops.Graph()
with restore_graph.as_default(), self.test_session(restore_graph):
second = checkpointable.Checkpointable()
second.var2 = variable_scope.get_variable(
name="blah", initializer=0.)
status = checkpointable_utils.CheckpointableSaver(
second).restore(save_path)
recreated_var1 = variable_scope.get_variable(
name="outside_var", initializer=0.)
status.run_restore_ops()
self.assertEqual(8., self.evaluate(second.var2))
self.evaluate(recreated_var1.assign(-2.))
self.assertEqual(-2., self.evaluate(recreated_var1))
second.var1 = recreated_var1
status.run_restore_ops()
self.assertEqual(4., self.evaluate(recreated_var1))
Example 11: _project_input
def _project_input(self, inputs, c_prev, m_prev, with_c):
"""Fills in c_prev and m_prev with projected input, for input dimensions
"""
conf = self._config
if (inputs is not None and inputs.get_shape().with_rank(2)[1].value > 0
and len(conf.inputs) > 0):
if isinstance(inputs, tuple):
if len(conf.inputs) != len(inputs):
raise ValueError("Expect inputs as a tuple of {} "
"tensors".format(len(conf.inputs)))
input_splits = inputs
else:
input_splits = array_ops.split(
value=inputs, num_or_size_splits=len(conf.inputs), axis=1)
input_sz = input_splits[0].get_shape().with_rank(2)[1].value
for i, j in enumerate(conf.inputs):
input_project_m = vs.get_variable(
'project_m_{}'.format(j), [input_sz, conf.num_units],
dtype=inputs.dtype)
m_prev[j] = math_ops.matmul(input_splits[i], input_project_m)
if with_c:
input_project_c = vs.get_variable(
'project_c_{}'.format(j), [input_sz, conf.num_units],
dtype=inputs.dtype)
c_prev[j] = math_ops.matmul(input_splits[i], input_project_c)
Example 12: testPartitionConcatenatesAlongCorrectAxis
def testPartitionConcatenatesAlongCorrectAxis(self):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root"):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = ops.get_default_graph().get_tensor_by_name("root/n0/part_0:0")
n0_1 = ops.get_default_graph().get_tensor_by_name("root/n0/part_1:0")
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = ops.get_default_graph().get_tensor_by_name("root/n1/part_0:0")
n1_1 = ops.get_default_graph().get_tensor_by_name("root/n1/part_1:0")
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
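The hand-written partition functions above shard axis 0 and axis 1 respectively; the built-in tf.fixed_size_partitioner expresses the same thing more compactly (a sketch assuming the TF 1.x API):

import tensorflow as tf

partitioner = tf.fixed_size_partitioner(num_shards=2, axis=0)
with tf.variable_scope("sharded", partitioner=partitioner):
  v = tf.get_variable("v", shape=(2, 2, 2))  # two (1, 2, 2) shards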
Example 13: weighted_moving_average
def weighted_moving_average(value,
decay,
weight,
truediv=True,
collections=None,
name=None):
"""Compute the weighted moving average of `value`.
Conceptually, the weighted moving average is:
`moving_average(value * weight) / moving_average(weight)`,
where a moving average updates by the rule
`new_value = decay * old_value + (1 - decay) * update`
Internally, this Op keeps moving average variables of both `value * weight`
and `weight`.
Args:
value: A numeric `Tensor`.
decay: A float `Tensor` or float value. The moving average decay.
weight: `Tensor` that keeps the current value of a weight.
Shape should be able to multiply `value`.
truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
floating point division. If `False`, use division implied by dtypes.
collections: List of graph collections keys to add the internal variables
`value * weight` and `weight` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation.
Defaults to "WeightedMovingAvg".
Returns:
An Operation that updates and returns the weighted moving average.
"""
# Unlike assign_moving_average, the weighted moving average doesn't modify
# user-visible variables. It is the ratio of two internal variables, which are
  # moving averages of the updates. Thus, the signature of this function is
  # quite different from that of assign_moving_average.
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(name, "WeightedMovingAvg",
[value, weight, decay]) as scope:
value_x_weight_var = variable_scope.get_variable(
"value_x_weight",
initializer=init_ops.zeros_initializer(value.get_shape(),
dtype=value.dtype),
trainable=False,
collections=collections)
weight_var = variable_scope.get_variable(
"weight",
initializer=init_ops.zeros_initializer(weight.get_shape(),
dtype=weight.dtype),
trainable=False,
collections=collections)
numerator = assign_moving_average(
value_x_weight_var, value * weight, decay, zero_debias=False)
denominator = assign_moving_average(
weight_var, weight, decay, zero_debias=False)
if truediv:
return math_ops.truediv(numerator, denominator, name=scope.name)
else:
return math_ops.div(numerator, denominator, name=scope.name)
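A hypothetical usage sketch of the function defined above (names invented for illustration): averaging per-batch loss, weighted by batch size so that larger batches contribute proportionally more:

import tensorflow as tf

per_example_loss = tf.random_uniform([32])  # stand-in batch losses
batch_loss = tf.reduce_mean(per_example_loss)
batch_size = tf.cast(tf.shape(per_example_loss)[0], tf.float32)
# Each run of avg_loss updates both internal averages and returns the ratio.
avg_loss = weighted_moving_average(batch_loss, decay=0.99, weight=batch_size)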
Example 14: __call__
def __call__(self, x, h_prev, scope=None):
"""GRU cell."""
with vs.variable_scope(scope or type(self).__name__):
input_size = x.get_shape().with_rank(2)[1]
      # Check that the input size is set.
if input_size is None:
raise ValueError("Expecting input_size to be set.")
      # Check cell_size == state_size from h_prev, rejecting an unknown
      # dimension first so the comparison below is meaningful.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))
w_ru = vs.get_variable("w_ru", [input_size + self._cell_size,
self._cell_size * 2])
b_ru = vs.get_variable(
"b_ru", [self._cell_size * 2],
initializer=init_ops.constant_initializer(1.0))
w_c = vs.get_variable("w_c",
[input_size + self._cell_size, self._cell_size])
b_c = vs.get_variable(
"b_c", [self._cell_size],
initializer=init_ops.constant_initializer(0.0))
_gru_block_cell = gen_gru_ops.gru_block_cell # pylint: disable=invalid-name
_, _, _, new_h = _gru_block_cell(
x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)
return new_h, new_h
Example 15: _annotated_graph
def _annotated_graph(self):
graph = ops.Graph()
with graph.as_default():
random_seed.set_random_seed(2)
current_activation = variable_scope.get_variable(
name='start', shape=[1, 2, 2, 5])
conv_filter = variable_scope.get_variable(
name='filter', shape=[5, 5, 5, 5])
for layer_number in range(3):
with variable_scope.variable_scope('layer_{}'.format(layer_number)):
after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
'SAME')
current_activation = 2. * after_conv
current_activation.op._set_attr(
'_recompute_hint',
# The value of the attribute does not matter; just that the key
# exists in the op's attributes.
attr_value_pb2.AttrValue(i=1))
current_activation += 5.
current_activation.op._set_attr(
'_recompute_hint', attr_value_pb2.AttrValue(i=0))
current_activation = nn.relu(current_activation)
current_activation.op._set_attr(
'_recompute_hint', attr_value_pb2.AttrValue(i=1))
loss = math_ops.reduce_mean(current_activation)
optimizer = train.AdamOptimizer(0.001)
train_op = optimizer.minimize(loss)
init_op = variables.global_variables_initializer()
return graph, init_op, train_op