This article collects typical usage examples of the Python method tensorflow.python.ops.init_ops.ones_initializer. If you have been wondering what init_ops.ones_initializer does, how to use it, or where to find working examples of it, the curated code samples below should help. You can also explore the containing module, tensorflow.python.ops.init_ops, for related usage examples.
Below are 12 code examples of init_ops.ones_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
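Before the collected examples, here is a minimal standalone sketch of what init_ops.ones_initializer does (it assumes a TF 1.x graph-mode session; the variable name "scale" is illustrative, not taken from any example below):

import tensorflow as tf
from tensorflow.python.ops import init_ops

# Create a variable whose initial value is filled with ones.
v = tf.get_variable("scale", shape=[3], initializer=init_ops.ones_initializer())

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(v))  # => [1. 1. 1.]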
Example 1: testEmbeddingColumnWithWeightedSparseColumnForDNN
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):
  ids = tf.contrib.layers.sparse_column_with_keys(
      "ids", ["marlo", "omar", "stringer"])
  ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
                               indices=[[0, 0], [1, 0], [1, 1]],
                               shape=[3, 2])
  weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
  weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
                                   indices=[[0, 0], [1, 0], [1, 1]],
                                   shape=[3, 2])
  features = {"ids": ids_tensor,
              "weights": weights_tensor}
  embeded_sparse = tf.contrib.layers.embedding_column(
      weighted_ids, 1, combiner="sum", initializer=init_ops.ones_initializer)
  output = tf.contrib.layers.input_from_feature_columns(features,
                                                        [embeded_sparse])
  with self.test_session():
    tf.global_variables_initializer().run()
    tf.initialize_all_tables().run()
    # score: (sum of weights)
    self.assertAllEqual(output.eval(), [[10.], [50.], [0.]])
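To see where [[10.], [50.], [0.]] comes from: the embedding has dimension 1 and is initialized to all ones, and the combiner is "sum", so each row's output is simply the sum of its weights. Row 0 has one entry ("stringer", weight 10.0), row 1 has two entries (20.0 + 30.0 = 50.0), and row 2 has no entries, so it scores 0.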
Example 2: _create_slots
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def _create_slots(self, var_list):
  for v in var_list:
    init_rms = init_ops.ones_initializer(dtype=v.dtype)
    self._get_or_make_slot_with_initializer(v, init_rms, v.get_shape(),
                                            v.dtype, "rms", self._name)
    if self._centered:
      self._zeros_slot(v, "mg", self._name)
    self._zeros_slot(v, "momentum", self._name)
Example 3: __init__
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             trainable=True,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
  self.renorm = renorm
  if renorm:
    renorm_clipping = renorm_clipping or {}
    keys = ['rmax', 'rmin', 'dmax']
    if set(renorm_clipping) - set(keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (renorm_clipping, keys))
    self.renorm_clipping = renorm_clipping
    self.renorm_momentum = renorm_momentum
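The initializer defaults here encode the identity transform: batch normalization computes gamma * (x - mean) / sqrt(variance + epsilon) + beta, so with gamma and the moving variance initialized to ones (and beta and the moving mean to zeros), a freshly constructed layer leaves its inputs approximately unchanged at inference time.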
Example 4: __init__
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             trainable=True,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
Example 5: testInitializedVariableValue
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def testInitializedVariableValue(self):
  with self.cached_session() as sess:
    a = variables_lib2.model_variable(
        'a', [5], initializer=init_ops.ones_initializer())
    sess.run(variables_lib.global_variables_initializer())
    self.assertAllEqual(a.eval(), [1] * 5)
Example 6: testEmbeddingColumnForDNN
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def testEmbeddingColumnForDNN(self):
  hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
  wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                indices=[[0, 0], [1, 0], [1, 1]],
                                shape=[3, 2])
  features = {"wire": wire_tensor}
  embeded_sparse = tf.contrib.layers.embedding_column(
      hashed_sparse, 1, combiner="sum", initializer=init_ops.ones_initializer)
  output = tf.contrib.layers.input_from_feature_columns(features,
                                                        [embeded_sparse])
  with self.test_session():
    tf.global_variables_initializer().run()
    # score: (number of values)
    self.assertAllEqual(output.eval(), [[1.], [2.], [0.]])
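Why [[1.], [2.], [0.]]: with a 1-dimensional all-ones embedding and the "sum" combiner, each row's output equals its count of sparse values regardless of which hash bucket each string lands in. Row 0 holds one id, row 1 holds two, and row 2 holds none.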
Example 7: _norm
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def _norm(self, inputs, scope, bias_initializer):
  shape = inputs.get_shape()[-1:]
  gamma_init = init_ops.ones_initializer()
  beta_init = bias_initializer
  with vs.variable_scope(scope):
    # Initialize beta and gamma for use by normalizer.
    vs.get_variable("gamma", shape=shape, initializer=gamma_init)
    vs.get_variable("beta", shape=shape, initializer=beta_init)
  normalized = self._normalizer_fn(inputs, reuse=True, scope=scope)
  return normalized
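The pattern here is worth calling out: gamma and beta are created eagerly inside the scope with the desired initializers (gamma starting at one, beta from the caller's bias initializer), and the normalizer function is then invoked with reuse=True so that it picks up those pre-created variables instead of creating its own with default initializers.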
Example 8: call
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def call(self, inputs, state):
  bias_ones = self._bias_initializer
  if self._bias_initializer is None:
    bias_ones = init_ops.ones_initializer()
  tile_concat = isinstance(inputs, (list, tuple))
  if tile_concat:
    inputs, inputs_non_spatial = inputs
  with vs.variable_scope('gates'):
    inputs = array_ops.concat([inputs, state], axis=-1)
    concat = self._conv2d(inputs, 2 * self._filters, bias_ones)
    if tile_concat:
      concat = concat + self._dense(
          inputs_non_spatial, concat.shape[-1].value)[:, None, None, :]
    if self._normalizer_fn and not self._separate_norms:
      concat = self._norm(concat, "reset_update", bias_ones)
    r, u = array_ops.split(concat, 2, axis=-1)
    if self._normalizer_fn and self._separate_norms:
      r = self._norm(r, "reset", bias_ones)
      u = self._norm(u, "update", bias_ones)
    r, u = math_ops.sigmoid(r), math_ops.sigmoid(u)

  bias_zeros = self._bias_initializer
  if self._bias_initializer is None:
    bias_zeros = init_ops.zeros_initializer()
  with vs.variable_scope('candidate'):
    inputs = array_ops.concat([inputs, r * state], axis=-1)
    candidate = self._conv2d(inputs, self._filters, bias_zeros)
    if tile_concat:
      candidate = candidate + self._dense(
          inputs_non_spatial, candidate.shape[-1].value)[:, None, None, :]
    if self._normalizer_fn:
      candidate = self._norm(candidate, "state", bias_zeros)

  c = self._activation_fn(candidate)
  new_h = u * state + (1 - u) * c
  return new_h, new_h
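In this convolutional GRU cell, ones_initializer serves as the default bias for the gate convolution: starting the reset/update pre-activations near 1 pushes the sigmoids toward sigmoid(1) ≈ 0.73, so early in training the update gate u favors retaining the previous state, the same idea as the conventional unit forget-gate bias in LSTMs. The candidate path, by contrast, defaults to a zero bias.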
Example 9: testCreateConvWithWeightDecay
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def testCreateConvWithWeightDecay(self):
  random_seed.set_random_seed(0)
  height, width = 3, 3
  with self.cached_session() as sess:
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    regularizer = regularizers.l2_regularizer(0.01)
    layers_lib.separable_conv2d(
        images,
        32, [3, 3],
        2,
        weights_regularizer=regularizer,
        weights_initializer=init_ops.ones_initializer())
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
    weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
    self.assertEqual(
        weight_decay.op.name,
        'SeparableConv2d/depthwise_kernel/Regularizer/l2_regularizer')
    sess.run(variables_lib.global_variables_initializer())
    depth_weight_one = sess.run(weight_decay)
    weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[1]
    self.assertEqual(
        weight_decay.op.name,
        'SeparableConv2d/pointwise_kernel/Regularizer/l2_regularizer')
    pointwise_weight_one = sess.run(weight_decay)

    regularizer = regularizers.l2_regularizer(1.0)
    layers_lib.separable_conv2d(
        images,
        32, [3, 3],
        2,
        weights_regularizer=regularizer,
        weights_initializer=init_ops.ones_initializer())
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 4)
    weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[2]
    sess.run(variables_lib.global_variables_initializer())
    depth_weight_two = sess.run(weight_decay)
    weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[3]
    pointwise_weight_two = sess.run(weight_decay)
    self.assertAllClose(
        [100.0 * depth_weight_one, 100.0 * pointwise_weight_one],
        [depth_weight_two, pointwise_weight_two])
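The final assertion works because every kernel is initialized to all ones, so each L2 penalty is deterministic and scales linearly with the regularizer's coefficient: raising the scale from 0.01 to 1.0 multiplies both the depthwise and pointwise losses by exactly 100, hence the comparison against 100.0 times the first pair of values.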
Example 10: _zero_debias
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def _zero_debias(unbiased_var, value, decay):
  """Compute the delta required for a debiased Variable.

  All exponential moving averages initialized with Tensors are initialized to
  0, and therefore are biased to 0. Variables initialized to 0 and used as
  EMAs are similarly biased. This function creates the debias update amount
  according to a scale factor, as in https://arxiv.org/abs/1412.6980.

  To demonstrate the bias that results from 0-initialization, take an EMA that
  was initialized to `0` with decay `b`. After `t` timesteps of seeing the
  constant `c`, the variable has the following value:

  ```
  EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
      = c*(1 - b^t)
  ```

  To recover the true value `c`, we would divide by the scale factor `1 - b^t`.

  In order to perform debiasing, we use two shadow variables. One keeps track
  of the biased estimate, and the other keeps track of the number of updates
  that have occurred.

  Args:
    unbiased_var: A Variable representing the current value of the unbiased
      EMA.
    value: A Tensor representing the most recent value.
    decay: A Tensor representing `1-decay` for the EMA.

  Returns:
    The amount that the unbiased variable should be updated. Computing this
    tensor will also update the shadow variables appropriately.
  """
  with variable_scope.variable_scope(
      "ZeroDebias", values=[unbiased_var, value, decay]) as scope:
    with ops.colocate_with(unbiased_var):
      biased_var = variable_scope.get_variable(
          unbiased_var.op.name + "_biased",
          initializer=init_ops.zeros_initializer(
              unbiased_var.get_shape(), dtype=unbiased_var.dtype),
          trainable=False)
      # Initializing the local_step to `0` would cause problems with the
      # debiasing equation, so we instead initialize to `1`.
      local_step = variable_scope.get_variable(
          unbiased_var.op.name + "_local_step",
          initializer=init_ops.ones_initializer([], dtype=unbiased_var.dtype),
          trainable=False)

      # Get update ops for both shadow variables.
      update_biased = state_ops.assign_sub(biased_var,
                                           (biased_var - value) * decay,
                                           name=scope.name)
      update_local_step = local_step.assign_add(1)

      # Compute the value of the delta to update the unbiased EMA. Make sure
      # to use the new values of the biased variable and the local step.
      with ops.control_dependencies([update_biased, update_local_step]):
        # This function gets `1 - decay`, so use `1.0 - decay` in the exponent.
        unbiased_ema_delta = (unbiased_var - biased_var.ref() /
                              (1 - math_ops.pow(1.0 - decay,
                                                local_step.ref())))

      return unbiased_ema_delta
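A quick numeric check of the debiasing formula (the values b = 0.9 and c = 10.0 are illustrative, not from the source):

# Zero-initialized EMA of the constant c with decay b.
b, c = 0.9, 10.0
ema = 0.0
for t in range(1, 4):
  ema = b * ema + (1 - b) * c        # biased estimate: c * (1 - b**t)
  print(t, ema, ema / (1 - b ** t))  # dividing by 1 - b**t recovers 10.0 exactly

At t = 1 the biased EMA is 1.0 and the scale factor is 0.1, at t = 2 they are 1.9 and 0.19, and so on; the debiased quotient is 10.0 at every step.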
Example 11: _fixed_memory_luong_score
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def _fixed_memory_luong_score(query, keys, scale):
  """Implements Luong-style (multiplicative) scoring function.

  Assumes that keys have a batch dimension of 1 (i.e., a fixed memory bank).

  Args:
    query: Tensor, shape `[batch_size, num_units]` to compare to keys.
    keys: Processed memory, shape `[1, max_time, num_units]`.
    scale: Whether to apply a scale to the score function.

  Returns:
    A `[batch_size, max_time]` tensor of unnormalized score values.

  Raises:
    ValueError: If `keys` and `query` depths do not match.
  """
  depth = query.get_shape()[-1]
  key_units = keys.get_shape()[-1]
  if depth != key_units:
    raise ValueError(
        "Incompatible or unknown inner dimensions between query and keys. "
        "Query (%s) has units: %s. Keys (%s) have units: %s. "
        "Perhaps you need to set num_units to the keys' dimension (%s)?"
        % (query, depth, keys, key_units, key_units))
  dtype = query.dtype

  # Reshape from [1, memory_size, depth] to [memory_size, depth] for matmul.
  keys = array_ops.squeeze(keys, 0)

  # Inner product along the query-units dimension: query is
  # [batch_size, depth] and keys is [max_time, depth], so matmul with
  # transpose_b computes [batch_size, depth] . [depth, max_time],
  # yielding scores of shape [batch_size, max_time].
  score = math_ops.matmul(query, keys, transpose_b=True)

  if scale:
    # Learned scalar used to scale the scores.
    g = variable_scope.get_variable(
        "attention_g", dtype=dtype,
        initializer=init_ops.ones_initializer, shape=())
    score = g * score
  return score
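Initializing the learned scalar g with ones_initializer means scaled Luong attention starts out identical to plain dot-product attention (score = g * query . keysᵀ with g = 1), and the scale only drifts away from 1 if training finds that useful. Note also that the initializer here is passed as the class itself rather than an instance; TF 1.x's get_variable accepts either form and instantiates a class with the variable's dtype.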
Example 12: __init__
# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import ones_initializer [as alias]
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=None,
             trainable=True,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
  self.beta_constraint = beta_constraint
  self.gamma_constraint = gamma_constraint
  self.renorm = renorm
  if fused is None:
    fused = True
  self.fused = fused
  self._bessels_correction_test_only = True
  if renorm:
    renorm_clipping = renorm_clipping or {}
    keys = ['rmax', 'rmin', 'dmax']
    if set(renorm_clipping) - set(keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (renorm_clipping, keys))
    self.renorm_clipping = renorm_clipping
    self.renorm_momentum = renorm_momentum
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 52, Source file: normalization.py