This article collects typical usage examples of the ones_initializer function from tensorflow.python.ops.init_ops in Python. If you have been wondering what exactly ones_initializer does and how to use it, the hand-picked function examples below should help.
The following shows 15 code examples of the ones_initializer function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
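Before the examples, here is a minimal usage sketch (not taken from any of the examples below). It assumes a TensorFlow 1.x graph-mode setup, and the variable name "ones_var" is purely illustrative; the point is simply that init_ops.ones_initializer() returns an initializer object that fills a variable with ones when the variable is initialized:

import tensorflow as tf
from tensorflow.python.ops import init_ops

with tf.Graph().as_default():
  # get_variable calls the initializer to produce the variable's initial value.
  v = tf.get_variable(
      "ones_var", shape=[2, 3], initializer=init_ops.ones_initializer())
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))  # prints a 2x3 array in which every element is 1.0

Most of the snippets below come from TensorFlow's own layers, tests, and contrib code, where the same initializer is passed to get_variable, add_variable, or layer constructors.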
Example 1: __init__
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             trainable=True,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
Example 2: __init__
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=None,
             trainable=True,
             virtual_batch_size=None,
             adjustment=None,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  if isinstance(axis, list):
    self.axis = axis[:]
  else:
    self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
  self.beta_constraint = beta_constraint
  self.gamma_constraint = gamma_constraint
  self.renorm = renorm
  self.virtual_batch_size = virtual_batch_size
  self.adjustment = adjustment
  if fused is None:
    fused = True
  self.fused = fused
  self._bessels_correction_test_only = True
  if renorm:
    renorm_clipping = renorm_clipping or {}
    keys = ['rmax', 'rmin', 'dmax']
    if set(renorm_clipping) - set(keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (renorm_clipping, keys))
    self.renorm_clipping = renorm_clipping
    self.renorm_momentum = renorm_momentum
Example 3: __init__
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=None,
             trainable=True,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
  self.beta_constraint = beta_constraint
  self.gamma_constraint = gamma_constraint
  self.renorm = renorm
  # This environment variable is only used during the testing period of fused
  # batch norm and will be removed after that.
  if fused is None:
    fused = _FUSED_DEFAULT
  self.fused = fused
  self._bessels_correction_test_only = True
  if renorm:
    renorm_clipping = renorm_clipping or {}
    keys = ['rmax', 'rmin', 'dmax']
    if set(renorm_clipping) - set(keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (renorm_clipping, keys))
    self.renorm_clipping = renorm_clipping
    self.renorm_momentum = renorm_momentum
Example 4: __init__
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=False,
             trainable=True,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      name=name, trainable=trainable, **kwargs)
  self.axis = axis
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = beta_initializer
  self.gamma_initializer = gamma_initializer
  self.moving_mean_initializer = moving_mean_initializer
  self.moving_variance_initializer = moving_variance_initializer
  self.beta_regularizer = beta_regularizer
  self.gamma_regularizer = gamma_regularizer
  self.renorm = renorm
  self.fused = fused
  if self.fused and renorm:
    raise ValueError(
        'Batch renorm is currently not supported with fused batch norm.')
  if self.fused and (beta_regularizer is not None or
                     gamma_regularizer is not None):
    raise ValueError('Regularizers are not currently '
                     'supported for fused batch norm.')
  if renorm:
    renorm_clipping = renorm_clipping or {}
    keys = ['rmax', 'rmin', 'dmax']
    if set(renorm_clipping) - set(keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (renorm_clipping, keys))
    self.renorm_clipping = renorm_clipping
    self.renorm_momentum = renorm_momentum
Example 5: testVariableCreationInALoop
def testVariableCreationInALoop(self):
  """Tests that a variable created inside a loop can be used outside the loop."""
  with self.test_session():
    with variable_scope.variable_scope("ascope") as scope:

      def Body(i, _):
        var_x = variable_scope.get_variable(
            "x",
            shape=[2],
            initializer=init_ops.ones_initializer(),
            partitioner=partitioned_variables.variable_axis_size_partitioner(
                4))
        return (i + 1, var_x.as_tensor())

      cond = lambda i, _: i < 2
      _, x = control_flow_ops.while_loop(
          cond, Body, (0, constant_op.constant([7, 8], dtypes.float32)))
      variables.global_variables_initializer().run()
      self.assertAllClose([1.0, 1.0], x.eval())

      scope.reuse_variables()
      var_x = variable_scope.get_variable(
          "x",
          shape=[2],
          initializer=init_ops.ones_initializer(),
          partitioner=partitioned_variables.variable_axis_size_partitioner(4))
      self.assertAllClose([1.0, 1.0], var_x.as_tensor().eval())
Example 6: Foo
def Foo(inputs):
  var = variable_scope.get_variable(
      "var",
      shape=[10],
      dtype=dtypes.float32,
      initializer=init_ops.ones_initializer())
  return inputs + var
Example 7: testControlDepsNone
def testControlDepsNone(self):
  with self.test_session() as session:
    c = constant_op.constant(1.0)
    with ops.control_dependencies([c]):
      # d gets the control dependency.
      d = constant_op.constant(2.0)
      # Partitioned variables do not.
      var_x = variable_scope.get_variable(
          "x",
          shape=[2],
          initializer=init_ops.ones_initializer(),
          partitioner=partitioned_variables.variable_axis_size_partitioner(4))
    ops_before_read = session.graph.get_operations()
    var_x.as_tensor()  # Caches the ops for subsequent reads.
    reading_ops = [
        op for op in session.graph.get_operations()
        if op not in ops_before_read
    ]
    self.assertEqual([c.op], d.op.control_inputs)
    # Tests that no control dependencies are added to reads of a partitioned
    # variable, similar to reads of a regular variable.
    for op in reading_ops:
      self.assertEqual([], op.control_inputs)
Example 8: testEagerExecution
def testEagerExecution(self):
  with context.eager_mode():
    container = variable_scope.EagerVariableStore()
    x = constant_op.constant([[2.0]])
    with container.as_default():
      y = core_layers.dense(
          x, 1, name='my_dense',
          kernel_initializer=init_ops.ones_initializer())
    self.assertAllEqual(y, [[2.0]])
    self.assertEqual(len(container.variables()), 2)
    # Recreate the layer to test reuse.
    with container.as_default():
      core_layers.dense(
          x, 1, name='my_dense',
          kernel_initializer=init_ops.ones_initializer())
    self.assertEqual(len(container.variables()), 2)
Example 9: testOnesInitializer
def testOnesInitializer(self):
  with self.test_session(use_gpu=True):
    shape = [2, 3]
    x = variable_scope.get_variable(
        "x", shape=shape, initializer=init_ops.ones_initializer())
    x.initializer.run()
    self.assertAllEqual(x.eval(), np.ones(shape))
Example 10: build
def build(self, inputs_shape):
  # Call the build method of the parent class.
  super(MaskedBasicLSTMCell, self).build(inputs_shape)
  self.built = False

  input_depth = inputs_shape[1].value
  h_depth = self._num_units
  self._mask = self.add_variable(
      name="mask",
      shape=[input_depth + h_depth, 4 * h_depth],
      initializer=init_ops.ones_initializer(),
      trainable=False,
      dtype=self.dtype)
  self._threshold = self.add_variable(
      name="threshold",
      shape=[],
      initializer=init_ops.zeros_initializer(),
      trainable=False,
      dtype=self.dtype)
  # Add masked_weights in the weights namescope so as to make it easier
  # for the quantization library to add quant ops.
  self._masked_kernel = math_ops.multiply(self._mask, self._kernel,
                                          core_layers.MASKED_WEIGHT_NAME)
  if self._mask not in ops.get_collection_ref(core_layers.MASK_COLLECTION):
    ops.add_to_collection(core_layers.MASK_COLLECTION, self._mask)
    ops.add_to_collection(core_layers.MASKED_WEIGHT_COLLECTION,
                          self._masked_kernel)
    ops.add_to_collection(core_layers.THRESHOLD_COLLECTION, self._threshold)
    ops.add_to_collection(core_layers.WEIGHT_COLLECTION, self._kernel)

  self.built = True
Example 11: Body
def Body(i, _):
  var_x = variable_scope.get_variable(
      "x",
      shape=[2],
      initializer=init_ops.ones_initializer(),
      partitioner=partitioned_variables.variable_axis_size_partitioner(
          4))
  return (i + 1, var_x.as_tensor())
Example 12: __init__
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer=init_ops.zeros_initializer(),
             gamma_initializer=init_ops.ones_initializer(),
             moving_mean_initializer=init_ops.zeros_initializer(),
             moving_variance_initializer=init_ops.ones_initializer(),
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=None,
             trainable=True,
             virtual_batch_size=None,
             adjustment=None,
             name=None,
             **kwargs):
  super(BatchNormalization, self).__init__(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      beta_constraint=beta_constraint,
      gamma_constraint=gamma_constraint,
      renorm=renorm,
      renorm_clipping=renorm_clipping,
      renorm_momentum=renorm_momentum,
      fused=fused,
      trainable=trainable,
      virtual_batch_size=virtual_batch_size,
      adjustment=adjustment,
      name=name,
      **kwargs)
Example 13: testAddVariable
def testAddVariable(self):
  obj = NonLayerCheckpointable()
  with self.assertRaisesRegexp(ValueError, "do not specify shape"):
    checkpointable_utils.add_variable(
        obj, name="shape_specified_twice", shape=[], initializer=1)
  constant_initializer = checkpointable_utils.add_variable(
      obj, name="constant_initializer", initializer=1)
  with variable_scope.variable_scope("some_variable_scope"):
    ones_initializer = checkpointable_utils.add_variable(
        obj,
        name="ones_initializer",
        shape=[2],
        initializer=init_ops.ones_initializer(dtype=dtypes.float32))
  bare_initializer = checkpointable_utils.add_variable(
      obj,
      name="bare_initializer",
      shape=[2, 2],
      dtype=dtypes.float64,
      initializer=init_ops.zeros_initializer)

  # Even in graph mode, there are no naming conflicts between objects, only
  # naming conflicts within an object.
  other_duplicate = resource_variable_ops.ResourceVariable(
      name="duplicate", initial_value=1.)
  duplicate = checkpointable_utils.add_variable(
      obj, name="duplicate", shape=[])
  with self.assertRaisesRegexp(ValueError, "'duplicate' already exists"):
    checkpointable_utils.add_variable(obj, name="duplicate", shape=[])

  if context.in_graph_mode():
    self.evaluate(variables.global_variables_initializer())
  self.assertEqual("constant_initializer:0", constant_initializer.name)
  self.assertEqual(1, self.evaluate(constant_initializer))
  self.assertEqual("some_variable_scope/ones_initializer:0",
                   ones_initializer.name)
  self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
  self.assertAllEqual([[0., 0.],
                       [0., 0.]], self.evaluate(bare_initializer))
  self.assertEqual("a_variable:0", obj.a_variable.name)
  self.assertEqual("duplicate:0", other_duplicate.name)
  if context.in_graph_mode():
    # The .name attribute may be globally influenced, but the checkpoint name
    # won't be (tested below).
    self.assertEqual("duplicate_1:0", duplicate.name)
  else:
    # When executing eagerly, there's no uniquification of variable names. The
    # checkpoint name will be the same.
    self.assertEqual("duplicate:0", duplicate.name)

  named_variables, _ = checkpointable_utils._serialize_object_graph(obj)
  expected_checkpoint_names = (
      "a_variable/.ATTRIBUTES/VARIABLE_VALUE",
      "bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
      "constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
      "duplicate/.ATTRIBUTES/VARIABLE_VALUE",
      "ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
  )
  six.assertCountEqual(
      self, expected_checkpoint_names, named_variables.keys())
Example 14: _create_variable_statistics_object
def _create_variable_statistics_object(self):
  """Creates non-trainable variables representing input statistics."""
  series_start_moments = Moments(
      mean=variable_scope.get_variable(
          name="series_start_mean",
          shape=[self._num_features],
          dtype=self._dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False),
      variance=variable_scope.get_variable(
          name="series_start_variance",
          shape=[self._num_features],
          dtype=self._dtype,
          initializer=init_ops.ones_initializer(),
          trainable=False))
  overall_feature_moments = Moments(
      mean=variable_scope.get_variable(
          name="overall_feature_mean",
          shape=[self._num_features],
          dtype=self._dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False),
      variance=variable_scope.get_variable(
          name="overall_feature_var",
          shape=[self._num_features],
          dtype=self._dtype,
          initializer=init_ops.ones_initializer(),
          trainable=False))
  start_time = variable_scope.get_variable(
      name="start_time",
      dtype=dtypes.int64,
      initializer=init_ops.zeros_initializer(),
      shape=[],
      trainable=False)
  total_observation_count = variable_scope.get_variable(
      name="total_observation_count",
      shape=[],
      dtype=dtypes.int64,
      initializer=init_ops.ones_initializer(),
      trainable=False)
  return InputStatistics(
      series_start_moments=series_start_moments,
      overall_feature_moments=overall_feature_moments,
      start_time=start_time,
      total_observation_count=total_observation_count)
Example 15: testLSTMLayer
def testLSTMLayer(self):
  # Run with all-0 weights, no padding.
  o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
  self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
  o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
  self.assertAllClose(o, [[[.25]] * self._batch_size,
                          [[.125]] * self._batch_size,
                          [[.0625]] * self._batch_size])
  o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
  self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
  o = self._RunLSTMLayer('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
  self.assertAllClose(o, [[[.25]] * self._batch_size,
                          [[.125]] * self._batch_size,
                          [[.0625]] * self._batch_size])

  # Run with all-1 weights, no padding.
  weight1 = 1.
  for m_init in [0., 1.]:
    for c_init in [0., 1.]:
      o = self._RunLSTMLayer('ones',
                             init_ops.ones_initializer(), m_init, c_init, 0.)
      m0 = self._NextM(self._inputs, weight1, m_init, c_init)
      c0 = self._NextC(self._inputs, weight1, m_init, c_init)
      self.assertAllClose(o[0], m0)
      m1 = self._NextM(self._inputs, weight1, m0, c0)
      c1 = self._NextC(self._inputs, weight1, m0, c0)
      self.assertAllClose(o[1], m1)
      m2 = self._NextM(self._inputs, weight1, m1, c1)
      self.assertAllClose(o[2], m2)

  # Run with random weights.
  for weight in np.random.rand(3):
    weight_tf = constant_op.constant(weight, dtypes.float32)
    random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)

    # No padding.
    for m_init in [0., 1.]:
      for c_init in [0., 1.]:
        o = self._RunLSTMLayer('random', random_weight, m_init, c_init, 0.)
        m0 = self._NextM(self._inputs, weight, m_init, c_init)
        c0 = self._NextC(self._inputs, weight, m_init, c_init)
        self.assertAllClose(o[0], m0)
        m1 = self._NextM(self._inputs, weight, m0, c0)
        c1 = self._NextC(self._inputs, weight, m0, c0)
        self.assertAllClose(o[1], m1)
        m2 = self._NextM(self._inputs, weight, m1, c1)
        self.assertAllClose(o[2], m2)

    # Set padding.
    o = self._RunLSTMLayer('random', random_weight, 0., 0., 1.)
    self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
    o = self._RunLSTMLayer('random', random_weight, 0., 1., 1.)
    self.assertAllClose(o, [[[0.]] * self._batch_size] * 3)
    o = self._RunLSTMLayer('random', random_weight, 1., 0., 1.)
    self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)
    o = self._RunLSTMLayer('random', random_weight, 1., 1., 1.)
    self.assertAllClose(o, [[[1.]] * self._batch_size] * 3)