This article collects typical usage examples of the Python function tensorflow.python.ops.init_ops.random_uniform_initializer. If you are wondering what random_uniform_initializer does, how to call it, or what it looks like in real code, the curated examples below should help.
The following shows 15 code examples of the random_uniform_initializer function, sorted by popularity by default.
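Before the examples, a minimal sketch of the basic API, assuming TF 1.x graph mode to match the code below (the variable name "weights" and the bounds are illustrative): random_uniform_initializer returns an initializer that fills a variable with values drawn uniformly from [minval, maxval), and an explicit seed makes the draw reproducible.

import tensorflow as tf
from tensorflow.python.ops import init_ops

# Values are drawn uniformly from [minval, maxval); a fixed seed makes the
# initialization reproducible across runs.
initializer = init_ops.random_uniform_initializer(
    minval=-0.05, maxval=0.05, seed=42)

with tf.Graph().as_default():
  weights = tf.get_variable("weights", shape=[3, 4], initializer=initializer)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(weights))  # every entry lies in [-0.05, 0.05)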
Example 1: benchmarkTfRNNLSTMTraining
def benchmarkTfRNNLSTMTraining(self):
  test_configs = self._GetTestConfig()
  for config_name, config in test_configs.items():
    num_layers = config["num_layers"]
    num_units = config["num_units"]
    batch_size = config["batch_size"]
    seq_length = config["seq_length"]

    with ops.Graph().as_default(), ops.device("/gpu:0"):
      inputs = seq_length * [
          array_ops.zeros([batch_size, num_units], dtypes.float32)
      ]
      initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)

      cell = core_rnn_cell_impl.LSTMCell(
          num_units=num_units, initializer=initializer, state_is_tuple=True)
      multi_cell = core_rnn_cell_impl.MultiRNNCell([cell] * num_layers)
      outputs, final_state = core_rnn.static_rnn(
          multi_cell, inputs, dtype=dtypes.float32)
      trainable_variables = ops.get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES)
      gradients = gradients_impl.gradients([outputs, final_state],
                                           trainable_variables)
      training_op = control_flow_ops.group(*gradients)
      self._BenchmarkOp(training_op, "tf_rnn_lstm %s %s" %
                        (config_name, self._GetConfigDesc(config)))
Example 2: _create_multi_lstm_cell_ops
def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth,
                                num_layers, max_time, compiled):
  with variable_scope.variable_scope(
      "root",
      initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
    inputs = variable_scope.get_variable(
        "inputs", initializer=random_ops.random_uniform(
            (max_time, batch_size, input_depth), seed=1))
    maybe_xla = lambda c: rnn_cell.CompiledWrapper(c) if compiled else c
    cell = core_rnn_cell_impl.MultiRNNCell(
        [maybe_xla(core_rnn_cell_impl.LSTMCell(num_units))
         for _ in range(num_layers)])
    initial_state = cell.zero_state(
        batch_size=batch_size, dtype=dtypes.float32)
    outputs, final_state = rnn.dynamic_rnn(
        cell=cell, inputs=inputs, initial_state=initial_state,
        time_major=True)
    flat_final_state = nest.flatten(final_state)
    trainable_variables = variables.trainable_variables()
    outputs_grad = gradients_impl.gradients(
        [outputs],
        trainable_variables + [inputs] + nest.flatten(initial_state))
    final_state_grad = gradients_impl.gradients(
        flat_final_state,
        trainable_variables + [inputs] + nest.flatten(initial_state))

  return {"outputs": outputs,
          "final_state": flat_final_state,
          "outputs_grad": outputs_grad,
          "final_state_grad": final_state_grad}
Example 3: create_ops
def create_ops():
  with variable_scope.variable_scope(
      "root",
      initializer=init_ops.random_uniform_initializer(
          -0.1, 0.1, seed=2)):
    inputs = random_ops.random_uniform((1,), seed=1)
    return inputs
Example 4: testBlockGRUToGRUCellSingleStep
def testBlockGRUToGRUCellSingleStep(self):
  with self.session(use_gpu=True, graph=ops.Graph()) as sess:
    batch_size = 4
    cell_size = 5
    input_size = 6

    seed = 1994
    initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)

    # Inputs
    x = array_ops.zeros([batch_size, input_size])
    h = array_ops.zeros([batch_size, cell_size])

    # Values for the inputs.
    x_value = np.random.rand(batch_size, input_size)
    h_value = np.random.rand(batch_size, cell_size)

    # Output from the basic GRU cell implementation.
    with vs.variable_scope("basic", initializer=initializer):
      output = rnn_cell.GRUCell(cell_size)(x, h)
      sess.run([variables.global_variables_initializer()])
      basic_res = sess.run([output], {x: x_value, h: h_value})

    # Output from the block GRU cell implementation.
    with vs.variable_scope("block", initializer=initializer):
      output = gru_ops.GRUBlockCell(cell_size)(x, h)
      sess.run([variables.global_variables_initializer()])
      block_res = sess.run([output], {x: x_value, h: h_value})

    self.assertEqual(len(block_res), len(basic_res))
    for block, basic in zip(block_res, basic_res):
      self.assertAllClose(block, basic)
Example 5: __call__
def __call__(self, inputs, state, scope=None):
  """Run the cell on embedded inputs."""
  with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
    with ops.device("/cpu:0"):
      if self._embedding:
        embedding = self._embedding
      else:
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
        embedding = vs.get_variable("embedding", [self._embedding_classes,
                                                  self._cell.input_size],
                                    initializer=initializer)
      embedded = embedding_ops.embedding_lookup(
          embedding, array_ops.reshape(inputs, [-1]))
"""print (embedded)
print ("{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}")"""
  return self._cell(embedded, state)
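The variance claim in the comment above is easy to verify: a Uniform(-a, a) distribution has variance a**2 / 3, so a = sqrt(3) gives variance 1. A quick NumPy check (the sample size is arbitrary):

import math
import numpy as np

a = math.sqrt(3)
samples = np.random.uniform(-a, a, size=1000000)
print(samples.var())  # ~1.0, since Var(Uniform(-a, a)) = a**2 / 3 = 1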
Example 6: _Model
def _Model(x):
  w = variable_scope.get_variable(
      "w", (64, 64),
      initializer=init_ops.random_uniform_initializer(seed=312))
  b = variable_scope.get_variable(
      "b", (64,), initializer=init_ops.zeros_initializer())
  return math_ops.sigmoid(math_ops.matmul(x, w) + b)
Example 7: __call__
def __call__(self, inputs, state, scope=None):
  """Run the cell on embedded inputs."""
  with _checked_scope(self, scope or "embedding_wrapper", reuse=self._reuse):
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

      if type(state) is tuple:
        data_type = state[0].dtype
      else:
        data_type = state.dtype

      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(
          embedding, array_ops.reshape(inputs, [-1]))
  return self._cell(embedded, state)
Example 8: __call__
def __call__(self, inputs, state, scope=None):
  """Run the cell on embedded inputs."""
  with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper2"
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

      # One embedding table per input feature.
      embeddings = []
      for i in range(len(self._embedding_classes)):
        embeddings.append(
            vs.get_variable("embedding" + str(i),
                            [self._embedding_classes[i],
                             self._embedding_sizes[i]],
                            initializer=initializer))
      embedded = []
      for i in range(len(self._embedding_classes)):
        embedded.append(embedding_ops.embedding_lookup(
            embeddings[i], array_ops.reshape(inputs[i], [-1])))
      finalEmbedded = tf.concat(1, embedded)
  return self._cell(finalEmbedded, state)
Example 9: _get_initializer
def _get_initializer(init_bound, dtype, seed):
  if dtype == dtypes.float16:
    return _MaskedRandomUniformInitializer(
        -init_bound, init_bound, dtype=dtype, seed=seed)
  else:
    return init_ops.random_uniform_initializer(
        -init_bound, init_bound, dtype=dtype, seed=seed)
Example 10: model_fn
def model_fn():
  x = variable_scope.get_variable(
      'x',
      shape=(2, 3),
      initializer=init_ops.random_uniform_initializer(
          1.0, 10.0, dtype=dtypes.float32))
  return array_ops.identity(x)
Example 11: build
def build(self, _):
  self.embedding = self.add_variable(
      'embedding_kernel',
      shape=[self.vocab_size, self.embedding_dim],
      dtype=np.float32,
      initializer=init_ops.random_uniform_initializer(-0.1, 0.1),
      trainable=True)
Example 12: testWarmStartInputLayerEmbeddingColumn
def testWarmStartInputLayerEmbeddingColumn(self):
  # Create old and new vocabs for embedding column "sc_vocab".
  prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
                                      "old_vocab")
  new_vocab_path = self._write_vocab(
      ["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
      "new_vocab")

  # Save checkpoint from which to warm-start.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as sess:
      _ = variable_scope.get_variable(
          "input_layer/sc_vocab_embedding/embedding_weights",
          initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
      self._write_checkpoint(sess)

  def _partitioner(shape, dtype):  # pylint:disable=unused-argument
    # Partition each var into 2 equal slices.
    partitions = [1] * len(shape)
    partitions[0] = min(2, shape[0].value)
    return partitions

  # Create feature columns.
  sc_vocab = fc.categorical_column_with_vocabulary_file(
      "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
  emb_vocab = fc.embedding_column(
      categorical_column=sc_vocab,
      dimension=2,
      # Can't use constant_initializer with load_and_remap. In practice,
      # use a truncated normal initializer.
      initializer=init_ops.random_uniform_initializer(
          minval=0.42, maxval=0.42))
  all_deep_cols = [emb_vocab]

  # New graph, new session with warm-starting.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as sess:
      cols_to_vars = {}
      with variable_scope.variable_scope("", partitioner=_partitioner):
        # Create the variables.
        fc.input_layer(
            features=self._create_dummy_inputs(),
            feature_columns=all_deep_cols,
            cols_to_vars=cols_to_vars)
      ws_settings = ws_util._WarmStartSettings(
          self.get_temp_dir(), col_to_prev_vocab={
              emb_vocab: prev_vocab_path
          })
      ws_util._warmstart_input_layer(cols_to_vars, ws_settings)
      sess.run(variables.global_variables_initializer())
      # Verify weights were correctly warm-started. Var corresponding to
      # emb_vocab should be correctly warm-started after vocab remapping.
      # Missing values are filled in with the EmbeddingColumn's initializer.
      self._assert_cols_to_vars(
          cols_to_vars, {
              emb_vocab: [
                  np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
                  np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
              ]
          }, sess)
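The expected arrays in the final assertion follow directly from the vocab remapping: each row of the new embedding takes the checkpoint row for the same token in the old vocab, and tokens absent from the old vocab ("raspberry", "blueberry") fall back to the column's initializer, which is the constant 0.42 here. A sketch of that expected remapping in plain NumPy (illustrative only, not the actual load_and_remap implementation):

import numpy as np

old_vocab = ["apple", "banana", "guava", "orange"]
new_vocab = ["orange", "guava", "banana", "apple", "raspberry", "blueberry"]
old_weights = np.array([[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
init_row = [0.42, 0.42]  # random_uniform_initializer(minval=0.42, maxval=0.42)

expected = np.array([
    old_weights[old_vocab.index(tok)] if tok in old_vocab else init_row
    for tok in new_vocab
])
# With two equal partitions, rows 0-2 give [[3., 3.3], [2., 2.2], [1., 1.1]]
# and rows 3-5 give [[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]], matching the
# arrays asserted above.
print(expected)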
Example 13: testRandomInitializer
def testRandomInitializer(self):
  # Sanity check that the slices use a different seed when using a random
  # initializer function.
  with self.test_session():
    var0, var1 = partitioned_variables.create_partitioned_variables(
        [20, 12], [1, 2], init_ops.random_uniform_initializer())
    variables.global_variables_initializer().run()
    val0, val1 = var0.eval().flatten(), var1.eval().flatten()
    self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
  # Negative test that proves that slices have the same values if
  # the random initializer uses a seed.
  with self.test_session():
    var0, var1 = partitioned_variables.create_partitioned_variables(
        [20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
    variables.global_variables_initializer().run()
    val0, val1 = var0.eval().flatten(), var1.eval().flatten()
    self.assertAllClose(val0, val1)
Example 14: _build
def _build(self):
  """Builds the embedding table, plus a position embedding table
  if self._timing == "emb".
  """
  self._embeddings = variable_scope.get_variable(
      name=(self._name or "embedding_table"),
      shape=[self._vocab_size, self._dimension],
      initializer=init_ops.random_uniform_initializer(
          -self._init_scale, self._init_scale))
  if self._timing == "emb":
    self._position_embedding = variable_scope.get_variable(
        name=(self._name or "embedding_table") + "_posi",
        shape=[self._maximum_position, self._dimension],
        initializer=init_ops.random_uniform_initializer(
            -self._init_scale, self._init_scale))
Example 15: _createStackBidirectionalDynamicRNN
def _createStackBidirectionalDynamicRNN(self,
                                        use_gpu,
                                        use_shape,
                                        use_state_tuple,
                                        initial_states_fw=None,
                                        initial_states_bw=None,
                                        scope=None):
  self.layers = [2, 3]
  input_size = 5
  batch_size = 2
  max_length = 8

  initializer = init_ops.random_uniform_initializer(
      -0.01, 0.01, seed=self._seed)
  sequence_length = array_ops.placeholder(dtypes.int64)

  self.cells_fw = [
      core_rnn_cell_impl.LSTMCell(
          num_units,
          input_size,
          initializer=initializer,
          state_is_tuple=False) for num_units in self.layers
  ]
  self.cells_bw = [
      core_rnn_cell_impl.LSTMCell(
          num_units,
          input_size,
          initializer=initializer,
          state_is_tuple=False) for num_units in self.layers
  ]

  inputs = max_length * [
      array_ops.placeholder(
          dtypes.float32,
          shape=(batch_size, input_size) if use_shape else (None, input_size))
  ]
  inputs_c = array_ops.stack(inputs)
  inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
  outputs, st_fw, st_bw = rnn.stack_bidirectional_dynamic_rnn(
      self.cells_fw,
      self.cells_bw,
      inputs_c,
      initial_states_fw=initial_states_fw,
      initial_states_bw=initial_states_bw,
      dtype=dtypes.float32,
      sequence_length=sequence_length,
      scope=scope)

  # Outputs has shape (batch_size, max_length, 2 * self.layers[-1]).
  output_shape = [None, max_length, 2 * self.layers[-1]]
  if use_shape:
    output_shape[0] = batch_size
  self.assertAllEqual(outputs.get_shape().as_list(), output_shape)

  input_value = np.random.randn(batch_size, input_size)
  return input_value, inputs, outputs, st_fw, st_bw, sequence_length