This article collects typical usage examples of the tensorflow.zeros_initializer method in Python. If you have been wondering what tensorflow.zeros_initializer does, how to call it, or where it fits in real code, the curated examples below may help. You can also browse further usage examples from the enclosing tensorflow module.
Fifteen code examples of tensorflow.zeros_initializer are presented below, ordered by popularity by default.
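Before the examples, here is a minimal sketch of the pattern they all share: tf.zeros_initializer() returns an initializer object, which is handed to tf.get_variable (or to a layer's *_initializer argument) and fills the variable with zeros when it is created. This assumes the TF1-style graph API used throughout; the variable name and shape are illustrative only.

import tensorflow as tf

# A bias vector initialized to all zeros (TF1 graph API).
bias = tf.get_variable(
    'bias',
    shape=[128],
    dtype=tf.float32,
    initializer=tf.zeros_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(bias))  # prints 128 zeros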
Example 1: init_state
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def init_state(inputs,
               state_shape,
               state_initializer=tf.zeros_initializer(),
               dtype=tf.float32):
  """Helper function to create an initial state given inputs.

  Args:
    inputs: input Tensor, at least 2D, the first dimension being batch_size.
    state_shape: the shape of the state.
    state_initializer: Initializer(shape, dtype) for the state Tensor.
    dtype: Optional dtype, needed when inputs is None.

  Returns:
    A tensor representing the initial state.
  """
  if inputs is not None:
    # Handle both the dynamic shape as well as the inferred shape.
    inferred_batch_size = inputs.get_shape().with_rank_at_least(1)[0]
    dtype = inputs.dtype
  else:
    inferred_batch_size = 0
  initial_state = state_initializer(
      [inferred_batch_size] + state_shape, dtype=dtype)
  return initial_state
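A minimal usage sketch, assuming a TF1 graph; the placeholder and shapes are made up for demonstration:

# Build a zero initial state whose batch dimension matches `frames`.
frames = tf.placeholder(tf.float32, shape=[8, 64])  # batch_size = 8
state = init_state(frames, state_shape=[32])        # shape [8, 32], all zeros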
Example 2: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def __init__(self, component, name, shape, dtype):
  """Constructs variables to normalize an input of the given shape.

  Arguments:
    component: ComponentBuilder handle.
    name: Human-readable name used to organize the variables.
    shape: Shape of the layer to be normalized.
    dtype: Type of the layer to be normalized.
  """
  self._name = name
  self._shape = shape
  self._component = component

  beta = tf.get_variable(
      'beta_%s' % name,
      shape=shape,
      dtype=dtype,
      initializer=tf.zeros_initializer())
  gamma = tf.get_variable(
      'gamma_%s' % name,
      shape=shape,
      dtype=dtype,
      initializer=tf.ones_initializer())
  self._params = [beta, gamma]
Example 3: global_step
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def global_step(device=''):
  """Returns the global step variable.

  Args:
    device: Optional device to place the variable on. It can be a string or a
      function that is called to get the device for the variable.

  Returns:
    The tensor representing the global step variable.
  """
  global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
  if global_step_ref:
    return global_step_ref[0]
  else:
    collections = [
        VARIABLES_TO_RESTORE,
        tf.GraphKeys.GLOBAL_VARIABLES,
        tf.GraphKeys.GLOBAL_STEP,
    ]
    # Get the device for the variable.
    with tf.device(variable_device(device, 'global_step')):
      return tf.get_variable('global_step', shape=[], dtype=tf.int64,
                             initializer=tf.zeros_initializer(),
                             trainable=False, collections=collections)
Example 4: layer_norm
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalizes the tensor x, averaging over the last dimension."""
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(
      name, default_name="layer_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "layer_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
    if allow_defun:
      result = layer_norm_compute(x, tf.constant(epsilon), scale, bias)
      result.set_shape(x.get_shape())
    else:
      result = layer_norm_compute_python(x, epsilon, scale, bias)
    return result
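The helpers layer_norm_compute and layer_norm_compute_python are not included in this example. For reference, here is a minimal sketch of what the Python compute step typically looks like, assuming the standard layer-norm formula (normalize over the last axis, then rescale with `scale` and shift with `bias`):

def layer_norm_compute_python(x, epsilon, scale, bias):
  # Mean and variance over the last axis, kept as broadcastable shapes.
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return norm_x * scale + bias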
Example 5: init_param
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def init_param(self):
    idm = self.input_dim
    hs = self.hidden_size
    ws = len(self.window)
    nf = idm * ws
    # The author's special initialization strategy.
    self.Wemb = tf.get_variable(
        name=self.name + '_Wemb', shape=[self.vocab_size, idm],
        dtype=tf.float32, initializer=tf.random_uniform_initializer())
    self.bhid = tf.get_variable(
        name=self.name + '_bhid', shape=[self.vocab_size],
        dtype=tf.float32, initializer=tf.zeros_initializer())
    self.Vhid = tf.get_variable(
        name=self.name + '_Vhid', shape=[hs, idm],
        dtype=tf.float32, initializer=tf.random_uniform_initializer())
    self.Vhid = dot(self.Vhid, self.Wemb)  # [hidden_size, vocab_size]
    self.i2h_W = tf.get_variable(
        name=self.name + '_i2h_W', shape=[idm, hs * 4],
        dtype=tf.float32, initializer=tf.random_uniform_initializer())
    self.h2h_W = tf.get_variable(
        name=self.name + '_h2h_W', shape=[hs, hs * 4],
        dtype=tf.float32, initializer=tf.orthogonal_initializer())
    self.z2h_W = tf.get_variable(
        name=self.name + '_z2h_W', shape=[nf, hs * 4],
        dtype=tf.float32, initializer=tf.random_uniform_initializer())
    # One of the four concatenated gate-bias slices starts at 3 (presumably
    # the forget gate); the other three start at zero.
    b_init_1 = tf.zeros((hs,))
    b_init_2 = tf.ones((hs,)) * 3
    b_init_3 = tf.zeros((hs,))
    b_init_4 = tf.zeros((hs,))
    b_init = tf.concat([b_init_1, b_init_2, b_init_3, b_init_4], axis=0)
    # When the initializer is a constant tensor, `shape` must be omitted;
    # passing it raises: ValueError: If initializer is a constant, do not
    # specify shape.
    self.b = tf.get_variable(
        name=self.name + '_b', dtype=tf.float32, initializer=b_init)
    self.C0 = tf.get_variable(
        name=self.name + '_C0', shape=[nf, hs],
        dtype=tf.float32, initializer=tf.random_uniform_initializer())
    self.b0 = tf.get_variable(
        name=self.name + '_b0', shape=[hs],
        dtype=tf.float32, initializer=tf.zeros_initializer())
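The ValueError mentioned in the comment above is a general TF1 rule worth knowing: when `initializer` is a Tensor rather than an initializer object, the variable's shape is inferred from that tensor and must not be passed explicitly. A minimal sketch (the variable names are illustrative):

b_init = tf.concat([tf.zeros((4,)), tf.ones((4,)) * 3], axis=0)

# Raises ValueError: "If initializer is a constant, do not specify shape."
# bad = tf.get_variable('b_bad', shape=[8], dtype=tf.float32, initializer=b_init)

# Works: the shape [8] is inferred from the constant tensor.
ok = tf.get_variable('b_ok', dtype=tf.float32, initializer=b_init)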
Example 6: _dense_block_mode1
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def _dense_block_mode1(x, hidden_units, dropouts, densenet=False,
                       training=False, seed=0, bn=False, name="dense_block"):
    """Stack of Dense -> (BN) -> ReLU -> Dropout layers.

    :param x: input tensor.
    :param hidden_units: list of layer sizes.
    :param dropouts: list of dropout rates, one per layer.
    :param densenet: enable DenseNet-style connectivity (concatenate each
        layer's input and output).
    :return: output tensor of the block.
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        z = tf.layers.Dense(h,
                            kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i),
                            dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(x)
        if bn:
            z = batch_normalization(z, training=training, name=name + "-" + str(i))
        z = tf.nn.relu(z)
        # z = tf.nn.selu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x
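A minimal usage sketch (the placeholder shape and layer sizes are made up; with bn left at False the undefined batch_normalization helper is never called):

inputs = tf.placeholder(tf.float32, shape=[None, 64])
out = _dense_block_mode1(inputs, hidden_units=[128, 64], dropouts=[0.2, 0.2],
                         densenet=True, training=True)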
Example 7: _dense_block_mode2
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def _dense_block_mode2(x, hidden_units, dropouts, densenet=False,
                       training=False, seed=0, bn=False, name="dense_block"):
    """Pre-activation variant: (BN) -> ReLU -> Dropout -> Dense per layer.

    :param x: input tensor.
    :param hidden_units: list of layer sizes.
    :param dropouts: list of dropout rates, one per layer.
    :param densenet: enable DenseNet-style connectivity (concatenate each
        layer's input and output).
    :return: output tensor of the block.
    Ref: https://github.com/titu1994/DenseNet
    """
    for i, (h, d) in enumerate(zip(hidden_units, dropouts)):
        # Fall back to the raw input when batch norm is disabled.
        z = batch_normalization(x, training=training, name=name + "-" + str(i)) if bn else x
        z = tf.nn.relu(z)
        z = tf.layers.Dropout(d, seed=seed * i)(z, training=training) if d > 0 else z
        z = tf.layers.Dense(h,
                            kernel_initializer=tf.glorot_uniform_initializer(seed=seed * i),
                            dtype=tf.float32,
                            bias_initializer=tf.zeros_initializer())(z)
        if densenet:
            x = tf.concat([x, z], axis=-1)
        else:
            x = z
    return x
Example 8: _resnet_branch_mode1
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def _resnet_branch_mode1(x, hidden_units, dropouts, training, seed=0):
    h1, h2, h3 = hidden_units
    dr1, dr2, dr3 = dropouts
    # Branch 2: Dense -> BN -> ReLU -> Dropout, repeated, with no final ReLU.
    x2 = tf.layers.Dense(h1,
                         kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 2),
                         dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x)
    x2 = tf.layers.BatchNormalization()(x2)
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr1, seed=seed * 1)(x2, training=training) if dr1 > 0 else x2
    x2 = tf.layers.Dense(h2,
                         kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 3),
                         dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2)
    x2 = tf.nn.relu(x2)
    x2 = tf.layers.Dropout(dr2, seed=seed * 2)(x2, training=training) if dr2 > 0 else x2
    x2 = tf.layers.Dense(h3,
                         kernel_initializer=tf.glorot_uniform_initializer(seed=seed * 4),
                         dtype=tf.float32,
                         bias_initializer=tf.zeros_initializer())(x2)
    x2 = tf.layers.BatchNormalization()(x2)
    return x2
Example 9: conv3d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def conv3d(inpt, f, output_channels, s, use_bias=False, scope='conv', name=None):
    inpt_shape = inpt.get_shape().as_list()
    with tf.variable_scope(scope):
        filtr = tf.get_variable(initializer=tf.contrib.layers.xavier_initializer(),
                                shape=[f, f, f, inpt_shape[-1], output_channels],
                                name='filtr')
    strides = [1, s, s, s, 1]
    output = conv3d_withPeriodicPadding(inpt, filtr, strides, name)
    if use_bias:
        with tf.variable_scope(scope):
            bias = tf.get_variable(name='bias',
                                   shape=[1, 1, 1, 1, output_channels],
                                   dtype=tf.float32,
                                   initializer=tf.zeros_initializer())
        output = output + bias
    return output
Example 10: _create_user_terms
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def _create_user_terms(self, users, N):
    num_users = self.num_users
    num_items = self.num_items
    num_factors = self.num_factors

    p_u, b_u = super(SVDPP, self)._create_user_terms(users)

    with tf.variable_scope('user'):
        implicit_feedback_embeddings = tf.get_variable(
            name='implict_feedback_embedding',
            shape=[num_items, num_factors],
            initializer=tf.zeros_initializer(),
            regularizer=tf.contrib.layers.l2_regularizer(self.reg_y_u))

        y_u = tf.gather(
            tf.nn.embedding_lookup_sparse(
                implicit_feedback_embeddings,
                N,
                sp_weights=None,
                combiner='sqrtn'),
            users,
            name='y_u')

    return p_u, b_u, y_u
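For context, the combiner='sqrtn' lookup implements the SVD++ implicit-feedback term: for a user u with implicit item set N(u), y_u = |N(u)|^(-1/2) · Σ_{j∈N(u)} y_j. A minimal dense-op sketch of the same aggregation for a single user (item_ids is illustrative; implicit_feedback_embeddings is the variable from the example above):

# Sum the user's implicit item embeddings, then scale by 1/sqrt(|N(u)|).
item_ids = tf.constant([3, 17, 42])
summed = tf.reduce_sum(tf.gather(implicit_feedback_embeddings, item_ids), axis=0)
y_u_dense = summed / tf.sqrt(tf.cast(tf.size(item_ids), tf.float32))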
Example 11: resnet_bottleneck
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def resnet_bottleneck(l, ch_out, stride, stride_first=False):
    shortcut = l
    norm_relu = lambda x: tf.nn.relu(Norm(x))
    l = Conv2D('conv1', l, ch_out, 1,
               strides=stride if stride_first else 1, activation=norm_relu)
    # Sec 5.1: "We use the ResNet-50 [16] variant from [12], noting that the
    # stride-2 convolutions are on 3×3 layers instead of on 1×1 layers."
    l = Conv2D('conv2', l, ch_out, 3,
               strides=1 if stride_first else stride, activation=norm_relu)
    # Sec 5.1: "For BN layers, the learnable scaling coefficient γ is
    # initialized to be 1, except for each residual block's last BN where γ
    # is initialized to be 0."
    l = Conv2D('conv3', l, ch_out * 4, 1,
               activation=lambda x: Norm(x, gamma_initializer=tf.zeros_initializer()))
    ret = l + resnet_shortcut(shortcut, ch_out * 4, stride,
                              activation=lambda x: Norm(x))
    return tf.nn.relu(ret, name='block_output')
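The zero-γ trick quoted above makes each residual branch start as a zero function, so the whole block initially behaves like an identity mapping. The same idea expressed with the stock batch-norm layer would look roughly like this (a sketch, assuming tf.layers.batch_normalization rather than the Norm wrapper used above; branch and is_training are illustrative):

# Last BN of a residual branch: γ starts at 0, so the branch contributes
# nothing at initialization and the block output equals the shortcut.
branch = tf.layers.batch_normalization(
    branch, gamma_initializer=tf.zeros_initializer(), training=is_training)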
Example 12: vgg_arg_scope
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The L2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc
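A minimal usage sketch: every slim.conv2d and slim.fully_connected call created under the returned scope automatically picks up the ReLU activation, the L2 weight regularizer, and the zero bias initializer (the `images` tensor is illustrative):

with slim.arg_scope(vgg_arg_scope(weight_decay=0.0005)):
  net = slim.conv2d(images, 64, [3, 3], scope='conv1_1')  # biases start at zero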
Example 13: overfeat_arg_scope
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def overfeat_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
Example 14: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def __init__(self, net, labels_one_hot, model_params, method_params):
  """Stores arguments in member variables for further use.

  Args:
    net: A tensor with shape [batch_size, num_features, feature_size] which
      contains some extracted image features.
    labels_one_hot: An optional (can be None) ground-truth labels tensor for
      the input features, with shape
      [batch_size, seq_length, num_char_classes].
    model_params: A namedtuple with model parameters (model.ModelParams).
    method_params: A SequenceLayerParams instance.
  """
  self._params = model_params
  self._mparams = method_params
  self._net = net
  self._labels_one_hot = labels_one_hot
  self._batch_size = net.get_shape().dims[0].value

  # Initialize parameters for char logits which will be computed on the fly
  # inside an LSTM decoder.
  self._char_logits = {}
  regularizer = slim.l2_regularizer(self._mparams.weight_decay)
  self._softmax_w = slim.model_variable(
      'softmax_w',
      [self._mparams.num_lstm_units, self._params.num_char_classes],
      initializer=orthogonal_initializer,
      regularizer=regularizer)
  self._softmax_b = slim.model_variable(
      'softmax_b', [self._params.num_char_classes],
      initializer=tf.zeros_initializer(),
      regularizer=regularizer)
Example 15: fc_network
# Required import: import tensorflow [as alias]
# Or: from tensorflow import zeros_initializer [as alias]
def fc_network(x, neurons, wt_decay, name, num_pred=None, offset=0,
               batch_norm_param=None, dropout_ratio=0.0, is_training=None):
  if dropout_ratio > 0:
    assert is_training is not None, \
        'is_training needs to be defined when training with dropout.'

  repr = []
  for i, neuron in enumerate(neurons):
    init_var = np.sqrt(2.0 / neuron)
    if batch_norm_param is not None:
      x = slim.fully_connected(
          x, neuron, activation_fn=None,
          weights_initializer=tf.random_normal_initializer(stddev=init_var),
          weights_regularizer=slim.l2_regularizer(wt_decay),
          normalizer_fn=slim.batch_norm,
          normalizer_params=batch_norm_param,
          biases_initializer=tf.zeros_initializer(),
          scope='{:s}_{:d}'.format(name, offset + i))
    else:
      x = slim.fully_connected(
          x, neuron, activation_fn=tf.nn.relu,
          weights_initializer=tf.random_normal_initializer(stddev=init_var),
          weights_regularizer=slim.l2_regularizer(wt_decay),
          biases_initializer=tf.zeros_initializer(),
          scope='{:s}_{:d}'.format(name, offset + i))
    if dropout_ratio > 0:
      x = slim.dropout(x, keep_prob=1 - dropout_ratio, is_training=is_training,
                       scope='{:s}_{:d}'.format('dropout_' + name, offset + i))
    repr.append(x)
  if num_pred is not None:
    init_var = np.sqrt(2.0 / num_pred)
    x = slim.fully_connected(
        x, num_pred,
        weights_regularizer=slim.l2_regularizer(wt_decay),
        weights_initializer=tf.random_normal_initializer(stddev=init_var),
        biases_initializer=tf.zeros_initializer(),
        activation_fn=None,
        scope='{:s}_pred'.format(name))
  return x, repr