This article collects typical usage examples of the Python method tensorflow.contrib.layers.python.layers.initializers.xavier_initializer. If you are wondering what initializers.xavier_initializer does or how to use it, the curated code examples below should help. You can also explore the other members of the module tensorflow.contrib.layers.python.layers.initializers.
The following 15 code examples of initializers.xavier_initializer are shown, sorted by popularity by default.
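As a quick orientation, here is a minimal, self-contained sketch of the method itself (assumes TensorFlow 1.x, where tf.contrib is available):

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers

with tf.variable_scope("demo"):
    # Xavier/Glorot initialization scales random weights by fan-in and fan-out
    # so that activation variance stays roughly constant across layers.
    w = tf.get_variable("w", shape=[784, 256],
                        initializer=initializers.xavier_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).std())  # about sqrt(2 / (784 + 256)) ~= 0.044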
Example 1: _get_weights_wrapper
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def _get_weights_wrapper(
        name, shape, dtype=tf.float32, initializer=initializers.xavier_initializer(),
        weights_decay_factor=None
):
    """Wrapper over _get_variable_wrapper() to get weights, optionally adding an
    L2 weight-decay term to the 'losses' collection.
    """
    weights = _get_variable_wrapper(
        name=name, shape=shape, dtype=dtype, initializer=initializer
    )
    if weights_decay_factor is not None and weights_decay_factor > 0.0:
        weights_wd = tf.multiply(
            tf.nn.l2_loss(weights), weights_decay_factor, name=name + '/l2loss'
        )
        tf.add_to_collection('losses', weights_wd)
    return weights
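A sketch of the weight-decay pattern above in isolation (the _get_variable_wrapper helper is external, so tf.get_variable is inlined; the decay factor is illustrative):

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers

w = tf.get_variable('w', [128, 64], initializer=initializers.xavier_initializer())
# Push the L2 penalty into the 'losses' collection, then sum it into the total.
tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(w), 1e-4, name='w/l2loss'))
data_loss = tf.constant(0.0)  # stand-in for the task loss
total_loss = tf.add_n([data_loss] + tf.get_collection('losses'), name='total_loss')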
Example 2: linear
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def linear(input_,
           output_size,
           weights_initializer=initializers.xavier_initializer(),
           biases_initializer=tf.zeros_initializer(),
           activation_fn=None,
           trainable=True,
           name='linear'):
    shape = input_.get_shape().as_list()
    if len(shape) > 2:
        # `reduce` comes from functools on Python 3.
        input_ = tf.reshape(input_, [-1, reduce(lambda x, y: x * y, shape[1:])])
        shape = input_.get_shape().as_list()
    with tf.variable_scope(name):
        w = tf.get_variable('w', [shape[1], output_size], tf.float32,
                            initializer=weights_initializer, trainable=trainable)
        b = tf.get_variable('b', [output_size],
                            initializer=biases_initializer, trainable=trainable)
        out = tf.nn.bias_add(tf.matmul(input_, w), b)
        if activation_fn is not None:
            return activation_fn(out), w, b
        else:
            return out, w, b
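A usage sketch for the layer above (assumes `linear` is in scope and, on Python 3, `from functools import reduce`):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4, 8])  # rank > 2, so it is flattened first
out, w, b = linear(x, output_size=16, activation_fn=tf.nn.relu, name='fc1')
print(out.get_shape().as_list())  # [None, 16]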
Example 3: __init__
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def __init__(self):
    self.nums_tags = 4
    self.embedding_size = 50
    self.max_epoch = 10
    self.learning_rate = 0.5
    self.lstm_dim = 128
    self.global_steps = tf.Variable(0, trainable=False)
    self.best_dev_f1 = tf.Variable(0.0, trainable=False)
    self.checkpoint_dir = "./model/"
    self.checkpoint_path = "./model/train_model.ckpt"
    self.initializer = initializers.xavier_initializer()
    self.entry = "train"
    self.vocab_dir = None
    self.init_checkpoint = None
    self.bert_config = None
    self.is_training = self.entry == "train"
Example 4: det_lesion_resnet
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def det_lesion_resnet(inputs, is_training_option=False, scope='det_lesion'):
    """Defines the network
    Args:
        inputs: TensorFlow placeholder that contains the input image
        scope: Scope name for the network
    Returns:
        net: Output Tensor of the network
        end_points: Dictionary with all Tensors of the network
    """
    with tf.variable_scope(scope, 'det_lesion', [inputs]) as sc:
        end_points_collection = sc.name + '_end_points'
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            net, end_points = resnet_v1.resnet_v1_50(inputs, is_training=is_training_option)
            net = slim.flatten(net, scope='flatten5')
            net = slim.fully_connected(net, 1, activation_fn=tf.nn.sigmoid,
                                       weights_initializer=initializers.xavier_initializer(),
                                       scope='output')
            utils.collect_named_outputs(end_points_collection, 'det_lesion/output', net)
        end_points = slim.utils.convert_collection_to_dict(end_points_collection)
    return net, end_points
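A hypothetical driver for the network above (the input size is illustrative; slim, resnet_v1, and utils come from the source project):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
prob, end_points = det_lesion_resnet(images, is_training_option=False)
# `prob` is a sigmoid output in [0, 1]: one lesion-presence score per image.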
Example 5: flatten_fully_connected_v2
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def flatten_fully_connected_v2(inputs,
                               num_outputs,
                               activation_fn=nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_normalizer_fn=None,
                               weights_normalizer_params=None,
                               weights_initializer=initializers.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=init_ops.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with variable_scope.variable_scope(scope, 'flatten_fully_connected_v2'):
        if inputs.shape.ndims > 2:
            inputs = layers.flatten(inputs)
        return fully_connected(inputs=inputs,
                               num_outputs=num_outputs,
                               activation_fn=activation_fn,
                               normalizer_fn=normalizer_fn,
                               normalizer_params=normalizer_params,
                               weights_normalizer_fn=weights_normalizer_fn,
                               weights_normalizer_params=weights_normalizer_params,
                               weights_initializer=weights_initializer,
                               weights_regularizer=weights_regularizer,
                               biases_initializer=biases_initializer,
                               biases_regularizer=biases_regularizer,
                               reuse=reuse,
                               variables_collections=variables_collections,
                               outputs_collections=outputs_collections,
                               trainable=trainable,
                               scope=scope)
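Examples 5-7 are variants of the same flatten-then-dense wrapper. A usage sketch (shapes are illustrative; assumes the function above is in scope):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 7, 7, 64])  # conv feature map
logits = flatten_fully_connected_v2(x, num_outputs=10, activation_fn=None,
                                    scope='logits')  # flattens to [None, 3136] first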
Example 6: flatten_fully_connected_v1
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def flatten_fully_connected_v1(inputs,
                               num_outputs,
                               activation_fn=tf.nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_initializer=slim.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=tf.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected_v1'):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope)
Example 7: flatten_fully_connected
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def flatten_fully_connected(inputs,
                            num_outputs,
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=slim.xavier_initializer(),
                            weights_regularizer=None,
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=None,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope)
Example 8: preact_conv2d
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def preact_conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        padding='SAME',
        activation_fn=nn.relu,
        normalizer_fn=None,
        normalizer_params=None,
        weights_initializer=initializers.xavier_initializer(),
        weights_regularizer=None,
        reuse=None,
        variables_collections=None,
        outputs_collections=None,
        trainable=True,
        scope=None):
    """Adds a 2D convolution preceded by batch normalization and activation.
    """
    with variable_scope.variable_scope(scope, 'Conv', values=[inputs], reuse=reuse) as sc:
        inputs = ops.convert_to_tensor(inputs)
        dtype = inputs.dtype.base_dtype
        if normalizer_fn:
            normalizer_params = normalizer_params or {}
            inputs = normalizer_fn(inputs, activation_fn=activation_fn, **normalizer_params)
        kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
        stride_h, stride_w = utils.two_element_tuple(stride)
        num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
        weights_shape = [kernel_h, kernel_w, num_filters_in, num_outputs]
        weights_collections = utils.get_variable_collections(variables_collections, 'weights')
        weights = variables.model_variable('weights',
                                           shape=weights_shape,
                                           dtype=dtype,
                                           initializer=weights_initializer,
                                           regularizer=weights_regularizer,
                                           collections=weights_collections,
                                           trainable=trainable)
        outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1], padding=padding)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
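A usage sketch of the pre-activation ordering (batch norm and ReLU run before the convolution, as in pre-activation ResNets; assumes the function above and its imports are in scope):

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.placeholder(tf.float32, [None, 32, 32, 16])
y = preact_conv2d(x, num_outputs=32, kernel_size=3, stride=1,
                  normalizer_fn=slim.batch_norm, scope='preact1')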
Example 9: conv2d
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def conv2d(x,
           output_dim,
           kernel_size,
           stride,
           weights_initializer=tf.contrib.layers.xavier_initializer(),
           biases_initializer=tf.zeros_initializer(),
           activation_fn=tf.nn.relu,
           data_format='NHWC',
           padding='VALID',
           name='conv2d',
           trainable=True):
    with tf.variable_scope(name):
        if data_format == 'NCHW':
            stride = [1, 1, stride[0], stride[1]]
            kernel_shape = [kernel_size[0], kernel_size[1], x.get_shape()[1], output_dim]
        elif data_format == 'NHWC':
            stride = [1, stride[0], stride[1], 1]
            kernel_shape = [kernel_size[0], kernel_size[1], x.get_shape()[-1], output_dim]
        w = tf.get_variable('w', kernel_shape,
                            tf.float32, initializer=weights_initializer, trainable=trainable)
        conv = tf.nn.conv2d(x, w, stride, padding, data_format=data_format)
        b = tf.get_variable('b', [output_dim],
                            tf.float32, initializer=biases_initializer, trainable=trainable)
        out = tf.nn.bias_add(conv, b, data_format)
        if activation_fn is not None:
            out = activation_fn(out)
        return out, w, b
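A usage sketch for the wrapper above (shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 84, 84, 4])  # NHWC input
out, w, b = conv2d(x, output_dim=32, kernel_size=[8, 8], stride=[4, 4],
                   data_format='NHWC', name='conv1')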
Example 10: fractal_conv2d
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def fractal_conv2d(inputs,
                   num_columns,
                   num_outputs,
                   kernel_size,
                   joined=True,
                   stride=1,
                   padding='SAME',
                   # rate=1,
                   activation_fn=nn.relu,
                   normalizer_fn=slim.batch_norm,
                   normalizer_params=None,
                   weights_initializer=initializers.xavier_initializer(),
                   weights_regularizer=None,
                   biases_initializer=None,
                   biases_regularizer=None,
                   reuse=None,
                   variables_collections=None,
                   outputs_collections=None,
                   is_training=True,
                   trainable=True,
                   scope=None):
    """Builds a fractal block with slim.conv2d.

    The fractal will have `num_columns` columns built from slim.conv2d layers.

    Args:
        inputs: a 4-D tensor `[batch_size, height, width, channels]`.
        num_columns: integer, the number of columns in the fractal.
    """
    # Capture the keyword arguments and forward everything that is not
    # fractal-specific into an arg_scope for slim.conv2d.
    locs = locals()
    fractal_args = ['inputs', 'num_columns', 'joined', 'is_training']
    asc_fn = lambda: slim.arg_scope([slim.conv2d],
                                    **{arg: val for (arg, val) in locs.items()
                                       if arg not in fractal_args})
    return fractal_template(inputs, num_columns, slim.conv2d, asc_fn,
                            joined, is_training, reuse, scope)
Example 11: conv2d
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def conv2d(x,
           output_dim,
           kernel_size,
           stride,
           weights_initializer=tf.contrib.layers.xavier_initializer(),
           biases_initializer=tf.zeros_initializer(),
           activation_fn=tf.nn.relu,
           data_format='NHWC',
           padding='VALID',
           name='conv2d',
           trainable=True):
    with tf.variable_scope(name):
        stride = [1, stride[0], stride[1], 1]
        kernel_shape = [kernel_size[0], kernel_size[1], x.get_shape()[-1], output_dim]
        w = tf.get_variable('w', kernel_shape,
                            tf.float32, initializer=weights_initializer, trainable=trainable)
        conv = tf.nn.conv2d(x, w, stride, padding, data_format=data_format)
        b = tf.get_variable('b', [output_dim],
                            tf.float32, initializer=biases_initializer, trainable=trainable)
        out = tf.nn.bias_add(conv, b, data_format)
        if activation_fn is not None:
            out = activation_fn(out)
        return out, w, b
Example 12: load_enet
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def load_enet(sess, checkpoint_dir, input_image, batch_size, num_classes):
    checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    num_initial_blocks = 1
    skip_connections = False
    stage_two_repeat = 2
    with slim.arg_scope(ENet_arg_scope()):
        # The pretrained ENet head has 12 classes; a fresh transfer layer
        # mapping to `num_classes` is added below.
        logits, _ = ENet(input_image,
                         num_classes=12,
                         batch_size=batch_size,
                         is_training=True,
                         reuse=None,
                         num_initial_blocks=num_initial_blocks,
                         stage_two_repeat=stage_two_repeat,
                         skip_connections=skip_connections)
    variables_to_restore = slim.get_variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint)
    graph = tf.get_default_graph()
    last_prelu = graph.get_tensor_by_name('ENet/bottleneck5_1_last_prelu:0')
    output = slim.conv2d_transpose(last_prelu, num_classes, [2, 2], stride=2,
                                   weights_initializer=initializers.xavier_initializer(),
                                   scope='Semantic/transfer_layer/conv2d_transpose')
    probabilities = tf.nn.softmax(output, name='Semantic/transfer_layer/logits_to_softmax')
    with tf.variable_scope('', reuse=True):
        weight = tf.get_variable('Semantic/transfer_layer/conv2d_transpose/weights')
        bias = tf.get_variable('Semantic/transfer_layer/conv2d_transpose/biases')
        sess.run([weight.initializer, bias.initializer])
    return output, probabilities
Developer: hq-jiang, Project: instance-segmentation-with-discriminative-loss-tensorflow, Lines of code: 38, Source: transfer_semantic.py
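A hypothetical driver for load_enet (the checkpoint path and input size are illustrative; ENet, ENet_arg_scope, and slim come from the source project):

import tensorflow as tf

with tf.Session() as sess:
    image = tf.placeholder(tf.float32, [None, 360, 480, 3])
    output, probabilities = load_enet(sess, './checkpoint', image,
                                      batch_size=1, num_classes=12)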
Example 13: IDCNN_layer
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def IDCNN_layer(self, model_inputs, name=None):
    model_inputs = tf.expand_dims(model_inputs, 1)
    reuse = False
    if self.dropout == 1.0:
        reuse = True
    with tf.variable_scope("idcnn" if not name else name):
        shape = [1, self.filter_width, self.embedding_dim, self.num_filter]
        filter_weights = tf.get_variable(
            "idcnn_filter",
            shape=shape,
            initializer=self.initializer)
        # shape of input  = [batch, in_height, in_width, in_channels]
        # shape of filter = [filter_height, filter_width, in_channels, out_channels]
        layerInput = tf.nn.conv2d(model_inputs,
                                  filter_weights,
                                  strides=[1, 1, 1, 1],
                                  padding="SAME",
                                  name="init_layer")
        finalOutFromLayers = []
        totalWidthForLastDim = 0
        for j in range(self.repeat_times):
            for i in range(len(self.layers)):
                dilation = self.layers[i]['dilation']
                isLast = (i == len(self.layers) - 1)
                with tf.variable_scope("atrous-conv-layer-%d" % i,
                                       reuse=(reuse or j > 0)):
                    w = tf.get_variable(
                        "filterW",
                        shape=[1, self.filter_width, self.num_filter,
                               self.num_filter],
                        initializer=tf.contrib.layers.xavier_initializer())
                    b = tf.get_variable("filterB", shape=[self.num_filter])
                    conv = tf.nn.atrous_conv2d(layerInput,
                                               w,
                                               rate=dilation,
                                               padding="SAME")
                    conv = tf.nn.bias_add(conv, b)
                    conv = tf.nn.relu(conv)
                    if isLast:
                        finalOutFromLayers.append(conv)
                        totalWidthForLastDim += self.num_filter
                    layerInput = conv
        finalOut = tf.concat(axis=3, values=finalOutFromLayers)
        keepProb = 1.0 if reuse else 0.5
        finalOut = tf.nn.dropout(finalOut, keepProb)
        finalOut = tf.squeeze(finalOut, [1])
        finalOut = tf.reshape(finalOut, [-1, totalWidthForLastDim])
        self.cnn_output_width = totalWidthForLastDim
        return finalOut
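A minimal, self-contained sketch of the dilated-convolution core that IDCNN_layer stacks (dimensions are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 1, 100, 120])  # [batch, 1, seq_len, emb_dim]
w = tf.get_variable('w', [1, 3, 120, 120],
                    initializer=tf.contrib.layers.xavier_initializer())
# rate=2 reads every other position, widening the receptive field
# without adding parameters.
y = tf.nn.atrous_conv2d(x, w, rate=2, padding='SAME')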
Example 14: create_model
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
                 seq_length, num_labels, use_one_hot_embeddings):
    """Creates a classification model."""
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)
    embedding = model.get_sequence_output()
    embeddings = tf.layers.dropout(embedding, rate=FLAGS.dropout_rate, training=is_training)
    with tf.variable_scope('Graph', reuse=None, custom_getter=None):
        # LSTM: the fused cells expect time-major inputs.
        t = tf.transpose(embeddings, perm=[1, 0, 2])
        # For sequence labeling, the number of LSTM steps is max_seq_length.
        lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(128)
        lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(128)
        lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
        output_fw, _ = lstm_cell_fw(t, dtype=tf.float32, sequence_length=seq_length)
        output_bw, _ = lstm_cell_bw(t, dtype=tf.float32, sequence_length=seq_length)
        output = tf.concat([output_fw, output_bw], axis=-1)
        output = tf.transpose(output, perm=[1, 0, 2])
        output = tf.layers.dropout(output, rate=0.5, training=is_training)
        # CRF
        logits = tf.layers.dense(output, num_labels)
        crf_params = tf.get_variable("crf", [num_labels, num_labels], dtype=tf.float32)
        trans = tf.get_variable(
            "transitions",
            shape=[num_labels, num_labels],
            initializer=initializers.xavier_initializer())
        # crf_decode returns (decode_tags, best_score); bind the score to its own
        # name so `trans` keeps referring to the transition matrix returned below.
        pred_ids, best_score = tf.contrib.crf.crf_decode(logits, crf_params, seq_length)
        log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
            logits, label_ids, seq_length, crf_params)
        loss = tf.reduce_mean(-log_likelihood)
        # if mode == tf.estimator.ModeKeys.EVAL:
        #     return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
        # elif mode == tf.estimator.ModeKeys.TRAIN:
        return loss, logits, trans, pred_ids
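A minimal, self-contained sketch of the CRF pieces used above (the label count is illustrative):

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers

num_labels = 5
logits = tf.placeholder(tf.float32, [None, None, num_labels])   # [batch, time, labels]
labels = tf.placeholder(tf.int32, [None, None])
lengths = tf.placeholder(tf.int32, [None])
trans = tf.get_variable('transitions', [num_labels, num_labels],
                        initializer=initializers.xavier_initializer())
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
    logits, labels, lengths, transition_params=trans)
loss = tf.reduce_mean(-log_likelihood)                           # training objective
pred_ids, _ = tf.contrib.crf.crf_decode(logits, trans, lengths)  # Viterbi decode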
Example 15: __init__
# Required import: from tensorflow.contrib.layers.python.layers import initializers [as alias]
# Or: from tensorflow.contrib.layers.python.layers.initializers import xavier_initializer [as alias]
def __init__(self, config, embeddings):
    self.config = config
    self.lstm_dim = config["lstm_dim"]
    self.num_chars = config["num_chars"]
    self.num_tags = config["num_tags"]
    self.char_dim = config["char_dim"]
    self.lr = config["lr"]
    self.char_embeding = tf.get_variable(name="char_embeding", initializer=embeddings)
    self.global_step = tf.Variable(0, trainable=False)
    self.initializer = initializers.xavier_initializer()
    self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name="char_inputs")
    self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name="targets")
    self.dropout = tf.placeholder(dtype=tf.float32, name="dropout")
    self.lengths = tf.placeholder(dtype=tf.int32, shape=[None], name="lengths")
    # self.middle_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name="middle_dropout_keep_prob")
    # self.hidden_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name="hidden_dropout_keep_prob")
    self.input_dropout_keep_prob = tf.placeholder_with_default(config["input_dropout_keep"], [],
                                                               name="input_dropout_keep_prob")
    self.batch_size = tf.shape(self.char_inputs)[0]
    self.num_steps = tf.shape(self.char_inputs)[-1]
    # forward
    embedding = self.embedding_layer(self.char_inputs)
    lstm_inputs = tf.nn.dropout(embedding, self.input_dropout_keep_prob)
    # bi-directional LSTM layer
    lstm_outputs = self.bilstm_layer(lstm_inputs)
    # logits for tags
    self.project_layer(lstm_outputs)
    # loss of the model
    self.loss = self.loss_layer(self.logits, self.lengths)
    with tf.variable_scope("optimizer"):
        optimizer = self.config["optimizer"]
        if optimizer == "sgd":
            self.opt = tf.train.GradientDescentOptimizer(self.lr)
        elif optimizer == "adam":
            self.opt = tf.train.AdamOptimizer(self.lr)
        elif optimizer == "adgrad":  # spelled "adgrad" in the source config
            self.opt = tf.train.AdagradOptimizer(self.lr)
        else:
            raise KeyError
        grads_vars = self.opt.compute_gradients(self.loss)
        capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
                             for g, v in grads_vars]
        self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)
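The optimizer block above follows the standard TF1 compute/clip/apply pattern. The same pattern in isolation (the clip threshold 5.0 is illustrative):

import tensorflow as tf

loss = tf.nn.l2_loss(tf.get_variable('v', [10]))
opt = tf.train.AdamOptimizer(1e-3)
grads_vars = opt.compute_gradients(loss)
# Clip each gradient element-wise before applying the update.
capped = [(tf.clip_by_value(g, -5.0, 5.0), v)
          for g, v in grads_vars if g is not None]
train_op = opt.apply_gradients(capped, global_step=tf.train.get_or_create_global_step())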