This article collects typical usage examples of the tensorflow.Variable method in Python. If you have been wondering how exactly tensorflow.Variable is used, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples of the tensorflow module that the method belongs to.
The following presents 15 code examples of tensorflow.Variable, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
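Before the examples, a minimal sketch of what tf.Variable itself provides may be helpful. It assumes TensorFlow 1.x graph mode (the API style used throughout the examples below); all names here are illustrative only:

import tensorflow as tf

# A trainable weight and a non-trainable boolean flag of the kind the
# examples below use for is_training / phase_train.
weight = tf.Variable(tf.zeros([1]), name='weight')
is_training = tf.Variable(True, trainable=False, dtype=tf.bool, name='is_training')

increment = weight.assign_add(tf.ones([1]))  # in-place update op
set_eval = is_training.assign(False)         # flip the phase flag

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # variables must be initialized first
    sess.run(increment)                          # weight is now [1.0]
    sess.run(set_eval)                           # is_training is now False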
Example 1: batch_norm_template
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_template(inputs, is_training, scope, moments_dims_unused, bn_decay, data_format='NHWC'):
    """ Batch normalization on convolutional maps and beyond...
    Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
    Args:
        inputs:       Tensor, k-D input ... x C could be BC or BHWC or BDHWC
        is_training:  boolean tf.Variable, true indicates training phase
        scope:        string, variable scope
        moments_dims: a list of ints, indicating dimensions for moments calculation
        bn_decay:     float or float tensor variable, controlling moving average weight
        data_format:  'NHWC' or 'NCHW'
    Return:
        normed:       batch-normalized maps
    """
    bn_decay = bn_decay if bn_decay is not None else 0.9
    return tf.contrib.layers.batch_norm(inputs,
                                        center=True, scale=True,
                                        is_training=is_training, decay=bn_decay,
                                        updates_collections=None,
                                        scope=scope,
                                        data_format=data_format)
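A hedged usage sketch of the template above (the placeholder shape and the scope name are assumptions for illustration, not part of the original code). The is_training flag is passed in as a boolean tf.Variable so the same graph can serve both the training and the evaluation phase:

import tensorflow as tf

features = tf.placeholder(tf.float32, shape=(32, 16, 16, 64))  # BHWC input
is_training = tf.Variable(True, trainable=False, dtype=tf.bool)

normed = batch_norm_template(features, is_training, scope='bn_demo',
                             moments_dims_unused=[0, 1, 2], bn_decay=0.9)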
Example 2: batch_norm_for_fc
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_fc(inputs, is_training, bn_decay, scope, is_dist=False):
    """ Batch normalization on FC data.
    Args:
        inputs:      Tensor, 2D BxC input
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
        is_dist:     true indicating distributed training scheme
    Return:
        normed:      batch-normalized maps
    """
    if is_dist:
        return batch_norm_dist_template(inputs, is_training, scope, [0,], bn_decay)
    else:
        return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)
Example 3: batch_norm_for_conv1d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, is_dist=False):
    """ Batch normalization on 1D convolutional maps.
    Args:
        inputs:      Tensor, 3D BLC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
        is_dist:     true indicating distributed training scheme
    Return:
        normed:      batch-normalized maps
    """
    if is_dist:
        return batch_norm_dist_template(inputs, is_training, scope, [0,1], bn_decay)
    else:
        return batch_norm_template(inputs, is_training, scope, [0,1], bn_decay)
Example 4: batch_norm_for_conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, is_dist=False):
    """ Batch normalization on 2D convolutional maps.
    Args:
        inputs:      Tensor, 4D BHWC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
        is_dist:     true indicating distributed training scheme
    Return:
        normed:      batch-normalized maps
    """
    if is_dist:
        return batch_norm_dist_template(inputs, is_training, scope, [0,1,2], bn_decay)
    else:
        return batch_norm_template(inputs, is_training, scope, [0,1,2], bn_decay)
Example 5: batch_norm_for_conv3d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope, is_dist=False):
    """ Batch normalization on 3D convolutional maps.
    Args:
        inputs:      Tensor, 5D BDHWC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
        is_dist:     true indicating distributed training scheme
    Return:
        normed:      batch-normalized maps
    """
    if is_dist:
        return batch_norm_dist_template(inputs, is_training, scope, [0,1,2,3], bn_decay)
    else:
        return batch_norm_template(inputs, is_training, scope, [0,1,2,3], bn_decay)
Example 6: batch_norm_template_multiGPU
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_template_multiGPU(inputs, is_training, scope, moments_dims_unused, bn_decay, data_format='NHWC'):
    """ Batch normalization on convolutional maps and beyond...
    Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
    Args:
        inputs:       Tensor, k-D input ... x C could be BC or BHWC or BDHWC
        is_training:  boolean tf.Variable, true indicates training phase
        scope:        string, variable scope
        moments_dims: a list of ints, indicating dimensions for moments calculation
        bn_decay:     float or float tensor variable, controlling moving average weight
        data_format:  'NHWC' or 'NCHW'
    Return:
        normed:       batch-normalized maps
    """
    bn_decay = bn_decay if bn_decay is not None else 0.9
    return tf.contrib.layers.batch_norm(inputs,
                                        center=True, scale=True,
                                        is_training=is_training, decay=bn_decay,
                                        updates_collections=None,
                                        scope=scope,
                                        data_format=data_format)
Example 7: batch_norm_template
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_template(inputs, is_training, scope, moments_dims_unused, bn_decay, data_format='NHWC'):
    """ Batch normalization on convolutional maps and beyond...
    Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
    Args:
        inputs:       Tensor, k-D input ... x C could be BC or BHWC or BDHWC
        is_training:  boolean tf.Variable, true indicates training phase
        scope:        string, variable scope
        moments_dims: a list of ints, indicating dimensions for moments calculation
        bn_decay:     float or float tensor variable, controlling moving average weight
        data_format:  'NHWC' or 'NCHW'
    Return:
        normed:       batch-normalized maps
    """
    bn_decay = bn_decay if bn_decay is not None else 0.9
    return tf.contrib.layers.batch_norm(inputs,
                                        center=True, scale=True,
                                        is_training=is_training, decay=bn_decay,
                                        updates_collections=None,
                                        scope=scope, reuse=False,
                                        data_format=data_format)
Example 8: batch_normalization
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_normalization(x, out_shape, phase_train):
    """Batch normalization on convolutional maps.
    Args:
        x:           Tensor, 4D Batch-Height-Width-Depth (BHWD) input maps
        out_shape:   integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
    Return:
        normed:      batch-normalized maps
    """
    with tf.variable_scope('batch_norm'):
        beta = tf.Variable(tf.constant(0.0, shape=[out_shape]), name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[out_shape]), name='gamma', trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.5)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
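A hedged usage sketch for the function above, showing how phase_train can be a boolean tf.Variable that is flipped between training and evaluation (the input shape and names are assumptions for illustration only):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 28, 28, 32))
phase_train = tf.Variable(True, trainable=False, dtype=tf.bool, name='phase_train')
normed = batch_normalization(x, out_shape=32, phase_train=phase_train)

set_train = phase_train.assign(True)   # use batch statistics and update the EMA
set_eval = phase_train.assign(False)   # use the accumulated moving averages

# During training: sess.run(set_train) once, then evaluate `normed` on training batches.
# Before validation: sess.run(set_eval), then evaluate `normed` on held-out batches.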
Example 9: batch_norm_template
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
    """ Batch normalization on convolutional maps and beyond...
    Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
    Args:
        inputs:       Tensor, k-D input ... x C could be BC or BHWC or BDHWC
        is_training:  boolean tf.Variable, true indicates training phase
        scope:        string, variable scope
        moments_dims: a list of ints, indicating dimensions for moments calculation
        bn_decay:     float or float tensor variable, controlling moving average weight
    Return:
        normed:       batch-normalized maps
    """
    with tf.variable_scope(scope) as sc:
        num_channels = inputs.get_shape()[-1].value
        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[num_channels]),
                               trainable=True)
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[num_channels]),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
        decay = bn_decay if bn_decay is not None else 0.9
        ema = tf.train.ExponentialMovingAverage(decay=decay)
        # Operator that maintains moving averages of variables.
        ema_apply_op = tf.cond(is_training,
                               lambda: ema.apply([batch_mean, batch_var]),
                               lambda: tf.no_op())

        # Update moving average and return current batch's avg and var.
        def mean_var_with_update():
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # ema.average returns the Variable holding the average of var.
        mean, var = tf.cond(is_training,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
    return normed
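The core trick in this template is the interplay between tf.cond and tf.train.ExponentialMovingAverage: the update op is built inside the cond, so it only runs when the boolean tf.Variable is_training evaluates to True. A stripped-down sketch of just that pattern (all names here are illustrative, not from the original code):

import tensorflow as tf

is_training = tf.Variable(True, trainable=False, dtype=tf.bool)
batch_mean = tf.reduce_mean(tf.random_normal([8]))  # stands in for a batch statistic
ema = tf.train.ExponentialMovingAverage(decay=0.9)

# tf.cond builds both branches, but only the chosen one runs per step:
# during training the shadow average is updated, otherwise nothing happens.
ema_apply_op = tf.cond(is_training,
                       lambda: ema.apply([batch_mean]),
                       lambda: tf.no_op())

# Training reads the batch statistic (after forcing the update);
# inference reads the accumulated moving average instead.
def mean_with_update():
    with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean)

mean = tf.cond(is_training, mean_with_update, lambda: ema.average(batch_mean))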
Example 10: batch_norm_for_fc
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
    """ Batch normalization on FC data.
    Args:
        inputs:      Tensor, 2D BxC input
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)
Example 11: batch_norm_for_conv1d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope):
    """ Batch normalization on 1D convolutional maps.
    Args:
        inputs:      Tensor, 3D BLC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    return batch_norm_template(inputs, is_training, scope, [0,1], bn_decay)
Example 12: batch_norm_for_conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):
    """ Batch normalization on 2D convolutional maps.
    Args:
        inputs:      Tensor, 4D BHWC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    return batch_norm_template(inputs, is_training, scope, [0,1,2], bn_decay)
Example 13: batch_norm_for_conv3d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
    """ Batch normalization on 3D convolutional maps.
    Args:
        inputs:      Tensor, 5D BDHWC input maps
        is_training: boolean tf.Variable, true indicates training phase
        bn_decay:    float or float tensor variable, controlling moving average weight
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    return batch_norm_template(inputs, is_training, scope, [0,1,2,3], bn_decay)
Example 14: batch_norm
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def batch_norm(self, x, n_out, phase_train):
    """
    Batch normalization on convolutional maps.
    Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
    Args:
        x:           Tensor, 4D BHWD input maps
        n_out:       integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
    Return:
        normed:      batch-normalized maps
    """
    with tf.variable_scope('bn'):
        gamma = self.get_bias(n_out, 1.0, 'gamma')
        beta = self.get_bias(n_out, 0.0, 'beta')
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.999)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        return tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
Example 15: full_batch_norm
# Required module: import tensorflow [as alias]
# Or: from tensorflow import Variable [as alias]
def full_batch_norm(self, x, n_out, phase_train=tf.constant(False, dtype=tf.bool), scope='bn'):
    """
    Batch normalization on convolutional maps.
    Args:
        x:           Tensor, 4D BHWD input maps
        n_out:       integer, depth of input maps
        phase_train: boolean tf.Variable, true indicates training phase
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    with tf.variable_scope(scope):
        beta = tf.Variable(tf.constant(0.0, shape=[n_out]),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),
                            name='gamma', trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.5)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed