This article collects typical usage examples of the Python function tensorflow.python.training.moving_averages.assign_moving_average. If you have been wondering what assign_moving_average does and how to use it, the hand-picked examples below should help.
The following shows 15 code examples of the assign_moving_average function, sorted by popularity by default.
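All of the examples below build on the same primitive, so it helps to see it in isolation first. The following minimal TF 1.x graph-mode sketch (written for this article, not taken from the examples) shows the core behavior: assign_moving_average(variable, value, decay) assigns decay * variable + (1 - decay) * value back into variable and returns the updated value.

import tensorflow as tf
from tensorflow.python.training import moving_averages

# A non-trainable variable that accumulates the moving average.
ema_var = tf.get_variable("ema", shape=[], trainable=False,
                          initializer=tf.zeros_initializer())
new_value = tf.constant(1.0)
# zero_debias=False gives the plain exponential moving average.
update_op = moving_averages.assign_moving_average(
    ema_var, new_value, decay=0.9, zero_debias=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(update_op))  # 0.9 * 0.0 + 0.1 * 1.0 = 0.1
    print(sess.run(update_op))  # 0.9 * 0.1 + 0.1 * 1.0 = 0.19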
Example 1: _delay_updates

def _delay_updates():
    """Internal function that delays updating moving_vars if is_training."""
    update_moving_mean = moving_averages.assign_moving_average(
        moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, decay, zero_debias=False)
    return update_moving_mean, update_moving_variance
Example 2: bn

def bn(x, c):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]
    if c["use_bias"]:
        bias = _get_variable("bias", params_shape, initializer=tf.zeros_initializer)
        return x + bias
    axis = list(range(len(x_shape) - 1))
    beta = _get_variable("beta", params_shape, initializer=tf.zeros_initializer)
    gamma = _get_variable("gamma", params_shape, initializer=tf.ones_initializer)
    moving_mean = _get_variable("moving_mean", params_shape,
                                initializer=tf.zeros_initializer, trainable=False)
    moving_variance = _get_variable("moving_variance", params_shape,
                                    initializer=tf.ones_initializer, trainable=False)
    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    mean, variance = control_flow_ops.cond(
        c["is_training"], lambda: (mean, variance),
        lambda: (moving_mean, moving_variance))
    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    # x.set_shape(inputs.get_shape()) ??
    return x
Example 3: __call__
def __call__(self, input_layer, epsilon=1e-5, decay=0.9, name="batch_norm",
             in_dim=None, phase=Phase.train):
    shape = input_layer.shape
    shp = in_dim or shape[-1]
    with tf.variable_scope(name) as scope:
        self.mean = self.variable('mean', [shp], init=tf.constant_initializer(0.), train=False)
        self.variance = self.variable('variance', [shp], init=tf.constant_initializer(1.0), train=False)
        self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
        self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))
        if phase == Phase.train:
            mean, variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
            mean.set_shape((shp,))
            variance.set_shape((shp,))
            update_moving_mean = moving_averages.assign_moving_average(self.mean, mean, decay)
            update_moving_variance = moving_averages.assign_moving_average(self.variance, variance, decay)
            with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, mean, variance, self.beta, self.gamma, epsilon,
                    scale_after_normalization=True)
        else:
            normalized_x = tf.nn.batch_norm_with_global_normalization(
                input_layer.tensor, self.mean, self.variance,
                self.beta, self.gamma, epsilon,
                scale_after_normalization=True)
        return input_layer.with_tensor(normalized_x, parameters=self.vars)
Example 4: mean_var_with_update
def mean_var_with_update():
    mean, variance = tf.nn.moments(x, list(range(len(x.shape) - 1)), name='moments')
    with tf.control_dependencies([
            assign_moving_average(moving_mean, mean, decay),
            assign_moving_average(moving_var, variance, decay)]):
        return tf.identity(mean), tf.identity(variance)
Example 5: train_phase
def train_phase():
    mean, variance = tf.nn.moments(inputs, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, decay)
    with tf.control_dependencies([update_moving_mean, update_moving_variance]):
        return tf.identity(mean), tf.identity(variance)
Example 6: batch_norm
def batch_norm(x, decay=0.999, epsilon=1e-03, is_training=True,
               scope="scope"):
    x_shape = x.get_shape()
    num_inputs = x_shape[-1]
    reduce_dims = list(range(len(x_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_var("beta", [num_inputs],
                          initializer=tf.zeros_initializer())
        gamma = create_var("gamma", [num_inputs],
                           initializer=tf.ones_initializer())
        # for inference
        moving_mean = create_var("moving_mean", [num_inputs],
                                 initializer=tf.zeros_initializer(),
                                 trainable=False)
        moving_variance = create_var("moving_variance", [num_inputs],
                                     initializer=tf.ones_initializer(),
                                     trainable=False)
        if is_training:
            mean, variance = tf.nn.moments(x, axes=reduce_dims)
            update_move_mean = moving_averages.assign_moving_average(
                moving_mean, mean, decay=decay)
            update_move_variance = moving_averages.assign_moving_average(
                moving_variance, variance, decay=decay)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
        else:
            mean, variance = moving_mean, moving_variance
        return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
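Examples like this one only register the update ops in tf.GraphKeys.UPDATE_OPS; nothing runs them automatically. A common TF 1.x idiom (a sketch that assumes a loss tensor already exists in the graph) is to make the train op depend on them:

# Run all queued moving-average updates before each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)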
Example 7: bacthnorm

def bacthnorm(inputs, scope, epsilon=1e-05, momentum=0.99, is_training=True):
    inputs_shape = inputs.get_shape().as_list()  # static shape of the inputs, as a list
    params_shape = inputs_shape[-1:]  # size of the channel (last) dimension
    axis = list(range(len(inputs_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_variable("beta", params_shape,
                               initializer=tf.zeros_initializer())
        gamma = create_variable("gamma", params_shape,
                                initializer=tf.ones_initializer())
        # moving mean, not trainable; used for inference
        moving_mean = create_variable("moving_mean", params_shape,
                                      initializer=tf.zeros_initializer(), trainable=False)
        # moving variance, not trainable
        moving_variance = create_variable("moving_variance", params_shape,
                                          initializer=tf.ones_initializer(), trainable=False)
        if is_training:
            mean, variance = tf.nn.moments(inputs, axes=axis)  # batch mean and variance
            # moving-average updates, blending in the previous value:
            # x_t = a * x_{t-1} + (1 - a) * x_now
            update_move_mean = moving_averages.assign_moving_average(
                moving_mean, mean, decay=momentum)
            update_move_variance = moving_averages.assign_moving_average(
                moving_variance, variance, decay=momentum)
            tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_mean)
            tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_variance)
        else:
            mean, variance = moving_mean, moving_variance
        return tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)
Example 8: moving_average_update

def moving_average_update(variable, value, momentum):
    try:
        return moving_averages.assign_moving_average(
            variable, value, momentum, zero_debias=False)
    except TypeError:
        # Older TensorFlow releases do not accept the zero_debias keyword.
        return moving_averages.assign_moving_average(
            variable, value, momentum)
Example 9: update_mean_var
def update_mean_var():
    mean, variance = tf.nn.moments(x=incoming, axes=axis)
    update_moving_mean = moving_averages.assign_moving_average(
        variable=moving_mean, value=mean, decay=self.decay, zero_debias=False)
    update_moving_variance = moving_averages.assign_moving_average(
        variable=moving_variance, value=variance, decay=self.decay, zero_debias=False)
    with tf.control_dependencies([update_moving_mean, update_moving_variance]):
        return tf.identity(mean), tf.identity(variance)
Example 10: __init__

def __init__(self, value,
             decay,
             weight,
             truediv=True,
             collections=None,
             name=None):
    """Compute the weighted moving average of `value`.

    Conceptually, the weighted moving average is
    `moving_average(value * weight) / moving_average(weight)`,
    where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`.
    Internally, this Op keeps moving-average variables of both `value * weight`
    and `weight`.

    Args:
      value: A numeric `Tensor`.
      decay: A float `Tensor` or float value. The moving average decay.
      weight: `Tensor` that keeps the current value of a weight. Its shape
        must be able to multiply `value`.
      truediv: Boolean; if `True`, dividing by `moving_average(weight)` uses
        floating-point division. If `False`, use the division implied by dtypes.
      collections: List of graph collection keys to add the internal variables
        `value * weight` and `weight` to. Defaults to `[GraphKeys.VARIABLES]`.
      name: Optional name of the returned operation.
        Defaults to "WeightedMovingAvg".

    Returns:
      An Operation that updates and returns the weighted moving average.
    """
    # Unlike assign_moving_average, the weighted moving average doesn't modify
    # user-visible variables. It is the ratio of two internal variables, which
    # are moving averages of the updates. Thus, the signature of this function
    # is quite different from assign_moving_average.
    if collections is None:
        collections = [ops.GraphKeys.VARIABLES]
    with variable_scope.variable_op_scope(
            [value, weight, decay], name, "WeightedMovingAvg") as scope:
        value_x_weight_var = variable_scope.get_variable(
            "value_x_weight",
            initializer=init_ops.zeros_initializer(value.get_shape(),
                                                   dtype=value.dtype),
            trainable=False,
            collections=collections)
        weight_var = variable_scope.get_variable(
            "weight",
            initializer=init_ops.zeros_initializer(weight.get_shape(),
                                                   dtype=weight.dtype),
            trainable=False,
            collections=collections)
        numerator = assign_moving_average(value_x_weight_var, value * weight, decay)
        denominator = assign_moving_average(weight_var, weight, decay)
        if truediv:
            div = math_ops.truediv
        else:
            div = math_ops.div
        self.average_with_update = div(numerator, denominator + 1e-8, name=scope.name)
        self.average = div(value_x_weight_var, weight_var)
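To see why the ratio of the two internal averages tracks a weighted mean, a small numeric check helps (hypothetical values chosen for this article; both internal variables start at zero, as in the code above). The 1e-8 added to the denominator guards against division by zero while the weight average is still near its zero initialization.

# With decay d, a single update from zero gives
#   value_x_weight = (1 - d) * value * weight
#   weight_ema     = (1 - d) * weight
# so their ratio is exactly value, independent of d and weight.
d, value, weight = 0.9, 3.0, 0.5
value_x_weight = (1 - d) * value * weight  # 0.15
weight_ema = (1 - d) * weight              # 0.05
print(value_x_weight / weight_ema)         # 3.0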
Example 11: _force_updates

def _force_updates():
    """Internal function that forces updates of moving_vars if is_training."""
    update_moving_mean = moving_averages.assign_moving_average(
        moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, decay, zero_debias=False)
    with ops.control_dependencies([update_moving_mean,
                                   update_moving_variance]):
        return array_ops.identity(mean), array_ops.identity(variance)
Example 12: _batch_norm_without_layers
def _batch_norm_without_layers(self, input_layer, decay, use_scale,
                               epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    beta = self.get_variable(
        'beta', [num_channels],
        tf.float32,
        tf.float32,
        initializer=tf.zeros_initializer())
    if use_scale:
        gamma = self.get_variable(
            'gamma', [num_channels],
            tf.float32,
            tf.float32,
            initializer=tf.ones_initializer())
    else:
        gamma = tf.constant(1.0, tf.float32, [num_channels])
    moving_mean = tf.get_variable(
        'moving_mean', [num_channels],
        tf.float32,
        initializer=tf.zeros_initializer(),
        trainable=False)
    moving_variance = tf.get_variable(
        'moving_variance', [num_channels],
        tf.float32,
        initializer=tf.ones_initializer(),
        trainable=False)
    if self.phase_train:
        bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
            input_layer,
            gamma,
            beta,
            epsilon=epsilon,
            data_format=self.data_format,
            is_training=True)
        mean_update = moving_averages.assign_moving_average(
            moving_mean, batch_mean, decay=decay, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(
            moving_variance,
            batch_variance,
            decay=decay,
            zero_debias=False)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
        bn, _, _ = tf.nn.fused_batch_norm(
            input_layer,
            gamma,
            beta,
            mean=moving_mean,
            variance=moving_variance,
            epsilon=epsilon,
            data_format=self.data_format,
            is_training=False)
    return bn
Example 13: _update_mean_var
def _update_mean_var():
    """Internal function that updates mean and variance during training."""
    axis = [0, 1, 2] if convnet else [0]
    mean, var = nn.moments(tensor_in, axis)
    update_moving_mean = moving_averages.assign_moving_average(
        moving_mean, mean, decay)
    update_moving_var = moving_averages.assign_moving_average(
        moving_var, var, decay)
    with ops.control_dependencies([update_moving_mean, update_moving_var]):
        return array_ops_.identity(mean), array_ops_.identity(var)
Example 14: _update_renorm_variable

def _update_renorm_variable(var, weight, value):
    """Updates a moving average and weight, returns the unbiased value."""
    # Update the variables without zero debiasing. The debiasing will be
    # accomplished by dividing the exponential moving average by the weight.
    # For example, after a single update, the moving average would be
    # (1 - decay) * value, and the weight will be 1 - decay, with their ratio
    # giving the value.
    new_var = moving_averages.assign_moving_average(
        var, value, decay, zero_debias=False)
    new_weight = moving_averages.assign_moving_average(
        weight, 1., decay, zero_debias=False)
    return new_var / new_weight
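The comment above argues the single-update case; a hedged numeric check (hypothetical constant value, for this article only) shows the correction also holds on later steps:

# Run two EMA updates from zero with a constant value v; the weight EMA of
# 1.0 accumulates exactly the same bias factor, so the ratio recovers v.
d, v = 0.99, 5.0
ema, w = 0.0, 0.0
for _ in range(2):
    ema = d * ema + (1 - d) * v
    w = d * w + (1 - d) * 1.0
print(ema / w)  # ≈ 5.0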
Example 15: _batch_norm

def _batch_norm(self, name, x):
    with tf.variable_scope(name):
        # size of the channel dimension of the input
        params_shape = [x.get_shape()[-1]]
        # offset
        beta = tf.get_variable('beta',
                               params_shape,
                               tf.float32,
                               initializer=tf.constant_initializer(0.0, tf.float32))
        # scale
        gamma = tf.get_variable('gamma',
                                params_shape,
                                tf.float32,
                                initializer=tf.constant_initializer(1.0, tf.float32))
        if self.mode == 'train':
            # compute per-channel mean and variance
            mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')
            # create (or reuse) the batch mean/variance used at test time
            moving_mean = tf.get_variable('moving_mean',
                                          params_shape, tf.float32,
                                          initializer=tf.constant_initializer(0.0, tf.float32),
                                          trainable=False)
            moving_variance = tf.get_variable('moving_variance',
                                              params_shape, tf.float32,
                                              initializer=tf.constant_initializer(1.0, tf.float32),
                                              trainable=False)
            # queue moving-average update ops for the batch statistics:
            # moving_mean = moving_mean * decay + mean * (1 - decay)
            # moving_variance = moving_variance * decay + variance * (1 - decay)
            self._extra_train_ops.append(moving_averages.assign_moving_average(
                moving_mean, mean, 0.9))
            self._extra_train_ops.append(moving_averages.assign_moving_average(
                moving_variance, variance, 0.9))
        else:
            # fetch the batch mean/variance accumulated during training
            mean = tf.get_variable('moving_mean',
                                   params_shape, tf.float32,
                                   initializer=tf.constant_initializer(0.0, tf.float32),
                                   trainable=False)
            variance = tf.get_variable('moving_variance',
                                       params_shape, tf.float32,
                                       initializer=tf.constant_initializer(1.0, tf.float32),
                                       trainable=False)
            # add histogram summaries
            tf.summary.histogram(mean.op.name, mean)
            tf.summary.histogram(variance.op.name, variance)
        # batch norm: gamma * (x - mean) / sqrt(variance + epsilon) + beta
        y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
        y.set_shape(x.get_shape())
        return y
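Note that Example 15 stashes its update ops in self._extra_train_ops rather than a graph collection, so the model must run them itself. A sketch of the usual pattern (apply_op is a hypothetical name for the optimizer's apply_gradients op; not shown in the example):

# Run the moving-average updates together with the parameter update.
train_op = tf.group(*([apply_op] + self._extra_train_ops))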