This article collects typical usage examples of the tensorflow.assign_add function in Python. If you are wondering what assign_add does, how to call it, or what working examples look like, the curated code samples below should help.
The following presents 15 code examples of assign_add, sorted by popularity by default.
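Before the examples, here is a minimal self-contained sketch of what tf.assign_add does, written against the TF 1.x graph-mode API (not taken from any of the examples below): it adds a value to a variable in place and returns a tensor that reflects the updated value.

import tensorflow as tf  # TF 1.x graph-mode API assumed

counter = tf.Variable(0, name="counter")
# assign_add returns a tensor holding the variable's value after the update.
increment = tf.assign_add(counter, 1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(increment))  # prints 1, 2, 3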
Example 1: loop_body
def loop_body(i):
    asn1 = tf.assign_add(var_a, 1, name="a_add")
    with tf.control_dependencies([asn1]):
        asn2 = tf.assign_add(var_b, var_a, name="b_add")
    with tf.control_dependencies([asn2]):
        ni = tf.add(i, 1, name="i_add")
    return ni
Example 2: _apply_stats
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
    updateOps = []
    # obtain the stats var list
    for stats_var in statsUpdates:
        stats_new = statsUpdates[stats_var]
        if accumulate:
            # simple superbatch averaging
            update_op = tf.assign_add(
                stats_var, accumulateCoeff * stats_new, use_locking=True)
        else:
            # exponential running averaging
            update_op = tf.assign(
                stats_var, stats_var * self._stats_decay, use_locking=True)
            update_op = tf.assign_add(
                update_op, (1. - self._stats_decay) * stats_new,
                use_locking=True)
        updateOps.append(update_op)

    with tf.control_dependencies(updateOps):
        stats_step_op = tf.assign_add(self.stats_step, 1)

    if KFAC_DEBUG:
        stats_step_op = (tf.Print(stats_step_op,
                                  [tf.convert_to_tensor('step:'),
                                   self.global_step,
                                   tf.convert_to_tensor('fac step:'),
                                   self.factor_step,
                                   tf.convert_to_tensor('sgd step:'),
                                   self.sgd_step,
                                   tf.convert_to_tensor('Accum:'),
                                   tf.convert_to_tensor(accumulate),
                                   tf.convert_to_tensor('Accum coeff:'),
                                   tf.convert_to_tensor(accumulateCoeff),
                                   tf.convert_to_tensor('stat step:'),
                                   self.stats_step, updateOps[0],
                                   updateOps[1]]))

    return [stats_step_op, ]
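The non-accumulating branch above chains tf.assign and tf.assign_add: applying assign_add to the output of assign both sequences the two updates and yields the exponentially decayed statistic. A minimal sketch of that idiom in isolation (the variable, decay value, and new_value below are illustrative, not from the KFAC source):

import tensorflow as tf

decay = 0.95
stat = tf.Variable(0.0)
new_value = tf.constant(1.0)

# First scale the old statistic, then add the new contribution; applying
# assign_add to the assign's output guarantees the two run in order.
decayed = tf.assign(stat, stat * decay)
ema_update = tf.assign_add(decayed, (1. - decay) * new_value)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(ema_update))  # 0.0 * 0.95 + 0.05 * 1.0 = 0.05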
Example 3: test_train_skip_train_if_max_step_already_saved
def test_train_skip_train_if_max_step_already_saved(self):
    with tf.Graph().as_default() as g, self.test_session(g):
        with tf.control_dependencies(self._build_inference_graph()):
            train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
        learn.graph_actions._monitored_train(  # pylint: disable=protected-access
            g,
            output_dir=self._output_dir,
            train_op=train_op,
            loss_op=tf.constant(2.0),
            max_steps=10)
        step = checkpoints.load_variable(
            self._output_dir, tf.contrib.framework.get_global_step().name)
        self.assertEqual(10, step)

    # The second, identical run should skip training entirely: the saved
    # checkpoint already holds global_step == max_steps.
    with tf.Graph().as_default() as g, self.test_session(g):
        with tf.control_dependencies(self._build_inference_graph()):
            train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
        learn.graph_actions._monitored_train(  # pylint: disable=protected-access
            g,
            output_dir=self._output_dir,
            train_op=train_op,
            loss_op=tf.constant(2.0),
            max_steps=10)
        step = checkpoints.load_variable(
            self._output_dir, tf.contrib.framework.get_global_step().name)
        self.assertEqual(10, step)
Example 4: _eval_metric
def _eval_metric(input_, topk, correct_predictions, examples, phase):
    """Creates the standard tracking variables if in test and returns accuracy."""
    my_parameters = {}
    if phase in (Phase.test, Phase.infer):
        dtype = tf.float32
        # Create the variables using tf.Variable because we don't want to share.
        count = tf.Variable(
            tf.constant(0, dtype=dtype),
            name="count_%d" % topk,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        correct = tf.Variable(
            tf.constant(0, dtype=dtype),
            name="correct_%d" % topk,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        my_parameters["count"] = count
        my_parameters["correct"] = correct
        with input_.g.device(count.device):
            examples = tf.assign_add(count, examples)
        with input_.g.device(correct.device):
            correct_predictions = tf.assign_add(correct, correct_predictions)
    return correct_predictions, examples, my_parameters
Example 5: __init__
def __init__(self, epsilon=1e-2, shape=()):
    self._sum = tf.get_variable(
        dtype=tf.float64,
        shape=shape,
        initializer=tf.constant_initializer(0.0),
        name="runningsum", trainable=False)
    self._sumsq = tf.get_variable(
        dtype=tf.float64,
        shape=shape,
        initializer=tf.constant_initializer(epsilon),
        name="runningsumsq", trainable=False)
    self._count = tf.get_variable(
        dtype=tf.float64,
        shape=(),
        initializer=tf.constant_initializer(epsilon),
        name="count", trainable=False)
    self.shape = shape

    self.mean = tf.to_float(self._sum / self._count)
    self.std = tf.sqrt(tf.maximum(
        tf.to_float(self._sumsq / self._count) - tf.square(self.mean), 1e-2))

    newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
    newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
    newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
    self.incfiltparams = U.function(
        [newsum, newsumsq, newcount], [],
        updates=[tf.assign_add(self._sum, newsum),
                 tf.assign_add(self._sumsq, newsumsq),
                 tf.assign_add(self._count, newcount)])
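This constructor keeps a running sum, sum of squares, and count, from which mean = sum/count and std = sqrt(sumsq/count - mean^2) follow; U.function appears to be a helper from the surrounding codebase that compiles the placeholders and updates into a callable. A rough sketch of the same accumulation with plain placeholders and a session (shapes and names here are illustrative):

import numpy as np
import tensorflow as tf

total = tf.Variable(np.zeros(2), dtype=tf.float64)
count = tf.Variable(1e-2, dtype=tf.float64)  # small epsilon avoids divide-by-zero

new_sum = tf.placeholder(tf.float64, shape=(2,))
new_count = tf.placeholder(tf.float64, shape=())
# Accumulate per-batch sums and counts into the running totals.
inc = tf.group(tf.assign_add(total, new_sum),
               tf.assign_add(count, new_count))
mean = tf.to_float(total / count)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.array([[1., 2.], [3., 4.]])
    sess.run(inc, {new_sum: batch.sum(axis=0), new_count: len(batch)})
    print(sess.run(mean))  # approximately [2. 3.]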
Example 6: accumulate_privacy_spending
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use
    Gaussian noise on a randomly sampled batch, so we get better composition:
    1. the per-batch privacy is computed using the privacy amplification via
    sampling bound; 2. the composition is done using the composition with
    Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.

    Returns:
      a TensorFlow operation for updating the privacy spending.
    """
    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
        amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                          self._total_examples)
        # Use privacy amplification via sampling bound.
        # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
        # TODO(liqzhang) Add a link to a document with formal statement
        # and proof.
        amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
            tf.exp(eps) - 1.0)), [1])
        amortize_delta = tf.reshape(amortize_ratio * delta, [1])
        return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                        tf.square(amortize_eps)),
                          tf.assign_add(self._delta_sum, amortize_delta)])
Example 7: test_summary_saver
def test_summary_saver(self):
    with tf.Graph().as_default() as g, tf.Session() as sess:
        log_dir = 'log/dir'
        summary_writer = testing.FakeSummaryWriter(log_dir, g)
        var = tf.Variable(0.0)
        tensor = tf.assign_add(var, 1.0)
        summary_op = tf.scalar_summary('my_summary', tensor)
        global_step = tf.contrib.framework.get_or_create_global_step()
        train_op = tf.assign_add(global_step, 1)
        hook = tf.train.SummarySaverHook(
            summary_op=summary_op, save_steps=8,
            summary_writer=summary_writer)
        hook.begin()
        sess.run(tf.initialize_all_variables())
        mon_sess = monitored_session._HookedSession(sess, [hook])
        for _ in range(30):
            mon_sess.run(train_op)
        hook.end(sess)
        summary_writer.assert_summaries(
            test_case=self,
            expected_logdir=log_dir,
            expected_graph=g,
            expected_summaries={
                1: {'my_summary': 1.0},
                9: {'my_summary': 2.0},
                17: {'my_summary': 3.0},
                25: {'my_summary': 4.0},
            })
Example 8: apply
def apply(self, var_list):
    """Applies the running average to a list of variables.

    Creates shadow variables and update ops, and returns the list of
    update ops for all the averages."""
    update_ops = []
    with tf.variable_scope('running_average'):
        for var in var_list:
            # add a shadow var that gets initialized to the same value
            # and a count to keep track of how many times it's been updated
            name = var.op.name
            count = tf.get_variable(
                name + '_count', dtype=tf.float32,
                initializer=tf.constant_initializer(0.0),
                shape=[], trainable=False)
            shadow = tf.get_variable(
                name + '_shadow', dtype=var.dtype,
                initializer=var.initialized_value(),
                collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                             tf.GraphKeys.VARIABLES],
                trainable=False)
            # now make the update ops
            # increase the count
            count_update = tf.assign_add(count, 1.0)
            with tf.control_dependencies([count_update]):
                difference = (var - shadow) / count
                update = tf.assign_add(shadow, difference)
                update_ops.append(update)
            self.shadow_vars[var] = (shadow, count)
    return update_ops
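The update shadow += (var - shadow) / count is the standard incremental-mean recurrence: the first update overwrites the shadow with the observed value, so regardless of its initial value, after n updates the shadow holds the arithmetic mean of the n observed values. A quick plain-Python check of the recurrence:

def incremental_mean(values):
    shadow, count = 0.0, 0.0
    for v in values:
        count += 1.0                     # mirrors count_update
        shadow += (v - shadow) / count   # mirrors the shadow assign_add
    return shadow

assert incremental_mean([1.0, 2.0, 3.0, 4.0]) == 2.5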
Example 9: evaluate_precision_recall
def evaluate_precision_recall(
        input_layer, labels, threshold=0.5, per_example_weights=None,
        name=PROVIDED, phase=Phase.train):
    """Computes the precision and recall of the prediction vs the labels.

    Args:
      input_layer: A Pretty Tensor object.
      labels: The target labels to learn as a float tensor.
      threshold: The threshold to use to decide if the prediction is true.
      per_example_weights: A Tensor with a weight per example.
      name: An optional name.
      phase: The phase of this model; non-training phases compute a total
        across all examples.

    Returns:
      Precision and Recall.
    """
    _ = name  # Eliminate warning, name used for namescoping by PT.
    selected, sum_retrieved, sum_relevant = _compute_precision_recall(
        input_layer, labels, threshold, per_example_weights)

    if phase != Phase.train:
        dtype = tf.float32
        # Create the variables in all cases so that the load logic is easier.
        relevant_count = tf.get_variable(
            "relevant_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        retrieved_count = tf.get_variable(
            "retrieved_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        selected_count = tf.get_variable(
            "selected_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        with input_layer.g.device(selected_count.device):
            selected = tf.assign_add(selected_count, selected)
        with input_layer.g.device(retrieved_count.device):
            sum_retrieved = tf.assign_add(retrieved_count, sum_retrieved)
        with input_layer.g.device(relevant_count.device):
            sum_relevant = tf.assign_add(relevant_count, sum_relevant)

    return (
        tf.select(tf.equal(sum_retrieved, 0), tf.zeros_like(selected),
                  selected / sum_retrieved),
        tf.select(tf.equal(sum_relevant, 0), tf.zeros_like(selected),
                  selected / sum_relevant),
    )
Example 10: running_mean
def running_mean(cost, tag_name, batch_size=1):
    with tf.name_scope("running_mean_" + tag_name):
        with tf.variable_scope(tag_name):
            cost_sum = tf.get_variable(
                "cost_sum",
                initializer=tf.zeros_initializer,
                dtype=tf.float64,
                shape=(),
                collections=[tf.GraphKeys.LOCAL_VARIABLES],
                trainable=False)
            batches = tf.get_variable(
                "cost_num_batches",
                initializer=tf.zeros_initializer,
                dtype=tf.int32,
                shape=(),
                collections=[tf.GraphKeys.LOCAL_VARIABLES],
                trainable=False)

        cost_add = tf.assign_add(cost_sum, tf.cast(cost, dtype=tf.float64))
        batches_add = tf.assign_add(batches, batch_size)
        update_cost_mean = tf.group(cost_add, batches_add)

        reset_batches = tf.assign(batches, 0)
        reset_cost_sum = tf.assign(cost_sum, 0.0)
        reset_cost_mean = tf.group(reset_batches, reset_cost_sum)

        mean_cost = tf.divide(
            cost_sum,
            tf.cast(batches, dtype=tf.float64))
        train_loss_summary = tf.summary.scalar(tag_name, mean_cost)

    return reset_cost_mean, update_cost_mean, train_loss_summary
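A possible calling pattern for the helper above (the session loop is illustrative, not from the source): run update_cost_mean once per batch, fetch the summary when a reading is wanted, then run reset_cost_mean to start a fresh averaging window. Note the counters are created in LOCAL_VARIABLES, so the local initializer is the one that covers them.

import tensorflow as tf

cost = tf.placeholder(tf.float32, shape=())
reset_op, update_op, summary_op = running_mean(cost, "train_cost")

with tf.Session() as sess:
    # The counters live in LOCAL_VARIABLES, so use the local initializer.
    sess.run(tf.local_variables_initializer())
    for batch_cost in [2.0, 4.0]:
        sess.run(update_op, {cost: batch_cost})
    summary = sess.run(summary_op)  # serialized scalar summary of the mean (3.0 here)
    sess.run(reset_op)              # start a fresh averaging window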
Example 11: loss
def loss(loss_value):
    """Calculates aggregated mean loss."""
    total_loss = tf.Variable(0.0, False)
    loss_count = tf.Variable(0, False)
    total_loss_update = tf.assign_add(total_loss, loss_value)
    loss_count_update = tf.assign_add(loss_count, 1)
    loss_op = total_loss / tf.cast(loss_count, tf.float32)
    return [total_loss_update, loss_count_update], loss_op
Example 12: test_capture_variable
def test_capture_variable(self):
    monitor = learn.monitors.CaptureVariable(
        var_name="my_assign_add:0", every_n=8, first_n=2)
    with tf.Graph().as_default() as g, self.test_session(g):
        var = tf.Variable(0.0, name="my_var")
        var.initializer.run()
        tf.assign_add(var, 1.0, name="my_assign_add")
        self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
        self.assertEqual(
            {0: 1.0, 1: 2.0, 2: 3.0, 10: 4.0, 18: 5.0, 26: 6.0, 29: 7.0},
            monitor.values)
Example 13: train_one_epoch
def train_one_epoch(generator, discriminator,
                    generator_optimizer, discriminator_optimizer,
                    dataset, log_interval, noise_dim):
    """Trains `generator` and `discriminator` models on `dataset`.

    Args:
      generator: Generator model.
      discriminator: Discriminator model.
      generator_optimizer: Optimizer to use for generator.
      discriminator_optimizer: Optimizer to use for discriminator.
      dataset: Dataset of images to train on.
      log_interval: How many global steps to wait between logging and
        collecting summaries.
      noise_dim: Dimension of noise vector to use.
    """
    total_generator_loss = 0.0
    total_discriminator_loss = 0.0
    for (batch_index, images) in enumerate(tfe.Iterator(dataset)):
        with tf.device('/cpu:0'):
            tf.assign_add(tf.train.get_global_step(), 1)

        with tf.contrib.summary.record_summaries_every_n_global_steps(
                log_interval):
            current_batch_size = images.shape[0]
            noise = tf.random_uniform(shape=[current_batch_size, noise_dim],
                                      minval=-1., maxval=1., seed=batch_index)

            with tfe.GradientTape(persistent=True) as g:
                generated_images = generator(noise)
                tf.contrib.summary.image(
                    'generated_images',
                    tf.reshape(generated_images, [-1, 28, 28, 1]),
                    max_images=10)
                discriminator_gen_outputs = discriminator(generated_images)
                discriminator_real_outputs = discriminator(images)
                discriminator_loss_val = discriminator_loss(
                    discriminator_real_outputs, discriminator_gen_outputs)
                total_discriminator_loss += discriminator_loss_val
                generator_loss_val = generator_loss(discriminator_gen_outputs)
                total_generator_loss += generator_loss_val

            generator_grad = g.gradient(generator_loss_val,
                                        generator.variables)
            discriminator_grad = g.gradient(discriminator_loss_val,
                                            discriminator.variables)

            with tf.variable_scope('generator'):
                generator_optimizer.apply_gradients(
                    zip(generator_grad, generator.variables))
            with tf.variable_scope('discriminator'):
                discriminator_optimizer.apply_gradients(
                    zip(discriminator_grad, discriminator.variables))

            if log_interval and batch_index > 0 and batch_index % log_interval == 0:
                print('Batch #%d\tAverage Generator Loss: %.6f\t'
                      'Average Discriminator Loss: %.6f' % (
                          batch_index, total_generator_loss / batch_index,
                          total_discriminator_loss / batch_index))
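Unlike the graph-mode examples, this one runs under eager execution, where tf.assign_add applies immediately as a side effect rather than returning an op to be run later, which is why the global-step increment needs no session. A minimal sketch of that behaviour (assumes TF 1.x with eager enabled; the variable here is illustrative):

import tensorflow as tf

tf.enable_eager_execution()

step = tf.Variable(0, dtype=tf.int64)
tf.assign_add(step, 1)   # applied immediately, no Session needed
print(step.numpy())      # 1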
Example 14: accuracy
def accuracy(logits, labels):
    """Calculates aggregated accuracy."""
    is_correct = tf.nn.in_top_k(logits, labels, 1)
    correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
    incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
    correct_count = tf.Variable(0, False)
    incorrect_count = tf.Variable(0, False)
    correct_count_update = tf.assign_add(correct_count, correct)
    incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
    accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
        correct_count + incorrect_count, tf.float32)
    return [correct_count_update, incorrect_count_update], accuracy_op
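Examples 11 and 14 share a calling convention: run the returned update ops once per batch, then evaluate the ratio op for the aggregate. A hypothetical session loop for the accuracy helper above (the batch data is made up for illustration):

import numpy as np
import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=[None, 3])
labels = tf.placeholder(tf.int32, shape=[None])
update_ops, accuracy_op = accuracy(logits, labels)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch_logits = np.array([[2., 0., 0.], [0., 2., 0.]], dtype=np.float32)
    batch_labels = np.array([0, 2], dtype=np.int32)  # first correct, second not
    sess.run(update_ops, {logits: batch_logits, labels: batch_labels})
    print(sess.run(accuracy_op))  # 0.5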
Example 15: setUp
def setUp(self):
    tf.test.TestCase.setUp(self)
    self.log_dir = 'log/dir'
    self.summary_writer = testing.FakeSummaryWriter(self.log_dir)
    var = tf.Variable(0.0)
    tensor = tf.assign_add(var, 1.0)
    self.summary_op = tf.summary.scalar('my_summary', tensor)
    global_step = tf.contrib.framework.get_or_create_global_step()
    self.train_op = tf.assign_add(global_step, 1)