

Python tensorflow.scatter_add Code Examples

This article collects typical usage examples of the Python method tensorflow.scatter_add. If you are wondering what tensorflow.scatter_add does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.


A total of 13 code examples of tensorflow.scatter_add are shown below, sorted by popularity by default.
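Before the project examples, here is a minimal sketch of the basic semantics (assuming TensorFlow 1.x, where tf.scatter_add takes a mutable tf.Variable as its first argument and accumulates the contributions of duplicate indices):

# Minimal sketch (TensorFlow 1.x assumed; not taken from any of the projects below)
import tensorflow as tf

ref = tf.Variable([1.0, 2.0, 3.0, 4.0])            # variable updated in place
indices = tf.constant([0, 2, 0])                    # row 0 appears twice
updates = tf.constant([10.0, 10.0, 10.0])           # values to add at those rows

scattered = tf.scatter_add(ref, indices, updates)   # duplicate indices accumulate

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(scattered))                       # [21.  2. 13.  4.]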

Example 1: sparse_moving_average

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def sparse_moving_average(self, variable, unique_indices, accumulant, name='Accumulator', decay=.9):
    """"""
    
    accumulant = tf.clip_by_value(accumulant, -self.clip, self.clip)
    first_dim = variable.get_shape().as_list()[0]
    accumulator = self.get_accumulator(name, variable)
    indexed_accumulator = tf.gather(accumulator, unique_indices)
    iteration = self.get_accumulator('{}/iteration'.format(name), variable, shape=[first_dim, 1])
    indexed_iteration = tf.gather(iteration, unique_indices)
    iteration = tf.scatter_add(iteration, unique_indices, tf.ones_like(indexed_iteration))
    indexed_iteration = tf.gather(iteration, unique_indices)
    
    if decay < 1:
      current_indexed_decay = decay * (1-decay**(indexed_iteration-1)) / (1-decay**indexed_iteration)
    else:
      current_indexed_decay = (indexed_iteration-1) / indexed_iteration
    
    accumulator = tf.scatter_update(accumulator, unique_indices, current_indexed_decay*indexed_accumulator)
    accumulator = tf.scatter_add(accumulator, unique_indices, (1-current_indexed_decay)*accumulant)
    return accumulator, iteration
  
  #============================================================= 
Author: tdozat, Project: Parser-v3, Lines: 24, Source: optimizer.py

Example 2: _collect_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def _collect_gradients(gradients, variables):
    """ Collects gradients.

    Args:
        gradients: A list of gradients.
        variables: A list of variables for collecting the gradients.

    Returns: A tf op.
    """
    ops = []
    for grad, var in zip(gradients, variables):
        if isinstance(grad, tf.Tensor):
            ops.append(tf.assign_add(var, grad))
        else:
            ops.append(tf.scatter_add(var, grad.indices, grad.values))
    return tf.group(*ops, name="collect_gradients") 
Author: zhaocq-nlp, Project: NJUNMT-tf, Lines: 18, Source: optimize.py
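As a follow-up illustration of how such a collector can be driven (a hypothetical toy model, not taken from NJUNMT-tf; TensorFlow 1.x assumed), the collected-gradient variables are typically zero-initialized, non-trainable copies of the model variables, and the returned op is run once per micro-batch:

# Hypothetical usage sketch: accumulate gradients over several micro-batches
import tensorflow as tf

x = tf.constant([[1.0, 2.0]])
w = tf.Variable([[0.5], [0.5]], name="w")
loss = tf.reduce_sum(tf.matmul(x, w))

variables = [w]
# One zero-initialized, non-trainable accumulator per model variable.
accumulators = [tf.Variable(tf.zeros(v.get_shape().as_list()), trainable=False)
                for v in variables]

gradients = tf.gradients(loss, variables)
collect_op = _collect_gradients(gradients, accumulators)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(4):                  # four micro-batches
        sess.run(collect_op)
    print(sess.run(accumulators[0]))    # gradient of loss w.r.t. w, summed 4 times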

Example 3: make_update_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def make_update_op(self, upd_idxs, upd_keys, upd_vals,
                     batch_size, use_recent_idx, intended_output):
    """Function that creates all the update ops."""
    base_update_op = super(LSHMemory, self).make_update_op(
        upd_idxs, upd_keys, upd_vals,
        batch_size, use_recent_idx, intended_output)

    # compute hash slots to be updated
    hash_slot_idxs = self.get_hash_slots(upd_keys)

    # make updates
    update_ops = []
    with tf.control_dependencies([base_update_op]):
      for i, slot_idxs in enumerate(hash_slot_idxs):
        # for each slot, choose which entry to replace
        entry_idx = tf.random_uniform([batch_size],
                                      maxval=self.num_per_hash_slot,
                                      dtype=tf.int32)
        entry_mul = 1 - tf.one_hot(entry_idx, self.num_per_hash_slot,
                                   dtype=tf.int32)
        entry_add = (tf.expand_dims(upd_idxs, 1) *
                     tf.one_hot(entry_idx, self.num_per_hash_slot,
                                dtype=tf.int32))

        mul_op = tf.scatter_mul(self.hash_slots[i], slot_idxs, entry_mul)
        with tf.control_dependencies([mul_op]):
          add_op = tf.scatter_add(self.hash_slots[i], slot_idxs, entry_add)
          update_ops.append(add_op)

    return tf.group(*update_ops) 
Author: ringringyi, Project: DOTA_models, Lines: 32, Source: memory.py

Example 4: center_loss_v1

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def center_loss_v1(config, embedding, labels, **kargs):
	'''
	embedding dim : (batch_size, num_features)
	'''
	num_features = embedding.get_shape()[-1]
	with tf.variable_scope(config.scope+"_center_loss"):
		centroids = tf.get_variable('center',
						shape=[config.num_classes, num_features],
						dtype=tf.float32,
						initializer=tf.contrib.layers.xavier_initializer(),
						trainable=False)

		centroids_delta = tf.get_variable('centroidsUpdateTempVariable',
						shape=[config.num_classes, num_features],
						dtype=tf.float32,
						initializer=tf.zeros_initializer(),
						trainable=False)

		centroids_batch = tf.gather(centroids, labels)
		# cLoss = tf.nn.l2_loss(embedding - centroids_batch) / (batch_size) # Eq. 2
		
		# cLoss = tf.reduce_mean(tf.reduce_sum((embedding - centroids_batch)**2, axis=-1))
		cLoss = tf.reduce_sum((embedding - centroids_batch)**2, axis=-1)

		diff = centroids_batch - embedding

		delta_c_nominator = tf.scatter_add(centroids_delta, labels, diff)
		indices = tf.expand_dims(labels, -1)
		updates = tf.cast(tf.ones_like(labels), tf.float32)
		shape = tf.constant([config.num_classes])  # per-class counts, matching delta_c_nominator's first dimension

		labels_sum = tf.expand_dims(tf.scatter_nd(indices, updates, shape),-1)
		centroids = centroids.assign_sub(config.alpha * delta_c_nominator / (1.0 + labels_sum))

		centroids_delta = centroids_delta.assign(tf.zeros([config.num_classes, num_features]))

		return cLoss, centroids 
Author: yyht, Project: BERT, Lines: 39, Source: loss_utils.py

Example 5: _collect_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def _collect_gradients(gradients, variables):
    ops = []

    for grad, var in zip(gradients, variables):
        if isinstance(grad, tf.Tensor):
            ops.append(tf.assign_add(var, grad))
        else:
            ops.append(tf.scatter_add(var, grad.indices, grad.values))

    return tf.group(*ops, name="collect_gradients") 
Author: bzhangGo, Project: zero, Lines: 12, Source: cycle.py

Example 6: _TestCase

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def _TestCase(self, shape, indices, scatter_op=tf.scatter_add):
    """Run a random test case with the given shape and indices.

    Args:
      shape: Shape of the parameters array.
      indices: One-dimensional array of ints, the indices of the last dimension
               of the parameters to update.
      scatter_op: ScatterAdd or ScatterSub.
    """
    super(ScatterAddSubTest, self).setUp()
    with self.test_session(use_gpu=False):
      # Create a random parameter array of given shape
      p_init = np.random.rand(*shape).astype("f")
      # Create the shape of the update array. All dimensions except the last
      # match the parameter array, the last dimension equals the # of indices.
      vals_shape = [len(indices)] + shape[1:]
      vals_init = np.random.rand(*vals_shape).astype("f")
      v_i = [float(x) for x in vals_init.ravel()]
      p = tf.Variable(p_init)
      vals = tf.constant(v_i, shape=vals_shape, name="vals")
      ind = tf.constant(indices, dtype=tf.int32)
      p2 = scatter_op(p, ind, vals, name="updated_p")
      # p = init
      tf.global_variables_initializer().run()
      # p += vals
      result = p2.eval()
    # Compute the expected 'p' using numpy operations.
    for i, ind in enumerate(indices):
      if scatter_op == tf.scatter_add:
        p_init.reshape(shape[0], -1)[ind, :] += (
            vals_init.reshape(vals_shape[0], -1)[i, :])
      else:
        p_init.reshape(shape[0], -1)[ind, :] -= (
            vals_init.reshape(vals_shape[0], -1)[i, :])
    self.assertTrue(all((p_init == result).ravel())) 
Author: tobegit3hub, Project: deep_image_model, Lines: 37, Source: embedding_ops_test.py

Example 7: testWrongShape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def testWrongShape(self):
    # Indices and values mismatch.
    var = tf.Variable(tf.zeros(shape=[1024, 64, 64], dtype=tf.float32))
    indices = tf.placeholder(tf.int32, shape=[32])
    values = tf.placeholder(tf.float32, shape=[33, 64, 64])
    with self.assertRaises(ValueError):
      tf.scatter_add(var, indices, values)

    # Var and values mismatch.
    values = tf.placeholder(tf.float32, shape=[32, 64, 63])
    with self.assertRaises(ValueError):
      tf.scatter_add(var, indices, values) 
Author: tobegit3hub, Project: deep_image_model, Lines: 14, Source: embedding_ops_test.py

Example 8: _NumpyAdd

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def _NumpyAdd(ref, indices, updates):
  # Since numpy advanced assignment does not support repeated indices,
  # we run a simple loop to perform scatter_add.
  for i, indx in np.ndenumerate(indices):
    ref[indx] += updates[i] 
Author: tobegit3hub, Project: deep_image_model, Lines: 7, Source: scatter_ops_test.py

Example 9: testVariableRankAdd

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def testVariableRankAdd(self):
    self._VariableRankTests(tf.scatter_add) 
Author: tobegit3hub, Project: deep_image_model, Lines: 4, Source: scatter_ops_test.py

Example 10: testRepeatIndicesAdd

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def testRepeatIndicesAdd(self):
    self._VariableRankTests(tf.scatter_add, True) 
Author: tobegit3hub, Project: deep_image_model, Lines: 4, Source: scatter_ops_test.py

Example 11: calc_entropy

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def calc_entropy(self, inputs, scope):
        with tf.variable_scope(scope, reuse=True):
            maxtensor = tf.to_float(tf.size(inputs))

            bincount = tf.get_variable("bincount", [self.numbins])
            featuremapsum = tf.get_variable("featuremapsum", [1])
            featuremapcount = tf.get_variable("featuremapcount", [1])
            inputs = tf.Print(inputs, [inputs, tf.shape(
                inputs)], message="Framemap:", summarize=100)
            binnum = tf.to_int32(
                tf.floor((tf.reduce_sum(inputs) / maxtensor) * (self.numbins - 1)))
            tbincount = tf.scatter_add(
                bincount, binnum, tf.to_float(
                    tf.constant(1)))
            bincount = bincount.assign(tbincount)
            bincount = tf.Print(bincount,
                                [tf.count_nonzero(bincount)],
                                message="Non zero bins count:")

            tfeaturemapsum = tf.add(featuremapsum, tf.reduce_sum(inputs))
            featuremapsum = featuremapsum.assign(tfeaturemapsum)

            tfeaturemapcount = tf.add(featuremapcount, tf.to_float(tf.constant(1)))
            featuremapcount = featuremapcount.assign(tfeaturemapcount)

            meanactivation = tf.divide(featuremapsum, featuremapcount)
            pbin = tf.divide(tf.to_float(bincount), tf.to_float(featuremapcount))
            entropy = tf.multiply(pbin, tf.log(pbin))
            entropy = tf.where(
                tf.is_nan(entropy),
                tf.zeros_like(entropy),
                entropy)
            entropy = tf.reduce_sum(entropy)
            entropy = tf.Print(entropy, [entropy], message=": raw entropy: ")
            entropy = tf.multiply(entropy, tf.multiply(
                meanactivation, tf.constant(-1.0)))
            entropy = tf.Print(
                entropy, [
                    scope, entropy], message=": scaled entropy: ")
            return entropy 
Author: CiscoAI, Project: amla, Lines: 42, Source: cell_main.py

Example 12: collect_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def collect_gradients(gradients, variables):
    ops = []

    for grad, var in zip(gradients, variables):
        if isinstance(grad, tf.Tensor):
            ops.append(tf.assign_add(var, grad))
        else:
            ops.append(tf.scatter_add(var, grad.indices, grad.values))

    return tf.group(*ops) 
Author: bzhangGo, Project: transformer-aan, Lines: 12, Source: utils.py

Example 13: compute_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import scatter_add [as alias]
def compute_gradients(self, loss, var_list=None,
                          gate_gradients=tf.train.Optimizer.GATE_OP,
                          aggregation_method=None,
                          colocate_gradients_with_ops=False,
                          grad_loss=None):
        grads_and_vars = self._optimizer.compute_gradients(loss , var_list,
            gate_gradients, aggregation_method, colocate_gradients_with_ops,
            grad_loss)

        grads, var_list = list(zip(*grads_and_vars))

        # Do not create extra variables when step is 1
        if self._step == 1:
            grads = [self._all_reduce(t) for t in grads]
            return list(zip(grads, var_list))

        first_var = min(var_list, key=lambda x: x.name)
        iter_var = self._create_non_slot_variable(
            initial_value=0 if self._step == 1 else 1, name="iter",
            colocate_with=first_var)

        new_grads = []

        for grad, var in zip(grads, var_list):
            grad_acc = self._zeros_slot(var, "grad_acc", self._name)

            if isinstance(grad, tf.IndexedSlices):
                grad_acc = tf.scatter_add(grad_acc, grad.indices, grad.values,
                                          use_locking=self._use_locking)
            else:
                grad_acc = tf.assign_add(grad_acc, grad,
                                         use_locking=self._use_locking)

            def _acc_grad():
                return grad_acc

            def _avg_grad():
                return self._all_reduce(grad_acc / self._step)

            grad = tf.cond(tf.equal(iter_var, 0), _avg_grad, _acc_grad)
            new_grads.append(grad)

        return list(zip(new_grads, var_list)) 
Author: THUNLP-MT, Project: THUMT, Lines: 45, Source: optimizers.py


Note: The tensorflow.scatter_add examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please follow each project's license when redistributing or using the code; do not reproduce without permission.