

Python v1.group Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.group method in Python. If you have been wondering what v1.group does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other usage examples from the tensorflow.compat.v1 module.


Below are 15 code examples of the v1.group method, sorted by popularity by default.
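
Before diving into the examples, here is a minimal, self-contained sketch of what tf.group does. It is an illustrative toy (the variables and values are made up), written for TF 1.x graph mode:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.Variable(0)
b = tf.Variable(10)
inc_a = tf.assign_add(a, 1)
dec_b = tf.assign_sub(b, 1)

# tf.group bundles several ops into a single op with no output value;
# running the grouped op runs all of its inputs (with no ordering
# guarantee between them).
step = tf.group(inc_a, dec_b)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(step)
  print(sess.run([a, b]))  # [1, 9]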

Example 1: _make_update

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _make_update(self):
    mss = []
    gsum = 0.0
    count = 0
    for sum_squared_grads in self._sum_squared_grads:
      ms = tf.sqrt(sum_squared_grads / self._num_squared_grads)
      gsum += tf.reduce_sum(ms)
      count += tf.reduce_sum(tf.ones_like(ms))
      mss.append(ms)
    gsum = gsum / count

    assignments = []
    for grad, var, save, sum_squared_grads, ms in zip(
        self._grads, self._vars, self._saves, self._sum_squared_grads, mss):
      decay_rate = tf.minimum(1.0, self._decay_rate*(ms/gsum))
      delta = (-self._learning_rate*grad / (ms + self._epsilon) +
               decay_rate*(save-var))
      assignments.append(var.assign_add(delta))
    return tf.group(assignments) 
Author: deepmind | Project: lamb | Lines: 21 | Source: dyneval.py

Example 2: _apply_sparse_shared

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    beta1_power, beta2_power = self._get_beta_accumulators()
    beta1_power = tf.cast(beta1_power, var.dtype.base_dtype)
    beta2_power = tf.cast(beta2_power, var.dtype.base_dtype)
    lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking)
    with tf.control_dependencies([m_t]):
      m_t = scatter_add(m, indices, m_scaled_g_values)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad * grad) * (1 - beta2_t)
    v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking)
    with tf.control_dependencies([v_t]):
      v_t = scatter_add(v, indices, v_scaled_g_values)
    v_sqrt = tf.sqrt(v_t)
    var_update = tf.assign_sub(
        var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
    return tf.group(*[var_update, m_t, v_t]) 
Author: tensorflow | Project: tensor2tensor | Lines: 27 | Source: multistep_with_adamoptimizer.py

Example 3: _apply_cond

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                              use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Author: tensorflow | Project: tensor2tensor | Lines: 22 | Source: multistep_optimizer.py

Example 4: _finish

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t,
              use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t,
              use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)
        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                     use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Author: tensorflow | Project: tensor2tensor | Lines: 24 | Source: multistep_optimizer.py

Example 5: test_adam

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def test_adam(self):
    with self.test_session() as sess:
      w = tf.get_variable(
          "w",
          shape=[3],
          initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
      x = tf.constant([0.4, 0.2, -0.5])
      loss = tf.reduce_mean(tf.square(x - w))
      tvars = tf.trainable_variables()
      grads = tf.gradients(loss, tvars)
      global_step = tf.train.get_or_create_global_step()
      optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
      train_op = optimizer.apply_gradients(list(zip(grads, tvars)), global_step)
      init_op = tf.group(tf.global_variables_initializer(),
                         tf.local_variables_initializer())
      sess.run(init_op)
      for _ in range(100):
        sess.run(train_op)
      w_np = sess.run(w)
      self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2) 
Author: google-research | Project: albert | Lines: 22 | Source: optimization_test.py

Example 6: assign_to_slices

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def assign_to_slices(self, assign_fn, values, assign_to_tensor_list=None):
      """Assign to the slice variables.

      Args:
        assign_fn: a function from
          (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
        values: a list of tf.Tensor
        assign_to_tensor_list: an optional list of tf.Variable

      Returns:
        a tf.Operation
      """
      if assign_to_tensor_list is None:
        assign_to_tensor_list = self._laid_out_tensor.all_slices
      # Handle both N -> 1 and N -> N cases.
      num_slices = min(len(assign_to_tensor_list), len(values))
      devices = [""] * num_slices
      return tf.group(
          mtf.parallel(devices, assign_fn,
                       [self._variable] * len(devices),
                       assign_to_tensor_list[:num_slices],
                       values[:num_slices])) 
Author: tensorflow | Project: mesh | Lines: 24 | Source: simd_mesh_impl.py

Example 7: get_step_from_checkpoint_path (note: the group call here is re.Match.group, not tf.group)

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def get_step_from_checkpoint_path(checkpoint_path):
  """Returns the global step for the checkpoint at `checkpoint_path`.

  Assumes `checkpoint_path` corresponds to a file which contains the substring
  model.ckpt-{global_step}

  Args:
    checkpoint_path: str of path to a checkpoint file.

  Returns:
    int of the global step corresponding to the checkpoint file.

  Raises:
    ValueError if checkpoint_path does not correspond to a model checkpoint file
    which contains the global_step in its filename.
  """
  match = re.match(r".*model\.ckpt\-(\d+).*", checkpoint_path)
  if match is None:
    raise ValueError("Invalid checkpoint path {}".format(checkpoint_path))
  return int(match.group(1))


# TODO(noam): include more descriptive definitions 
Author: tensorflow | Project: mesh | Lines: 25 | Source: utils.py

Example 8: f1_metric

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def f1_metric(precision, precision_op, recall, recall_op):
  """Computes F1 based on precision and recall.

  Args:
    precision: <float> [batch_size]
    precision_op: Update op for precision.
    recall: <float> [batch_size]
    recall_op: Update op for recall.

  Returns:
    tensor and update op for F1.
  """
  f1_op = tf.group(precision_op, recall_op)
  numerator = 2 * tf.multiply(precision, recall)
  denominator = tf.add(precision, recall)
  f1 = tf.divide(numerator, denominator)

  # <float> [batch_size]
  zero_vec = tf.zeros_like(f1)
  is_valid = tf.greater(denominator, zero_vec)
  f1 = tf.where(is_valid, x=f1, y=zero_vec)

  return f1, f1_op 
Author: google-research | Project: language | Lines: 25 | Source: nq_long_utils.py
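
For context, here is a hedged sketch of how f1_metric would typically be wired together with tf.metrics; the labels and predictions placeholders are illustrative assumptions, not part of the original project:

labels = tf.placeholder(tf.int64, [None])
predictions = tf.placeholder(tf.int64, [None])

# tf.metrics.precision / tf.metrics.recall each return (value, update_op).
precision, precision_op = tf.metrics.precision(labels, predictions)
recall, recall_op = tf.metrics.recall(labels, predictions)

f1, f1_op = f1_metric(precision, precision_op, recall, recall_op)
# Running f1_op once per batch updates the underlying counters;
# evaluating f1 afterwards reads the current F1 value.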

Example 9: _build_select_slate_op

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _build_select_slate_op(self):
    p_no_click = self._prob_no_click_ph
    p = self._doc_affinity_scores_ph
    q = self._net_outputs.q_values[0]
    with tf.name_scope('select_slate'):
      self._output_slate = self._select_slate_fn(self._slate_size, p_no_click,
                                                 p, q)

    self._output_slate = tf.Print(
        self._output_slate, [tf.constant('cp 1'), self._output_slate, p, q],
        summarize=10000)
    self._output_slate = tf.reshape(self._output_slate, (self._slate_size,))

    self._action_counts = tf.get_variable(
        'action_counts',
        shape=[self._num_candidates],
        initializer=tf.zeros_initializer())
    output_slate = tf.reshape(self._output_slate, [-1])
    output_one_hot = tf.one_hot(output_slate, self._num_candidates)
    update_ops = []
    for i in range(self._slate_size):
      update_ops.append(tf.assign_add(self._action_counts, output_one_hot[i]))
    self._select_action_update_op = tf.group(*update_ops) 
Author: google-research | Project: recsim | Lines: 25 | Source: slate_decomp_q_agent.py

Example 10: get_loss_scale_update_op

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
                             inc_loss_scale_every_n):
  """Returns the update op for loss scaling variables.

  We maintain the counter `loss_scale_normal_steps` to count the number of steps
  we have been using the current `loss_scale`. In most cases, this function
  increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
  greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
  and reset `loss_scale_normal_steps` to zero.

  This op is only called if the gradients don't have any infs or nans. Instead,
  if infs or nans occur in the gradients, we immediately halve `loss_scale` and
  reset `loss_scale_normal_steps` to zero.

  Args:
    loss_scale: a tf.Variable representing the loss_scale value.
    loss_scale_normal_steps: a tf.Variable representing the number of training
      steps that have run since the loss_scale last changed.
    inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
      increased every `inc_loss_scale_every_n` steps, unless the gradients have
      infs or nans.

  Returns:
    An op for updating `loss_scale` and `loss_scale_normal_steps`.
  """

  def increment_loss_scale_normal_steps_func():
    return tf.group(loss_scale_normal_steps.assign_add(1))

  def increase_loss_scale_func():
    return tf.group(
        tf.assign(loss_scale_normal_steps, 0),
        tf.assign(loss_scale, loss_scale * 2))

  # true_fn and false_fn must have the same type.
  return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
                 increment_loss_scale_normal_steps_func,
                 increase_loss_scale_func) 
Author: tensorflow | Project: benchmarks | Lines: 40 | Source: variable_mgr_util.py
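
A hedged sketch of how this op could be wired into the overflow check that the docstring describes; the finite-check and halving logic below are assumptions for illustration, not code from the benchmarks project:

def loss_scale_update(grads, loss_scale, loss_scale_normal_steps,
                      inc_loss_scale_every_n=2000):
  # The "normal" path runs only when every gradient is finite.
  grads_finite = tf.reduce_all(
      tf.stack([tf.reduce_all(tf.is_finite(g)) for g in grads]))

  def on_finite():
    return get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
                                    inc_loss_scale_every_n)

  def on_overflow():
    # Halve the scale and restart the counter, as described above.
    return tf.group(tf.assign(loss_scale, loss_scale / 2),
                    tf.assign(loss_scale_normal_steps, 0))

  return tf.cond(grads_finite, on_finite, on_overflow)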

Example 11: print_info

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def print_info(self):
    """Print basic information."""
    benchmark_info = self._get_params_info()
    log_fn('Model:       %s' % self.model.get_model_name())
    log_fn('Dataset:     %s' % benchmark_info['dataset_name'])
    log_fn('Mode:        %s' % self.mode)
    log_fn('SingleSess:  %s' % benchmark_info['single_session'])
    log_fn('Batch size:  %s global' % (self.batch_size * self.num_workers))
    log_fn('             %s per device' % (self.batch_size //
                                           len(self.raw_devices)))
    if self.batch_group_size > 1:
      log_fn('             %d batches per preprocessing group' %
             self.batch_group_size)
    log_fn('Num batches: %d' % self.num_batches)
    log_fn('Num epochs:  %.2f' % self.num_epochs)
    log_fn('Devices:     %s' % benchmark_info['device_list'])
    log_fn('NUMA bind:   %s' % self.params.use_numa_affinity)
    log_fn('Data format: %s' % self.params.data_format)
    if self.rewriter_config:
      log_fn('RewriterConfig: %s' % self.rewriter_config)
    log_fn('Optimizer:   %s' % self.params.optimizer)
    log_fn('Variables:   %s' % self.params.variable_update)
    if (self.params.variable_update == 'replicated' or
        self.params.variable_update == 'distributed_all_reduce'
        or self.params.variable_update == 'collective_all_reduce'):
      log_fn('AllReduce:   %s' % self.params.all_reduce_spec)
    if self.job_name:
      log_fn('Sync:        %s' % self.params.cross_replica_sync)
    if self.params.staged_vars:
      log_fn('Staged vars: %s' % self.params.staged_vars)
    if self.params.variable_update == 'horovod' and self.params.horovod_device:
      log_fn('Horovod on:  %s' % self.params.horovod_device)
    log_fn('==========') 
Author: tensorflow | Project: benchmarks | Lines: 35 | Source: benchmark_cnn.py

Example 12: _build_eval_graph

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _build_eval_graph(self, scope_name=None):
    """Build the evaluation graph.

    Args:
      scope_name: String to filter what summaries are collected. Only summary
        ops whose name contains `scope_name` will be added, which is useful for
        only including evaluation ops.

    Returns:
      A GraphInfo named_tuple containing various useful ops and tensors of the
      evaluation graph.
    """
    with self._do_eval():
      input_producer_op, enqueue_ops, fetches = self._build_model()
      local_var_init_op = tf.local_variables_initializer()
      table_init_ops = tf.tables_initializer()
      variable_mgr_init_ops = [local_var_init_op]
      if table_init_ops:
        variable_mgr_init_ops.extend([table_init_ops])
      with tf.control_dependencies([local_var_init_op]):
        variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())
      local_var_init_op_group = tf.group(*variable_mgr_init_ops)

      summary_op = tf.summary.merge_all(scope=scope_name)
      # The eval graph has no execution barrier because it doesn't run in
      # distributed mode.
      execution_barrier = None
      # We do not use the global step during evaluation.
      global_step = None
      return GraphInfo(input_producer_op, enqueue_ops, fetches,
                       execution_barrier, global_step, local_var_init_op_group,
                       summary_op)

  # TODO(reedwm): For consistency, we should have a similar
  # "_initialize_train_graph" function. They can likely be the same function. 
Author: tensorflow | Project: benchmarks | Lines: 37 | Source: benchmark_cnn.py

Example 13: add_sync_queues_and_barrier

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list):
    """Adds ops to enqueue on all worker queues.

    Args:
      name_prefix: prefix for the shared_name of the queue ops.
      enqueue_after_list: ops that must run before the enqueues (added as
        control dependencies).

    Returns:
      An op that should be used as a control dependency before starting the next step.
    """
    self.sync_queue_counter += 1
    with tf.device(self.sync_queue_devices[(
        self.sync_queue_counter % len(self.sync_queue_devices))]):
      sync_queues = [
          tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]],
                       shared_name='%s%s' % (name_prefix, i))
          for i in range(self.num_workers)]
      queue_ops = []
      # For each other worker, add an entry in a queue, signaling that it can
      # finish this step.
      token = tf.constant(False)
      with tf.control_dependencies(enqueue_after_list):
        for i, q in enumerate(sync_queues):
          if i == self.task_index:
            queue_ops.append(tf.no_op())
          else:
            queue_ops.append(q.enqueue(token))

      # Drain tokens off queue for this worker, one for each other worker.
      queue_ops.append(
          sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))

      return tf.group(*queue_ops) 
Author: tensorflow | Project: benchmarks | Lines: 35 | Source: benchmark_cnn.py
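
A hedged usage sketch; the variable_mgr and train_op names are assumed for illustration:

# Each worker enqueues a token after finishing its step, then blocks until
# every other worker has done the same, forming a cross-worker barrier.
barrier = variable_mgr.add_sync_queues_and_barrier('step_barrier', [train_op])
with tf.control_dependencies([barrier]):
  next_step = tf.no_op()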

Example 14: _make_save

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _make_save(self):
    assignments = []
    for save, var in zip(self._saves, self._vars):
      assignments.append(save.assign(var))
    return tf.group(assignments) 
Author: deepmind | Project: lamb | Lines: 7 | Source: dyneval.py

Example 15: _make_restore

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import group [as alias]
def _make_restore(self):
    assignments = []
    for save, var in zip(self._saves, self._vars):
      assignments.append(var.assign(save))
    return tf.group(assignments) 
Author: deepmind | Project: lamb | Lines: 7 | Source: dyneval.py
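
Taken together with Example 1, Examples 14 and 15 form the dynamic-evaluation pattern: snapshot the trained weights, adapt them on the evaluation stream, then restore the snapshot. A hedged sketch of the surrounding loop (the session and feed names are assumptions, not code from the lamb project):

save_op = dyneval._make_save()
update_op = dyneval._make_update()
restore_op = dyneval._make_restore()

sess.run(save_op)                       # snapshot the current weights
for feed in eval_feeds:                 # eval_feeds is illustrative
  sess.run(update_op, feed_dict=feed)   # adapt weights on held-out data
sess.run(restore_op)                    # put the snapshot back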


Note: The tensorflow.compat.v1.group examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Refer to the corresponding project's license before redistributing or using the code; do not reproduce without permission.