

Python summary.scalar Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.summary.scalar. If you are unsure what summary.scalar does or how to call it, the examples selected below should help. You can also explore the tensorflow.contrib.summary module for related usage examples.


Four code examples of summary.scalar are shown below, sorted by popularity.
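Before the examples, here is a minimal graph-mode sketch of summary.scalar itself (TF 1.x contrib API; the log directory and loss value are illustrative stand-ins):

import tensorflow as tf
from tensorflow.contrib import summary

global_step = tf.train.get_or_create_global_step()
writer = summary.create_file_writer('/tmp/summaries')  # hypothetical log dir

with writer.as_default(), summary.always_record_summaries():
    loss = tf.constant(0.25)  # stand-in for a real loss tensor
    summary.scalar('loss', loss, step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary.initialize(graph=tf.get_default_graph(), session=sess)
    sess.run(summary.all_summary_ops())  # writes the scalar event to disk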

Example 1: where

# Required import: from tensorflow.contrib import summary [as alias]
# Or: from tensorflow.contrib.summary import scalar [as alias]
def where(cond, true, false, name=None):
    """Similar to tf.where, but broadcasts scalar values."""
    with tf.name_scope(name, 'where', [cond, true, false]) as name:
        cond = tf.convert_to_tensor(cond, name='cond', dtype=tf.bool)
        true = tf.convert_to_tensor(true, name='true',
                                    dtype=false.dtype if isinstance(false, tf.Tensor) else None)
        false = tf.convert_to_tensor(false, name='false', dtype=true.dtype)
        if true.shape.rank == false.shape.rank == 0:
            shape = tf.shape(cond)
            true = tf.fill(shape, true)
            false = tf.fill(shape, false)
        elif true.shape.rank == 0:
            true = tf.fill(tf.shape(false), true)
        elif false.shape.rank == 0:
            false = tf.fill(tf.shape(true), false)
        return tf.where(cond, true, false, name=name) 
Developer: openai, Project: lm-human-preferences, Lines: 18, Source: core.py
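
A quick check of the scalar broadcasting that this helper adds on top of tf.where; the condition and values below are made up:

cond = tf.constant([True, False, True])
result = where(cond, 1.0, 0.0)  # both scalars are tiled to the shape of `cond`
with tf.Session() as sess:
    print(sess.run(result))  # [1. 0. 1.]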

Example 2: record_stats

# Required import: from tensorflow.contrib import summary [as alias]
# Or: from tensorflow.contrib.summary import scalar [as alias]
def record_stats(*, stats, summary_writer, step, log_interval, name=None, comm=MPI.COMM_WORLD):
    def log_stats(step, *stat_values):
        if comm.Get_rank() != 0 or step % log_interval != 0:
            return

        for k, v in safe_zip(stats.keys(), stat_values):
            print('k = ', k, ', v = ', v)

    summary_ops = [tf.py_func(log_stats, [step] + list(stats.values()), [])]
    if summary_writer:
        with summary_writer.as_default(), summary.always_record_summaries():
            for key, value in stats.items():
                summary_ops.append(summary.scalar(key, value, step=step))
    return tf.group(*summary_ops, name=name) 
Developer: openai, Project: lm-human-preferences, Lines: 16, Source: core.py
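
A hypothetical call site for record_stats, assuming the imports above and that `loss`, `accuracy`, and `train_op` are already defined in the graph:

global_step = tf.train.get_or_create_global_step()
writer = summary.create_file_writer('/tmp/stats')  # hypothetical log dir
stats_op = record_stats(
    stats={'loss': loss, 'accuracy': accuracy},
    summary_writer=writer, step=global_step, log_interval=100)
# Run alongside the training op; stats print on rank 0 every `log_interval` steps:
# sess.run([train_op, stats_op])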

Example 3: __init__

# Required import: from tensorflow.contrib import summary [as alias]
# Or: from tensorflow.contrib.summary import scalar [as alias]
def __init__(self,
               update_batchnorm_params=True):
    self.update_batchnorm_params = update_batchnorm_params

    num_samples = datasets.get_count(FLAGS.train_split)
    if FLAGS.num_supervised_examples:
      num_samples = FLAGS.num_supervised_examples
    steps_per_epoch = num_samples // FLAGS.batch_size
    self.steps_per_epoch = steps_per_epoch

    global_step = tf.train.get_or_create_global_step()
    self.global_step_inc = tf.assign_add(global_step, 1)

    # lr_scale_batch_size defines a canonical batch size that is coupled with
    # the initial learning rate. If the actual batch size differs from the
    # canonical one, the learning rate is scaled linearly. This is convenient
    # because it allows varying the batch size without retuning the learning rate.
    lr_factor = 1.0
    if FLAGS.lr_scale_batch_size:
      lr_factor = FLAGS.batch_size / float(FLAGS.lr_scale_batch_size)

    # We actually also accept fractional epochs.
    schedule_in_steps = utils.get_schedule_from_config(
        FLAGS.schedule, steps_per_epoch)
    warmup, decays = schedule_in_steps[0], schedule_in_steps[1:-1]

    self.lr = get_lr(
        global_step,
        base_lr=FLAGS.lr * lr_factor,
        decay_steps=decays,
        lr_decay_factor=FLAGS.lr_decay_factor,
        warmup_steps=warmup)

    # TODO(marvinritter): Re-enable summaries with support for TPU training.
    # tf.summary.scalar('learning_rate', self.lr) 
Developer: google-research, Project: s4l, Lines: 37, Source: trainer.py
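
The linear learning-rate scaling performed above, worked through with made-up numbers:

base_lr = 0.1               # FLAGS.lr, tuned for the canonical batch size
lr_scale_batch_size = 256   # FLAGS.lr_scale_batch_size, the canonical batch size
batch_size = 1024           # FLAGS.batch_size, the actual batch size in this run

lr_factor = batch_size / float(lr_scale_batch_size)  # 4.0
effective_base_lr = base_lr * lr_factor              # 0.1 * 4.0 = 0.4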

Example 4: focal_loss

# Required import: from tensorflow.contrib import summary [as alias]
# Or: from tensorflow.contrib.summary import scalar [as alias]
def focal_loss(labels, logits, alpha, gamma):
  """Compute the focal loss between `logits` and the ground truth `labels`.

  Focal loss = -alpha_t * (1-pt)^gamma * log(pt)
  where pt is the probability of being classified to the true class.
  pt = p (if true class), otherwise pt = 1 - p. p = sigmoid(logit).

  Args:
    labels: A float32 tensor of size [batch, num_classes].
    logits: A float32 tensor of size [batch, num_classes].
    alpha: A float32 tensor broadcastable to [batch, num_classes] (for example,
      of shape [batch_size, 1]) specifying per-example weight for balanced
      cross entropy.
    gamma: A float32 scalar modulating loss from hard and easy examples.
  Returns:
    focal_loss: A float32 scalar representing normalized total loss.
  """
  with tf.name_scope('focal_loss'):
    logits = tf.cast(logits, dtype=tf.float32)
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels, logits=logits)

    # positive_label_mask = tf.equal(labels, 1.0)
    # probs = tf.sigmoid(logits)
    # probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs)
    # # With gamma < 1, the implementation could produce NaN during back prop.
    # modulator = tf.pow(1.0 - probs_gt, gamma)

    # A numerically stable implementation of modulator.
    if gamma == 0.0:
      modulator = 1.0
    else:
      modulator = tf.exp(-gamma * labels * logits - gamma * tf.log1p(
          tf.exp(-1.0 * logits)))

    loss = modulator * cross_entropy

    weighted_loss = alpha * loss
    focal_loss = tf.reduce_sum(weighted_loss)
    # Normalize by the total number of positive samples.
    focal_loss /= tf.reduce_sum(labels)
  return focal_loss 
Developer: richardaecn, Project: class-balanced-loss, Lines: 43, Source: resnet_main.py
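
A small smoke test with fabricated labels and logits. Since the element-wise loss has shape [batch, num_classes], alpha is given shape [batch_size, 1] here so it broadcasts across classes:

labels = tf.constant([[1., 0., 0.],
                      [0., 1., 1.]])
logits = tf.constant([[2.0, -1.0, 0.5],
                      [-0.5, 1.5, 0.0]])
alpha = tf.constant([[0.25], [0.75]])  # per-example weights

loss = focal_loss(labels, logits, alpha, gamma=2.0)
with tf.Session() as sess:
    print(sess.run(loss))  # a scalar, normalized by the 3 positive labels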


Note: The tensorflow.contrib.summary.scalar examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub/MSDocs and similar platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce this article without permission.