

Python tensorflow.scalar_summary Method Code Examples

This article collects typical usage examples of the Python method tensorflow.scalar_summary. If you are wondering what tensorflow.scalar_summary does, how to call it, or what real usage looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow module that contains this method.


Fifteen code examples of the tensorflow.scalar_summary method are shown below, sorted by popularity by default.
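Note: tf.scalar_summary belongs to the pre-1.0 TensorFlow summary API; from TensorFlow 1.0 onward the equivalent call is tf.summary.scalar (likewise tf.histogram_summary → tf.summary.histogram, tf.merge_summary / tf.merge_all_summaries → tf.summary.merge / tf.summary.merge_all, and tf.train.SummaryWriter → tf.summary.FileWriter). Before the examples, here is a minimal self-contained sketch of the old API, assuming a pre-1.0 TensorFlow installation; the tracked tensor and log directory are illustrative only and are not taken from any of the projects below.

import tensorflow as tf

# Any scalar tensor can be tracked; a random value stands in for a real loss here.
loss = tf.reduce_mean(tf.square(tf.random_normal([10])))
loss_summary = tf.scalar_summary('loss', loss)   # TF >= 1.0: tf.summary.scalar('loss', loss)
merged = tf.merge_all_summaries()                # TF >= 1.0: tf.summary.merge_all()

with tf.Session() as sess:
    # TF >= 1.0: tf.summary.FileWriter
    writer = tf.train.SummaryWriter('/tmp/scalar_summary_demo', sess.graph)
    summary_str = sess.run(merged)
    writer.add_summary(summary_str, global_step=0)
    writer.close()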

Example 1: define_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_g'):
                all_sum['hr_g'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_d'):
                all_sum['hr_d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
        self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
        self.hist_sum = tf.merge_summary(all_sum['hist']) 
Author: hanzhanggit | Project: StackGAN | Lines: 22 | Source: trainer.py

Example 2: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  # tf.histogram_summary(tensor_name + '/activations', x)
  tf.summary.histogram(tensor_name + '/activations', x)
  # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
  tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) 
Author: hohoins | Project: ml | Lines: 20 | Source: cifar10.py

Example 3: _setup_training

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _setup_training(self):
        """
        Set up a data flow graph for fine tuning
        """
        layer_num = self.layer_num
        act_func = ACTIVATE_FUNC[self.activate_func]
        sigma = self.sigma
        lr = self.learning_rate
        weights = self.weights
        biases = self.biases
        data1, data2 = self.data1, self.data2
        batch_size = self.batch_size
        optimizer = OPTIMIZER[self.optimizer]
        with tf.name_scope("training"):
            s1 = self._obtain_score(data1, weights, biases, act_func, "1")
            s2 = self._obtain_score(data2, weights, biases, act_func, "2")
            with tf.name_scope("cost"):
                sum_cost = tf.reduce_sum(tf.log(1 + tf.exp(-sigma*(s1-s2))))
                self.cost = cost = sum_cost / batch_size
        self.optimize = optimizer(lr).minimize(cost)

        for n in range(layer_num-1):
            tf.histogram_summary("weight"+str(n), weights[n])
            tf.histogram_summary("bias"+str(n), biases[n])
        tf.scalar_summary("cost", cost) 
Author: mzhang001 | Project: tfranknet | Lines: 27 | Source: ranknet.py

Example 4: setup_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def setup_summaries(self):
        episode_reward = tf.Variable(0.)
        s1 = tf.scalar_summary("Episode Reward " + str(self.actor_id), episode_reward)
        if self.alg_type == "a3c":
            summary_vars = [episode_reward]
        else:
            episode_ave_max_q = tf.Variable(0.)
            s2 = tf.scalar_summary("Max Q Value " + str(self.actor_id), episode_ave_max_q)
            logged_epsilon = tf.Variable(0.)
            s3 = tf.scalar_summary("Epsilon " + str(self.actor_id), logged_epsilon)
            summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
        summary_placeholders = [tf.placeholder("float") for _ in range(len(summary_vars))]
        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
        with tf.control_dependencies(update_ops):
            summary_ops = tf.merge_all_summaries()
        return summary_placeholders, update_ops, summary_ops 
Author: traai | Project: async-deep-rl | Lines: 18 | Source: actor_learner.py
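The ops returned above are consumed at the end of each episode. A hedged usage sketch follows (the helper name and arguments are hypothetical and not part of the async-deep-rl project); note that summary_ops was built under control_dependencies(update_ops), so the placeholders must also be fed when it is evaluated.

def log_episode_stats(sess, writer, summary_placeholders, update_ops, summary_ops, stats, step):
    """Hypothetical helper: push the latest episode statistics into the summary
    variables and write the merged summary for TensorBoard. `stats` is a list of
    Python floats matching summary_placeholders, e.g. [episode_reward] for a3c or
    [episode_reward, episode_ave_max_q, logged_epsilon] otherwise; `writer` is a
    tf.train.SummaryWriter."""
    feed = dict(zip(summary_placeholders, stats))
    sess.run(update_ops, feed_dict=feed)                 # assign the new values to the summary variables
    summary_str = sess.run(summary_ops, feed_dict=feed)  # re-feed: summary_ops depends on the placeholders
    writer.add_summary(summary_str, step)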

Example 5: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _activation_summary(x):
    """Helper to create summaries for activations.
    
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % LSPGlobals.TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) 
Author: samitok | Project: deeppose | Lines: 18 | Source: LSPModels.py

Example 6: _add_loss_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _add_loss_summaries(total_loss):
    """Add summaries for losses in DeepPose model.
    
    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.
    
    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    
    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.scalar_summary(l.op.name +' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))
    
    return loss_averages_op 
Author: samitok | Project: deeppose | Lines: 27 | Source: LSPModels.py

Example 7: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def __init__(self, sess, data, runtime_base_dir, model_dir, variables, max_to_keep=20):
    self.sess = sess
    self.reset()

    with tf.variable_scope('t'):
      self.t_op = tf.Variable(0, trainable=False, name='t')
      self.t_add_op = self.t_op.assign_add(1)

    self.model_dir = os.path.join(runtime_base_dir, model_dir)
    self.saver = tf.train.Saver(variables + [self.t_op], max_to_keep=max_to_keep)
    self.writer = tf.train.SummaryWriter('%s/logs/%s' % (runtime_base_dir, model_dir), self.sess.graph)

    with tf.variable_scope('summary'):
      scalar_summary_tags = ['train_l', 'test_l']

      self.summary_placeholders = {}
      self.summary_ops = {}

      for tag in scalar_summary_tags:
        self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag.replace(' ', '_'))
        self.summary_ops[tag]  = tf.scalar_summary('%s/%s' % (data, tag), self.summary_placeholders[tag]) 
Author: jakebelew | Project: gated-pixel-cnn | Lines: 23 | Source: statistic.py

Example 8: _add_train_op

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _add_train_op(self):
    params = self._params

    self._lr_rate = tf.maximum(
        params.min_lr,
        tf.train.exponential_decay(params.lr, self._global_step, 30000, 0.98))

    tvars = tf.trainable_variables()
    # use reserved gpu for gradient computation
    with tf.device(self._get_gpu(self._num_gpus-1)):
      grads, global_norm = tf.clip_by_global_norm(
          tf.gradients(self._loss, tvars), params.max_grad_norm)
    tf.scalar_summary('global_norm', global_norm)
    optimizer = tf.train.AdamOptimizer(self._lr_rate)
    tf.scalar_summary('learning rate', self._lr_rate)
    with tf.device(self._next_device()):
      self._train_op = optimizer.apply_gradients(
          zip(grads, tvars), global_step=self._global_step, name='train_step')
    self._summaries = tf.merge_all_summaries()

    return self._train_op, self._loss
Author: marshmelloX | Project: dynamic-coattention-network | Lines: 23 | Source: selector.py

Example 9: _add_loss_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _add_loss_summaries(total_loss):
  """Add summaries for losses in model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.scalar_summary(l.op.name +' (raw)', l)
    tf.scalar_summary(l.op.name, loss_averages.average(l))

  return loss_averages_op 
Author: twerkmeister | Project: iLID | Lines: 27 | Source: deepaudio.py

Example 10: accuracy

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def accuracy(logits, dense_labels):
  seen_german = tf.Variable(0, trainable=False)
  seen_english = tf.Variable(0, trainable=False)

  x = tf.nn.softmax(logits)

  correct_pred = tf.equal(tf.argmax(x, 1), tf.argmax(dense_labels, 1))

  german_samples = tf.equal(tf.constant(1, dtype="int64"), tf.argmax(dense_labels, 1))
  german_accuracy = tf.reduce_mean(tf.cast(tf.gather(correct_pred, tf.where(german_samples)), tf.float32))
  sum_german_samples = seen_german.assign_add(tf.reduce_sum(tf.cast(tf.gather(dense_labels, tf.where(german_samples)), tf.int32)))
  tf.scalar_summary("german_accuracy", german_accuracy)

  english_samples = tf.equal(tf.constant(0, dtype="int64"), tf.argmax(dense_labels, 1))
  english_accuracy = tf.reduce_mean(tf.cast(tf.gather(correct_pred, tf.where(english_samples)), tf.float32))
  sum_english_samples = seen_english.assign_add(tf.reduce_sum(tf.cast(tf.gather(dense_labels, tf.where(english_samples)), tf.int32)))
  tf.scalar_summary("english_accuracy", english_accuracy)

  german_predictions = tf.equal(tf.constant(1, dtype="int64"), tf.argmax(x, 1))
  german_predictions_count = tf.reduce_sum(tf.cast(german_predictions, tf.int32))

  accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
  tf.scalar_summary("accuracy", accuracy)
  return accuracy, english_accuracy, german_accuracy, german_predictions_count, sum_english_samples, sum_german_samples 
Author: twerkmeister | Project: iLID | Lines: 26 | Source: deepaudio.py

Example 11: _init_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _init_summaries(self):
        if self.is_train:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')

            self.summary_writer = tf.summary.FileWriter(logdir)
            self.summary_writer_by_points = [tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
                                             for i in range(16)]

            tf.scalar_summary('Average euclidean distance', self.euclidean_dist, collections = [KEY_SUMMARIES])

            for i in range(16):
                tf.scalar_summary('Joint euclidean distance', self.euclidean_dist_per_joint[i],
                                  collections = [KEY_SUMMARIES_PER_JOINT[i]])

            self.create_summary_from_weights()

            self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
            self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i]) for i in range(16)]
        else:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
            self.summary_writer = tf.summary.FileWriter(logdir) 
Author: marian-margeta | Project: gait-recognition | Lines: 23 | Source: human_pose_nn.py

Example 12: variable_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def variable_summaries(var, name):
        """
        Attach a lot of summaries to a Tensor for Tensorboard visualization.
        Ref: https://www.tensorflow.org/versions/r0.11/how_tos/summaries_and_tensorboard/index.html
        :param var: Variable to summarize
        :param name: Summary name
        """
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.scalar_summary('mean/' + name, mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.scalar_summary('stddev/' + name, stddev)
            tf.scalar_summary('max/' + name, tf.reduce_max(var))
            tf.scalar_summary('min/' + name, tf.reduce_min(var))
            tf.histogram_summary(name, var) 
Author: rvinas | Project: sentiment-analysis-tensorflow | Lines: 18 | Source: neural_network.py

Example 13: _activation_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.histogram_summary(tensor_name + '/activations', x)
  tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) 
Author: tobegit3hub | Project: deep_image_model | Lines: 18 | Source: cifar10.py

Example 14: _add_loss_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.scalar_summary(l.op.name +' (raw)', l)
    tf.scalar_summary(l.op.name, loss_averages.average(l))

  return loss_averages_op 
Author: tobegit3hub | Project: deep_image_model | Lines: 27 | Source: cifar10.py

Example 15: _get_train_ops

# Required module: import tensorflow [as alias]
# Or: from tensorflow import scalar_summary [as alias]
def _get_train_ops(self, features, _):
    (_,
     _,
     losses,
     training_op) = clustering_ops.KMeans(
         self._parse_tensor_or_dict(features),
         self._num_clusters,
         self._training_initial_clusters,
         self._distance_metric,
         self._use_mini_batch,
         random_seed=self._random_seed,
         kmeans_plus_plus_num_retries=self.kmeans_plus_plus_num_retries
     ).training_graph()
    incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
    self._loss = tf.reduce_sum(losses)
    tf.scalar_summary('loss/raw', self._loss)
    training_op = with_dependencies([training_op, incr_step], self._loss)
    return training_op, self._loss 
Author: tobegit3hub | Project: deep_image_model | Lines: 20 | Source: kmeans.py


Note: The tensorflow.scalar_summary examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors; copyright of the source code remains with those authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.