

Python tensorflow.merge_all_summaries Method Code Examples

This article collects typical usage examples of the tensorflow.merge_all_summaries method in Python. If you are wondering what tensorflow.merge_all_summaries does or how to call it, the curated code examples below may help. You can also explore further usage examples of other methods in the tensorflow module.


A total of 15 code examples of the tensorflow.merge_all_summaries method are shown below, sorted by popularity by default.
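
Before the examples, here is a minimal, self-contained sketch of the typical pattern the examples share: register summaries, merge them into a single op with tf.merge_all_summaries, and write the serialized result to a log directory for TensorBoard. This is a sketch, assuming an old TensorFlow release (roughly 0.12 and earlier) in which tf.merge_all_summaries, tf.scalar_summary, tf.train.SummaryWriter, and tf.initialize_all_variables still exist; in later releases the same pattern uses tf.summary.merge_all, tf.summary.scalar, tf.summary.FileWriter, and tf.global_variables_initializer. The placeholder x and the toy loss are illustrative only.

# Minimal usage sketch (assumes a TF release that still ships the deprecated
# tf.merge_all_summaries / tf.scalar_summary / tf.train.SummaryWriter APIs).
import tensorflow as tf

x = tf.placeholder(tf.float32, name="x")   # toy input, illustrative only
loss = tf.square(x)                        # toy "loss" tensor
tf.scalar_summary("loss", loss)            # register a scalar summary in the default collection

summary_op = tf.merge_all_summaries()      # merge every registered summary into one op

with tf.Session() as sess:
    writer = tf.train.SummaryWriter("./logs", sess.graph)
    sess.run(tf.initialize_all_variables())
    for step in range(10):
        summary_str = sess.run(summary_op, feed_dict={x: float(step)})
        writer.add_summary(summary_str, step)  # serialized Summary proto, viewable in TensorBoard
    writer.close()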

Example 1: train

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def train(self):
    self.train_op = self.optim.minimize(self.loss, global_step=self.global_step)
    self.writer = tf.train.SummaryWriter("./logs/D_pretrained", self.sess.graph)
    self.summary_op = tf.merge_all_summaries()
    tf.initialize_all_variables().run()
    self.saver = tf.train.Saver(var_list=self.D_params_dict, max_to_keep=self.max_to_keep)
    count = 0
    for idx in range(self.max_iter//3000):
        self.save(self.checkpoint_dir, count)
        self.evaluate('test', count)
        self.evaluate('train', count)
        for k in tqdm(range(3000)):
            right_images, right_text, _ = self.dataset.sequential_sample(self.batch_size)
            right_length = np.sum((right_text != self.NOT) + 0, 1)
            fake_images, fake_text, _ = self.negative_dataset.sequential_sample(self.batch_size)
            fake_length = np.sum((fake_text != self.NOT) + 0, 1)
            wrong_text = self.dataset.get_wrong_text(self.batch_size)
            wrong_length = np.sum((wrong_text != self.NOT) + 0, 1)
            feed_dict = {self.right_images: right_images, self.right_text: right_text, self.right_length: right_length,
                         self.fake_images: fake_images, self.fake_text: fake_text, self.fake_length: fake_length,
                         self.wrong_images: right_images, self.wrong_text: wrong_text, self.wrong_length: wrong_length}
            _, loss, summary_str = self.sess.run([self.train_op, self.loss, self.summary_op], feed_dict)
            self.writer.add_summary(summary_str, count)
            count += 1
Author: tsenghungchen, Project: show-adapt-and-tell, Lines: 27, Source: pretrain_LSTM_D.py

Example 2: setup_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def setup_summaries(self):
        episode_reward = tf.Variable(0.)
        s1 = tf.scalar_summary("Episode Reward " + str(self.actor_id), episode_reward)
        if self.alg_type == "a3c":
            summary_vars = [episode_reward]
        else:
            episode_ave_max_q = tf.Variable(0.)
            s2 = tf.scalar_summary("Max Q Value " + str(self.actor_id), episode_ave_max_q)
            logged_epsilon = tf.Variable(0.)
            s3 = tf.scalar_summary("Epsilon " + str(self.actor_id), logged_epsilon)
            summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
        summary_placeholders = [tf.placeholder("float") for _ in range(len(summary_vars))]
        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
        with tf.control_dependencies(update_ops):
            summary_ops = tf.merge_all_summaries()
        return summary_placeholders, update_ops, summary_ops 
Author: traai, Project: async-deep-rl, Lines: 18, Source: actor_learner.py

Example 3: init_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better do not merge ALL summaries, since otherwise we get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(self.train_network.summaries)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Author: tobiasfshr, Project: MOTSFusion, Lines: 24, Source: Trainer.py

Example 4: _add_train_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def _add_train_op(self):
    params = self._params

    self._lr_rate = tf.maximum(
        params.min_lr,
        tf.train.exponential_decay(params.lr, self._global_step, 30000, 0.98))

    tvars = tf.trainable_variables()
    # use reserved gpu for gradient computation
    with tf.device(self._get_gpu(self._num_gpus-1)):
      grads, global_norm = tf.clip_by_global_norm(
          tf.gradients(self._loss, tvars), params.max_grad_norm)
    tf.scalar_summary('global_norm', global_norm)
    optimizer = tf.train.AdamOptimizer(self._lr_rate)
    tf.scalar_summary('learning rate', self._lr_rate)
    with tf.device(self._next_device()):
      self._train_op = optimizer.apply_gradients(
          zip(grads, tvars), global_step=self._global_step, name='train_step')
    self._summaries = tf.merge_all_summaries()

    return self._train_op, self._loss, 
Author: marshmelloX, Project: dynamic-coattention-network, Lines: 23, Source: selector.py

Example 5: _init_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def _init_summaries(self):
        if self.is_train:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')

            self.summary_writer = tf.summary.FileWriter(logdir)
            self.summary_writer_by_points = [tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
                                             for i in range(16)]

            tf.scalar_summary('Average euclidean distance', self.euclidean_dist, collections = [KEY_SUMMARIES])

            for i in range(16):
                tf.scalar_summary('Joint euclidean distance', self.euclidean_dist_per_joint[i],
                                  collections = [KEY_SUMMARIES_PER_JOINT[i]])

            self.create_summary_from_weights()

            self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
            self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i]) for i in range(16)]
        else:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
            self.summary_writer = tf.summary.FileWriter(logdir) 
Author: marian-margeta, Project: gait-recognition, Lines: 23, Source: human_pose_nn.py

Example 6: testMergeAllSummaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def testMergeAllSummaries(self):
    with tf.Graph().as_default():
      const = tf.constant(10.0)
      summ1 = tf.summary.histogram("h", const)
      summ2 = tf.summary.scalar("o", const, collections=["foo_key"])
      summ3 = tf.summary.scalar("c", const)
      merge = tf.summary.merge_all()
      self.assertEqual("MergeSummary", merge.op.type)
      self.assertEqual(2, len(merge.op.inputs))
      self.assertEqual(summ1, merge.op.inputs[0])
      self.assertEqual(summ3, merge.op.inputs[1])
      merge = tf.merge_all_summaries("foo_key")
      self.assertEqual("MergeSummary", merge.op.type)
      self.assertEqual(1, len(merge.op.inputs))
      self.assertEqual(summ2, merge.op.inputs[0])
      self.assertTrue(tf.merge_all_summaries("bar_key") is None) 
Author: tobegit3hub, Project: deep_image_model, Lines: 18, Source: summary_ops_test.py

Example 7: build_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def build_summaries():
    episode_reward = tf.Variable(0.)
    scalar_summary("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    scalar_summary("Qmax Value", episode_ave_max_q)
    logged_epsilon = tf.Variable(0.)
    scalar_summary("Epsilon", logged_epsilon)
    # Threads shouldn't modify the main graph, so we use placeholders
    # to assign the value of every summary (instead of using assign method
    # in every thread, that would keep creating new ops in the graph)
    summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
    summary_placeholders = [tf.placeholder("float")
                            for i in range(len(summary_vars))]
    assign_ops = [summary_vars[i].assign(summary_placeholders[i])
                  for i in range(len(summary_vars))]
    summary_op = merge_all_summaries()
    return summary_placeholders, assign_ops, summary_op 
Author: limbo018, Project: FRU, Lines: 19, Source: atari_1step_qlearning.py

Example 8: init_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.str("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None and len(self.train_network.summaries) > 0:
        # better do not merge ALL summaries, since otherwise we get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        # atm we only collect summaries from the train network
        if grad_norm is None:
          summary_op = tf.summary.merge(self.train_network.summaries)
        else:
          #grad_norm = tf.Print(grad_norm, [grad_norm], "grad_norm")
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          summary_op = tf.summary.merge(self.train_network.summaries + [grad_norm_summary])
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Author: JonathonLuiten, Project: PReMVOS, Lines: 25, Source: Trainer.py

Example 9: init_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = None
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better do not merge ALL summaries, since otherwise we get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(train_summs)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Author: VisualComputingInstitute, Project: TrackR-CNN, Lines: 25, Source: Trainer.py

Example 10: initialize_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def initialize_graph(self, input_dim):
        self.input_dim = input_dim
        self._setup_base_graph()
        with self.graph.as_default():
            self.sess = tf.Session()
            self.init_op = tf.initialize_all_variables()
            self.summary = tf.merge_all_summaries()
            self.sess.run(self.init_op)
        self.initialized = True 
Author: mzhang001, Project: tfranknet, Lines: 11, Source: ranknet.py

Example 11: summary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def summary(self):
        if self.__summary is None:
            self.__summary = tf.merge_all_summaries(key='summaries')
        return self.__summary 
Author: UKPLab, Project: iwcs2017-answer-selection, Lines: 6, Source: __init__.py

Example 12: initialize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def initialize(self, log_dir="./logs"):
        self.merged_sum = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter(log_dir, self.sess.graph_def)

        tf.initialize_all_variables().run()
        self.load(self.checkpoint_dir)

        start_iter = self.step.eval() 
Author: nitishgupta, Project: neural-el, Lines: 10, Source: base.py

Example 13: evaluate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels, _ = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)
    
    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
Author: Cyber-Neuron, Project: inception_v3, Lines: 38, Source: inception_eval.py

Example 14: run

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def run(self):
        self.session = tf.Session()
#         self.session = tf.Session(config=tf.ConfigProto(
#              inter_op_parallelism_threads=1,
#              intra_op_parallelism_threads=1))

        if (self.actor_id==0):
            # Initialize TensorBoard summaries
            self.summary_op = tf.merge_all_summaries()
            self.summary_writer = tf.train.SummaryWriter(
                            "{}/{}".format(self.summ_base_dir, self.actor_id), self.session.graph_def) 

            # Initialize network parameters
            g_step = utils.restore_vars(self.saver, self.session, self.game, self.alg_type, self.max_local_steps)
            self.global_step.val.value = g_step
            self.last_saving_step = g_step   
            logger.debug("T{}: Initializing shared memory...".format(self.actor_id))
            self.init_shared_memory()

        # Wait until actor 0 finishes initializing shared memory
        self.barrier.wait()
        
        if self.actor_id > 0:
            logger.debug("T{}: Syncing with shared memory...".format(self.actor_id))
            self.sync_net_with_shared_memory(self.local_network, self.learning_vars)  
            if self.alg_type != "a3c":
                self.sync_net_with_shared_memory(self.target_network, self.target_vars)

        # Wait until all actors are ready to start 
        self.barrier.wait()
        
        # Introduce a different start delay for each actor so that they do not run in lockstep.
        # This avoids concurrent updates of the shared parameters as much as possible.
        time.sleep(0.1877 * self.actor_id) 
Author: traai, Project: async-deep-rl, Lines: 36, Source: actor_learner.py

Example 15: build_graph

# Required import: import tensorflow [as alias]
# Or: from tensorflow import merge_all_summaries [as alias]
def build_graph(self):
    self._add_placeholders()
    self._build_encoder()
    self._build_decoder()
    if self._params.mode != 'decode':
      alpha_true, beta_true = tf.split(0, 2, self._answers)
      self._global_step = tf.Variable(0, name='global_step', trainable=False)
      self._loss = self._loss_multitask(self._alpha, alpha_true,
                                        self._beta, beta_true)
    if self._params.mode == 'train':
      self._add_train_op()
    self._summaries = tf.merge_all_summaries()
    tf.logging.info('graph built...') 
Author: marshmelloX, Project: dynamic-coattention-network, Lines: 15, Source: selector.py


Note: The tensorflow.merge_all_summaries examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.