

Python v1.Print Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.Print. If you are wondering what v1.Print does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore other usage examples from the tensorflow.compat.v1 module.


Below are 13 code examples of the v1.Print method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
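Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic tf.compat.v1.Print pattern: the op returns a tensor with the same value as its first argument and, as a side effect, prints the given tensors whenever that result is evaluated in a session. Note that tf.Print is deprecated in TF 2.x in favor of tf.print.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # tf.Print is a graph-mode (TF1-style) op

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# x_printed evaluates to the same values as x; the shape and mean are
# printed to stderr each time it is run (at most first_n times).
x_printed = tf.Print(x, [tf.shape(x), tf.reduce_mean(x)],
                     message='shape, mean: ', first_n=5, summarize=10)

with tf.Session() as sess:
    sess.run(x_printed)  # prints the shape and mean, returns x's values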

Example 1: decode_jpeg

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
  """Decode a JPEG string into one 3-D float image Tensor.

  Args:
    image_buffer: scalar string Tensor.
    scope: Optional scope for op_scope.
  Returns:
    3-D float Tensor with values ranging from [0, 1).
  """
  # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
  # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
  with tf.name_scope(scope or 'decode_jpeg'):
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3,
                                 fancy_upscaling=False,
                                 dct_method='INTEGER_FAST')

    # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')

    return image 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 25, Source file: preprocessing.py

Example 2: Print

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def Print(self, x, data, message, **kwargs):  # pylint: disable=invalid-name
    """call tf.Print.

    Args:
      x: a LaidOutTensor
      data: a list of LaidOutTensor
      message: a string
      **kwargs: keyword arguments to tf.Print
    Returns:
      a LaidOutTensor
    """
    tf.logging.info("PlacementMeshImpl::Print")
    x = x.to_laid_out_tensor()
    new_slices = x.tensor_list[:]
    with tf.device(self._devices[0]):
      new_slices[0] = tf.Print(
          new_slices[0], [t for d in data for t in d.tensor_list],
          message, **kwargs)
    return self.LaidOutTensor(new_slices) 
Developer ID: tensorflow, Project: mesh, Lines of code: 21, Source file: placement_mesh_impl.py

Example 3: print_text

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def print_text(tf_sequences, vocab, use_bpe=False, predict_mode=False):
  """Print text."""
  def _print_separator():
    if not predict_mode:
      tf.logging.info("=" * 80)
  print_ops = [tf.py_func(_print_separator, [], [])]
  for name, tf_sequence, tf_length, convert2txt in tf_sequences:
    def _do_print(n, sequence, lengths, to_txt):
      if to_txt:
        s = sequence[0][:lengths[0]]
        output = id2text(s, vocab, use_bpe=use_bpe)
      else:
        output = " ".join(sequence[0])
      if not predict_mode:
        tf.logging.info("%s: %s", n, output)

    with tf.control_dependencies(print_ops):
      print_ops.append(tf.py_func(
          _do_print, [name, tf_sequence, tf_length, convert2txt], []))
  with tf.control_dependencies(print_ops):
    return tf.py_func(_print_separator, [], []) 
Developer ID: google-research, Project: language, Lines of code: 23, Source file: common.py

Example 4: _build_select_slate_op

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def _build_select_slate_op(self):
    p_no_click = self._prob_no_click_ph
    p = self._doc_affinity_scores_ph
    q = self._net_outputs.q_values[0]
    with tf.name_scope('select_slate'):
      self._output_slate = self._select_slate_fn(self._slate_size, p_no_click,
                                                 p, q)

    self._output_slate = tf.Print(
        self._output_slate, [tf.constant('cp 1'), self._output_slate, p, q],
        summarize=10000)
    self._output_slate = tf.reshape(self._output_slate, (self._slate_size,))

    self._action_counts = tf.get_variable(
        'action_counts',
        shape=[self._num_candidates],
        initializer=tf.zeros_initializer())
    output_slate = tf.reshape(self._output_slate, [-1])
    output_one_hot = tf.one_hot(output_slate, self._num_candidates)
    update_ops = []
    for i in range(self._slate_size):
      update_ops.append(tf.assign_add(self._action_counts, output_one_hot[i]))
    self._select_action_update_op = tf.group(*update_ops) 
Developer ID: google-research, Project: recsim, Lines of code: 25, Source file: slate_decomp_q_agent.py

Example 5: body

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def body(self, features):
    exp_coupling = ["affine", "additive"]
    if self.hparams.coupling not in exp_coupling:
      raise ValueError("Expected hparams.coupling to be in %s, got %s" %
                       (exp_coupling, self.hparams.coupling))
    if self.is_training:
      init_features = self.create_init_batch(features)
      init_op = self.objective_tower(init_features, init=True)
      init_op = tf.Print(
          init_op, [init_op], message="Triggering data-dependent init.",
          first_n=20)
      tf.add_to_collection("glow_init_op", init_op)
    train_op = self.objective_tower(features, init=False)
    return tf.zeros_like(features["targets"]), {"training": train_op} 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 16, Source file: glow.py

Example 6: debugprint

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def debugprint(x, name=''):
  """Small wrapper for tf.Print which prints summary statistics."""
  name += '\t' + x.name
  return tf.Print(x,
                  [tf.reduce_min(x), tf.reduce_mean(x), tf.reduce_max(x)],
                  name) 
Developer ID: magenta, Project: magenta, Lines of code: 8, Source file: layers.py

Example 7: benchmark_handwritten

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def benchmark_handwritten(self):
    with tf.Graph().as_default():
      ds, opt, hp, w, b = get_data_and_params()
      iterator = ds.make_one_shot_iterator()

      def loop_body(i, unused_previous_loss_t):
        """Manual implementation of training loop."""
        # Call get_next() inside body or else training happens repeatedly on
        # the first minibatch only.
        x, y = iterator.get_next()
        loss_t = loss_fn(x, y, w, b)
        train_op = opt.minimize(loss_t, var_list=(w, b))
        i = tf.cond(tf.equal(i % 100, 0),
                    lambda: tf.Print(i, [i, loss_t], message='Step, loss: '),
                    lambda: i)

        with tf.control_dependencies([train_op]):
          return i + 1, loss_t

      _, final_loss_t = tf.while_loop(
          lambda i, _: i < hp.train_steps,
          loop_body,
          [tf.constant(0), tf.constant(0.0)])

      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        def target():
          loss_val = sess.run(final_loss_t)
          assert 0.1 < loss_val < 1, loss_val

        self.time_execution(
            'Handwritten',
            target,
            iter_volume=hp.train_steps,
            iter_unit='training steps') 
Developer ID: tensorflow, Project: autograph, Lines of code: 38, Source file: mnist_benchmark.py

Example 8: print_dataset

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def print_dataset(dataset):
  """tf.Print dataset fields for debugging purposes."""
  def my_fn(x):
    return {k: tf.Print(v, [v], k + ": ") for k, v in x.items()}
  return dataset.map(my_fn) 
Developer ID: google-research, Project: text-to-text-transfer-transformer, Lines of code: 7, Source file: utils.py

Example 9: maybe_print_dataset

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def maybe_print_dataset(dataset, should_print=False):
  """tf.Print dataset for debugging purposes."""
  return print_dataset(dataset) if should_print else dataset 
Developer ID: google-research, Project: text-to-text-transfer-transformer, Lines of code: 5, Source file: utils.py

Example 10: call

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def call(self, inputs, modulation=None):
    mean, variance = self._get_moments(inputs)
    # inputs = tf.Print(inputs, [mean, variance, self.beta, self.gamma], "NORM")
    return tf.nn.batch_normalization(
        inputs, mean, variance, self.beta, self.gamma, self.epsilon,
        name="normalize") 
Developer ID: tensorflow, Project: compression, Lines of code: 8, Source file: archs.py

Example 11: simulate

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def simulate(self, action):
    with tf.name_scope("environment/simulate"):
      actions = tf.concat([tf.expand_dims(action, axis=1)] * self._num_frames,
                          axis=1)
      history = self.history_buffer.get_all_elements()
      with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        # We only need 1 target frame here, set it.
        hparams_target_frames = self._model.hparams.video_num_target_frames
        self._model.hparams.video_num_target_frames = 1
        model_output = self._model.infer({
            "inputs": history,
            "input_action": actions,
            "reset_internal_states": self._reset_model.read_value()
        })
        self._model.hparams.video_num_target_frames = hparams_target_frames

      observ = tf.cast(tf.squeeze(model_output["targets"], axis=1),
                       self.observ_dtype)

      reward = tf.to_float(model_output["target_reward"])
      reward = tf.reshape(reward, shape=(self.batch_size,)) + self._min_reward

      if self._intrinsic_reward_scale:
        # Use the model's uncertainty about its prediction as an intrinsic
        # reward. The uncertainty is measured by the log probability of the
        # predicted pixel value.
        if "targets_logits" not in model_output:
          raise ValueError("The use of intrinsic rewards requires access to "
                           "the logits. Ensure that model.infer returns "
                           "'targets_logits'")
        uncertainty_reward = compute_uncertainty_reward(
            model_output["targets_logits"], model_output["targets"])
        uncertainty_reward = tf.minimum(
            1., self._intrinsic_reward_scale * uncertainty_reward)
        uncertainty_reward = tf.Print(uncertainty_reward, [uncertainty_reward],
                                      message="uncertainty_reward", first_n=1,
                                      summarize=8)
        reward += uncertainty_reward

      done = tf.constant(False, tf.bool, shape=(self.batch_size,))

      with tf.control_dependencies([observ]):
        dump_frame_op = tf.cond(self._video_condition,
                                lambda: tf.py_func(self._video_dump_frame,  # pylint: disable=g-long-lambda
                                                   [observ, reward], []),
                                tf.no_op)
        with tf.control_dependencies(
            [self._observ.assign(observ),
             self.history_buffer.move_by_one_element(observ), dump_frame_op]):
          clear_reset_model_op = tf.assign(self._reset_model, tf.constant(0.0))
          with tf.control_dependencies([clear_reset_model_op]):
            return tf.identity(reward), tf.identity(done) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 54, Source file: simulated_batch_env.py

Example 12: sample

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def sample(news_config: GroverConfig, initial_context, eos_token, min_len, ignore_ids=None, p_for_topp=0.95,
           do_topk=False):
    """
    V1 version of: sample outputs from a model, and do it all at once
    :param news_config: Configuration used to construct the model
    :param initial_context: [batch_size, seq_length] that we'll start generating with
    :param eos_token: Stop generating if you see this (tf scalar)
    :param min_len: min length of sample
    :param ignore_ids: NEVER GENERATE THESE [vocab_size]
    :return:
    """
    batch_size, _ = get_shape_list(initial_context, expected_rank=2)

    if ignore_ids is None:
        ignore_ids = tf.constant([x == 0 for x in range(news_config.vocab_size)], dtype=tf.bool)

    with tf.name_scope('sample_sequence'):
        # Initial call to get cache
        context_output = initialize_from_context(initial_context, ignore_ids=ignore_ids, news_config=news_config,
                                                 p_for_topp=p_for_topp,
                                                 do_topk=do_topk)
        ctx = context_output['tokens']
        cache = context_output['cache']
        probs = context_output['probs']

        def body(ctx, cache, probs):
            """ for whatever reason this didn't work when I ran it on more than one at once... ugh."""
            next_outputs = sample_step(ctx[:, -1][:, None], ignore_ids=ignore_ids, news_config=news_config,
                                       batch_size=batch_size, p_for_topp=p_for_topp, cache=cache,
                                       do_topk=do_topk)

            # Update everything
            new_cache = tf.concat([cache, next_outputs['new_cache']], axis=-2)
            new_ids = tf.concat([ctx, next_outputs['new_tokens'][:, None]], axis=1)
            new_probs = tf.concat([probs, next_outputs['new_probs'][:, None]], axis=1)
            return [new_ids, new_cache, new_probs]

        def cond(ctx, cache, probs):
            # ctx = tf.Print(ctx,[tf.shape(ctx)])
            is_eos = tf.reduce_all(tf.reduce_any(tf.equal(ctx[:,-1:], eos_token), axis=1))
            is_len = tf.greater(get_shape_list(ctx)[1], min_len)
            return tf.logical_not(tf.logical_and(is_eos, is_len))

        tokens, cache, probs = tf.while_loop(
            cond=cond, body=body, maximum_iterations=1025 - get_shape_list(ctx)[1],
            loop_vars=[ctx, cache, probs],
            shape_invariants=[tf.TensorShape([batch_size, None]),
                              tf.TensorShape(
                                  [batch_size, news_config.num_hidden_layers, 2,
                                   news_config.num_attention_heads,
                                   None, news_config.hidden_size // news_config.num_attention_heads]),
                              tf.TensorShape([batch_size, None]),
                              ],
            back_prop=False,
        )
    return tokens, probs 
Developer ID: imcaspar, Project: gpt2-ml, Lines of code: 58, Source file: modeling.py

Example 13: optimize_log_loss

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Print [as alias]
def optimize_log_loss(decoder_tgt, decoder_outputs, weights, hps):
  """Optimize log loss.

  Args:
    decoder_tgt: gold output token ids. [batch_size, len]
    decoder_outputs: predictions. [batch_size, len, vocab_size]
    weights: [batch_size, len] Mask.
    hps: hyperparams

  Returns:
    loss: Loss.
    train_op: Tensorflow Op for updating parameters.
  """

  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=decoder_tgt,
      logits=decoder_outputs.rnn_output)
  loss = tf.reduce_mean(loss * weights)
  # loss = tf.Print(loss, [loss])

  global_step = tf.train.get_global_step()
  values = [hps.learning_rate,
            hps.learning_rate / 5.,
            hps.learning_rate / 10.,
            hps.learning_rate / 25.,
            hps.learning_rate / 50.]
  boundaries = [hps.lr_schedule,
                int(hps.lr_schedule*1.5),
                hps.lr_schedule*2,
                int(hps.lr_schedule*2.5)]
  learning_rate = tf.train.piecewise_constant(
      global_step, boundaries, values)

  assert hps.trainer == "adam", "Only supporting Adam now."

  trainable_var_list = tf.trainable_variables()
  grads = tf.gradients(loss, trainable_var_list)
  gvs = list(zip(grads, trainable_var_list))

  grads = [g for g, _ in gvs]
  train_op = adam.adam(
      trainable_var_list,
      grads,
      learning_rate,
      partial(adam.warmup_constant),
      hps.total_steps,
      weight_decay=hps.weight_decay,
      max_grad_norm=hps.max_grad_norm,
      bias_l2=True)

  return loss, train_op 
Developer ID: google-research, Project: language, Lines of code: 53, Source file: common.py


Note: The tensorflow.compat.v1.Print method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.