

Python tensorflow.scan Method Code Examples

This article collects typical usage examples of the Python method tensorflow.scan from open-source projects. If you are unsure what tensorflow.scan does, how to call it, or where it is useful, the curated code examples below should help. You can also explore further usage examples from the tensorflow module it belongs to.


Fifteen code examples of tensorflow.scan are shown below, sorted by popularity by default.
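
For readers new to the API: tf.scan applies a binary step function cumulatively along axis 0 of its inputs, threading an accumulator from step to step. A minimal sketch of the basic call (TF1-style, matching the examples below):

import tensorflow as tf

elems = tf.constant([1.0, 2.0, 3.0, 4.0])
# out[0] = fn(initializer, elems[0]); out[t] = fn(out[t-1], elems[t])
running_sum = tf.scan(lambda acc, x: acc + x, elems, initializer=tf.constant(0.0))

with tf.Session() as sess:
    print(sess.run(running_sum))  # [ 1.  3.  6. 10.]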

Example 1: multi_step

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def multi_step(self, all_obs, initial_state, all_actions):
    """Calculate log-probs and other calculations on batch of episodes."""
    batch_size = tf.shape(initial_state)[0]
    time_length = tf.shape(all_obs[0])[0]
    initial_actions = [act[0] for act in all_actions]
    all_actions = [tf.concat([act[1:], act[0:1]], 0)
                   for act in all_actions]  # "final" action is dummy

    (internal_states, _, logits, log_probs,
     entropies, self_kls) = tf.scan(
        self.single_step,
        (all_obs, all_actions),
        initializer=self.get_initializer(
            batch_size, initial_state, initial_actions))

    # remove "final" computations
    log_probs = [log_prob[:-1] for log_prob in log_probs]
    entropies = [entropy[:-1] for entropy in entropies]
    self_kls = [self_kl[:-1] for self_kl in self_kls]

    return internal_states, logits, log_probs, entropies, self_kls 
Author: ringringyi, Project: DOTA_models, Lines: 23, Source: policy.py
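
The example above scans over a tuple of element tensors with a structured initializer; tf.scan requires the step function's output to match the initializer's structure and dtypes. A minimal self-contained sketch of that pattern (all names and shapes here are hypothetical, not taken from the project):

import tensorflow as tf

obs = tf.random_normal([5, 2, 3])  # (time, batch, obs_dim)
act = tf.random_normal([5, 2, 4])  # (time, batch, act_dim)

def step(state, elems):
    o, a = elems  # one timestep slice of each element tensor
    return (state[0] + tf.reduce_mean(o), state[1] + tf.reduce_mean(a))

init = (tf.constant(0.0), tf.constant(0.0))
sums = tf.scan(step, (obs, act), initializer=init)  # tuple of two (5,) tensors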

Example 2: _update_value

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def _update_value(self, observ, reward, length):
    """Perform multiple update steps of the value baseline.

    Only one summary can be returned per call, so we choose the one produced
    after half of the update iterations.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('update_value'):
      loss, summary = tf.scan(
          lambda _1, _2: self._update_value_step(observ, reward, length),
          tf.range(self._config.update_epochs_value),
          [0., ''], parallel_iterations=1)
      print_loss = tf.Print(0, [tf.reduce_mean(loss)], 'value loss: ')
      with tf.control_dependencies([loss, print_loss]):
        return summary[self._config.update_epochs_value // 2] 
Author: utra-robosoccer, Project: soccer-matlab, Lines: 24, Source: algorithm.py
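
Note the trick in this example: scanning over tf.range simply repeats a stateful step a fixed number of times while collecting every step's outputs, with the loop index and accumulator ignored. A stripped-down sketch of the same pattern (a hypothetical variable, not the project's code; the printed values assume parallel_iterations=1 serializes the updates):

import tensorflow as tf

x = tf.Variable(0.0)
step = lambda _acc, _i: x.assign_add(1.0)  # ignore accumulator and index
values = tf.scan(step, tf.range(5), initializer=tf.constant(0.0),
                 parallel_iterations=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(values))  # [1. 2. 3. 4. 5.]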

Example 3: calculate_generalized_advantage_estimator

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  """Generalized advantage estimator."""

  # Slight weirdness below: we replace the last reward with the last value
  # estimate and zero the next value, so the advantage at the final timestep is 0.
  reward = tf.concat([reward[:-1, :], value[-1:, :]], axis=0)
  next_value = tf.concat([value[1:, :], tf.zeros_like(value[-1:, :])], axis=0)
  next_not_done = 1 - tf.cast(tf.concat([done[1:, :],
                                         tf.zeros_like(done[-1:, :])], axis=0),
                              tf.float32)
  delta = reward + gae_gamma * next_value * next_not_done - value

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return") 
Author: akzaidi, Project: fine-lm, Lines: 21, Source: ppo.py
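
For reference, the reversed scan implements the standard GAE recursion adv[t] = delta[t] + gamma * lambda * next_not_done[t] * adv[t+1]. A plain-NumPy cross-check of that recursion (a verification sketch, not part of the original code):

import numpy as np

def gae_reference(delta, next_not_done, gamma, lam):
    # delta, next_not_done: arrays of shape (time, batch)
    adv = np.zeros_like(delta)
    agg = np.zeros_like(delta[0])
    for t in reversed(range(delta.shape[0])):
        agg = delta[t] + next_not_done[t] * gamma * lam * agg
        adv[t] = agg
    return adv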

Example 4: simulate

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def simulate(self, action):
    with tf.name_scope("environment/simulate"):  # Do we need this?
      initializer = (tf.zeros_like(self._observ),
                     tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

      def not_done_step(a, _):
        reward, done = self._batch_env.simulate(action)
        with tf.control_dependencies([reward, done]):
          # TODO(piotrmilos): possibly ignore envs with done
          r0 = tf.maximum(a[0], self._batch_env.observ)
          r1 = tf.add(a[1], reward)
          r2 = tf.logical_or(a[2], done)

          return (r0, r1, r2)

      simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                             initializer=initializer, parallel_iterations=1,
                             infer_shape=False)
      simulate_ret = [ret[-1, ...] for ret in simulate_ret]

      with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
        return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2]) 
Author: akzaidi, Project: fine-lm, Lines: 24, Source: tf_atari_wrappers.py
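
The accumulator here carries three things across the skipped frames: the element-wise max of observations, the running reward sum, and the OR of done flags. A self-contained sketch of that accumulation (shapes are hypothetical):

import tensorflow as tf

k = 4                                   # frames to skip
obs_seq = tf.random_uniform([k, 2, 3])  # (skip, batch, obs_dim)
rew_seq = tf.ones([k, 2])
done_seq = tf.fill([k, 2], False)

def accumulate(acc, elems):
    o, r, d = elems
    return (tf.maximum(acc[0], o), acc[1] + r, tf.logical_or(acc[2], d))

init = (tf.zeros([2, 3]), tf.zeros([2]), tf.fill([2], False))
pooled = tf.scan(accumulate, (obs_seq, rew_seq, done_seq), initializer=init)
pooled = [t[-1] for t in pooled]        # keep only the final accumulator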

Example 5: omniglot

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V, with index(:,dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)
        def body(_, (v, d2, chg)):
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat_v2([v[:d2_int],[chg] ,v[d2_int+1:]], axis=0), [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val), initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32), name="Scan_Update")
        return Z 
Author: hmishra2250, Project: NTM-One-Shot-TF, Lines: 27, Source: TestUpd.py
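
A hypothetical usage of update_tensor (assuming it is lifted to module scope), writing one value per row of a small matrix; note that the column indices arrive as floats and are cast to int32 inside the body:

V = tf.zeros([3, 4])
dim2 = tf.constant([1.0, 2.0, 3.0])  # target column per row
val = tf.constant([9.0, 8.0, 7.0])
updated = update_tensor(V, dim2, val)
# row i of `updated` equals row i of V with column dim2[i] set to val[i]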

Example 6: backward_step_fn

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def backward_step_fn(self, params, inputs):
        """
        Backwards step over a batch, to be used in tf.scan
        :param params:
        :param inputs: (batch_size, variable dimensions)
        :return:
        """
        mu_back, Sigma_back = params
        mu_pred_tp1, Sigma_pred_tp1, mu_filt_t, Sigma_filt_t, A = inputs

        # J_t = tf.matmul(tf.reshape(tf.transpose(tf.matrix_inverse(Sigma_pred_tp1), [0, 2, 1]), [-1, self.dim_z]),
        #                 self.A)
        # J_t = tf.transpose(tf.reshape(J_t, [-1, self.dim_z, self.dim_z]), [0, 2, 1])
        J_t = tf.matmul(tf.transpose(A, [0, 2, 1]), tf.matrix_inverse(Sigma_pred_tp1))
        J_t = tf.matmul(Sigma_filt_t, J_t)

        mu_back = mu_filt_t + tf.matmul(J_t, mu_back - mu_pred_tp1)
        Sigma_back = Sigma_filt_t + tf.matmul(J_t, tf.matmul(Sigma_back - Sigma_pred_tp1, J_t, adjoint_b=True))

        return mu_back, Sigma_back 
Author: simonkamronn, Project: kvae, Lines: 22, Source: filter.py

Example 7: compute_forwards

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def compute_forwards(self, reuse=None):
        """Compute the forward step in the Kalman filter.
           The forward pass is intialized with p(z_1)=N(self.mu, self.Sigma).
           We then return the mean and covariances of the predictive distribution p(z_t|z_tm1,u_t), t=2,..T+1
           and the filtering distribution p(z_t|x_1:t,u_1:t), t=1,..T
           We follow the notation of Murphy's book, section 18.3.1
        """

        # To make sure we are not accidentally using the real outputs in the steps with missing values, set them to 0.
        y_masked = tf.multiply(tf.expand_dims(self.mask, 2), self.y)
        inputs = tf.concat([y_masked, self.u, tf.expand_dims(self.mask, 2)], axis=2)

        y_prev = tf.expand_dims(self.y_0, 0)  # (1, dim_y)
        y_prev = tf.tile(y_prev, (tf.shape(self.mu)[0], 1))
        alpha, state, u, buffer = self.alpha(y_prev, self.state, self.u[:, 0], init_buffer=True, reuse=reuse)

        # dummy matrix to initialize B and C in scan
        dummy_init_A = tf.ones([self.Sigma.get_shape()[0], self.dim_z, self.dim_z])
        dummy_init_B = tf.ones([self.Sigma.get_shape()[0], self.dim_z, self.dim_u])
        dummy_init_C = tf.ones([self.Sigma.get_shape()[0], self.dim_y, self.dim_z])
        forward_states = tf.scan(self.forward_step_fn, tf.transpose(inputs, [1, 0, 2]),
                                 initializer=(self.mu, self.Sigma, self.mu, self.Sigma, alpha, u, state, buffer,
                                              dummy_init_A, dummy_init_B, dummy_init_C),
                                 parallel_iterations=1, name='forward')
        return forward_states 
Author: simonkamronn, Project: kvae, Lines: 27, Source: filter.py

Example 8: _discount_reward_tensor_2d

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def _discount_reward_tensor_2d(reward, sequence_length=None,
                               discount=1., dtype=None):
    if sequence_length is not None:
        reward = mask_sequences(
            reward, sequence_length, dtype=dtype, tensor_rank=2)

    if discount == 1.:
        disc_reward = tf.cumsum(reward, axis=1, reverse=True)
    else:
        # [max_time, batch_size]
        rev_reward_T = tf.transpose(tf.reverse(reward, [1]), [1, 0])
        rev_reward_T_cum = tf.scan(
            fn=lambda acc, cur: cur + discount * acc,
            elems=rev_reward_T,
            initializer=tf.zeros_like(reward[:, 1]),
            back_prop=False)
        disc_reward = tf.reverse(
            tf.transpose(rev_reward_T_cum, [1, 0]), [1])

    return disc_reward 
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 22, Source: rewards.py
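
A quick sanity check of the discount branch on a single sequence, with the expected values computed by hand:

reward = tf.constant([[1.0, 1.0, 1.0]])  # (batch=1, max_time=3)
disc = _discount_reward_tensor_2d(reward, discount=0.5)
# disc == [[1.75, 1.5, 1.0]], since each entry is r_t + 0.5*r_{t+1} + 0.25*r_{t+2} + ...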

Example 9: calculate_generalized_advantage_estimator

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args

  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return") 
Author: yyht, Project: BERT, Lines: 24, Source: ppo.py

Example 10: additive_walk_embedding

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def additive_walk_embedding(predicate_embeddings):
    """
    Takes a walk, represented by a 3D Tensor with shape (batch_size, walk_length, embedding_length),
    and computes its embedding using a simple additive model.
    This method is roughly equivalent to:
    > walk_embedding = tf.reduce_sum(predicate_embeddings, axis=1)

    :param predicate_embeddings: 3D Tensor containing the embedding of the predicates in the walk.
    :return: 2D tensor of size (batch_size, embedding_length) containing the walk embeddings.
    """
    batch_size, embedding_len = tf.shape(predicate_embeddings)[0], tf.shape(predicate_embeddings)[2]

    # Transpose the (batch_size, walk_length, n) Tensor in a (walk_length, batch_size, n) Tensor
    transposed_embedding_matrix = tf.transpose(predicate_embeddings, perm=[1, 0, 2])

    # Define the initializer of the scan procedure - an all-zeros matrix
    initializer = tf.zeros((batch_size, embedding_len), dtype=predicate_embeddings.dtype)

    # The walk embeddings are given by the sum of the predicate embeddings
    # where zero is the neutral element wrt. the element-wise sum
    walk_embedding = tf.scan(lambda x, y: x + y, transposed_embedding_matrix, initializer=initializer)

    # Add the initializer as the first step in the scan sequence, in case the walk has length zero
    return tf.concat(values=[tf.expand_dims(initializer, 0), walk_embedding], axis=0)[-1] 
Author: uclnlp, Project: inferbeddings, Lines: 26, Source: embeddings.py
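
A hypothetical usage, for a batch of 2 walks of length 3 with embedding size 4:

predicate_embeddings = tf.random_normal([2, 3, 4])
walk = additive_walk_embedding(predicate_embeddings)  # shape (2, 4)
# numerically equal to tf.reduce_sum(predicate_embeddings, axis=1)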

Example 11: bilinear_diagonal_walk_embedding

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def bilinear_diagonal_walk_embedding(predicate_embeddings):
    """
    Takes a walk, represented by a 3D Tensor with shape (batch_size, walk_length, embedding_length),
    and computes its embedding using a simple bilinear diagonal model.
    This method is roughly equivalent to:
    > walk_embedding = tf.reduce_prod(predicate_embeddings, axis=1)

    :param predicate_embeddings: 3D Tensor containing the embedding of the predicates in the walk.
    :return: 2D tensor of size (batch_size, embedding_length) containing the walk embeddings.
    """
    batch_size, embedding_len = tf.shape(predicate_embeddings)[0], tf.shape(predicate_embeddings)[2]

    # Transpose the (batch_size, walk_length, n) Tensor in a (walk_length, batch_size, n) Tensor
    transposed_embedding_matrix = tf.transpose(predicate_embeddings, perm=[1, 0, 2])

    # Define the initializer of the scan procedure - an all-ones matrix
    # where one is the neutral element wrt. the element-wise product
    initializer = tf.ones((batch_size, embedding_len), dtype=predicate_embeddings.dtype)

    # The walk embeddings are given by the element-wise product of the predicate embeddings
    walk_embedding = tf.scan(lambda x, y: x * y, transposed_embedding_matrix, initializer=initializer)

    # Add the initializer as the first step in the scan sequence, in case the walk has length zero
    return tf.concat(values=[tf.expand_dims(initializer, 0), walk_embedding], axis=0)[-1] 
Author: uclnlp, Project: inferbeddings, Lines: 26, Source: embeddings.py

Example 12: testScan_Scoped

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def testScan_Scoped(self):
    with self.test_session() as sess:
      with tf.variable_scope("root") as varscope:
        elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")

        r = tf.scan(simple_scoped_fn, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(tf.trainable_variables()), 1)
        self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
        sess.run([tf.global_variables_initializer()])
        results = np.array([1, 6, 18, 44, 98, 208])
        self.assertAllEqual(results, r.eval())

        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = tf.scan(simple_scoped_fn, elems, initializer=2)
        self.assertEqual(len(tf.trainable_variables()), 1)
        results = np.array([6, 16, 38, 84, 178, 368])
        self.assertAllEqual(results, r.eval()) 
Author: tobegit3hub, Project: deep_image_model, Lines: 21, Source: functional_ops_test.py
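
The test relies on simple_scoped_fn, which is defined elsewhere in the same test file. A sketch consistent with the expected outputs above (each step computes 2 * (a + x), with the 2 stored as a variable under scope "body"):

def simple_scoped_fn(a, x):
    """2 * (a + x), with the multiplier held in a scoped variable "two"."""
    with tf.variable_scope("body"):
        two = tf.get_variable("two", [], dtype=tf.int32,
                              initializer=tf.constant_initializer(2))
        return tf.multiply(tf.add(a, x), two)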

Example 13: testScanFoldl_Nested

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def testScanFoldl_Nested(self):
    with self.test_session():
      elems = tf.constant([1.0, 2.0, 3.0, 4.0], name="data")
      inner_elems = tf.constant([0.5, 0.5], name="data")

      def r_inner(a, x):
        return tf.foldl(lambda b, y: b * y * x, inner_elems, initializer=a)

      r = tf.scan(r_inner, elems)

      # t == 0 (returns 1)
      # t == 1, a == 1, x == 2 (returns 1)
      #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
      #   t_1 == 1, b == 1,      y == 0.5, returns b * y * x = 1
      # t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
      #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
      #   t_1 == 1, b == 1.5,    y == 0.5, returns b * y * x = 1.5*1.5
      # t == 3, a == 2.25, x == 4 (returns 9)
      #   t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
      #   t_1 == 1, b == 4.5,       y == 0.5, returns b * y * x = 9
      self.assertAllClose([1., 1., 2.25, 9.], r.eval()) 
Author: tobegit3hub, Project: deep_image_model, Lines: 23, Source: functional_ops_test.py

Example 14: discounted_return

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def discounted_return(reward, discount, bootstrap, axis, stop_gradient=True):
  """Discounted Monte Carlo return."""
  if discount == 1 and bootstrap is None:
    return tf.reduce_sum(reward, axis)
  if discount == 1:
    return tf.reduce_sum(reward, axis) + bootstrap
  # Bring the aggregation dimension front.
  dims = list(range(reward.shape.ndims))
  dims = [axis] + dims[1:axis] + [0] + dims[axis + 1:]
  reward = tf.transpose(reward, dims)
  if bootstrap is None:
    bootstrap = tf.zeros_like(reward[-1])
  return_ = tf.scan(
      fn=lambda agg, cur: cur + discount * agg,
      elems=reward,
      initializer=bootstrap,
      back_prop=not stop_gradient,
      reverse=True)
  return_ = tf.transpose(return_, dims)
  if stop_gradient:
    return_ = tf.stop_gradient(return_)
  return return_ 
Author: google-research, Project: planet, Lines: 24, Source: temporal_difference.py
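
Two caveats worth noting: the reverse= keyword is only available in newer TF1 releases of tf.scan, and the permutation arithmetic above assumes the aggregation axis is not already leading (axis >= 1). A minimal usage sketch with batch-major rewards:

reward = tf.ones([2, 4])  # (batch, time)
ret = discounted_return(reward, discount=0.9, bootstrap=None, axis=1)
# each row is [3.439, 2.71, 1.9, 1.0]: ret[t] = reward[t] + 0.9 * ret[t+1]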

Example 15: discounted_return

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import scan [as alias]
def discounted_return(reward, length, discount):
  """Discounted Monte-Carlo returns."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  return_ = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur + discount * agg,
      tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
      tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(return_), 'return') 
Author: utra-robosoccer, Project: soccer-matlab, Lines: 11, Source: utility.py


Note: The tensorflow.scan examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors; copyright remains with those authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.