

Python tensorflow.multinomial Function Code Examples

This article collects typical usage examples of the tensorflow.multinomial function in Python. If you are wondering what multinomial does, how to call it, or what it looks like in real code, the curated examples below should help.


Fifteen code examples of the multinomial function are shown below, sorted by popularity by default.
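
Before diving into the examples, here is a minimal sketch of the basic call (assuming TensorFlow 1.x, where the function lives at the top level; later releases rename it to tf.random.categorical). tf.multinomial draws class indices from a categorical distribution defined by unnormalized log-probabilities ("logits"):

 # Minimal usage sketch (TensorFlow 1.x assumed). Logits have shape
 # [batch_size, num_classes]; the result is an int64 tensor of shape
 # [batch_size, num_samples].
 import tensorflow as tf

 logits = tf.constant([[1.0, 2.0, 3.0],    # row 0: class 2 most likely
                       [5.0, 0.0, 0.0]])   # row 1: class 0 most likely
 samples = tf.multinomial(logits, num_samples=4)

 with tf.Session() as sess:
     print(sess.run(samples))  # e.g. [[2 1 2 2], [0 0 0 0]]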

Example 1: body

 def body(i, prev_base_state, prev_high_states, prev_y, prev_emb,
          y_array):
     state1 = decoder.grustep1.forward(prev_base_state, prev_emb)
     att_ctx = decoder.attstep.forward(state1)
     base_state = decoder.grustep2.forward(state1, att_ctx)
     if decoder.high_gru_stack is None:
         output = base_state
         high_states = []
     else:
         if decoder.high_gru_stack.context_state_size == 0:
             output, high_states = decoder.high_gru_stack.forward_single(
                 prev_high_states, base_state)
         else:
             output, high_states = decoder.high_gru_stack.forward_single(
                 prev_high_states, base_state, context=att_ctx)
     logits = decoder.predictor.get_logits(prev_emb, output, att_ctx,
                                           multi_step=False)
     new_y = tf.multinomial(logits, num_samples=1)
     new_y = tf.cast(new_y, dtype=tf.int32)
     new_y = tf.squeeze(new_y, axis=1)
     new_y = tf.where(tf.equal(prev_y, tf.constant(0, dtype=tf.int32)),
                      tf.zeros_like(new_y), new_y)
     y_array = y_array.write(index=i, value=new_y)
     new_emb = decoder.y_emb_layer.forward(new_y, factor=0)
     return i+1, base_state, high_states, new_y, new_emb, y_array
Author: rsennrich | Project: nematus | Lines: 25 | Source: rnn_inference.py

Example 2: decoder_fn

 def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
     with tf.name_scope(name, "simple_decoder_fn_inference",
                        [time, cell_state, cell_input, cell_output,
                         context_state]):
         if cell_input is not None:
             raise ValueError("Expected cell_input to be None, but saw: %s" %
                              cell_input)
         if cell_output is None:
             # invariant that this is time == 0
             next_input_id = tf.ones([batch_size], dtype=dtype) * (
                 start_of_sequence_id)
             done = tf.zeros([batch_size], dtype=tf.bool)
             cell_state = encoder_state
             cell_output = tf.zeros([cell_size],
                                    dtype=tf.float32)
         else:
             softmax_output = output_fn(cell_output)
             if sample:
                 next_input_id = tf.squeeze(tf.multinomial(softmax_output, 1), 1)
             else:
                 next_input_id = tf.argmax(softmax_output, 1)
             next_input_id = tf.cast(next_input_id, dtype=dtype)
             done = tf.equal(next_input_id, end_of_sequence_id)
         next_input = tf.gather(embeddings, next_input_id)
         # if time > maxlen, return all true vector
         done = tf.cond(
             tf.greater(time, maximum_length),
             lambda: tf.ones([batch_size], dtype=tf.bool),
             lambda: done)
         return (done, cell_state, next_input, next_input_id, context_state)
Author: futurulus | Project: rl-cards | Lines: 30 | Source: tfutils.py

Example 3: UpdateProbs

  def UpdateProbs(self, inp):
    """Update probabilities of each particle based on 2D matrix inp which is a 2D perspectiuve projection of the scene"""

    projection, onscreen = self.project()
    filtered_projection = tf.to_int64(tf.select(onscreen, projection, tf.zeros_like(projection)))
    per_state_probabilities = tf.gather_nd(inp, filtered_projection)
    
    filtered_probabilities = tf.select(onscreen, per_state_probabilities, tf.zeros_like(per_state_probabilities))
    
    new_state_indices = tf.squeeze(tf.multinomial(tf.expand_dims(tf.log(filtered_probabilities), 0), self.particles/10*9))

    new_state = tf.gather(self.state, new_state_indices)
    
    # Add momentum
    new_state = tf.concat(1, [new_state[:, 0:3] + new_state[:, 3:6], new_state[:, 3:10]])
    
    # Add in particles for the "just come onscreen" case.
    new_state = tf.concat(0, [new_state, tf.random_normal([self.particles/10, 10]) * self.initial_std + self.initial_bias])

    new_state = new_state + tf.random_normal([self.particles, 10]) * self.update_std
    # TODO: permute state by adding noise.

    return self.state.assign(new_state)
Author: Hello1024 | Project: quadcopter | Lines: 25 | Source: particle.py
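
The resampling step above is the core of a particle filter: tf.log turns the (unnormalized) particle weights into logits, so tf.multinomial draws particle indices in proportion to their probabilities. A toy sketch of just that step (weights and states are illustrative assumptions, not from the original project):

 import tensorflow as tf

 weights = tf.constant([0.1, 0.6, 0.3])    # unnormalized particle weights
 states = tf.constant([[0.0], [1.0], [2.0]])
 # Draw 5 indices; particle 1 is picked most often on average.
 idx = tf.squeeze(tf.multinomial(tf.log(weights)[tf.newaxis, :], 5))
 resampled = tf.gather(states, idx)        # shape [5, 1]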

Example 4: loop_function

  def loop_function(prev, _):
    prev = tf.nn.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    prev_symbol = tf.cast(tf.reshape(tf.multinomial(prev, 1), [FLAGS.batch_size*FLAGS.max_dec_sen_num]), tf.int32)
    emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
    return emb_prev
Author: dbolshak | Project: DPGAN | Lines: 7 | Source: model.py

Example 5: multinomial_squeeze

 def multinomial_squeeze(logits, temperature=1.0):
   logits_shape = common_layers.shape_list(logits)
   reshaped_logits = (
       tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
   choices = tf.multinomial(reshaped_logits, 1)
   choices = tf.reshape(choices, logits_shape[:-1])
   return choices
Author: AranKomat | Project: tensor2tensor | Lines: 7 | Source: t2t_model.py
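
tf.multinomial only accepts rank-2 logits, which is exactly why multinomial_squeeze flattens before sampling and restores the original shape afterwards. A small sketch of the same flatten/sample/restore pattern, with illustrative shapes (not taken from the original project):

 import tensorflow as tf

 logits = tf.random_normal([4, 7, 50])               # [batch, time, vocab]
 flat = tf.reshape(logits, [-1, 50]) / 0.7           # flatten + temperature
 ids = tf.reshape(tf.multinomial(flat, 1), [4, 7])   # back to [batch, time]

Note that dividing by a temperature below 1 sharpens the distribution, while a temperature above 1 flattens it toward uniform.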

Example 6: testSmallEntropy

 def testSmallEntropy(self):
   with self.test_session(use_gpu=self.use_gpu):
     # A logit value of -10 corresponds to a probability of ~5e-5.
     logits = tf.constant([[-10., 10., -10.], [-10., -10., 10.]])
     num_samples = 1000
     samples = tf.multinomial(logits, num_samples).eval()
     self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
Author: Ambier | Project: tensorflow | Lines: 7 | Source: multinomial_op_test.py

Example 7: create_dc_actor_critic

    def create_dc_actor_critic(self, h_size, num_layers):
        num_streams = 1
        hidden_streams = self.create_new_obs(num_streams, h_size, num_layers)
        hidden = hidden_streams[0]

        if self.use_recurrent:
            tf.Variable(self.m_size, name="memory_size", trainable=False, dtype=tf.int32)
            self.prev_action = tf.placeholder(shape=[None], dtype=tf.int32, name='prev_action')
            self.prev_action_oh = c_layers.one_hot_encoding(self.prev_action, self.a_size)
            hidden = tf.concat([hidden, self.prev_action_oh], axis=1)

            self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32, name='recurrent_in')
            hidden, self.memory_out = self.create_recurrent_encoder(hidden, self.memory_in)
            self.memory_out = tf.identity(self.memory_out, name='recurrent_out')

        self.policy = tf.layers.dense(hidden, self.a_size, activation=None, use_bias=False,
                                      kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01))

        self.all_probs = tf.nn.softmax(self.policy, name="action_probs")
        self.output = tf.multinomial(self.policy, 1)
        self.output = tf.identity(self.output, name="action")

        self.value = tf.layers.dense(hidden, 1, activation=None)
        self.value = tf.identity(self.value, name="value_estimate")
        self.entropy = -tf.reduce_sum(self.all_probs * tf.log(self.all_probs + 1e-10), axis=1)
        self.action_holder = tf.placeholder(shape=[None], dtype=tf.int32)
        self.selected_actions = c_layers.one_hot_encoding(self.action_holder, self.a_size)

        self.all_old_probs = tf.placeholder(shape=[None, self.a_size], dtype=tf.float32, name='old_probabilities')

        # We reshape these tensors to [batch x 1] in order to be of the same rank as continuous control probabilities.
        self.probs = tf.expand_dims(tf.reduce_sum(self.all_probs * self.selected_actions, axis=1), 1)
        self.old_probs = tf.expand_dims(tf.reduce_sum(self.all_old_probs * self.selected_actions, axis=1), 1)
Author: dhsmf1416 | Project: lastalpha | Lines: 33 | Source: models.py

Example 8: _sample_single

 def _sample_single(args):
   logits, n_draw = args[0], args[1]  # [K], []
   x = tf.multinomial(logits[tf.newaxis, ...], n_draw,
                      seed)  # [1, n*n_draw]
   x = tf.reshape(x, shape=[n, -1])  # [n, n_draw]
   x = tf.reduce_sum(tf.one_hot(x, depth=k), axis=-2)  # [n, k]
   return x
Author: asudomoeva | Project: probability | Lines: 7 | Source: multinomial.py
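
This helper builds full multinomial counts out of categorical draws: tf.multinomial yields individual class indices, and summing their one-hot encodings produces a count vector. A toy sketch of the idea for a single sample (shapes are assumptions):

 import tensorflow as tf

 logits = tf.zeros([3])                                # k = 3 equal classes
 draws = tf.multinomial(logits[tf.newaxis, :], 10)     # [1, 10] class indices
 counts = tf.reduce_sum(tf.one_hot(draws, depth=3), axis=-2)  # [1, 3]
 # counts always sums to 10: one Multinomial(n=10) sample.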

Example 9: testNegativeMinLogits

 def testNegativeMinLogits(self):
   tf.set_random_seed(78844)
   with self.test_session(use_gpu=self.use_gpu):
     logits = tf.constant([[np.finfo(np.float32).min] * 1023 + [0]])
     num_samples = 1000
     samples = tf.multinomial(logits, num_samples).eval()
     self.assertAllEqual([[1023] * num_samples], samples)
Author: 2020zyc | Project: tensorflow | Lines: 7 | Source: multinomial_op_test.py

Example 10: __init__

    def __init__(self, q_values, observations, num_actions, stochastic, eps,
                 softmax, softmax_temp):
        if softmax:
            action_dist = Categorical(q_values / softmax_temp)
            self.action = action_dist.sample()
            self.action_prob = action_dist.sampled_action_prob()
            return

        deterministic_actions = tf.argmax(q_values, axis=1)
        batch_size = tf.shape(observations)[0]

        # Special case masked out actions (q_value ~= -inf) so that we don't
        # even consider them for exploration.
        random_valid_action_logits = tf.where(
            tf.equal(q_values, tf.float32.min),
            tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
        random_actions = tf.squeeze(
            tf.multinomial(random_valid_action_logits, 1), axis=1)

        chose_random = tf.random_uniform(
            tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions,
                                      deterministic_actions)
        self.action = tf.cond(stochastic, lambda: stochastic_actions,
                              lambda: deterministic_actions)
        self.action_prob = None
Author: robertnishihara | Project: ray | Lines: 26 | Source: dqn_policy_graph.py
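
The tf.float32.min trick above deserves a note: a logit that low carries essentially zero probability mass, so masked-out actions are never drawn during exploration. A minimal sketch with toy q-values (not from the original project):

 import tensorflow as tf

 q_values = tf.constant([[1.3, tf.float32.min, 0.2]])  # action 1 masked out
 valid_logits = tf.where(tf.equal(q_values, tf.float32.min),
                         tf.ones_like(q_values) * tf.float32.min,
                         tf.ones_like(q_values))  # uniform over valid actions
 random_action = tf.squeeze(tf.multinomial(valid_logits, 1), axis=1)
 # Only actions 0 and 2 can ever come out of this sample.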

Example 11: testEmpty

 def testEmpty(self):
   classes = 5
   with self.test_session(use_gpu=self.use_gpu):
     for batch in 0, 3:
       for samples in 0, 7:
         x = tf.multinomial(tf.zeros([batch, classes]), samples).eval()
         self.assertEqual(x.shape, (batch, samples))
Author: 2020zyc | Project: tensorflow | Lines: 7 | Source: multinomial_op_test.py

Example 12: sample

    def sample(self, projected_output):
        """Return integer ID tensor representing the sampled word.
        
        Args:
            projected_output: Tensor [1, 1, state_size], representing a single
                decoder timestep output. 
        """
        # TODO: We really need a tf.control_dependencies check here (for rank).
        with tf.name_scope('decoder_sampler', values=[projected_output]):

            # Protect against extra size-1 dimensions; grab the 1D tensor
            # of size state_size.
            logits = tf.squeeze(projected_output)
            if self.temperature < 0.02:
                return tf.argmax(logits, axis=0)

            # Scale the logits by the temperature. Despite the variable
            # name, these are still unnormalized log-probabilities.
            probabilities = tf.div(logits, self.temperature)
            projected_output = tf.div(
                tf.exp(probabilities),
                tf.reduce_sum(tf.exp(probabilities), axis=-1))

            # Sample once. tf.multinomial normalizes its logits argument
            # internally, so the scaled logits are passed directly.
            sample_ID = tf.squeeze(
                tf.multinomial(tf.expand_dims(probabilities, 0), 1))
        return sample_ID
Author: laurii | Project: DeepChatModels | Lines: 26 | Source: decoders.py

Example 13: __init__

    def __init__(self, brain, h_size=128, lr=1e-4, n_layers=2, m_size=128,
                 normalize=False, use_recurrent=False):
        LearningModel.__init__(self, m_size, normalize, use_recurrent, brain)

        num_streams = 1
        hidden_streams = self.create_new_obs(num_streams, h_size, n_layers)
        hidden = hidden_streams[0]
        self.dropout_rate = tf.placeholder(dtype=tf.float32, shape=[], name="dropout_rate")
        hidden_reg = tf.layers.dropout(hidden, self.dropout_rate)
        if self.use_recurrent:
            self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32, name='recurrent_in')
            hidden_reg, self.memory_out = self.create_recurrent_encoder(hidden_reg, self.memory_in)
            self.memory_out = tf.identity(self.memory_out, name='recurrent_out')
        self.policy = tf.layers.dense(hidden_reg, self.a_size, activation=None, use_bias=False,
                                      kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01))

        if brain.vector_action_space_type == "discrete":
            self.action_probs = tf.nn.softmax(self.policy)
            self.sample_action_float = tf.multinomial(self.policy, 1)
            self.sample_action_float = tf.identity(self.sample_action_float, name="action")
            self.sample_action = tf.cast(self.sample_action_float, tf.int32)
            self.true_action = tf.placeholder(shape=[None], dtype=tf.int32, name="teacher_action")
            self.action_oh = tf.one_hot(self.true_action, self.a_size)
            self.loss = tf.reduce_sum(-tf.log(self.action_probs + 1e-10) * self.action_oh)
            self.action_percent = tf.reduce_mean(tf.cast(
                tf.equal(tf.cast(tf.argmax(self.action_probs, axis=1), tf.int32), self.sample_action), tf.float32))
        else:
            self.sample_action = tf.identity(self.policy, name="action")
            self.true_action = tf.placeholder(shape=[None, self.a_size], dtype=tf.float32, name="teacher_action")
            self.loss = tf.reduce_sum(tf.squared_difference(self.true_action, self.sample_action))

        optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        self.update = optimizer.minimize(self.loss)
Author: dhsmf1416 | Project: lastalpha | Lines: 33 | Source: models.py

Example 14: generate_string

  def generate_string(self, initial_logits, initial_state, sequence_length):
    """Builds sub-graph to generate a string, sampled from the model.

    Args:
      initial_logits: Starting logits to sample from.
      initial_state: Starting state for the RNN core.
      sequence_length: Number of characters to sample.

    Returns:
      A Tensor of characters, with dimensions `[sequence_length, batch_size,
      output_size]`.
    """

    current_logits = initial_logits
    current_state = initial_state

    generated_letters = []
    for _ in range(sequence_length):
      # Sample a character index from distribution.
      char_index = tf.squeeze(tf.multinomial(current_logits, 1))
      char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)
      generated_letters.append(char_one_hot)

      # Feed character back into the deep_lstm.
      gen_out_seq, current_state = self._core(
          tf.nn.relu(self._embed_module(char_one_hot)),
          current_state)
      current_logits = self._output_module(gen_out_seq)

    generated_string = tf.stack(generated_letters)

    return generated_string
Author: bch-runner-1 | Project: sonnet | Lines: 32 | Source: rnn_shakespeare.py

Example 15: call

  def call(self, inputs):
    """Calculates logits and action.

    Args:
      inputs: Observations from a step in the cart-pole environment, of shape
        `(batch_size, input_size)`

    Returns:
      logits: the logits output by the output layer. This can be viewed as the
        likelihood values of choosing the left (0) action. Shape:
        `(batch_size, 1)`.
      actions: randomly selected actions ({0, 1}) based on the logits. Shape:
        `(batch_size, 1)`.
    """
    hidden = self._hidden_layer(inputs)
    logits = self._output_layer(hidden)

    left_prob = tf.nn.sigmoid(logits)
    action_probs = tf.concat([left_prob, 1.0 - left_prob], 1)

    self._grad_fn = eager.implicit_gradients(
        self._get_cross_entropy_and_save_actions)

    actions = tf.multinomial(tf.log(action_probs), 1)
    return logits, actions
Author: Ajaycs99 | Project: tensorflow | Lines: 25 | Source: cartpole_benchmark.py
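
One last detail worth highlighting: tf.multinomial expects log-probabilities, which is why the probability vector built from the sigmoid above is passed through tf.log before sampling. A minimal sketch of that conversion (toy probabilities, assumed setup):

 import tensorflow as tf

 action_probs = tf.constant([[0.9, 0.1]])          # P(left), P(right)
 action = tf.multinomial(tf.log(action_probs), 1)  # int64, shape [1, 1]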


Note: The tensorflow.multinomial examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.