

Python sonnet.BatchApply Method Code Examples

This article collects typical usage examples of Python's sonnet.BatchApply method. If you are wondering what sonnet.BatchApply does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the sonnet package.


The following 15 code examples of the sonnet.BatchApply method are shown below, sorted by popularity by default.
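For context, `snt.BatchApply` (Sonnet v1 on TensorFlow 1.x) reshapes a tensor so that its leading dimensions (by default the first two, typically time and batch) are merged into one, applies the wrapped module or op, and then splits the merged dimension back out. A minimal sketch of that behavior, with hypothetical shapes chosen purely for illustration:

import sonnet as snt
import tensorflow as tf

# [time, batch, features]; the shapes here are made up for the demo.
inputs = tf.placeholder(tf.float32, [10, 4, 32])
linear = snt.Linear(output_size=16)
# Internally applied on shape [40, 32], then reshaped back to [10, 4, 16].
outputs = snt.BatchApply(linear)(inputs)
print(outputs.get_shape())  # (10, 4, 16)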

Example 1: unroll

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def unroll(self, actions, env_outputs, core_state):
    """Manual implementation of the network unroll."""
    _, _, done, _ = env_outputs

    torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))

    # Note, in this implementation we can't use CuDNN RNN to speed things up due
    # to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
    # changed to implement snt.LSTMCell).
    initial_core_state = self._core.zero_state(tf.shape(actions)[1], tf.float32)
    core_output_list = []
    for input_, d in zip(tf.unstack(torso_outputs), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next step.
      core_state = nest.map_structure(
          functools.partial(tf.where, d), initial_core_state, core_state)
      core_output, core_state = self._core(input_, core_state)
      core_output_list.append(core_output)

    return snt.BatchApply(self._head)(tf.stack(core_output_list)), core_state 
Developer: deepmind, Project: streetlearn, Lines: 21, Source: plain_agent.py

Example 2: weighted_softmax

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def weighted_softmax(activations, strengths, strengths_op):
  """Returns softmax over activations multiplied by positive strengths.

  Args:
    activations: A tensor of shape `[batch_size, num_heads, memory_size]`, of
      activations to be transformed. Softmax is taken over the last dimension.
    strengths: A tensor of shape `[batch_size, num_heads]` containing strengths to
      multiply by the activations prior to the softmax.
    strengths_op: An operation to transform strengths before softmax.

  Returns:
    A tensor of the same shape as `activations` with the weighted softmax applied.
  """
  transformed_strengths = tf.expand_dims(strengths_op(strengths), -1)
  sharp_activations = activations * transformed_strengths
  softmax = snt.BatchApply(module_or_op=tf.nn.softmax)
  return softmax(sharp_activations) 
Developer: deepmind, Project: dnc, Lines: 19, Source: addressing.py
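In symbols, for each batch element `b` and head `h`, `weighted_softmax` returns `softmax(activations[b, h, :] * strengths_op(strengths)[b, h])`, with the softmax taken over the memory dimension. The next example tests exactly this identity-strengths case.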

Example 3: testValues

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def testValues(self):
    batch_size = 5
    num_heads = 3
    memory_size = 7

    activations_data = np.random.randn(batch_size, num_heads, memory_size)
    weights_data = np.ones((batch_size, num_heads))

    activations = tf.placeholder(tf.float32,
                                 [batch_size, num_heads, memory_size])
    weights = tf.placeholder(tf.float32, [batch_size, num_heads])
    # Run weighted softmax with identity placed on weights. Output should be
    # equal to a standalone softmax.
    observed = addressing.weighted_softmax(activations, weights, tf.identity)
    expected = snt.BatchApply(
        module_or_op=tf.nn.softmax, name='BatchSoftmax')(activations)
    with self.test_session() as sess:
      observed = sess.run(
          observed,
          feed_dict={activations: activations_data,
                     weights: weights_data})
      expected = sess.run(expected, feed_dict={activations: activations_data})
      self.assertAllClose(observed, expected) 
Developer: deepmind, Project: dnc, Lines: 25, Source: addressing_test.py

Example 4: relation_network

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def relation_network(self, inputs):
    with tf.variable_scope("relation_network"):
      regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
      initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
      relation_network_module = snt.nets.MLP(
          [2 * self._num_latents] * 3,
          use_bias=False,
          regularizers={"w": regularizer},
          initializers={"w": initializer},
      )
      total_num_examples = self.num_examples_per_class * self.num_classes
      inputs = tf.reshape(inputs, [total_num_examples, self._num_latents])

      left = tf.tile(tf.expand_dims(inputs, 1), [1, total_num_examples, 1])
      right = tf.tile(tf.expand_dims(inputs, 0), [total_num_examples, 1, 1])
      concat_codes = tf.concat([left, right], axis=-1)
      outputs = snt.BatchApply(relation_network_module)(concat_codes)
      outputs = tf.reduce_mean(outputs, axis=1)
      # 2 * latents, because we are returning means and variances of a Gaussian
      outputs = tf.reshape(outputs, [self.num_classes,
                                     self.num_examples_per_class,
                                     2 * self._num_latents])

      return outputs 
Developer: deepmind, Project: leo, Lines: 26, Source: model.py

Example 5: decoder

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def decoder(self, inputs):
    with tf.variable_scope("decoder"):
      l2_regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
      orthogonality_reg = get_orthogonality_regularizer(
          self._orthogonality_penalty_weight)
      initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
      # 2 * embedding_dim, because we are returning means and variances
      decoder_module = snt.Linear(
          2 * self.embedding_dim,
          use_bias=False,
          regularizers={"w": l2_regularizer},
          initializers={"w": initializer},
      )
      outputs = snt.BatchApply(decoder_module)(inputs)
      self._orthogonality_reg = orthogonality_reg(decoder_module.w)
      return outputs 
Developer: deepmind, Project: leo, Lines: 18, Source: model.py

Example 6: apply_increasing_monotonic_fn

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def apply_increasing_monotonic_fn(self, wrapper, fn, *args, **parameters):
    if fn.__name__ in ('add', 'reduce_mean', 'reduce_sum', 'avg_pool'):
      if self.vertices.shape.ndims == self.nominal.shape.ndims:
        vertices_fn = fn
      else:
        vertices_fn = snt.BatchApply(fn, n_dims=2)
      return SimplexBounds(
          vertices_fn(self.vertices, *[bounds.vertices for bounds in args]),
          fn(self.nominal, *[bounds.nominal for bounds in args]),
          self.r)

    elif fn.__name__ == 'quotient':
      return SimplexBounds(
          self.vertices / tf.expand_dims(parameters['denom'], axis=1),
          fn(self.nominal),
          self.r)

    else:
      return super(SimplexBounds, self).apply_increasing_monotonic_fn(
          wrapper, fn, *args, **parameters) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 22, Source: simplex_bounds.py
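The `n_dims=2` argument above makes `BatchApply` fold the first two leading dimensions of `self.vertices` (batch and vertices) into one before applying `fn`, since the vertices tensor carries one more leading dimension than the nominal tensor. A standalone sketch of that mechanic, with made-up shapes:

import sonnet as snt
import tensorflow as tf

# [batch, num_vertices, length, channels]; shapes invented for illustration.
vertices = tf.placeholder(tf.float32, [8, 5, 6, 3])
reduce_fn = lambda x: tf.reduce_mean(x, axis=1)  # mean over the length axis
# Applied on shape [40, 6, 3] -> [40, 3], then reshaped back to [8, 5, 3].
out = snt.BatchApply(reduce_fn, n_dims=2)(vertices)
print(out.get_shape())  # (8, 5, 3)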

Example 7: measurement_update

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def measurement_update(self, encoding, particles, means, stds):
    """
    Compute the likelihood of the encoded observation for each particle.

    :param encoding: encoding of the observation
    :param particles: particle states (poses)
    :param means: means used to normalize the particle poses
    :param stds: standard deviations used to normalize the particle poses
    :return: observation likelihood
    """

    # prepare input (normalize particle poses and repeat encoding per particle)
    particle_input = self.transform_particles_as_input(particles, means, stds)
    encoding_input = tf.tile(encoding[:, tf.newaxis, :], [1, tf.shape(particles)[1], 1])
    inputs = tf.concat([encoding_input, particle_input], axis=-1)

    # estimate the likelihood of the encoded observation for each particle, remove last dimension
    obs_likelihood = snt.BatchApply(self.obs_like_estimator)(inputs)[:, :, 0]

    return obs_likelihood
Developer: tu-rbo, Project: differentiable-particle-filters, Lines: 22, Source: dpf_kitti.py

Example 8: _build

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def _build(self, x, presence=None):

    batch_size = int(x.shape[0])
    h = snt.BatchApply(snt.Linear(self._n_dims))(x)

    args = [self._n_heads, self._layer_norm, self._dropout_rate]
    klass = SelfAttention

    if self._n_inducing_points > 0:
      args = [self._n_inducing_points] + args
      klass = InducedSelfAttention

    for _ in range(self._n_layers):
      h = klass(*args)(h, presence)

    z = snt.BatchApply(snt.Linear(self._n_output_dims))(h)

    inducing_points = tf.get_variable(
        'inducing_points', shape=[1, self._n_outputs, self._n_output_dims])
    inducing_points = snt.TileByDim([0], [batch_size])(inducing_points)

    return MultiHeadQKVAttention(self._n_heads)(inducing_points, z, z, presence) 
Developer: akosiorek, Project: stacked_capsule_autoencoders, Lines: 24, Source: attention.py

Example 9: unroll

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def unroll(self, actions, env_outputs, core_state):
    _, _, done, _ = env_outputs

    torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))

    # Note, in this implementation we can't use CuDNN RNN to speed things up due
    # to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
    # changed to implement snt.LSTMCell).
    initial_core_state = self._core.zero_state(tf.shape(actions)[1], tf.float32)
    core_output_list = []
    for input_, d in zip(tf.unstack(torso_outputs), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next step.
      core_state = nest.map_structure(functools.partial(tf.where, d),
                                      initial_core_state, core_state)
      core_output, core_state = self._core(input_, core_state)
      core_output_list.append(core_output)

    return snt.BatchApply(self._head)(tf.stack(core_output_list)), core_state 
Developer: deepmind, Project: scalable_agent, Lines: 20, Source: experiment.py

Example 10: unroll

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def unroll(self, actions, env_outputs, core_state):
    """Manual implementation of the network unroll."""
    _, _, done, _ = env_outputs

    torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))
    tf.logging.info(torso_outputs)
    conv_outputs, actions_and_rewards, goals = torso_outputs

    # Note, in this implementation we can't use CuDNN RNN to speed things up due
    # to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
    # changed to implement snt.LSTMCell).
    initial_core_state = self.initial_state(tf.shape(actions)[1])
    policy_input_list = []
    heading_output_list = []
    xy_output_list = []
    target_xy_output_list = []
    for torso_output_, action_and_reward_, goal_, done_ in zip(
        tf.unstack(conv_outputs),
        tf.unstack(actions_and_rewards),
        tf.unstack(goals),
        tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next step.
      core_state = nest.map_structure(
          functools.partial(tf.where, done_), initial_core_state, core_state)
      core_output, core_state = self._core(
          (torso_output_, action_and_reward_, goal_), core_state)
      policy_input_list.append(core_output[0])
      heading_output_list.append(core_output[1])
      xy_output_list.append(core_output[2])
      target_xy_output_list.append(core_output[3])
    head_output = snt.BatchApply(self._head)(tf.stack(policy_input_list),
                                             tf.stack(heading_output_list),
                                             tf.stack(xy_output_list),
                                             tf.stack(target_xy_output_list))

    return head_output, core_state 
Developer: deepmind, Project: streetlearn, Lines: 38, Source: goal_nav_agent.py

Example 11: _build

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def _build(self, h):
    with tf.device(self.device):
      mod = snt.Linear(self.num_grad_channels)
      ret = snt.BatchApply(mod)(h)
      # return as [num_grad_channels] x [bs] x [num units]
      return tf.transpose(ret, perm=self.perm) 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 8, Source: more_local_weight_update.py

Example 12: bias_readout

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def bias_readout(self, h):
    with tf.device(self.remote_device):
      mod = snt.Linear(1, name='bias_readout')
      ret = snt.BatchApply(mod)(h)
      return tf.squeeze(ret, 2) 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 7, Source: more_local_weight_update.py

Example 13: to_delta_size

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def to_delta_size(self, h):
    with tf.device(self.remote_device):
      mod = snt.Linear(self.delta_dim)
      return snt.BatchApply(mod)(h) 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 6, Source: more_local_weight_update.py

Example 14: encoder

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def encoder(self, inputs):
    with tf.variable_scope("encoder"):
      after_dropout = tf.nn.dropout(inputs, rate=self.dropout_rate)
      regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
      initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
      encoder_module = snt.Linear(
          self._num_latents,
          use_bias=False,
          regularizers={"w": regularizer},
          initializers={"w": initializer},
      )
      outputs = snt.BatchApply(encoder_module)(after_dropout)
      return outputs 
Developer: deepmind, Project: leo, Lines: 15, Source: model.py

Example 15: apply_conv1d

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchApply [as alias]
def apply_conv1d(self, wrapper, w, b, padding, stride):
    mapped_centres = tf.nn.conv1d(self.nominal, w,
                                  padding=padding, stride=stride)
    if self.vertices.shape.ndims == 3:
      # `self.vertices` has no batch dimension; its shape is
      # (num_vertices, input_length, embedding_channels).
      mapped_vertices = tf.nn.conv1d(self.vertices, w,
                                     padding=padding, stride=stride)
    elif self.vertices.shape.ndims == 4:
      # `self.vertices` has shape
      # (batch_size, num_vertices, input_length, embedding_channels).
      # Vertices are different for each example in the batch,
      # e.g. for word perturbations.
      mapped_vertices = snt.BatchApply(
          lambda x: tf.nn.conv1d(x, w, padding=padding, stride=stride))(
              self.vertices)
    else:
      raise ValueError('"vertices" must have either 3 or 4 dimensions.')

    lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, self.r, -3)

    nominal_out = tf.nn.conv1d(self.nominal, w,
                               padding=padding, stride=stride)
    if b is not None:
      nominal_out += b

    return relative_bounds.RelativeIntervalBounds(lb, ub, nominal_out) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 29, Source: simplex_bounds.py


Note: The sonnet.BatchApply examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's License before distributing or using the code. Do not reproduce without permission.