

Python tensorflow_probability.distributions Code Examples

This article collects typical usage examples of the Python tensorflow_probability.distributions module, gathered from open-source projects. If you are wondering what tensorflow_probability.distributions does or how it is used in practice, the curated examples below should help; each linked source file shows the code in the context of its project.


Nine code examples of tensorflow_probability.distributions are shown below, sorted by popularity by default.
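
Before the project-specific examples, here is a minimal, self-contained sketch (written for this article, not taken from any project below) of the basic tfp.distributions workflow: construct a distribution, sample from it, and evaluate log-probabilities.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# A batch of two independent univariate normals.
dist = tfd.Normal(loc=[0., 1.], scale=[1., 0.5])
samples = dist.sample(3)            # shape [3, 2]
log_probs = dist.log_prob(samples)  # shape [3, 2]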

Example 1: sample

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def sample(self, n, max_length=None, z=None, c_input=None, **kwargs):
    """Sample with an optional conditional embedding `z`."""
    if z is not None and int(z.shape[0]) != n:
      raise ValueError(
          '`z` must have a first dimension that equals `n` when given. '
          'Got: %d vs %d' % (z.shape[0], n))

    if self.hparams.z_size and z is None:
      tf.logging.warning(
          'Sampling from conditional model without `z`. Using random `z`.')
      normal_shape = [n, self.hparams.z_size]
      normal_dist = tfp.distributions.Normal(
          loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))
      z = normal_dist.sample()

    return self.decoder.sample(n, max_length, z, c_input, **kwargs) 
Developer: magenta, Project: magenta, Lines: 18, Source: base_model.py

Example 2: sample

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def sample(self, n, max_length=None, z=None, c_input=None, **kwargs):
    """Sample with an optional conditional embedding `z`."""
    if z is not None and z.shape[0].value != n:
      raise ValueError(
          '`z` must have a first dimension that equals `n` when given. '
          'Got: %d vs %d' % (z.shape[0].value, n))

    if self.hparams.z_size and z is None:
      tf.logging.warning(
          'Sampling from conditional model without `z`. Using random `z`.')
      normal_shape = [n, self.hparams.z_size]
      normal_dist = tfp.distributions.Normal(
          loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))
      z = normal_dist.sample()

    return self.decoder.sample(n, max_length, z, c_input, **kwargs) 
Developer: personads, Project: synvae, Lines: 18, Source: base_model.py
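
Examples 1 and 2 differ only in how the static batch dimension of `z` is read: `int(z.shape[0])` under TF2-style shapes versus the older TF1-style `z.shape[0].value`. The random-`z` fallback they share can be reproduced standalone; `n` and `z_size` below are hypothetical values, not taken from either project.

import tensorflow as tf
import tensorflow_probability as tfp

n, z_size = 4, 32  # hypothetical batch size and latent dimensionality
normal_dist = tfp.distributions.Normal(
    loc=tf.zeros([n, z_size]), scale=tf.ones([n, z_size]))
z = normal_dist.sample()  # shape [n, z_size], one latent code per row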

Example 3: normal_prior

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def normal_prior(prior_std):
    """Defines normal distribution prior for Bayesian neural network."""

    def prior_fn(dtype, shape, name, trainable, add_variable_fn):
        tfd = tfp.distributions
        dist = tfd.Normal(loc=tf.zeros(shape, dtype),
scale=dtype.as_numpy_dtype(prior_std))
        batch_ndims = tf.size(input=dist.batch_shape_tensor())
        return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)

    return prior_fn 
Developer: sandialabs, Project: bcnn, Lines: 13, Source: utils.py
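
For context, a sketch of how such a `prior_fn` is typically consumed: its `(dtype, shape, name, trainable, add_variable_fn)` signature matches the `kernel_prior_fn` hook of tfp.layers variational layers. The specific layer and hyperparameters below are illustrative assumptions, not taken from the bcnn project.

import tensorflow as tf
import tensorflow_probability as tfp

# Hypothetical usage: plug the prior into a variational dense layer.
layer = tfp.layers.DenseFlipout(
    units=64,
    activation=tf.nn.relu,
    kernel_prior_fn=normal_prior(prior_std=1.0))  # normal_prior from above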

Example 4: encode

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def encode(self, sequence, sequence_length, control_sequence=None):
    """Encodes input sequences into a MultivariateNormalDiag distribution.

    Args:
      sequence: A Tensor with shape `[num_sequences, max_length, input_depth]`
          containing the sequences to encode.
      sequence_length: The length of each sequence in the `sequence` Tensor.
      control_sequence: (Optional) A Tensor with shape
          `[num_sequences, max_length, control_depth]` containing control
          sequences on which to condition. These will be concatenated depthwise
          to the input sequences.

    Returns:
      A tfp.distributions.MultivariateNormalDiag representing the posterior
      distribution for each sequence.
    """
    hparams = self.hparams
    z_size = hparams.z_size

    sequence = tf.to_float(sequence)
    if control_sequence is not None:
      control_sequence = tf.to_float(control_sequence)
      sequence = tf.concat([sequence, control_sequence], axis=-1)
    encoder_output = self.encoder.encode(sequence, sequence_length)

    mu = tf.layers.dense(
        encoder_output,
        z_size,
        name='encoder/mu',
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))
    sigma = tf.layers.dense(
        encoder_output,
        z_size,
        activation=tf.nn.softplus,
        name='encoder/sigma',
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))

    return ds.MultivariateNormalDiag(loc=mu, scale_diag=sigma) 
Developer: magenta, Project: magenta, Lines: 40, Source: base_model.py
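
Here `ds` is `tfp.distributions` imported under an alias, per the header comment. A minimal sketch (not from magenta) of what a caller might do with the returned posterior: draw a latent sample and compute the analytic KL divergence against a standard-normal prior. The batch size and `z_size` are placeholder values.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
batch, z_size = 8, 32  # hypothetical shapes
q_z = tfd.MultivariateNormalDiag(loc=tf.zeros([batch, z_size]),
                                 scale_diag=tf.ones([batch, z_size]))
p_z = tfd.MultivariateNormalDiag(loc=tf.zeros([z_size]),
                                 scale_diag=tf.ones([z_size]))
z = q_z.sample()                  # shape [batch, z_size]
kl = tfd.kl_divergence(q_z, p_z)  # shape [batch], analytic KL per sequence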

Example 5: kl_divergence

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def kl_divergence(self, parameters_a, parameters_b):
    """Return KL divergence between the two distributions."""
    dist_a = self.create_dist(parameters_a)
    dist_b = self.create_dist(parameters_b)
    kl = tfd.kl_divergence(dist_a, dist_b)
    if self._event_ndims == 1:
      kl = tf.reduce_sum(kl, axis=-1)
    return kl 
Developer: google-research, Project: seed_rl, Lines: 10, Source: parametric_distribution.py
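
A self-contained sketch of the same computation on two factorized Normals, including the event-dimension sum the method applies when `_event_ndims == 1`; shapes and parameters are illustrative.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
dist_a = tfd.Normal(loc=tf.zeros([4, 3]), scale=tf.ones([4, 3]))
dist_b = tfd.Normal(loc=tf.ones([4, 3]), scale=tf.ones([4, 3]))
kl = tfd.kl_divergence(dist_a, dist_b)  # elementwise KL, shape [4, 3]
kl = tf.reduce_sum(kl, axis=-1)         # sum over the event dim -> shape [4]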

Example 6: _distribution

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def _distribution(self, time_step, policy_state):
    raise NotImplementedError(
        'EpsilonGreedyPolicy does not support distributions yet.') 
Developer: tensorflow, Project: agents, Lines: 5, Source: epsilon_greedy_policy.py

Example 7: nested_distributions_from_specs

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def nested_distributions_from_specs(specs, parameters):
  """Builds a nest of distributions from a nest of specs.

  Args:
    specs: A nest of distribution specs.
    parameters: A nest of distribution kwargs.

  Returns:
    Nest of distribution instances with the same structure as the given specs.
  """
  return nest.map_structure_up_to(
      specs, lambda spec, parameters: spec.build_distribution(**parameters),
      specs, parameters) 
Developer: tensorflow, Project: agents, Lines: 15, Source: distribution_spec.py
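
`map_structure_up_to` maps `build_distribution` only down to the structure of `specs`, so each spec leaf receives its whole kwargs subtree. A minimal sketch of the same idea with a hypothetical spec class (the real spec type lives in tf_agents and is not shown here):

import tensorflow_probability as tfp

class NormalSpec(object):  # hypothetical stand-in for a distribution spec
  def build_distribution(self, **params):
    return tfp.distributions.Normal(**params)

specs = {'action': NormalSpec()}
parameters = {'action': {'loc': 0.0, 'scale': 1.0}}
# Equivalent to map_structure_up_to(specs, ...): iterate spec leaves only.
dists = {key: specs[key].build_distribution(**parameters[key])
         for key in specs}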

Example 8: _distribution

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def _distribution(self, time_step, policy_state):
    network_state, time_steps, actions = policy_state

    def _apply_sequence_update(tensors, tensor):
      return tf.concat([tensors, tensor[:, None]], axis=1)[:, 1:]

    time_steps = tf.nest.map_structure(
        _apply_sequence_update, time_steps, time_step)
    actions = tf.nest.map_structure(
        _apply_sequence_update, actions, tf.zeros_like(actions[:, 0]))

    # Actor network outputs nested structure of distributions or actions.
    action_or_distribution, network_state = self._apply_actor_network(
        time_steps, actions, network_state)

    policy_state = (network_state, time_steps, actions)

    def _to_distribution(action_or_distribution):
      if isinstance(action_or_distribution, tf.Tensor):
        # This is an action tensor, so wrap it in a deterministic distribution.
        return tfp.distributions.Deterministic(loc=action_or_distribution)
      return action_or_distribution

    distribution = tf.nest.map_structure(_to_distribution,
                                         action_or_distribution)
    return policy_step.PolicyStep(distribution, policy_state) 
Developer: alexlee-gk, Project: slac, Lines: 28, Source: slac_agent.py
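
The `_to_distribution` helper lets downstream code treat raw action tensors and genuine distributions uniformly; Example 9 below uses the same trick. A standalone sketch of the wrapper (the action values are placeholders):

import tensorflow as tf
import tensorflow_probability as tfp

action = tf.constant([0.5, -1.0])  # hypothetical action tensor
dist = tfp.distributions.Deterministic(loc=action)
sample = dist.sample()         # always returns `action`
log_p = dist.log_prob(action)  # 0.0 (all probability mass sits at `loc`)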

Example 9: _distribution

# Required imports: import tensorflow_probability [as alias]
# Or: from tensorflow_probability import distributions [as alias]
def _distribution(self, time_step, policy_state, training=False):
    if not policy_state:
      policy_state = {'actor_network_state': (), 'value_network_state': ()}
    else:
      policy_state = policy_state.copy()

    if 'actor_network_state' not in policy_state:
      policy_state['actor_network_state'] = ()
    if 'value_network_state' not in policy_state:
      policy_state['value_network_state'] = ()

    new_policy_state = {'actor_network_state': (), 'value_network_state': ()}

    def _to_distribution(action_or_distribution):
      if isinstance(action_or_distribution, tf.Tensor):
        # This is an action tensor, so wrap it in a deterministic distribution.
        return tfp.distributions.Deterministic(loc=action_or_distribution)
      return action_or_distribution

    (actions_or_distributions,
     new_policy_state['actor_network_state']) = self._apply_actor_network(
         time_step, policy_state['actor_network_state'], training=training)
    distributions = tf.nest.map_structure(_to_distribution,
                                          actions_or_distributions)

    if self._collect:
      policy_info = {
          'dist_params': ppo_utils.get_distribution_params(distributions)
      }
      if not self._compute_value_and_advantage_in_train:
        # If value_prediction is not computed in agent.train it needs to be
        # computed and saved here.
        (policy_info['value_prediction'],
         new_policy_state['value_network_state']) = self.apply_value_network(
             time_step.observation,
             time_step.step_type,
             value_state=policy_state['value_network_state'],
             training=False)
    else:
      policy_info = ()

    if (not new_policy_state['actor_network_state'] and
        not new_policy_state['value_network_state']):
      new_policy_state = ()
    elif not new_policy_state['value_network_state']:
      new_policy_state.pop('value_network_state', None)
    elif not new_policy_state['actor_network_state']:
      new_policy_state.pop('actor_network_state', None)

    return policy_step.PolicyStep(distributions, new_policy_state, policy_info) 
Developer: tensorflow, Project: agents, Lines: 52, Source: ppo_policy.py


Note: the tensorflow_probability.distributions examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects and their copyright remains with the original authors; consult each project's license before using or redistributing the code, and do not repost without permission.