

Python tensorflow_probability.distributions Code Examples

This article collects typical usage examples of tensorflow_probability.distributions in Python. If you are wondering what tensorflow_probability.distributions is for, or how it is used in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow_probability package.


The following presents 9 code examples of tensorflow_probability.distributions, ordered by popularity.
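
As a quick orientation before the examples, here is a minimal, self-contained sketch of the usual way the module is imported and used (the alias `tfd` and the Normal parameters are illustrative choices, not taken from any of the projects below):

import tensorflow_probability as tfp

tfd = tfp.distributions  # common shorthand for the distributions module

dist = tfd.Normal(loc=0.0, scale=1.0)   # a standard normal distribution
samples = dist.sample(5)                # draw 5 samples
log_probs = dist.log_prob(samples)      # evaluate the log-density at those samples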

Example 1: sample

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def sample(self, n, max_length=None, z=None, c_input=None, **kwargs):
    """Sample with an optional conditional embedding `z`."""
    if z is not None and int(z.shape[0]) != n:
      raise ValueError(
          '`z` must have a first dimension that equals `n` when given. '
          'Got: %d vs %d' % (z.shape[0], n))

    if self.hparams.z_size and z is None:
      tf.logging.warning(
          'Sampling from conditional model without `z`. Using random `z`.')
      normal_shape = [n, self.hparams.z_size]
      normal_dist = tfp.distributions.Normal(
          loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))
      z = normal_dist.sample()

    return self.decoder.sample(n, max_length, z, c_input, **kwargs) 
Author: magenta, Project: magenta, Lines of code: 18, Source: base_model.py

Example 2: sample

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def sample(self, n, max_length=None, z=None, c_input=None, **kwargs):
    """Sample with an optional conditional embedding `z`."""
    if z is not None and z.shape[0].value != n:
      raise ValueError(
          '`z` must have a first dimension that equals `n` when given. '
          'Got: %d vs %d' % (z.shape[0].value, n))

    if self.hparams.z_size and z is None:
      tf.logging.warning(
          'Sampling from conditional model without `z`. Using random `z`.')
      normal_shape = [n, self.hparams.z_size]
      normal_dist = tfp.distributions.Normal(
          loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))
      z = normal_dist.sample()

    return self.decoder.sample(n, max_length, z, c_input, **kwargs) 
Author: personads, Project: synvae, Lines of code: 18, Source: base_model.py

Example 3: normal_prior

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def normal_prior(prior_std):
    """Defines normal distribution prior for Bayesian neural network."""

    def prior_fn(dtype, shape, name, trainable, add_variable_fn):
        tfd = tfp.distributions
        dist = tfd.Normal(loc=tf.zeros(shape, dtype),
                          scale=dtype.as_numpy_dtype(prior_std))
        batch_ndims = tf.size(input=dist.batch_shape_tensor())
        return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)

    return prior_fn 
Author: sandialabs, Project: bcnn, Lines of code: 13, Source: utils.py
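
For context, a hedged sketch of how such a prior function is typically plugged into a Bayesian layer: tfp.layers.DenseFlipout accepts a kernel_prior_fn callable with exactly the (dtype, shape, name, trainable, add_variable_fn) signature used above. The layer size and activation here are placeholders, not taken from the bcnn project:

import tensorflow as tf
import tensorflow_probability as tfp

# Assumes normal_prior from the snippet above is in scope.
bayesian_layer = tfp.layers.DenseFlipout(
    units=10,
    activation=tf.nn.relu,
    kernel_prior_fn=normal_prior(prior_std=1.0))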

Example 4: encode

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def encode(self, sequence, sequence_length, control_sequence=None):
    """Encodes input sequences into a MultivariateNormalDiag distribution.

    Args:
      sequence: A Tensor with shape `[num_sequences, max_length, input_depth]`
          containing the sequences to encode.
      sequence_length: The length of each sequence in the `sequence` Tensor.
      control_sequence: (Optional) A Tensor with shape
          `[num_sequences, max_length, control_depth]` containing control
          sequences on which to condition. These will be concatenated depthwise
          to the input sequences.

    Returns:
      A tfp.distributions.MultivariateNormalDiag representing the posterior
      distribution for each sequence.
    """
    hparams = self.hparams
    z_size = hparams.z_size

    sequence = tf.to_float(sequence)
    if control_sequence is not None:
      control_sequence = tf.to_float(control_sequence)
      sequence = tf.concat([sequence, control_sequence], axis=-1)
    encoder_output = self.encoder.encode(sequence, sequence_length)

    mu = tf.layers.dense(
        encoder_output,
        z_size,
        name='encoder/mu',
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))
    sigma = tf.layers.dense(
        encoder_output,
        z_size,
        activation=tf.nn.softplus,
        name='encoder/sigma',
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))

    # `ds` is the alias for tfp.distributions referenced in the import note above
    # (in magenta's base_model.py it is defined as `ds = tfp.distributions`).
    return ds.MultivariateNormalDiag(loc=mu, scale_diag=sigma)
Author: magenta, Project: magenta, Lines of code: 40, Source: base_model.py
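
A minimal sketch of how the returned posterior distribution is typically consumed in a VAE-style setup with a standard-normal prior (the batch size, z_size, and mu/sigma values below are placeholders, not outputs of the encoder above):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

mu = tf.zeros([2, 4])                     # placeholder encoder means, z_size = 4
sigma = tf.nn.softplus(tf.ones([2, 4]))   # placeholder positive scales

q_z = tfd.MultivariateNormalDiag(loc=mu, scale_diag=sigma)    # posterior q(z|x)
p_z = tfd.MultivariateNormalDiag(loc=tf.zeros([2, 4]),
                                 scale_diag=tf.ones([2, 4]))  # prior p(z)

z = q_z.sample()                   # latent codes, shape [2, 4]
kl = tfd.kl_divergence(q_z, p_z)   # per-sequence KL regularizer, shape [2]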

Example 5: kl_divergence

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def kl_divergence(self, parameters_a, parameters_b):
    """Return KL divergence between the two distributions."""
    dist_a = self.create_dist(parameters_a)
    dist_b = self.create_dist(parameters_b)
    kl = tfd.kl_divergence(dist_a, dist_b)
    if self._event_ndims == 1:
      kl = tf.reduce_sum(kl, axis=-1)
    return kl 
Author: google-research, Project: seed_rl, Lines of code: 10, Source: parametric_distribution.py
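
To illustrate the reduction over the last axis, here is a small standalone sketch with two factorized Gaussians (the _event_ndims == 1 case); the parameter values are placeholders rather than anything from the seed_rl code:

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# Two 3-dimensional diagonal Gaussians expressed as per-dimension Normals.
dist_a = tfd.Normal(loc=[0.0, 0.0, 0.0], scale=[1.0, 1.0, 1.0])
dist_b = tfd.Normal(loc=[0.5, -0.5, 1.0], scale=[1.0, 2.0, 0.5])

kl_per_dim = tfd.kl_divergence(dist_a, dist_b)   # shape [3], one value per dimension
kl = tf.reduce_sum(kl_per_dim, axis=-1)          # scalar KL for the full action vector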

Example 6: _distribution

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def _distribution(self, time_step, policy_state):
    raise NotImplementedError(
        'EpsilonGreedyPolicy does not support distributions yet.') 
Author: tensorflow, Project: agents, Lines of code: 5, Source: epsilon_greedy_policy.py

Example 7: nested_distributions_from_specs

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def nested_distributions_from_specs(specs, parameters):
  """Builds a nest of distributions from a nest of specs.

  Args:
    specs: A nest of distribution specs.
    parameters: A nest of distribution kwargs.

  Returns:
    Nest of distribution instances with the same structure as the given specs.
  """
  return nest.map_structure_up_to(
      specs, lambda spec, parameters: spec.build_distribution(**parameters),
      specs, parameters) 
Author: tensorflow, Project: agents, Lines of code: 15, Source: distribution_spec.py
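
A simplified, self-contained sketch of the idea (the real tf_agents DistributionSpec handles arbitrarily nested structures via map_structure_up_to; the stand-in spec class, the dict comprehension, and the Categorical parameters below are illustrative assumptions only):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

class FakeDistributionSpec(object):
  """Stand-in exposing only the build_distribution(**parameters) interface."""

  def __init__(self, dist_cls):
    self._dist_cls = dist_cls

  def build_distribution(self, **parameters):
    return self._dist_cls(**parameters)

specs = {'action': FakeDistributionSpec(tfd.Categorical)}
parameters = {'action': {'logits': tf.constant([[0.1, 0.2, 0.7]])}}

# Flat-dict version of what nested_distributions_from_specs does for a nest.
distributions = {
    name: spec.build_distribution(**parameters[name])
    for name, spec in specs.items()
}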

Example 8: _distribution

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def _distribution(self, time_step, policy_state):
    network_state, time_steps, actions = policy_state

    def _apply_sequence_update(tensors, tensor):
      return tf.concat([tensors, tensor[:, None]], axis=1)[:, 1:]

    time_steps = tf.nest.map_structure(
        _apply_sequence_update, time_steps, time_step)
    actions = tf.nest.map_structure(
        _apply_sequence_update, actions, tf.zeros_like(actions[:, 0]))

    # Actor network outputs nested structure of distributions or actions.
    action_or_distribution, network_state = self._apply_actor_network(
        time_steps, actions, network_state)

    policy_state = (network_state, time_steps, actions)

    def _to_distribution(action_or_distribution):
      if isinstance(action_or_distribution, tf.Tensor):
        # This is an action tensor, so wrap it in a deterministic distribution.
        return tfp.distributions.Deterministic(loc=action_or_distribution)
      return action_or_distribution

    distribution = tf.nest.map_structure(_to_distribution,
                                         action_or_distribution)
    return policy_step.PolicyStep(distribution, policy_state) 
Author: alexlee-gk, Project: slac, Lines of code: 28, Source: slac_agent.py
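
For reference, a tiny standalone sketch of what wrapping an action tensor in tfp.distributions.Deterministic gives you (the action values are placeholders):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

action = tf.constant([0.5, -1.0])   # a raw action tensor
dist = tfd.Deterministic(loc=action)

samples = dist.sample()             # always returns `loc`, i.e. the action itself
probs = dist.prob([0.5, 0.0])       # 1.0 where the value equals `loc`, 0.0 elsewhere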

Example 9: _distribution

# Required module: import tensorflow_probability [as an alias]
# Or alternatively: from tensorflow_probability import distributions [as an alias]
def _distribution(self, time_step, policy_state, training=False):
    if not policy_state:
      policy_state = {'actor_network_state': (), 'value_network_state': ()}
    else:
      policy_state = policy_state.copy()

    if 'actor_network_state' not in policy_state:
      policy_state['actor_network_state'] = ()
    if 'value_network_state' not in policy_state:
      policy_state['value_network_state'] = ()

    new_policy_state = {'actor_network_state': (), 'value_network_state': ()}

    def _to_distribution(action_or_distribution):
      if isinstance(action_or_distribution, tf.Tensor):
        # This is an action tensor, so wrap it in a deterministic distribution.
        return tfp.distributions.Deterministic(loc=action_or_distribution)
      return action_or_distribution

    (actions_or_distributions,
     new_policy_state['actor_network_state']) = self._apply_actor_network(
         time_step, policy_state['actor_network_state'], training=training)
    distributions = tf.nest.map_structure(_to_distribution,
                                          actions_or_distributions)

    if self._collect:
      policy_info = {
          'dist_params': ppo_utils.get_distribution_params(distributions)
      }
      if not self._compute_value_and_advantage_in_train:
        # If value_prediction is not computed in agent.train it needs to be
        # computed and saved here.
        (policy_info['value_prediction'],
         new_policy_state['value_network_state']) = self.apply_value_network(
             time_step.observation,
             time_step.step_type,
             value_state=policy_state['value_network_state'],
             training=False)
    else:
      policy_info = ()

    if (not new_policy_state['actor_network_state'] and
        not new_policy_state['value_network_state']):
      new_policy_state = ()
    elif not new_policy_state['value_network_state']:
      new_policy_state.pop('value_network_state', None)
    elif not new_policy_state['actor_network_state']:
      new_policy_state.pop('actor_network_state', None)

    return policy_step.PolicyStep(distributions, new_policy_state, policy_info) 
Author: tensorflow, Project: agents, Lines of code: 52, Source: ppo_policy.py


Note: The tensorflow_probability.distributions examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects and their copyright remains with the original authors; when redistributing or using them, please follow the license of the corresponding project. Do not repost this article without permission.