

Python distributions.Normal Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.distributions.Normal. If you are wondering how distributions.Normal works, what its exact usage is, or what real-world examples look like, the curated code examples below should help. You can also explore further usage examples from the containing module, tensorflow.contrib.distributions.


The following presents 11 code examples of distributions.Normal, sorted by popularity by default.
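Before the individual examples, here is a minimal sketch of the basic API (assuming TensorFlow 1.x, where tensorflow.contrib.distributions is still available): construct the distribution from mean and standard-deviation tensors, then draw samples and evaluate log-densities. The shapes below are illustrative only.

import tensorflow as tf
from tensorflow.contrib import distributions

# A factorized Gaussian over a batch of 4 three-dimensional vectors.
loc = tf.zeros([4, 3])    # per-element means
scale = tf.ones([4, 3])   # per-element standard deviations
dist = distributions.Normal(loc, scale)

sample = dist.sample()            # shape [4, 3], one draw per element
log_prob = dist.log_prob(sample)  # element-wise log-density, shape [4, 3]

with tf.Session() as sess:
    print(sess.run([sample, log_prob]))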

Example 1: approximate_posterior

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def approximate_posterior(self, tensor, scope='posterior'):
        """ Calculate the approximate posterior given the tensor """
        # Generate mu and sigma of the Gaussian for the approximate posterior
        with tf.variable_scope(scope, 'posterior', [tensor]):
            mean = layers.linear(tensor, self.sample_size, scope='mean')

            # Use the log of sigma for numerical stability
            log_sigma = layers.linear(tensor, self.sample_size, scope='log_sigma')

            # Create the Gaussian distribution
            sigma = tf.exp(log_sigma)
            posterior = distributions.Normal(mean, sigma, name='posterior')

            self.collect_named_outputs(posterior.loc)
            self.collect_named_outputs(posterior.scale)
            self.posteriors.append(posterior)

            return posterior 
Author: dojoteef, Project: glas, Lines: 20, Source: sample.py

Example 2: fixed_bg_ll

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def fixed_bg_ll(self, images, background_score):
        """
        Compute likelihood score for the background assuming a fixed normal distribution
        :param images: Scenes of shape (n, h, w, c)
        :param background_score: a tensor of shape [n, h * w] with values between 0 and 1,
                                 determining to what degree a pixel can be considered background
        :return: a likelihood score of shape [batch_size]
        """
        dist = dists.Normal(0.0, 0.35)
        pixel_lls = dist.log_prob(images)
        pixel_lls = tf.reshape(pixel_lls, list(background_score.shape) + [-1])
        pixel_lls = tf.multiply(pixel_lls, tf.expand_dims(background_score, -1))
        # sum across pixels and channels
        image_lls = tf.reduce_sum(pixel_lls, axis=[1, 2])
        return image_lls 
Author: stelzner, Project: supair, Lines: 17, Source: model.py
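The core of fixed_bg_ll can also be written as a standalone sketch: score every pixel under a fixed N(0, 0.35) background model and down-weight pixels that the mask does not consider background. The shapes and variable names below are illustrative assumptions, not taken from the supair project.

import tensorflow as tf
from tensorflow.contrib import distributions as dists

# Illustrative shapes only: 2 images of 4x4 pixels with 1 channel.
n, h, w, c = 2, 4, 4, 1
images = tf.random_uniform([n, h, w, c])
background_score = tf.random_uniform([n, h * w])   # soft mask in [0, 1]

dist = dists.Normal(0.0, 0.35)
pixel_lls = dist.log_prob(images)                   # [n, h, w, c]
pixel_lls = tf.reshape(pixel_lls, [n, h * w, c])    # flatten the spatial dims
pixel_lls *= tf.expand_dims(background_score, -1)   # weight by the mask
image_lls = tf.reduce_sum(pixel_lls, axis=[1, 2])   # [n], summed over pixels and channels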

Example 3: __init__

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def __init__(self, config, attention, latent_space, scope='BasicSampler'):
        """ Initialize the sampler """
        super(BasicSampler, self).__init__(scope=scope)

        self.posteriors = []
        self.samples = config.samples
        self.sample_size = config.sample_size

        self.attention = attention
        self.latent_space = latent_space

        shape = (config.batch_size, config.sample_size)
        self.prior = distributions.Normal(tf.zeros(shape), tf.ones(shape), name='prior') 
Author: dojoteef, Project: glas, Lines: 15, Source: sample.py

Example 4: define_model

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def define_model(self, graph, sample_size=20, samples=1,
                     recognition=None, reuse=None, **kwargs):
        """
        Define a VariationalAutoencoderModel.

        For more details see Auto-Encoding Variational Bayes:
        https://arxiv.org/pdf/1312.6114v10.pdf

        Args:
            sample_size: The size of the samples from the approximate posterior
            samples: The number of samples drawn from the approximate posterior
            recognition: Model to generate q(z|x). A required parameter when defining
                the model, but it can be set later on the VariationalAutoencoderModel.
            reuse: Whether to reuse variables

        Returns:
            A VariationalAutoencoderModel
        """
        if recognition is None:
            raise TypeError('define_model() needs keyword only argument recognition')

        with tf.variable_scope('mean', reuse=reuse):
            mean = self.linear_layers(
                recognition.output_tensor, (sample_size), reuse=reuse)[-1]

        with tf.variable_scope('log_variance', reuse=reuse):
            log_variance = self.linear_layers(
                recognition.output_tensor, (sample_size), reuse=reuse)[-1]

        p_z = distributions.Normal(0.0, 1.0, name='P_z')
        q_z = distributions.Normal(mean, tf.sqrt(tf.exp(log_variance)), name='Q_z')

        posterior = tf.reduce_mean(q_z.sample(samples), 0)
        kl_divergence = tf.reduce_sum(distributions.kl(q_z, p_z), 1)
        return VariationalAutoencoderModel(graph, recognition, posterior, kl_divergence) 
Author: dojoteef, Project: dvae, Lines: 37, Source: vae.py
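The distributions.kl(q_z, p_z) call above is resolved analytically because a KL registration exists for a pair of Normals (in this contrib version the function is named kl; later releases renamed it kl_divergence). As a sanity-check sketch, the closed form KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * (mu^2 + sigma^2 - log(sigma^2) - 1) should match the library result element-wise; the values below are illustrative.

import tensorflow as tf
from tensorflow.contrib import distributions

mean = tf.constant([0.5, -1.0])
log_variance = tf.constant([0.2, -0.3])
sigma = tf.sqrt(tf.exp(log_variance))

p_z = distributions.Normal(0.0, 1.0)
q_z = distributions.Normal(mean, sigma)

kl_lib = distributions.kl(q_z, p_z)  # registered analytic Normal || Normal form

# Manual closed form for comparison.
kl_manual = 0.5 * (tf.square(mean) + tf.exp(log_variance) - log_variance - 1.0)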

Example 5: value_type

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
@contextlib.contextmanager
def value_type(dist_value_type):
  """Creates a value type context for any StochasticTensor created within.

  Typical usage:

  ```
  with sg.value_type(sg.MeanValue(stop_gradients=True)):
    st = sg.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
  ```

  In the example above, `st.value()` (or equivalently, `tf.identity(st)`) will
  be the mean value of the Normal distribution, i.e., `mu` (possibly
  broadcasted to the shape of `sigma`).  Furthermore, because the `MeanValue`
  was marked with `stop_gradients=True`, this value will have been wrapped
  in a `stop_gradients` call to disable any possible backpropagation.

  Args:
    dist_value_type: An instance of `MeanValue`, `SampleValue`, or
      any other stochastic value type.

  Yields:
    A context for `StochasticTensor` objects that controls the
    value created when they are initialized.

  Raises:
    TypeError: if `dist_value_type` is not an instance of a stochastic value
      type.
  """
  if not isinstance(dist_value_type, _StochasticValueType):
    raise TypeError("dist_value_type must be a Distribution Value Type")
  thread_id = threading.current_thread().ident
  stack = _STOCHASTIC_VALUE_STACK[thread_id]
  if stack:
    stack[-1].pushed_above(dist_value_type)
  stack.append(dist_value_type)
  yield
  stack.pop()
  if stack:
    stack[-1].popped_above(dist_value_type) 
Author: tobegit3hub, Project: deep_image_model, Lines: 41, Source: stochastic_tensor.py

Example 6: norm

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def norm(x, sigma):
    """Gaussian decay.
    Result is 1.0 for x = 0 and decays towards 0 for |x| > sigma.
    """
    dist = Normal(0.0, sigma)
    return dist.pdf(x) / dist.pdf(0.0) 
Author: simonmeister, Project: UnFlow, Lines: 8, Source: losses.py
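Because both calls share the same normalizing constant, the ratio dist.pdf(x) / dist.pdf(0.0) reduces to exp(-x^2 / (2 * sigma^2)) (note that pdf was renamed prob in later contrib releases). A quick NumPy check of that equivalence:

import numpy as np

def norm_np(x, sigma):
    """Gaussian decay without the distribution object: pdf(x) / pdf(0)."""
    return np.exp(-np.square(x) / (2.0 * sigma ** 2))

# norm_np(0.0, 1.0) == 1.0, norm_np(1.0, 1.0) ≈ 0.607, norm_np(2.0, 1.0) ≈ 0.135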

Example 7: _z

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def _z(self, arg, is_prior):
        mean = self._linear(arg, self.z_size)
        stddev = self._linear(arg, self.z_size)
        stddev = tf.sqrt(tf.exp(stddev))
        epsilon = tf.random_normal(shape=[self.batch_size, self.z_size])

        z = mean if is_prior else mean + tf.multiply(stddev, epsilon)
        pdf_z = ds.Normal(loc=mean, scale=stddev)

        return z, pdf_z 
Author: yumoxu, Project: stocknet-code, Lines: 12, Source: Model.py

Example 8: get_z

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def get_z(input, batch_size, z_size, W_mean, W_stddev, b_mean, b_stddev, is_prior):
    mean = tf.tensordot(input, W_mean, axes=1) + b_mean
    stddev = tf.tensordot(input, W_stddev, axes=1) + b_stddev
    stddev = tf.sqrt(tf.exp(stddev))
    epsilon = tf.random_normal(shape=[batch_size, z_size], name='epsilon')

    z = mean if is_prior else mean + tf.multiply(stddev, epsilon)

    pdf_z = ds.Normal(loc=mean, scale=stddev)

    return z, pdf_z 
Author: yumoxu, Project: stocknet-code, Lines: 13, Source: neural.py
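Examples 7 and 8 both make the reparameterization trick explicit: instead of calling pdf_z.sample(), they draw epsilon ~ N(0, I) and compute z = mean + stddev * epsilon, so the sampling step stays differentiable with respect to mean and stddev (and collapses to the mean when is_prior is set). A minimal standalone sketch with illustrative shapes:

import tensorflow as tf
from tensorflow.contrib import distributions as ds

batch_size, z_size = 8, 16
mean = tf.zeros([batch_size, z_size])
log_variance = tf.zeros([batch_size, z_size])
stddev = tf.sqrt(tf.exp(log_variance))

# z = mean + stddev * epsilon keeps gradients flowing into mean and stddev.
epsilon = tf.random_normal(shape=[batch_size, z_size])
z = mean + stddev * epsilon

# The matching density, e.g. for KL terms or likelihood evaluation.
pdf_z = ds.Normal(loc=mean, scale=stddev)
log_q_z = pdf_z.log_prob(z)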

Example 9: _multivariate_normal

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def _multivariate_normal(self):
        return Normal([0.] * self._latent_dim, [1.] * self._latent_dim) 
Author: wuga214, Project: IMPLEMENTATION_Variational-Auto-Encoder, Lines: 4, Source: generator.py
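Despite the method name, Normal with vector-valued parameters is a batch of independent univariate Gaussians, so log_prob returns one value per dimension; the joint log-density of a diagonal Gaussian is their sum. A hedged sketch of the distinction, assuming MultivariateNormalDiag from the same contrib package is available:

import tensorflow as tf
from tensorflow.contrib.distributions import Normal, MultivariateNormalDiag

latent_dim = 3
x = tf.zeros([latent_dim])

factorized = Normal([0.] * latent_dim, [1.] * latent_dim)
joint = MultivariateNormalDiag([0.] * latent_dim, [1.] * latent_dim)

per_dim_ll = factorized.log_prob(x)    # shape [3], one log-density per dimension
summed_ll = tf.reduce_sum(per_dim_ll)  # scalar joint log-density of the diagonal Gaussian
joint_ll = joint.log_prob(x)           # scalar, same value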

Example 10: _build

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def _build(self, transition, input_encoder, glimpse_encoder, glimpse_decoder, transform_estimator,
               steps_predictor, kwargs):
        """Build the model. See __init__ for argument description"""

        if self.explore_eps is not None:
            self.explore_eps = tf.get_variable('explore_eps', initializer=self.explore_eps, trainable=False)

        self.cell = AIRCell(self.img_size, self.glimpse_size, self.n_appearance, transition,
                            input_encoder, glimpse_encoder, glimpse_decoder, transform_estimator, steps_predictor,
                            canvas_init=None,
                            discrete_steps=self.discrete_steps,
                            explore_eps=self.explore_eps,
                            debug=self.debug,
                            **kwargs)

        initial_state = self.cell.initial_state(self.obs)

        dummy_sequence = tf.zeros((self.max_steps, self.batch_size, 1), name='dummy_sequence')
        outputs, state = tf.nn.dynamic_rnn(self.cell, dummy_sequence, initial_state=initial_state, time_major=True)

        for name, output in zip(self.cell.output_names, outputs):
            setattr(self, name, output)

        self.final_state = state[-2]
        self.glimpse = tf.reshape(self.presence * tf.nn.sigmoid(self.glimpse),
                                  (self.max_steps, self.batch_size,) + tuple(self.glimpse_size))
        self.canvas = tf.reshape(self.canvas, (self.max_steps, self.batch_size,) + tuple(self.img_size))
        self.canvas *= self.output_multiplier

        self.final_canvas = self.canvas[-1]

        self.output_distrib = Normal(self.final_canvas, self.output_std)

        posterior_step_probs = tf.transpose(tf.squeeze(self.presence_prob))
        self.num_steps_distrib = NumStepsDistribution(posterior_step_probs)

        self.num_step_per_sample = tf.to_float(tf.squeeze(tf.reduce_sum(self.presence, 0)))
        self.num_step = tf.reduce_mean(self.num_step_per_sample)
        self.gt_num_steps = tf.squeeze(tf.reduce_sum(self.nums, 0)) 
Author: akosiorek, Project: attend_infer_repeat, Lines: 41, Source: model.py

Example 11: __init__

# Required import: from tensorflow.contrib import distributions [as alias]
# Or: from tensorflow.contrib.distributions import Normal [as alias]
def __init__(self, region, args, name,
                 given_means=None, given_stddevs=None, mean=0.0, num_dims=0):
        super().__init__(name)
        self.local_size = len(region)
        self.args = args
        self.scope = sorted(list(region))
        self.size = args.num_gauss
        self.num_dims = num_dims

        self.means = variable_with_weight_decay(name + '_means',
                                                shape=[1, self.local_size, args.num_gauss],
                                                stddev=1e-1,
                                                mean=mean,
                                                wd=args.gauss_param_l2,
                                                values=given_means)

        if args.gauss_min_var < args.gauss_max_var:
            if args.gauss_isotropic:
                sigma_params = variable_with_weight_decay(name + '_sigma_params',
                                                          shape=[1, 1, args.num_gauss],
                                                          stddev=1e-1,
                                                          wd=args.gauss_param_l2,
                                                          values=given_stddevs)
            else:
                sigma_params = variable_with_weight_decay(name + '_sigma_params',
                                                          shape=[1, self.local_size,
                                                                 args.num_gauss],
                                                          stddev=1e-1,
                                                          wd=args.gauss_param_l2,
                                                          values=given_stddevs)

            self.sigma = args.gauss_min_var + \
                (args.gauss_max_var - args.gauss_min_var) * tf.sigmoid(sigma_params)
        else:
            self.sigma = 1.0

        means = self.means
        if self.args.gauss_max_mean is not None:
            means = tf.sigmoid(means) * self.args.gauss_max_mean
        if self.args.gauss_min_mean is not None:
            means = tf.sigmoid(means) + self.args.gauss_min_mean

        self.dist = dists.Normal(means, tf.sqrt(self.sigma)) 
Author: stelzner, Project: supair, Lines: 45, Source: rat_spn.py
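The variance parameterization in example 11 squashes an unconstrained variable into [gauss_min_var, gauss_max_var] with a sigmoid, which keeps the Gaussian scale bounded while remaining trainable. A standalone sketch of that transform (bounds and shapes are illustrative):

import tensorflow as tf

def bounded_variance(raw_params, min_var=0.1, max_var=1.0):
    """Map unconstrained parameters into the interval [min_var, max_var]."""
    return min_var + (max_var - min_var) * tf.sigmoid(raw_params)

sigma_params = tf.get_variable('sigma_params', shape=[1, 1, 4],
                               initializer=tf.truncated_normal_initializer(stddev=1e-1))
variance = bounded_variance(sigma_params)
stddev = tf.sqrt(variance)  # the example builds dists.Normal(means, tf.sqrt(self.sigma))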


Note: The tensorflow.contrib.distributions.Normal examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors. Please consult each project's license before using or redistributing the code; do not republish without permission.