

Python Normal.sample Method Code Examples

This article collects typical usage examples of the Python method edward.models.Normal.sample, drawn from open-source projects. If you are unsure what Normal.sample does or how to call it, the curated examples below should help. You can also explore further usage of the enclosing class, edward.models.Normal.


The following presents 15 code examples of the Normal.sample method, ordered by popularity by default.
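
Before the examples, here is a minimal standalone sketch of Normal.sample itself, assuming the Edward 1.x API on TensorFlow 1.x; the newer snippets below use the loc/scale keywords shown here, while the older ones use mu/sigma instead:

import tensorflow as tf
from edward.models import Normal

# A 2-dimensional standard normal; sample(5) draws 5 i.i.d. samples,
# yielding a tensor of shape (5, 2).
rv = Normal(loc=tf.zeros(2), scale=tf.ones(2))
samples = rv.sample(5)

with tf.Session() as sess:
  print(sess.run(samples))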

Example 1: build_update

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
  def build_update(self):
    """
    Simulate Langevin dynamics using a discretized integrator. Its
    discretization error goes to zero as the learning rate decreases.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}

    # Simulate Langevin dynamics.
    learning_rate = self.step_size / tf.cast(self.t + 1, tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))
    sample = {}
    for z, qz, grad_log_p in \
        zip(six.iterkeys(self.latent_vars),
            six.itervalues(self.latent_vars),
            grad_log_joint):
      event_shape = qz.get_event_shape()
      normal = Normal(mu=tf.zeros(event_shape),
                      sigma=learning_rate * tf.ones(event_shape))
      sample[z] = old_sample[z] + 0.5 * learning_rate * grad_log_p + \
          normal.sample()

    # Update Empirical random variables.
    assign_ops = []
    variables = {x.name: x for x in
                 tf.get_default_graph().get_collection(tf.GraphKeys.VARIABLES)}
    for z, qz in six.iteritems(self.latent_vars):
      variable = variables[qz.params.op.inputs[0].op.inputs[0].name]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)
Developer: blei-lab, Project: edward, Lines: 36, Source: sgld.py
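
For reference, writing the decayed learning rate as \epsilon_t = \text{step\_size} / (t + 1), the update implemented above is

  z_t = z_{t-1} + \frac{\epsilon_t}{2} \nabla_z \log p(x, z_{t-1}) + \eta_t, \qquad \eta_t \sim \mathcal{N}(0,\ \epsilon_t^2),

since the code passes the learning rate directly as the noise standard deviation (the classic Welling and Teh (2011) formulation injects noise with variance \epsilon_t instead).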

Example 2: build_update

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
  def build_update(self):
    """Simulate Langevin dynamics using a discretized integrator. Its
    discretization error goes to zero as the learning rate decreases.

    #### Notes

    The updates assume each Empirical random variable is directly
    parameterized by `tf.Variable`s.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}

    # Simulate Langevin dynamics.
    learning_rate = self.step_size / tf.cast(self.t + 1, tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))
    sample = {}
    for z, grad_log_p in zip(six.iterkeys(old_sample), grad_log_joint):
      qz = self.latent_vars[z]
      event_shape = qz.event_shape
      normal = Normal(loc=tf.zeros(event_shape),
                      scale=learning_rate * tf.ones(event_shape))
      sample[z] = old_sample[z] + \
          0.5 * learning_rate * tf.convert_to_tensor(grad_log_p) + \
          normal.sample()

    # Update Empirical random variables.
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      variable = qz.get_variables()[0]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)
Developer: ekostem, Project: edward, Lines: 37, Source: sgld.py
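
The tf.scatter_update call above writes the new sample into row t of the tf.Variable backing the Empirical approximation. A minimal self-contained sketch of that mechanism (TF 1.x API assumed):

import tensorflow as tf

T = 5                                    # number of stored samples
params = tf.Variable(tf.zeros([T, 2]))   # the Empirical's underlying variable
t = tf.constant(2)                       # current iteration
new_sample = tf.constant([1.0, -1.0])

# Writes new_sample into row t in place, leaving the other rows untouched.
update = tf.scatter_update(params, t, new_sample)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(update))  # row 2 now holds [1., -1.]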

Example 3: _test

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
def _test(mu, sigma, n):
  rv = Normal(mu=mu, sigma=sigma)
  rv_sample = rv.sample(n)
  x = rv_sample.eval()
  x_tf = tf.constant(x, dtype=tf.float32)
  mu = mu.eval()
  sigma = sigma.eval()
  assert np.allclose(rv.log_prob(x_tf).eval(),
                     stats.norm.logpdf(x, mu, sigma))
Developer: blei-lab, Project: edward, Lines: 11, Source: test_normal_log_prob.py
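
A hypothetical invocation of this test helper, assuming it runs under an Edward session (so .eval() finds a default session) and with the imports the snippet relies on:

import edward as ed
import numpy as np
import tensorflow as tf
from scipy import stats
from edward.models import Normal

sess = ed.get_session()  # interactive session, registered as default
_test(tf.constant(0.0), tf.constant(1.0), 100)     # scalar Normal, 100 draws
_test(tf.constant([0.5]), tf.constant([2.0]), 10)  # 1-d batch, 10 draws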

Example 4: build_update

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
  def build_update(self):
    """Simulate Hamiltonian dynamics using a numerical integrator.
    Correct for the integrator's discretization error using an
    acceptance ratio.

    #### Notes

    The updates assume each Empirical random variable is directly
    parameterized by `tf.Variable`s.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}
    old_sample = OrderedDict(old_sample)

    # Sample momentum.
    old_r_sample = OrderedDict()
    for z, qz in six.iteritems(self.latent_vars):
      event_shape = qz.event_shape
      normal = Normal(loc=tf.zeros(event_shape), scale=tf.ones(event_shape))
      old_r_sample[z] = normal.sample()

    # Simulate Hamiltonian dynamics.
    new_sample, new_r_sample = leapfrog(old_sample, old_r_sample,
                                        self.step_size, self._log_joint,
                                        self.n_steps)

    # Calculate acceptance ratio.
    ratio = tf.reduce_sum([0.5 * tf.reduce_sum(tf.square(r))
                           for r in six.itervalues(old_r_sample)])
    ratio -= tf.reduce_sum([0.5 * tf.reduce_sum(tf.square(r))
                            for r in six.itervalues(new_r_sample)])
    ratio += self._log_joint(new_sample)
    ratio -= self._log_joint(old_sample)

    # Accept or reject sample.
    u = Uniform().sample()
    accept = tf.log(u) < ratio
    sample_values = tf.cond(accept, lambda: list(six.itervalues(new_sample)),
                            lambda: list(six.itervalues(old_sample)))
    if not isinstance(sample_values, list):
      # `tf.cond` returns tf.Tensor if output is a list of size 1.
      sample_values = [sample_values]

    sample = {z: sample_value for z, sample_value in
              zip(six.iterkeys(new_sample), sample_values)}

    # Update Empirical random variables.
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      variable = qz.get_variables()[0]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept (if accepted).
    assign_ops.append(self.n_accept.assign_add(tf.where(accept, 1, 0)))
    return tf.group(*assign_ops)
Developer: ekostem, Project: edward, Lines: 57, Source: hmc.py
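
In equation form, the acceptance test above is the standard Metropolis-Hastings correction with a Gaussian kinetic energy: the leapfrog proposal (z^*, r^*) is accepted when, for u \sim \text{Uniform}(0, 1),

  \log u < \log p(x, z^*) - \log p(x, z) + \tfrac{1}{2} \sum_z \lVert r_z \rVert^2 - \tfrac{1}{2} \sum_z \lVert r_z^* \rVert^2,

which is exactly the ratio the code compares against \log u.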

Example 5: _test

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
def _test(shape, n):
    rv = Normal(shape, loc=tf.zeros(shape), scale=tf.ones(shape))
    rv_sample = rv.sample(n)
    x = rv_sample.eval()
    x_tf = tf.constant(x, dtype=tf.float32)
    loc = rv.loc.eval()
    scale = rv.scale.eval()
    for idx in range(shape[0]):
        assert np.allclose(
            rv.log_prob_idx((idx, ), x_tf).eval(),
            stats.norm.logpdf(x[:, idx], loc[idx], scale[idx]))
Developer: TalkingData, Project: edward, Lines: 13, Source: test_normal_log_prob_idx.py

Example 6: build_update

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
  def build_update(self):
    """
    Simulate Hamiltonian dynamics using a numerical integrator.
    Correct for the integrator's discretization error using an
    acceptance ratio.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}

    # Sample momentum.
    old_r_sample = {}
    for z, qz in six.iteritems(self.latent_vars):
      event_shape = qz.get_event_shape()
      normal = Normal(mu=tf.zeros(event_shape), sigma=tf.ones(event_shape))
      old_r_sample[z] = normal.sample()

    # Simulate Hamiltonian dynamics.
    new_sample = old_sample
    new_r_sample = old_r_sample
    for _ in range(self.n_steps):
      # Chain the integrator: each step continues from the previous state.
      new_sample, new_r_sample = leapfrog(new_sample, new_r_sample,
                                          self.step_size, self._log_joint)

    # Calculate acceptance ratio.
    ratio = tf.reduce_sum([0.5 * tf.reduce_sum(tf.square(r))
                           for r in six.itervalues(old_r_sample)])
    ratio -= tf.reduce_sum([0.5 * tf.reduce_sum(tf.square(r))
                            for r in six.itervalues(new_r_sample)])
    ratio += self._log_joint(new_sample)
    ratio -= self._log_joint(old_sample)

    # Accept or reject sample.
    u = Uniform().sample()
    accept = tf.log(u) < ratio
    sample_values = tf.cond(accept, lambda: list(six.itervalues(new_sample)),
                            lambda: list(six.itervalues(old_sample)))
    if not isinstance(sample_values, list):
      # ``tf.cond`` returns tf.Tensor if output is a list of size 1.
      sample_values = [sample_values]

    sample = {z: sample_value for z, sample_value in
              zip(six.iterkeys(new_sample), sample_values)}

    # Update Empirical random variables.
    assign_ops = []
    variables = {x.name: x for x in
                 tf.get_default_graph().get_collection(tf.GraphKeys.VARIABLES)}
    for z, qz in six.iteritems(self.latent_vars):
      variable = variables[qz.params.op.inputs[0].op.inputs[0].name]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept (if accepted).
    assign_ops.append(self.n_accept.assign_add(tf.select(accept, 1, 0)))  # tf.select became tf.where in TF 1.0
    return tf.group(*assign_ops)
Developer: blei-lab, Project: edward, Lines: 56, Source: hmc.py
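
The leapfrog helper is imported from elsewhere in the package and not shown here. As a minimal sketch, one leapfrog step for a single latent tensor, assuming grad_log_joint(z) returns the gradient of log p(x, z) with respect to z, looks like:

def leapfrog_step(z, r, step_size, grad_log_joint):
  """One step of the leapfrog (Stormer-Verlet) integrator."""
  r_half = r + 0.5 * step_size * grad_log_joint(z)   # half-step on momentum
  z_new = z + step_size * r_half                     # full step on position
  r_new = r_half + 0.5 * step_size * grad_log_joint(z_new)  # final half-step
  return z_new, r_new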

Example 7: build_update

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
  def build_update(self):
    """Simulate Hamiltonian dynamics with friction using a discretized
    integrator. Its discretization error goes to zero as the learning
    rate decreases.

    Implements the update equations from (15) of Chen et al. (2014).
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}
    old_v_sample = {z: v for z, v in six.iteritems(self.v)}

    # Simulate Hamiltonian dynamics with friction.
    friction = tf.constant(self.friction, dtype=tf.float32)
    learning_rate = tf.constant(self.step_size * 0.01, dtype=tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))

    # v_sample is so named because it represents a velocity rather than a momentum.
    sample = {}
    v_sample = {}
    for z, grad_log_p in zip(six.iterkeys(old_sample), grad_log_joint):
      qz = self.latent_vars[z]
      event_shape = qz.event_shape
      normal = Normal(loc=tf.zeros(event_shape),
                      scale=(tf.sqrt(learning_rate * friction) *
                             tf.ones(event_shape)))
      sample[z] = old_sample[z] + old_v_sample[z]
      v_sample[z] = ((1. - 0.5 * friction) * old_v_sample[z] +
                     learning_rate * tf.convert_to_tensor(grad_log_p) +
                     normal.sample())

    # Update Empirical random variables.
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      variable = qz.get_variables()[0]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))
      assign_ops.append(tf.assign(self.v[z], v_sample[z]).op)

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)
Developer: ekostem, Project: edward, Lines: 43, Source: sghmc.py
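
In symbols, writing \epsilon = 0.01 \cdot \text{step\_size} for the learning rate and A for the friction constant, the update implemented above is

  z_t = z_{t-1} + v_{t-1},
  v_t = (1 - A/2)\, v_{t-1} + \epsilon\, \nabla_z \log p(x, z_{t-1}) + \mathcal{N}(0,\ \epsilon A),

where \mathcal{N}(0, \epsilon A) denotes Gaussian noise with variance \epsilon A (the code passes \sqrt{\epsilon A} as the scale), a discretization of equation (15) of Chen et al. (2014) as the docstring notes.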

Example 8: _test

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
def _test(shape, loc, scale, size):
    x = Normal(shape, loc, scale)
    val_est = tuple(get_dims(x.sample(size=size)))
    val_true = (size, ) + shape
    assert val_est == val_true
Developer: appcoreopc, Project: edward, Lines: 7, Source: test_normal_sample.py
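
This snippet uses a very old Edward API in which Normal took an explicit shape argument. In the newer loc/scale API (as in Examples 2 and 4), the same shape contract, sample count first and event shape after, can be checked with a short sketch (assuming Edward 1.x on TensorFlow 1.x):

import tensorflow as tf
from edward.models import Normal

rv = Normal(loc=tf.zeros([3]), scale=tf.ones([3]))
samples = rv.sample(10)
print(samples.shape)  # (10, 3): the sample dimension is prepended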

Example 9: range

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
init.run()

# Set up figure
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

# draws from approximate posterior
S = 50
rs = np.random.RandomState(0)
inputs = np.linspace(-5, 3, num=400, dtype=np.float32)
x_in = tf.expand_dims(inputs, 1)
mus = []
for s in range(S):
    mus += [tf.sigmoid(ed.dot(x_in, qw.sample()) + qb.sample())]
mus = tf.stack(mus)

for t in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)

    if t % inference.n_print == 0:
        outputs = mus.eval()

        # Plot data and functions
        plt.cla()
        ax.plot(x_train[:], y_train, 'bx')
        for s in range(S):
            ax.plot(inputs, outputs[s], alpha=0.2)
        ax.set_xlim([-5, 3])
Developer: DoktorMike, Project: DoktorMike.github.io, Lines: 33, Source: hierarchical_logistic_regression.py

Example 10: range

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
inference = ed.KLqp({w: qw, b: qb}, data={X: X_train, y: y_train})
inference.initialize(n_print=10, n_iter=600)

tf.global_variables_initializer().run()

# Set up figure.
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

# Build samples from inferred posterior.
n_samples = 50
inputs = np.linspace(-5, 3, num=400, dtype=np.float32).reshape((400, 1))
probs = tf.stack([tf.sigmoid(ed.dot(inputs, qw.sample()) + qb.sample())
                  for _ in range(n_samples)])

for t in range(inference.n_iter):
  info_dict = inference.update()
  inference.print_progress(info_dict)

  if t % inference.n_print == 0:
    outputs = probs.eval()

    # Plot data and functions
    plt.cla()
    ax.plot(X_train[:], y_train, 'bx')
    for s in range(n_samples):
      ax.plot(inputs[:], outputs[s], alpha=0.2)
Developer: ekostem, Project: edward, Lines: 31, Source: bayesian_logistic_regression_klqp.py

Example 11: Normal

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
sns.jointplot(qb.params.eval()[nburn:T:stride],
              qw.params.eval()[nburn:T:stride])
plt.show()

# Posterior predictive checks.
y_post = ed.copy(y, {w: qw, b: qb})
# This is equivalent to
# y_post = Normal(loc=ed.dot(X, qw) + qb, scale=tf.ones(N))

print("Mean squared error on test data:")
print(ed.evaluate('mean_squared_error', data={X: X_test, y_post: y_test}))

print("Displaying prior predictive samples.")
n_prior_samples = 10

w_prior = w.sample(n_prior_samples).eval()
b_prior = b.sample(n_prior_samples).eval()

plt.scatter(X_train, y_train)

inputs = np.linspace(-1, 10, num=400)
for ns in range(n_prior_samples):
    output = inputs * w_prior[ns] + b_prior[ns]
    plt.plot(inputs, output)

plt.show()

print("Displaying posterior predictive samples.")
n_posterior_samples = 10

w_post = qw.sample(n_posterior_samples).eval()
Developer: ekostem, Project: edward, Lines: 33, Source: bayesian_linear_regression_sghmc.py
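
The listing is cut off by the site's line limit right after w_post is drawn. A hypothetical continuation, mirroring the prior-predictive block above (all names reused from the snippet, so treat this as a sketch rather than the original source):

b_post = qb.sample(n_posterior_samples).eval()

plt.scatter(X_train, y_train)

inputs = np.linspace(-1, 10, num=400)
for ns in range(n_posterior_samples):
    output = inputs * w_post[ns] + b_post[ns]
    plt.plot(inputs, output)

plt.show()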

Example 12: Normal

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
              sigma=tf.nn.softplus(tf.Variable(tf.random_normal([2]))))
qb_1 = Normal(mu=tf.Variable(tf.random_normal([1])),
              sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))

data = {y: y_train}
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
                     W_1: qW_1, b_1: qb_1}, data)


# Sample functions from variational model to visualize fits.
rs = np.random.RandomState(0)
inputs = np.linspace(-5, 5, num=400, dtype=np.float32)
x = tf.expand_dims(tf.constant(inputs), 1)
mus = []
for s in range(10):
  mus += [neural_network(x, qW_0.sample(), qW_1.sample(),
                         qb_0.sample(), qb_1.sample())]

mus = tf.pack(mus)  # note: tf.pack was renamed tf.stack in TF 1.0

sess = ed.get_session()
init = tf.initialize_all_variables()  # renamed tf.global_variables_initializer in TF 1.0
init.run()


# FIRST VISUALIZATION (prior)

outputs = mus.eval()

fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
Developer: blei-lab, Project: edward, Lines: 33, Source: getting_started_example.py

Example 13: Normal

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
qw = Normal(mu=tf.Variable(tf.random_normal([1], 0, 1)),
            sigma=tf.nn.softplus(tf.Variable(1*tf.random_normal([1]))))
qb = Normal(mu=tf.Variable(tf.random_normal([1], 0, 1)),
            sigma=tf.nn.softplus(tf.Variable(1*tf.random_normal([1]))))
# Set up data and the inference method to Kullback Leibler
z_train = mydf.get("PassengersNorm").reshape([N,1])
x_train = mydf.get("Tempdev").reshape([N,1])
sess = ed.get_session()
data = {x: x_train[:,0], z: z_train[:,0]}
#inference = ed.KLqp({x_mu: qx_mu, z_mu: qz_mu, w: qw, b: qb}, data)
inference = ed.KLqp({w: qw, b: qb}, data)

# Set up for samples from models
mus = []
for i in range(10):
    mus += [qw.sample()]

mus = tf.stack(mus)

# Inference: Quick way - No Priors possible
# inference.run()

# Inference: More controlled way of inference running
inference.initialize(n_print=10, n_iter=600)
init = tf.global_variables_initializer()
init.run()

# Prior samples
outputs = mus.eval()
priordf=pd.DataFrame(outputs)
priordf['Sample']=["Sample"+str(x) for x in list(range(10))]
Developer: DoktorMike, Project: DoktorMike.github.io, Lines: 33, Source: playground.py

Example 14: Dirichlet

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
qpi_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K])))
qmu_mu = tf.Variable(tf.random_normal([K * D]))
qmu_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_alpha = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))
qsigma_beta = tf.nn.softplus(tf.Variable(tf.random_normal([K * D])))

qpi = Dirichlet(alpha=qpi_alpha)
qmu = Normal(mu=qmu_mu, sigma=qmu_sigma)
qsigma = InverseGamma(alpha=qsigma_alpha, beta=qsigma_beta)

data = {'x': x_train}
inference = ed.KLqp({'pi': qpi, 'mu': qmu, 'sigma': qsigma}, data, model)
inference.run(n_iter=2500, n_samples=10, n_minibatch=20)

# Average per-cluster and per-data point likelihood over many posterior samples.
log_liks = []
for s in range(100):
  zrep = {'pi': qpi.sample(()),
          'mu': qmu.sample(()),
          'sigma': qsigma.sample(())}
  log_liks += [model.predict(data, zrep)]

log_liks = tf.reduce_mean(log_liks, 0)

# Choose the cluster with the highest likelihood for each data point.
clusters = tf.argmax(log_liks, 0).eval()
plt.scatter(x_train[:, 0], x_train[:, 1], c=clusters, cmap=cm.bwr)
plt.axis([-3, 3, -3, 3])
plt.title("Predicted cluster assignments")
plt.show()
Developer: blei-lab, Project: edward, Lines: 32, Source: tf_mixture_gaussian.py
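
In symbols, the assignment computed above averages each point's per-cluster log-likelihood over S = 100 posterior draws \theta^{(s)} = (\pi^{(s)}, \mu^{(s)}, \sigma^{(s)}) and picks the best cluster per point, assuming model.predict returns a K x N matrix of log-likelihoods (as the argmax over axis 0 suggests):

  \hat{z}_n = \arg\max_k\ \frac{1}{S} \sum_{s=1}^{S} \log p(x_n \mid z_n = k,\ \theta^{(s)})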

Example 15: Normal

# Required imports: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import sample [as alias]
inference = ed.KLqp({w: qw, b: qb}, data={X: X_train, y: y_train})
inference.run()

# CRITICISM
y_post = ed.copy(y, {w: qw, b: qb})
# This is equivalent to
# y_post = Normal(loc=ed.dot(X, qw) + qb, scale=tf.ones(N))

print("Mean squared error on test data:")
print(ed.evaluate('mean_squared_error', data={X: X_test, y_post: y_test}))

print("Displaying prior predictive samples.")
n_prior_samples = 10

w_prior = w.sample(n_prior_samples).eval()
b_prior = b.sample(n_prior_samples).eval()

plt.scatter(X_train, y_train)

inputs = np.linspace(-1, 10, num=400)
for ns in range(n_prior_samples):
    output = inputs * w_prior[ns] + b_prior[ns]
    plt.plot(inputs, output)

plt.show()

print("Displaying posterior predictive samples.")
n_posterior_samples = 10

w_post = qw.sample(n_posterior_samples).eval()
Developer: ekostem, Project: edward, Lines: 32, Source: bayesian_linear_regression_ppc.py


Note: The edward.models.Normal.sample examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not reproduce without permission.