

Python Normal.mean Method Code Examples

This article collects typical usage examples of the Python method edward.models.Normal.mean. If you have been asking yourself what exactly Normal.mean does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples of the class it belongs to, edward.models.Normal.


The following shows 13 code examples of the Normal.mean method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python code examples.
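Before diving into the full examples, here is a minimal sketch of the method itself, assuming Edward 1.x on TensorFlow 1.x; the toy values are illustrative and do not come from any of the examples below. Normal.mean() builds a TensorFlow tensor holding the distribution's mean, so the result must be evaluated in a session:

import tensorflow as tf
import edward as ed
from edward.models import Normal

# A 3-dimensional diagonal Gaussian with means [0, 1, 2].
z = Normal(loc=tf.constant([0.0, 1.0, 2.0]), scale=tf.ones(3))

# mean() returns a tensor, not a NumPy array; run it to get numbers.
sess = ed.get_session()
print(sess.run(z.mean()))  # -> [0. 1. 2.]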

Example 1: main

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import numpy as np; import tensorflow as tf;
# import edward as ed; from edward.models import Empirical.
def main(_):
  # data
  J = 8
  data_y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
  data_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18])

  # model definition
  mu = Normal(0., 10.)
  logtau = Normal(5., 1.)
  theta_prime = Normal(tf.zeros(J), tf.ones(J))
  sigma = tf.placeholder(tf.float32, J)
  y = Normal(mu + tf.exp(logtau) * theta_prime, sigma * tf.ones([J]))

  data = {y: data_y, sigma: data_sigma}

  # ed.KLqp inference
  with tf.variable_scope('q_logtau'):
    q_logtau = Normal(tf.get_variable('loc', []),
                      tf.nn.softplus(tf.get_variable('scale', [])))

  with tf.variable_scope('q_mu'):
    q_mu = Normal(tf.get_variable('loc', []),
                  tf.nn.softplus(tf.get_variable('scale', [])))

  with tf.variable_scope('q_theta_prime'):
    q_theta_prime = Normal(tf.get_variable('loc', [J]),
                           tf.nn.softplus(tf.get_variable('scale', [J])))

  inference = ed.KLqp({logtau: q_logtau, mu: q_mu,
                      theta_prime: q_theta_prime}, data=data)
  inference.run(n_samples=15, n_iter=60000)
  print("====  ed.KLqp inference ====")
  print("E[mu] = %f" % (q_mu.mean().eval()))
  print("E[logtau] = %f" % (q_logtau.mean().eval()))
  print("E[theta_prime]=")
  print((q_theta_prime.mean().eval()))
  print("====  end ed.KLqp inference ====")
  print("")
  print("")

  # HMC inference
  S = 400000
  burn = S // 2

  hq_logtau = Empirical(tf.get_variable('hq_logtau', [S]))
  hq_mu = Empirical(tf.get_variable('hq_mu', [S]))
  hq_theta_prime = Empirical(tf.get_variable('hq_thetaprime', [S, J]))

  inference = ed.HMC({logtau: hq_logtau, mu: hq_mu,
                     theta_prime: hq_theta_prime}, data=data)
  inference.run()

  print("====  ed.HMC inference ====")
  print("E[mu] = %f" % (hq_mu.params.eval()[burn:].mean()))
  print("E[logtau] = %f" % (hq_logtau.params.eval()[burn:].mean()))
  print("E[theta_prime]=")
  print(hq_theta_prime.params.eval()[burn:].mean(0))
  print("====  end ed.HMC inference ====")
  print("")
  print("")
Developer: JoyceYa, Project: edward, Lines: 62, Source: eight_schools.py

Example 2: _test_normal_normal

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# This test method additionally assumes: import numpy as np;
# import tensorflow as tf (inside a tf.test.TestCase subclass).
  def _test_normal_normal(self, Inference, default, *args, **kwargs):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=0.0, scale=1.0)
      x = Normal(loc=mu, scale=1.0, sample_shape=50)

      if not default:
        qmu_loc = tf.Variable(tf.random_normal([]))
        qmu_scale = tf.nn.softplus(tf.Variable(tf.random_normal([])))
        qmu = Normal(loc=qmu_loc, scale=qmu_scale)

        # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140);
        # posterior precision = prior precision (1) + 50 unit-precision
        # observations = 51
        inference = Inference({mu: qmu}, data={x: x_data})
      else:
        inference = Inference([mu], data={x: x_data})
        qmu = inference.latent_vars[mu]
      inference.run(*args, **kwargs)

      self.assertAllClose(qmu.mean().eval(), 0, rtol=0.1, atol=0.6)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=0.15, atol=0.5)

      variables = tf.get_collection(
          tf.GraphKeys.GLOBAL_VARIABLES, scope='optimizer')
      old_t, old_variables = sess.run([inference.t, variables])
      self.assertEqual(old_t, inference.n_iter)
      sess.run(inference.reset)
      new_t, new_variables = sess.run([inference.t, variables])
      self.assertEqual(new_t, 0)
      self.assertNotEqual(old_variables, new_variables)
Developer: JoyceYa, Project: edward, Lines: 33, Source: klqp_test.py

Example 3: main

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import numpy as np; import tensorflow as tf;
# import edward as ed; from edward.models import Bernoulli;
# from scipy.special import expit; and the IWVI class defined in iwvi.py.
def main(_):
  ed.set_seed(42)
  N = 5000  # number of data points
  D = 10  # number of features

  # DATA
  w_true = np.random.randn(D)
  X_data = np.random.randn(N, D)
  p = expit(np.dot(X_data, w_true))
  y_data = np.array([np.random.binomial(1, i) for i in p])

  # MODEL
  X = tf.placeholder(tf.float32, [N, D])
  w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
  y = Bernoulli(logits=ed.dot(X, w))

  # INFERENCE
  qw = Normal(loc=tf.get_variable("qw/loc", [D]),
              scale=tf.nn.softplus(tf.get_variable("qw/scale", [D])))

  inference = IWVI({w: qw}, data={X: X_data, y: y_data})
  inference.run(K=5, n_iter=1000)

  # CRITICISM
  print("Mean squared error in true values to inferred posterior mean:")
  print(tf.reduce_mean(tf.square(w_true - qw.mean())).eval())
Developer: JoyceYa, Project: edward, Lines: 28, Source: iwvi.py

Example 4: main

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import tensorflow as tf; import edward as ed;
# plus the script's FLAGS definitions and its build_toy_dataset and
# next_batch helpers.
def main(_):
  ed.set_seed(142)

  # DATA
  x_train = build_toy_dataset(FLAGS.N, FLAGS.D, FLAGS.K)

  # MODEL
  w = Normal(loc=0.0, scale=10.0, sample_shape=[FLAGS.D, FLAGS.K])
  z = Normal(loc=0.0, scale=1.0, sample_shape=[FLAGS.M, FLAGS.K])
  x = Normal(loc=tf.matmul(w, z, transpose_b=True),
             scale=tf.ones([FLAGS.D, FLAGS.M]))

  # INFERENCE
  qw_variables = [tf.get_variable("qw/loc", [FLAGS.D, FLAGS.K]),
                  tf.get_variable("qw/scale", [FLAGS.D, FLAGS.K])]
  qw = Normal(loc=qw_variables[0], scale=tf.nn.softplus(qw_variables[1]))

  qz_variables = [tf.get_variable("qz/loc", [FLAGS.N, FLAGS.K]),
                  tf.get_variable("qz/scale", [FLAGS.N, FLAGS.K])]
  idx_ph = tf.placeholder(tf.int32, FLAGS.M)
  qz = Normal(loc=tf.gather(qz_variables[0], idx_ph),
              scale=tf.nn.softplus(tf.gather(qz_variables[1], idx_ph)))

  x_ph = tf.placeholder(tf.float32, [FLAGS.D, FLAGS.M])
  inference_w = ed.KLqp({w: qw}, data={x: x_ph, z: qz})
  inference_z = ed.KLqp({z: qz}, data={x: x_ph, w: qw})

  scale_factor = float(FLAGS.N) / FLAGS.M
  inference_w.initialize(scale={x: scale_factor, z: scale_factor},
                         var_list=qz_variables,
                         n_samples=5)
  inference_z.initialize(scale={x: scale_factor, z: scale_factor},
                         var_list=qw_variables,
                         n_samples=5)

  sess = ed.get_session()
  tf.global_variables_initializer().run()
  for _ in range(inference_w.n_iter):
    x_batch, idx_batch = next_batch(x_train, FLAGS.M)
    for _ in range(5):
      inference_z.update(feed_dict={x_ph: x_batch, idx_ph: idx_batch})

    info_dict = inference_w.update(feed_dict={x_ph: x_batch, idx_ph: idx_batch})
    inference_w.print_progress(info_dict)

    t = info_dict['t']
    if t % 100 == 0:
      print("\nInferred principal axes:")
      print(sess.run(qw.mean()))
Developer: JoyceYa, Project: edward, Lines: 51, Source: probabilistic_pca_subsampling.py

Example 5: test_normalnormal_run

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# This test method additionally assumes: import numpy as np;
# import tensorflow as tf; import edward as ed (inside a tf.test.TestCase subclass).
  def test_normalnormal_run(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=0.0, scale=1.0)
      x = Normal(loc=tf.ones(50) * mu, scale=1.0)

      qmu_loc = tf.Variable(tf.random_normal([]))
      qmu_scale = tf.nn.softplus(tf.Variable(tf.random_normal([])))
      qmu = Normal(loc=qmu_loc, scale=qmu_scale)

      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140);
      # posterior precision = prior precision (1) + 50 unit-precision
      # observations = 51
      inference = ed.KLpq({mu: qmu}, data={x: x_data})
      inference.run(n_samples=25, n_iter=100)

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)
Developer: ekostem, Project: edward, Lines: 20, Source: test_klpq.py

Example 6: test_normal_run

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# This test method additionally assumes: import tensorflow as tf;
# import edward as ed (inside a tf.test.TestCase subclass).
  def test_normal_run(self):
    def ratio_estimator(data, local_vars, global_vars):
      """Use the optimal ratio estimator, r(z) = log p(z). We add a
      TensorFlow variable as the algorithm assumes that the function
      has parameters to optimize."""
      w = tf.get_variable("w", [])
      return z.log_prob(local_vars[z]) + w

    with self.test_session() as sess:
      z = Normal(loc=5.0, scale=1.0)

      qz = Normal(loc=tf.Variable(tf.random_normal([])),
                  scale=tf.nn.softplus(tf.Variable(tf.random_normal([]))))

      inference = ed.ImplicitKLqp({z: qz}, discriminator=ratio_estimator)
      inference.run(n_iter=200)

      self.assertAllClose(qz.mean().eval(), 5.0, atol=1.0)
Developer: JoyceYa, Project: edward, Lines: 20, Source: implicitklqp_test.py

Example 7: build_toy_dataset

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import tensorflow as tf; import edward as ed;
# and the script's build_toy_dataset helper. It uses the older Edward parameter
# names mu/sigma, which newer releases spell loc/scale.
N = 5000  # number of data points
D = 2  # data dimensionality
K = 1  # latent dimensionality

# DATA

x_train = build_toy_dataset(N, D, K)

# MODEL

w = Normal(mu=tf.zeros([D, K]), sigma=10.0 * tf.ones([D, K]))
z = Normal(mu=tf.zeros([N, K]), sigma=tf.ones([N, K]))
x = Normal(mu=tf.matmul(w, z, transpose_b=True), sigma=tf.ones([D, N]))

# INFERENCE

qw = Normal(mu=tf.Variable(tf.random_normal([D, K])),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal([D, K]))))
qz = Normal(mu=tf.Variable(tf.random_normal([N, K])),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal([N, K]))))

inference = ed.KLqp({w: qw, z: qz}, data={x: x_train})

# inference.run() initializes all TensorFlow variables internally, so the
# deprecated tf.initialize_all_variables() call is unnecessary here.
inference.run(n_iter=500, n_print=100, n_samples=10)

sess = ed.get_session()
print("Inferred principal axes:")
print(sess.run(qw.mean()))
Developer: blei-lab, Project: edward, Lines: 31, Source: probabilistic_pca.py

Example 8: main

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import numpy as np; import tensorflow as tf;
# import edward as ed; plus the script's FLAGS definitions and its
# build_toy_dataset and generator helpers.
def main(_):
  def ratio_estimator(data, local_vars, global_vars):
    """Takes as input a dict of data x, local variable samples z, and
    global variable samples beta; outputs real values of shape
    (x.shape[0] + z.shape[0],). In this example, there are no local
    variables.
    """
    # data[y] has shape (M,); global_vars[w] has shape (D,)
    # we concatenate w to each data point y, so input has shape (M, 1 + D)
    inputs = tf.concat([
        tf.reshape(data[y], [FLAGS.M, 1]),
        tf.tile(tf.reshape(global_vars[w], [1, FLAGS.D]), [FLAGS.M, 1])], 1)
    hidden = tf.layers.dense(inputs, 64, activation=tf.nn.relu)
    output = tf.layers.dense(hidden, 1, activation=None)
    return output

  ed.set_seed(42)

  # DATA
  w_true = np.ones(FLAGS.D) * 5.0
  X_train, y_train = build_toy_dataset(FLAGS.N, w_true)
  X_test, y_test = build_toy_dataset(FLAGS.N, w_true)
  data = generator([X_train, y_train], FLAGS.M)

  # MODEL
  X = tf.placeholder(tf.float32, [FLAGS.M, FLAGS.D])
  y_ph = tf.placeholder(tf.float32, [FLAGS.M])
  w = Normal(loc=tf.zeros(FLAGS.D), scale=tf.ones(FLAGS.D))
  y = Normal(loc=ed.dot(X, w), scale=tf.ones(FLAGS.M))

  # INFERENCE
  qw = Normal(loc=tf.get_variable("qw/loc", [FLAGS.D]) + 1.0,
              scale=tf.nn.softplus(tf.get_variable("qw/scale", [FLAGS.D])))

  inference = ed.ImplicitKLqp(
      {w: qw}, data={y: y_ph},
      discriminator=ratio_estimator, global_vars={w: qw})
  inference.initialize(n_iter=5000, n_print=100,
                       scale={y: float(FLAGS.N) / FLAGS.M})

  sess = ed.get_session()
  tf.global_variables_initializer().run()

  for _ in range(inference.n_iter):
    X_batch, y_batch = next(data)
    for _ in range(5):
      info_dict_d = inference.update(
          variables="Disc", feed_dict={X: X_batch, y_ph: y_batch})

    info_dict = inference.update(
        variables="Gen", feed_dict={X: X_batch, y_ph: y_batch})
    info_dict['loss_d'] = info_dict_d['loss_d']
    info_dict['t'] = info_dict['t'] // 6  # count each set of 6 updates (5 discriminator + 1 generator) as one iteration

    t = info_dict['t']
    inference.print_progress(info_dict)
    if t == 1 or t % inference.n_print == 0:
      # Check inferred posterior parameters.
      mean, std = sess.run([qw.mean(), qw.stddev()])
      print("\nInferred mean & std:")
      print(mean)
      print(std)
Developer: JoyceYa, Project: edward, Lines: 64, Source: bayesian_linear_regression_implicitklqp.py

Example 9: build_toy_dataset

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import numpy as np; import tensorflow as tf;
# import edward as ed; and the script's build_toy_dataset helper. It uses the
# older Edward parameter names mu/sigma, which newer releases spell loc/scale.
N = 40  # number of data points (assumed: the excerpt omits this line; 40 matches the upstream example)
D = 10  # number of features

# DATA
coeff = np.random.randn(D)
X_train, y_train = build_toy_dataset(N, coeff)
X_test, y_test = build_toy_dataset(N, coeff)

# MODEL
X = tf.placeholder(tf.float32, [N, D])
w = Normal(mu=tf.zeros(D), sigma=tf.ones(D))
b = Normal(mu=tf.zeros(1), sigma=tf.ones(1))
y = Normal(mu=ed.dot(X, w) + b, sigma=tf.ones(N))

# INFERENCE
qw = Normal(mu=tf.Variable(tf.random_normal([D])),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))
qb = Normal(mu=tf.Variable(tf.random_normal([1])),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))

data = {X: X_train, y: y_train}
inference = ed.KLqp({w: qw, b: qb}, data)
inference.run(n_samples=5, n_iter=250)

# CRITICISM
y_post = ed.copy(y, {w: qw.mean(), b: qb.mean()})
# This is equivalent to
# y_post = Normal(mu=ed.dot(X, qw.mean()) + qb.mean(), sigma=tf.ones(N))

print("Mean squared error on test data:")
print(ed.evaluate('mean_squared_error', data={X: X_test, y_post: y_test}))
Developer: blei-lab, Project: edward, Lines: 32, Source: bayesian_linear_regression_10d.py

Example 10: Normal

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# Truncated excerpt: the model and the remaining variational factors
# (qlnvarstudents, qlnvarquestions, qstudents, qquestions, outcomes, obs)
# are defined earlier in irt.py; Example 11 shows the complete source.
qmu = Normal(
    loc=tf.Variable(tf.random_normal([1])),
    scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))

latent_vars = {
    overall_mu: qmu,
    lnvar_students: qlnvarstudents,
    lnvar_questions: qlnvarquestions,
    student_etas: qstudents,
    question_etas: qquestions
}
data = {outcomes: obs}
inference = ed.KLqp(latent_vars, data)
inference.initialize(n_print=2, n_iter=50)

qstudents_mean = qstudents.mean()
qquestions_mean = qquestions.mean()

tf.global_variables_initializer().run()

f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.set_ylim([-3.0, 3.0])
ax2.set_ylim([-3.0, 3.0])
ax1.set_xlim([-3.0, 3.0])
ax2.set_xlim([-3.0, 3.0])

for t in range(inference.n_iter):
  info_dict = inference.update()
  inference.print_progress(info_dict)

  if t % inference.n_print == 0:
    pass  # excerpt truncated here; Example 11 contains the full plotting body
Developer: wujsAct, Project: edward, Lines: 33, Source: irt.py

Example 11: main

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import tensorflow as tf; import edward as ed;
# import matplotlib.pyplot as plt; from edward.models import Bernoulli;
# plus the script's FLAGS definitions and its build_toy_dataset helper.
def main(_):
  ed.set_seed(42)

  # DATA
  data, true_s_etas, true_q_etas = build_toy_dataset(
      FLAGS.n_students, FLAGS.n_questions, FLAGS.n_obs)
  obs = data['outcomes'].values
  student_ids = data['student_id'].values.astype(int)
  question_ids = data['question_id'].values.astype(int)

  # MODEL
  lnvar_students = Normal(loc=0.0, scale=1.0)
  lnvar_questions = Normal(loc=0.0, scale=1.0)

  sigma_students = tf.sqrt(tf.exp(lnvar_students))
  sigma_questions = tf.sqrt(tf.exp(lnvar_questions))

  overall_mu = Normal(loc=tf.zeros(1), scale=tf.ones(1))

  student_etas = Normal(loc=0.0, scale=sigma_students,
                        sample_shape=FLAGS.n_students)
  question_etas = Normal(loc=0.0, scale=sigma_questions,
                         sample_shape=FLAGS.n_questions)

  observation_logodds = (tf.gather(student_etas, student_ids) +
                         tf.gather(question_etas, question_ids) +
                         overall_mu)
  outcomes = Bernoulli(logits=observation_logodds)

  # INFERENCE
  qstudents = Normal(
      loc=tf.get_variable("qstudents/loc", [FLAGS.n_students]),
      scale=tf.nn.softplus(
          tf.get_variable("qstudents/scale", [FLAGS.n_students])))
  qquestions = Normal(
      loc=tf.get_variable("qquestions/loc", [FLAGS.n_questions]),
      scale=tf.nn.softplus(
          tf.get_variable("qquestions/scale", [FLAGS.n_questions])))
  qlnvarstudents = Normal(
      loc=tf.get_variable("qlnvarstudents/loc", []),
      scale=tf.nn.softplus(
          tf.get_variable("qlnvarstudents/scale", [])))
  qlnvarquestions = Normal(
      loc=tf.get_variable("qlnvarquestions/loc", []),
      scale=tf.nn.softplus(
          tf.get_variable("qlnvarquestions/scale", [])))
  qmu = Normal(
      loc=tf.get_variable("qmu/loc", [1]),
      scale=tf.nn.softplus(
          tf.get_variable("qmu/scale", [1])))

  latent_vars = {
      overall_mu: qmu,
      lnvar_students: qlnvarstudents,
      lnvar_questions: qlnvarquestions,
      student_etas: qstudents,
      question_etas: qquestions
  }
  data = {outcomes: obs}
  inference = ed.KLqp(latent_vars, data)
  inference.initialize(n_print=2, n_iter=50)

  qstudents_mean = qstudents.mean()
  qquestions_mean = qquestions.mean()

  tf.global_variables_initializer().run()

  f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
  ax1.set_ylim([-3.0, 3.0])
  ax2.set_ylim([-3.0, 3.0])
  ax1.set_xlim([-3.0, 3.0])
  ax2.set_xlim([-3.0, 3.0])

  for t in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)

    if t % inference.n_print == 0:
      # CRITICISM
      ax1.clear()
      ax2.clear()
      ax1.set_ylim([-3.0, 3.0])
      ax2.set_ylim([-3.0, 3.0])
      ax1.set_xlim([-3.0, 3.0])
      ax2.set_xlim([-3.0, 3.0])

      ax1.set_title('Student Intercepts')
      ax2.set_title('Question Intercepts')
      ax1.set_xlabel('True Student Random Intercepts')
      ax1.set_ylabel('Estimated Student Random Intercepts')
      ax2.set_xlabel('True Question Random Intercepts')
      ax2.set_ylabel('Estimated Question Random Intercepts')

      ax1.scatter(true_s_etas, qstudents_mean.eval(), s=0.05)
      ax2.scatter(true_q_etas, qquestions_mean.eval(), s=0.05)
      plt.draw()
      plt.pause(2.0 / 60.0)
Developer: JoyceYa, Project: edward, Lines: 99, Source: irt.py

Example 12: list

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# The excerpt additionally assumes: import numpy as np; import tensorflow as tf;
# import edward as ed; from edward.models import Bernoulli;
# from scipy.special import expit; and the IWVI class defined in iwvi.py.
# The first three code lines below are the truncated tail of a gradient-building
# method on that class.
    grads = tf.gradients(loss, [v._ref() for v in var_list])
    grads_and_vars = list(zip(grads, var_list))
    return loss, grads_and_vars


ed.set_seed(42)
N = 5000  # number of data points
D = 10  # number of features

# DATA
w_true = np.random.randn(D)
X_data = np.random.randn(N, D)
p = expit(np.dot(X_data, w_true))
y_data = np.array([np.random.binomial(1, i) for i in p])

# MODEL
X = tf.placeholder(tf.float32, [N, D])
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
y = Bernoulli(logits=ed.dot(X, w))

# INFERENCE
qw = Normal(loc=tf.Variable(tf.random_normal([D])),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))

inference = IWVI({w: qw}, data={X: X_data, y: y_data})
inference.run(K=5, n_iter=1000)

# CRITICISM
print("Mean squared error in true values to inferred posterior mean:")
print(tf.reduce_mean(tf.square(w_true - qw.mean())).eval())
Developer: ekostem, Project: edward, Lines: 32, Source: iwvi.py

Example 13: Normal

# Required import: from edward.models import Normal [as alias]
# Or: from edward.models.Normal import mean [as alias]
# This excerpt omits earlier definitions (X, y_ph, w, D, N, M, ratio_estimator,
# data); Example 8 shows a fuller version of the same script.
qw = Normal(loc=tf.Variable(tf.random_normal([D]) + 1.0),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([D]))))

inference = ed.ImplicitKLqp(
    {w: qw}, data={y: y_ph},
    discriminator=ratio_estimator, global_vars={w: qw})
inference.initialize(n_iter=5000, n_print=100, scale={y: float(N) / M})

sess = ed.get_session()
tf.global_variables_initializer().run()

for _ in range(inference.n_iter):
  X_batch, y_batch = next(data)
  for _ in range(5):
    info_dict_d = inference.update(
        variables="Disc", feed_dict={X: X_batch, y_ph: y_batch})

  info_dict = inference.update(
      variables="Gen", feed_dict={X: X_batch, y_ph: y_batch})
  info_dict['loss_d'] = info_dict_d['loss_d']
  info_dict['t'] = info_dict['t'] // 6  # count each set of 6 updates (5 discriminator + 1 generator) as one iteration

  t = info_dict['t']
  inference.print_progress(info_dict)
  if t == 1 or t % inference.n_print == 0:
    # Check inferred posterior parameters.
    mean, std = sess.run([qw.mean(), qw.stddev()])
    print("\nInferred mean & std:")
    print(mean)
    print(std)
Developer: ekostem, Project: edward, Lines: 32, Source: bayesian_linear_regression_implicitklqp.py


Note: the edward.models.Normal.mean examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not repost without permission.