

Python v2.reduce_mean Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v2.reduce_mean. If you are wondering what v2.reduce_mean does, how to call it, or what real usage looks like, the curated examples below should help. You can also browse further usage examples from the tensorflow.compat.v2 module.


The following presents 15 code examples of the v2.reduce_mean method, sorted by popularity by default.
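
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the values are illustrative) of how reduce_mean behaves with the input_tensor, axis, and keepdims arguments that recur throughout these examples:

import tensorflow.compat.v2 as tf

x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_mean(x)                                      # 2.5 (mean of all elements)
tf.reduce_mean(x, axis=0)                              # [2., 3.] (mean down each column)
tf.reduce_mean(input_tensor=x, axis=1, keepdims=True)  # [[1.5], [3.5]]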

Example 1: test_randomized_qmc_basic

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_randomized_qmc_basic(self):
    """Tests the randomization of the random.halton sequences."""
    # This test is identical to the example given in Owen (2017), Figure 5.
    dim = 20
    num_results = 2000
    replica = 5
    seed = 121117

    values = []
    for i in range(replica):
      sample, _ = random.halton.sample(dim, num_results=num_results,
                                       seed=seed + i)
      f = tf.reduce_mean(
          input_tensor=tf.reduce_sum(input_tensor=sample, axis=1)**2)
      values.append(self.evaluate(f))
    self.assertAllClose(np.mean(values), 101.6667, atol=np.std(values) * 2) 
Developer: google, Project: tf-quant-finance, Lines: 18, Source: halton_test.py

Example 2: test_multiivariate_xla_compatible

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_multiivariate_xla_compatible(self):
    """Tests that multiivariate GBM sampling is XLA-compatible."""
    corr_matrix = [[1, 0.1], [0.1, 1]]
    process = tff.models.MultivariateGeometricBrownianMotion(
        dim=2, means=0.05, volatilities=[0.1, 0.2], corr_matrix=corr_matrix,
        dtype=tf.float64)
    times = [0.1, 0.5, 1.0]
    initial_state = [1.0, 2.0]
    @tf.function
    def sample_fn():
      return process.sample_paths(
          times=times, initial_state=initial_state, num_samples=10000)
    samples = tf.xla.experimental.compile(sample_fn)[0]
    log_s = tf.math.log(samples)
    mean = tf.reduce_mean(log_s, axis=0)
    expected_mean = ((process._means - process._vols**2 / 2)
                     * np.array(np.expand_dims(times, -1))
                     + np.log(initial_state))
    self.assertAllClose(mean, expected_mean, atol=1e-2, rtol=1e-2) 
Developer: google, Project: tf-quant-finance, Lines: 21, Source: geometric_brownian_motion_test.py

Example 3: nonneg_crossentropy

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def nonneg_crossentropy(expr, target):
  """A cross entropy operator that is appropriate for NQL outputs.

  Query expressions often evaluate to sparse vectors.  This evaluates cross
  entropy safely.

  Args:
    expr: a Tensorflow expression for some predicted values.
    target: a Tensorflow expression for target values.

  Returns:
    Tensorflow expression for cross entropy.
  """
  expr_replacing_0_with_1 = \
     tf.where(expr > 0, expr, tf.ones(tf.shape(input=expr), tf.float32))
  cross_entropies = tf.reduce_sum(
      input_tensor=-target * tf.math.log(expr_replacing_0_with_1), axis=1)
  return tf.reduce_mean(input_tensor=cross_entropies, axis=0) 
Developer: google-research, Project: language, Lines: 20, Source: __init__.py
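
A minimal usage sketch for the helper above (the expr and target values are illustrative, not from the original repo). With a sparse prediction row containing zeros, the tf.where guard keeps the log finite:

expr = tf.constant([[0.0, 0.7, 0.3],
                    [1.0, 0.0, 0.0]])    # sparse predicted distributions
target = tf.constant([[0.0, 1.0, 0.0],
                      [1.0, 0.0, 0.0]])  # one-hot targets
loss = nonneg_crossentropy(expr, target)
# Zeros in expr are replaced by 1 before the log, so log(0) never occurs.
# Here loss = mean(-log(0.7), -log(1.0)) ≈ 0.178.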

Example 4: get_cross_entropy_loss

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def get_cross_entropy_loss(learner_agent_output, env_output, actor_agent_output,
                           actor_action, reward_clipping, discounting,
                           baseline_cost, entropy_cost, num_steps):
  """Computes cross entropy loss."""
  del env_output
  del actor_agent_output
  del reward_clipping
  del discounting
  del baseline_cost
  del entropy_cost

  # Align learner output and actor output.
  # NOTE that for a tensor of num_timesteps=3, learner output has output at
  # timesteps [t1, t2, t3] while actor output has output at timesteps [t0, t1,
  # t2]. Hence the need to align before computing cross-entropy loss.
  policy_logits = learner_agent_output.policy_logits[:-1]
  target_actions = actor_action.oracle_next_action_idx[1:]

  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=target_actions, logits=policy_logits)
  tf.summary.scalar(
      'loss/cross_entropy_loss', tf.reduce_mean(cross_entropy), step=num_steps)
  return cross_entropy 
Developer: google-research, Project: valan, Lines: 25, Source: loss_fns.py
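
The alignment step above can be sketched with dummy values (hypothetical, for illustration only). For num_timesteps=3, dropping the last learner step and the first actor step pairs each logit with the oracle action at the same timestep:

# Learner logits at steps [t1, t2, t3]; oracle actions at steps [t0, t1, t2].
policy_logits = tf.constant([[0.2, 0.8], [0.5, 0.5], [0.9, 0.1]])
oracle_next_action_idx = tf.constant([0, 1, 0])
aligned_logits = policy_logits[:-1]           # steps [t1, t2]
aligned_targets = oracle_next_action_idx[1:]  # steps [t1, t2]
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=aligned_targets, logits=aligned_logits)  # shape [2]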

Example 5: loss_fn

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def loss_fn(params, inputs, targets):
  predicted = params[0] * inputs + params[1]
  loss = tf.reduce_mean(input_tensor=tf.square(predicted - targets))
  return tf_np.asarray(loss) 
Developer: google, Project: trax, Lines: 6, Source: extensions_test.py

Example 6: testPmean

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def testPmean(self):
    if extensions.tpu_devices():
      self.skipTest("pmean for TPU is not supported yet")
    devices = self._get_two_devices(require_same_type=True)

    def reduce_mean(f):
      return extensions.pmean(f)

    data = tf_np.asarray(tf.convert_to_tensor(value=[1, 3]))
    pmapped = extensions.pmap(reduce_mean, devices=devices)
    result = pmapped(data)

    self.assertAllClose(result[0], 2)
    self.assertAllClose(result[1], 2) 
Developer: google, Project: trax, Lines: 16, Source: extensions_test.py

Example 7: test_normal_integral_mean_and_var_correctly_estimated

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_normal_integral_mean_and_var_correctly_estimated(self):
    n = int(1000)
    # This test is almost identical to the similarly named test in
    # monte_carlo_test.py. The only difference is that we use the Halton
    # samples instead of the random samples to evaluate the expectations.
    # MC with pseudo-random numbers converges at the rate of 1/sqrt(N)
    # (N = number of samples). For QMC in low dimensions, the expected
    # convergence rate is ~1/N, so ~1e3 samples suffice here, compared to the
    # 1e6 samples used in the pseudo-random Monte Carlo test.
    mu_p = tf.constant([-1., 1.], dtype=tf.float64)
    mu_q = tf.constant([0., 0.], dtype=tf.float64)
    sigma_p = tf.constant([0.5, 0.5], dtype=tf.float64)
    sigma_q = tf.constant([1., 1.], dtype=tf.float64)
    p = tfd.Normal(loc=mu_p, scale=sigma_p)
    q = tfd.Normal(loc=mu_q, scale=sigma_q)

    cdf_sample, _ = random.halton.sample(2, num_results=n, dtype=tf.float64,
                                         seed=1729)
    q_sample = q.quantile(cdf_sample)

    # Compute E_p[X].
    e_x = tf.reduce_mean(q_sample * p.prob(q_sample) / q.prob(q_sample), 0)

    # Compute E_p[X^2 - E_p[X]^2].
    e_x2 = tf.reduce_mean(q_sample**2 * p.prob(q_sample) / q.prob(q_sample)
                          - e_x**2, 0)
    stddev = tf.sqrt(e_x2)

    # Keep the tolerance levels the same as in monte_carlo_test.py.
    self.assertEqual(p.batch_shape, e_x.shape)
    self.assertAllClose(self.evaluate(p.mean()), self.evaluate(e_x), rtol=0.01)
    self.assertAllClose(
        self.evaluate(p.stddev()), self.evaluate(stddev), rtol=0.02) 
Developer: google, Project: tf-quant-finance, Lines: 35, Source: halton_test.py

Example 8: test_docstring_example

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_docstring_example(self):
    # Produce the first 1000 members of the Halton sequence in 3 dimensions.
    num_results = 1000
    dim = 3
    sample, params = random.halton.sample(dim, num_results=num_results,
                                          seed=127)

    # Evaluate the integral of x_1 * x_2^2 * x_3^3  over the three dimensional
    # hypercube.
    powers = tf.range(1., limit=dim + 1)
    integral = tf.reduce_mean(
        input_tensor=tf.reduce_prod(input_tensor=sample**powers, axis=-1))
    true_value = 1. / tf.reduce_prod(input_tensor=powers + 1.)

    # Produces a relative absolute error of 1.7%.
    self.assertAllClose(
        self.evaluate(integral), self.evaluate(true_value), rtol=0.02)

    # Now skip the first 1000 samples and recompute the integral with the next
    # thousand samples. The sequence_indices argument can be used to do this.

    sequence_indices = tf.range(
        start=1000, limit=1000 + num_results, dtype=tf.int32)
    sample_leaped, _ = random.halton.sample(
        dim, sequence_indices=sequence_indices, randomization_params=params)

    integral_leaped = tf.reduce_mean(
        input_tensor=tf.reduce_prod(
            input_tensor=sample_leaped**powers, axis=-1))
    self.assertAllClose(
        self.evaluate(integral_leaped), self.evaluate(true_value), rtol=0.05) 
Developer: google, Project: tf-quant-finance, Lines: 33, Source: halton_test.py

Example 9: test_normal_integral_mean_and_var_correctly_estimated

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_normal_integral_mean_and_var_correctly_estimated(self):
    n = int(1000)
    # This test is almost identical to the similarly named test in
    # monte_carlo_test.py. The only difference is that we use the Sobol
    # samples instead of the random samples to evaluate the expectations.
    # MC with pseudo-random numbers converges at the rate of 1/sqrt(N)
    # (N = number of samples). For QMC in low dimensions, the expected
    # convergence rate is ~1/N, so ~1e3 samples suffice here, compared to the
    # 1e6 samples used in the pseudo-random Monte Carlo test.
    dtype = tf.float64
    mu_p = tf.constant([-1., 1.], dtype=dtype)
    mu_q = tf.constant([0., 0.], dtype=dtype)
    sigma_p = tf.constant([0.5, 0.5], dtype=dtype)
    sigma_q = tf.constant([1., 1.], dtype=dtype)
    p = tfp.distributions.Normal(loc=mu_p, scale=sigma_p)
    q = tfp.distributions.Normal(loc=mu_q, scale=sigma_q)

    cdf_sample = random.sobol.sample(2, n, dtype=dtype)
    q_sample = q.quantile(cdf_sample)

    # Compute E_p[X].
    e_x = tf.reduce_mean(q_sample * p.prob(q_sample) / q.prob(q_sample), 0)

    # Compute E_p[X^2 - E_p[X]^2].
    e_x2 = tf.reduce_mean(q_sample**2 * p.prob(q_sample) / q.prob(q_sample)
                          - e_x**2, 0)
    stddev = tf.sqrt(e_x2)

    # Keep the tolerance levels the same as in monte_carlo_test.py.
    self.assertEqual(p.batch_shape, e_x.shape)
    self.assertAllClose(self.evaluate(p.mean()), self.evaluate(e_x), rtol=0.01)
    self.assertAllClose(
        self.evaluate(p.stddev()), self.evaluate(stddev), rtol=0.02) 
Developer: google, Project: tf-quant-finance, Lines: 35, Source: sobol_test.py

Example 10: test_multivariate_sample_mean_and_variance

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_multivariate_sample_mean_and_variance(self, dtype):
    """Tests the mean and vol of the univariate GBM sampled paths."""
    means = 0.05
    volatilities = [0.1, 0.2]
    corr_matrix = [[1, 0.1], [0.1, 1]]
    process = tff.models.MultivariateGeometricBrownianMotion(
        dim=2, means=means, volatilities=volatilities, corr_matrix=corr_matrix,
        dtype=dtype)
    times = [0.1, 0.5, 1.0]
    initial_state = [1.0, 2.0]
    samples = process.sample_paths(
        times=times, initial_state=initial_state,
        random_type=tff.math.random.RandomType.SOBOL, num_samples=10000)
    log_s = tf.math.log(samples)
    mean = tf.reduce_mean(log_s, axis=0, keepdims=True)
    var = tf.reduce_mean((log_s - mean)**2, axis=0)
    expected_mean = ((process._means - process._vols**2 / 2)
                     * np.array(np.expand_dims(times, -1))
                     + np.log(initial_state))
    expected_var = process._vols**2 * np.array(np.expand_dims(times, -1))
    with self.subTest("Drift"):
      self.assertAllClose(tf.squeeze(mean), expected_mean, atol=1e-3, rtol=1e-3)
    with self.subTest("Variance"):
      self.assertAllClose(tf.squeeze(var), expected_var, atol=1e-3, rtol=1e-3)
    with self.subTest("Correlations"):
      samples = self.evaluate(samples)
      # Check the correlation matrix at every time step, not just the last one.
      for i in range(len(times)):
        corr = np.corrcoef(samples[:, i, :], rowvar=False)
        self.assertAllClose(corr, corr_matrix, atol=1e-2, rtol=1e-2) 
Developer: google, Project: tf-quant-finance, Lines: 31, Source: geometric_brownian_motion_test.py

Example 11: test_univariate_xla_compatible

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_univariate_xla_compatible(self):
    """Tests that univariate GBM sampling is XLA-compatible."""
    process = tff.models.GeometricBrownianMotion(0.05, 0.5, dtype=tf.float64)
    @tf.function
    def sample_fn():
      return process.sample_paths(
          times=[0.1, 0.5, 1.0], initial_state=2.0, num_samples=10000)
    samples = tf.xla.experimental.compile(sample_fn)[0]
    log_s = tf.math.log(samples)
    mean = tf.reduce_mean(log_s, axis=0)
    expected_mean = ((process._mu - process._sigma**2 / 2)
                     * np.array([0.1, 0.5, 1.0]) + np.log(2.))
    self.assertAllClose(tf.squeeze(mean), expected_mean, atol=1e-2, rtol=1e-2) 
Developer: google, Project: tf-quant-finance, Lines: 15, Source: geometric_brownian_motion_test.py

Example 12: test_variables_receive_gradients

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_variables_receive_gradients(self):
    loc = tf.Variable(1., dtype=tf.float32)
    log_scale = tf.Variable(0., dtype=tf.float32)
    with tf.GradientTape() as tape:
      dist = self.dist_cls(loc=loc, scale=tf.exp(log_scale))
      x = tf.random.normal([20])
      loss = -tf.reduce_mean(dist.log_prob(x))
    grads = tape.gradient(loss, [loc, log_scale])
    self.assertLen(grads, 2)
    self.assertNotIn(None, grads) 
Developer: tensorflow, Project: compression, Lines: 12, Source: uniform_noise_test.py

Example 13: test_variables_receive_gradients

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def test_variables_receive_gradients(self):
    df = deep_factorized.DeepFactorized()
    with tf.GradientTape() as tape:
      x = tf.random.normal([20])
      loss = -tf.reduce_mean(df.log_prob(x))
    grads = tape.gradient(loss, df.trainable_variables)
    self.assertLen(grads, 8)
    self.assertNotIn(None, grads) 
Developer: tensorflow, Project: compression, Lines: 10, Source: deep_factorized_test.py

Example 14: call

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def call(self, inputs, lengths, training):
    seq_mask = tf.sequence_mask(
        lengths, inputs.shape[1], dtype=tf.dtypes.float32)
    forward_outputs = self.forward_rnn(inputs, training=training)
    reversed_inputs = tf.reverse_sequence(inputs, lengths, seq_axis=1)
    backward_outputs = self.backward_rnn(reversed_inputs, training=training)
    backward_outputs = tf.reverse_sequence(
        backward_outputs, lengths, seq_axis=1)
    outputs = tf.concat([forward_outputs, backward_outputs], axis=-1)
    outputs = outputs * tf.expand_dims(seq_mask, -1)

    if self.reduce_states:
      outputs = tf.reduce_mean(outputs, axis=1)

    return outputs 
Developer: google-research, Project: valan, Lines: 17, Source: lingunet.py

Example 15: _compute_baseline_loss

# Required import: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import reduce_mean [as alias]
def _compute_baseline_loss(advantages, step):
  # Loss for the baseline, summed over the time dimension. Multiply by 0.5 to
  # match the standard update rule:
  #   d(loss) / d(baseline) = advantage
  baseline_cost = .5 * tf.square(advantages)
  tf.summary.scalar(
      'loss/baseline_cost', tf.reduce_mean(baseline_cost), step=step)
  return baseline_cost 
Developer: google-research, Project: valan, Lines: 10, Source: loss_fns.py
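
As a sanity check of the 0.5 factor in the comment above (a standalone sketch, not part of the original file), differentiating 0.5 * advantage**2 with respect to the baseline yields minus the advantage, with no stray factor of 2:

baseline = tf.Variable(1.0)
value_target = tf.constant(3.0)
with tf.GradientTape() as tape:
  advantage = value_target - baseline
  loss = 0.5 * tf.square(advantage)
grad = tape.gradient(loss, baseline)
# grad == -(value_target - baseline) == -2.0, i.e. minus the advantage,
# so gradient descent moves the baseline toward the target.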


Note: The tensorflow.compat.v2.reduce_mean method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not repost without permission.