

Python v1.square Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.square method in Python. If you have been wondering what v1.square does, how to use it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of tensorflow.compat.v1, the module this method belongs to.


The following presents 15 code examples of the v1.square method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: cv_squared

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def cv_squared(x):
  """The squared coefficient of variation of a sample.

  Useful as a loss to encourage a positive distribution to be more uniform.
  Epsilons added for numerical stability.
  Returns 0 for an empty Tensor.

  Args:
    x: a `Tensor`.

  Returns:
    a `Scalar`.
  """
  epsilon = 1e-10
  float_size = tf.to_float(tf.size(x)) + epsilon
  mean = tf.reduce_sum(x) / float_size
  variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size
  return variance / (tf.square(mean) + epsilon) 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: expert_utils.py
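
A minimal usage sketch (values are illustrative; assumes the cv_squared definition above and a TF1-style graph session):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([1.0, 2.0, 3.0, 4.0])
loss = cv_squared(x)
with tf.Session() as sess:
    # mean = 2.5, variance = 1.25, so the result is ~1.25 / 2.5**2 = 0.2
    print(sess.run(loss))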

Example 2: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def bottleneck(self, x):
    hparams = self.hparams
    z_size = hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope("vae"):
      mu = tf.layers.dense(x, z_size, name="mu")
      if hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
      free_bits = z_size // 4
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * hparams.kl_beta 
Developer: tensorflow, Project: tensor2tensor, Lines: 18, Source: autoencoders.py
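
The kl term above is the closed-form KL divergence between the approximate posterior N(mu, diag(exp(log_sigma))) and a standard normal prior, written with expm1 for numerical stability (log_sigma here denotes log sigma^2):

D_KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * (sigma^2 + mu^2 - 1 - log(sigma^2))
                                = 0.5 * (expm1(log_sigma) + mu^2 - log_sigma)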

Example 3: vae

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Latent tensor, kl loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: discretization.py
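
A minimal usage sketch (hypothetical shapes; assumes tensor2tensor's common_layers is importable, and that x is 4-D, since the noise is drawn with shape [shape[0], shape[1], 1, z_size]):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.random_normal([8, 10, 1, 64])  # [batch, length, 1, hidden]
z, kl_loss, mu, log_sigma = vae(x, z_size=16, name="toy_vae")
# z, mu and log_sigma all have shape [8, 10, 1, 16]; kl_loss is a scalar.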

Example 4: test_group_lasso_conv3d

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def test_group_lasso_conv3d(self):
    shape = [3, 3, 3]
    video = tf.zeros([2, 3, 3, 3, 1])
    net = slim.conv3d(
        video,
        5,
        shape,
        padding='VALID',
        weights_initializer=tf.glorot_normal_initializer(),
        scope='vconv1')
    conv3d_op = tf.get_default_graph().get_operation_by_name('vconv1/Conv3D')
    conv3d_weights = conv3d_op.inputs[1]

    threshold = 0.09
    flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([net.op],
                                                           threshold=threshold)
    norm = tf.sqrt(tf.reduce_mean(tf.square(conv3d_weights), [0, 1, 2, 3]))
    alive = tf.reduce_sum(tf.cast(norm > threshold, tf.float32))
    with self.session():
      flop_coeff = 2 * shape[0] * shape[1] * shape[2]
      tf.compat.v1.global_variables_initializer().run()
      self.assertAllClose(flop_reg.get_cost(), flop_coeff * alive)
      self.assertAllClose(flop_reg.get_regularization_term(),
                          flop_coeff * tf.reduce_sum(norm)) 
Developer: google-research, Project: morph-net, Lines: 26, Source: flop_regularizer_test.py
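
The core of the check is the per-output-channel RMS norm of the conv3d kernel; a standalone sketch of just that computation (toy shapes, no morph-net dependency):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

weights = tf.random_normal([3, 3, 3, 1, 5])  # [depth, height, width, in, out]
norm = tf.sqrt(tf.reduce_mean(tf.square(weights), [0, 1, 2, 3]))  # one norm per output channel
alive = tf.reduce_sum(tf.cast(norm > 0.09, tf.float32))           # channels above the threshold
with tf.Session() as sess:
    print(sess.run([norm, alive]))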

Example 5: lazy_square

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def lazy_square(tensor):
  """Computes the square of a tensor in a lazy way.

  This function is lazy in the following sense: for
    tensor = tf.sqrt(input)
  it returns `input` directly (rather than computing tf.square(tensor)).

  Args:
    tensor: A `Tensor` of floats to compute the square of.

  Returns:
    The square of the input tensor.
  """
  if tensor.op.type == 'Sqrt':
    return tensor.op.inputs[0]
  else:
    return tf.square(tensor) 
Developer: google-research, Project: morph-net, Lines: 19, Source: grouping_regularizers.py
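
A minimal usage sketch exercising both branches (values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.constant([4.0, 9.0])
b = tf.sqrt(a)        # b.op.type == 'Sqrt'
sq1 = lazy_square(b)  # returns `a` itself; no Square op is added to the graph
sq2 = lazy_square(a)  # falls back to tf.square(a)
with tf.Session() as sess:
    print(sess.run([sq1, sq2]))  # [4., 9.] and [16., 81.]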

Example 6: bottleneck

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def bottleneck(self, x):
    z_size = self.hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope('bottleneck', reuse=tf.AUTO_REUSE):
      mu = x[..., :self.hparams.bottleneck_bits]
      if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = x[..., self.hparams.bottleneck_bits:]
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
      # This is the 'free bits' trick mentioned in Kingma et al. (2016)
      free_bits = self.hparams.free_bits
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * self.hparams.kl_beta 
Developer: magenta, Project: magenta, Lines: 18, Source: image_vae.py

Example 7: _get_cost_function

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def _get_cost_function(self):
        """Compute the cost of the Mittens objective function.

        If self.mittens is 0, this is the same as the GloVe cost.
        """
        self.weights = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.log_coincidence = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.diffs = tf.subtract(self.model, self.log_coincidence)
        cost = tf.reduce_sum(
            0.5 * tf.multiply(self.weights, tf.square(self.diffs)))
        if self.mittens > 0:
            self.mittens = tf.constant(self.mittens, tf.float32)
            cost += self.mittens * tf.reduce_sum(
                tf.multiply(
                    self.has_embedding,
                    self._tf_squared_euclidean(
                        tf.add(self.W, self.C),
                        self.original_embedding)))
        tf.summary.scalar("cost", cost)
        return cost 
Developer: roamanalytics, Project: mittens, Lines: 24, Source: tf_mittens.py
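
A standalone sketch of the GloVe part of the cost (the mittens == 0 case), with hypothetical stand-ins for the class attributes:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

model = tf.constant([[0.2, 0.1], [0.0, 0.3]])            # predicted log co-occurrences
log_coincidence = tf.constant([[0.5, 0.0], [0.0, 0.5]])  # observed log co-occurrences
weights = tf.constant([[1.0, 0.5], [0.5, 1.0]])          # GloVe weighting f(X_ij)

diffs = tf.subtract(model, log_coincidence)
glove_cost = tf.reduce_sum(0.5 * tf.multiply(weights, tf.square(diffs)))
with tf.Session() as sess:
    print(sess.run(glove_cost))  # 0.5 * sum(weights * diffs**2)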

Example 8: setUp

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def setUp(self):
        super(PersistentOpEvaluatorTest, self).setUp()

        patch = tf.test.mock.patch(
            "tensorflow.compat.v1.Session", wraps=tf.Session
        )
        patch.start()
        self.addCleanup(patch.stop)

        class Squarer(op_evaluator.PersistentOpEvaluator):
            def __init__(self):
                super(Squarer, self).__init__()
                self._input = None
                self._squarer = None

            def initialize_graph(self):
                self._input = tf.placeholder(tf.int32)
                self._squarer = tf.square(self._input)

            def run(self, xs):  # pylint: disable=arguments-differ
                return self._squarer.eval(feed_dict={self._input: xs})

        self._square = Squarer() 
Developer: tensorflow, Project: tensorboard, Lines: 25, Source: op_evaluator_test.py
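
With the Squarer class lifted out of setUp, usage looks like this (assuming PersistentOpEvaluator instances are callable, lazily building the graph on first call and delegating to run(), which the test's self._square usage suggests):

squarer = Squarer()
print(squarer([1, 2, 3]))  # -> array([1, 4, 9], dtype=int32)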

Example 9: normalized_mean_square_error

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2,3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2,3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse 
Developer: ravisvi, Project: super-resolution-videos, Lines: 19, Source: cost.py
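
A minimal usage sketch for the 2-D case (values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

output = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # [batch_size, n_feature]
target = tf.constant([[1.0, 2.0], [3.0, 5.0]])
nmse = normalized_mean_square_error(output, target)
with tf.Session() as sess:
    print(sess.run(nmse))  # batch mean of ||output - target|| / ||target||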

Example 10: _compute_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors] tensor
        representing the value of the loss function.
    """
    weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
        weights, 2)
    square_diff = 0.5 * tf.square(weighted_diff)
    return tf.reduce_sum(square_diff, 2) 
Developer: tensorflow, Project: models, Lines: 20, Source: losses.py
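
The same computation outside the class, with toy shapes (a weight of zero masks an anchor out entirely):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

pred = tf.zeros([1, 2, 4])           # [batch_size, num_anchors, code_size]
target = tf.ones([1, 2, 4])
weights = tf.constant([[1.0, 0.0]])  # second anchor masked out

weighted_diff = (pred - target) * tf.expand_dims(weights, 2)
loss = tf.reduce_sum(0.5 * tf.square(weighted_diff), 2)
with tf.Session() as sess:
    print(sess.run(loss))  # [[2. 0.]] -- 0.5 * 1.0 summed over 4 code dimensions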

Example 11: proto_maml_fc_bias

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def proto_maml_fc_bias(self, prototypes, zero_pad_to_max_way=False):
    """Computes the Prototypical MAML fc layer's bias.

    Args:
      prototypes: Tensor of shape [num_classes, embedding_size]
      zero_pad_to_max_way: Whether to zero-pad to the maximum number of ways.

    Returns:
      fc_bias: Tensor of shape [num_classes] or [self.logit_dim]
        when zero_pad_to_max_way is True.
    """
    fc_bias = -tf.square(tf.norm(prototypes, axis=1))
    if zero_pad_to_max_way:
      paddings = [[0, self.logit_dim - tf.shape(fc_bias)[0]]]
      fc_bias = tf.pad(fc_bias, paddings, 'CONSTANT', constant_values=0)
    return fc_bias 
Developer: google-research, Project: meta-dataset, Lines: 18, Source: learner.py
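
The bias is simply the negative squared norm of each class prototype; a standalone sketch of the core line (toy values):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

prototypes = tf.constant([[3.0, 4.0], [0.0, 1.0]])  # [num_classes, embedding_size]
fc_bias = -tf.square(tf.norm(prototypes, axis=1))
with tf.Session() as sess:
    print(sess.run(fc_bias))  # [-25. -1.]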

Example 12: _make_add_squared_grads

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def _make_add_squared_grads(self):
    assignments = []
    for sum_squared_grads, grads in zip(self._sum_squared_grads, self._grads):
      assignments.append(sum_squared_grads.assign_add(tf.square(grads)))
    return tf.group(assignments + [self._num_squared_grads.assign_add(1)]) 
Developer: deepmind, Project: lamb, Lines: 7, Source: dyneval.py
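
A standalone sketch of the accumulation pattern with a single toy gradient (hypothetical values):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

grads = [tf.constant([1.0, -2.0])]
sum_squared = [tf.Variable(tf.zeros([2]))]
add_op = tf.group([s.assign_add(tf.square(g)) for s, g in zip(sum_squared, grads)])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(add_op)
    print(sess.run(sum_squared[0]))  # [1. 4.]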

Example 13: _grad_variance

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def _grad_variance(self):
    """Estimate of gradient Variance.

    Returns:
      C_t ops.
    """
    grad_var_ops = []
    tensor_to_avg = []
    for t, g in zip(self._vars, self._grad):
      if isinstance(g, tf.IndexedSlices):
        tensor_to_avg.append(
            tf.reshape(tf.unsorted_segment_sum(g.values,
                                               g.indices,
                                               g.dense_shape[0]),
                       shape=t.get_shape()))
      else:
        tensor_to_avg.append(g)
    avg_op = self._moving_averager.apply(tensor_to_avg)
    grad_var_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_avg = [self._moving_averager.average(val)
                        for val in tensor_to_avg]
      self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]

    # Compute Variance
    self._grad_var = tf.maximum(
        tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype),
        self._grad_norm_squared_avg
        - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))
    if self._sparsity_debias:
      self._grad_var *= self._sparsity_avg
    return grad_var_ops  # C_t 
Developer: tensorflow, Project: tensor2tensor, Lines: 34, Source: yellowfin.py
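
The estimate relies on the identity Var(g) = E[||g||^2] - ||E[g]||^2; a toy check of that identity (values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

g = tf.constant([[1.0, 2.0], [3.0, 6.0]])  # two observations of a 2-d gradient
mean_norm_sq = tf.reduce_mean(tf.reduce_sum(tf.square(g), axis=1))  # E[||g||^2]
g_avg = tf.reduce_mean(g, axis=0)                                   # E[g]
variance = mean_norm_sq - tf.reduce_sum(tf.square(g_avg))
with tf.Session() as sess:
    print(sess.run(variance))  # 5.0 (= per-dimension variances 1.0 + 4.0)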

Example 14: reduce_rms

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def reduce_rms(x):
  return tf.sqrt(tf.reduce_mean(tf.square(x))) 
Developer: tensorflow, Project: tensor2tensor, Lines: 4, Source: adafactor.py
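
Usage is a one-liner (values are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.Session() as sess:
    print(sess.run(reduce_rms(tf.constant([3.0, 4.0]))))  # sqrt((9 + 16) / 2) ~= 3.536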

Example 15: vq_nearest_neighbor

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import square [as alias]
def vq_nearest_neighbor(x, means,
                        soft_em=False, num_samples=10, temperature=None):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = common_layers.shape_list(means)[0]
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if soft_em:
    x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=common_layers.shape_list(means)[0])
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    if temperature is None:
      x_means_idx = tf.argmax(-dist, axis=-1)
    else:
      x_means_idx = tf.multinomial(- dist / temperature, 1)
      x_means_idx = tf.squeeze(x_means_idx, axis=-1)
    if (common_layers.should_generate_summaries() and
        not common_layers.is_xla_compiled()):
      tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1]))
    x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
  x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
  x_means = tf.matmul(x_means_hot_flat, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss, dist 
Developer: tensorflow, Project: tensor2tensor, Lines: 29, Source: discretization.py
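
The pairwise distance uses the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2 x.m, which avoids materializing an [n, k, d] difference tensor; a toy check of the identity:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.random_normal([8, 16])      # 8 query vectors
means = tf.random_normal([4, 16])  # 4 codebook entries

x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)          # [8, 1]
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)  # [4, 1]
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * tf.matmul(x, means, transpose_b=True)

# Reference: explicit pairwise squared distances via broadcasting.
ref = tf.reduce_sum(tf.squared_difference(x[:, None, :], means[None, :, :]), axis=-1)
with tf.Session() as sess:
    d, r = sess.run([dist, ref])
    print(abs(d - r).max())  # ~0 up to float rounding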


Note: The tensorflow.compat.v1.square method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright for the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce without permission.