

Python v1.sqrt Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.sqrt method in Python. If you are wondering how to use v1.sqrt, how to call it in practice, or what real-world examples look like, the curated code examples below should help. You can also explore further usage examples for the containing module, tensorflow.compat.v1.


Below are 15 code examples of the v1.sqrt method, sorted by popularity by default.

Example 1: _make_update

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def _make_update(self):
    mss = []
    gsum = 0.0
    count = 0
    for sum_squared_grads in self._sum_squared_grads:
      ms = tf.sqrt(sum_squared_grads / self._num_squared_grads)
      gsum += tf.reduce_sum(ms)
      count += tf.reduce_sum(tf.ones_like(ms))
      mss.append(ms)
    gsum = gsum / count

    assignments = []
    for grad, var, save, sum_squared_grads, ms in zip(
        self._grads, self._vars, self._saves, self._sum_squared_grads, mss):
      decay_rate = tf.minimum(1.0, self._decay_rate*(ms/gsum))
      delta = (-self._learning_rate*grad / (ms + self._epsilon) +
               decay_rate*(save-var))
      assignments.append(var.assign_add(delta))
    return tf.group(assignments) 
Author: deepmind, Project: lamb, Lines: 21, Source: dyneval.py
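
For intuition, here is a minimal NumPy sketch of the same update rule applied to a single parameter vector; all values and hyperparameters below are hypothetical:

import numpy as np

grad = np.array([0.2, -0.1])
var = np.array([1.0, 2.0])        # current parameter values
save = np.array([0.9, 2.1])       # saved values the update decays toward
ms = np.array([0.5, 0.4])         # RMS of the accumulated squared gradients
gsum = 0.45                       # mean of ms over all parameters
learning_rate, base_decay, epsilon = 0.01, 0.02, 1e-8

# Per-parameter decay rate, capped at 1 and scaled by relative gradient RMS.
decay_rate = np.minimum(1.0, base_decay * (ms / gsum))
delta = -learning_rate * grad / (ms + epsilon) + decay_rate * (save - var)
var += delta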

Example 2: layer_norm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def layer_norm(x, reduction_indices, epsilon=1e-9, gain=None, bias=None,
               per_element=True, scope=None):
  """DOC."""
  reduction_indices = ensure_list(reduction_indices)
  mean = tf.reduce_mean(x, reduction_indices, keep_dims=True)
  variance = tf.reduce_mean(tf.squared_difference(x, mean),
                            reduction_indices, keep_dims=True)
  normalized = (x - mean) / tf.sqrt(variance + epsilon)
  dtype = x.dtype
  shape = x.get_shape().as_list()
  for i in six.moves.range(len(shape)):
    if i not in reduction_indices or not per_element:
      shape[i] = 1
  with tf.variable_scope(scope or 'layer_norm'):
    if gain is None:
      gain = tf.get_variable('gain', shape=shape, dtype=dtype,
                             initializer=tf.ones_initializer())
    if bias is None:
      bias = tf.get_variable('bias', shape=shape, dtype=dtype,
                             initializer=tf.zeros_initializer())
  return gain*normalized+bias 
Author: deepmind, Project: lamb, Lines: 23, Source: utils.py
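
A quick NumPy sketch of what the normalization step computes before gain and bias are applied; the sample data is made up:

import numpy as np

x = np.random.randn(4, 8).astype(np.float32) * 3.0 + 2.0
mean = x.mean(axis=1, keepdims=True)
variance = ((x - mean) ** 2).mean(axis=1, keepdims=True)
normalized = (x - mean) / np.sqrt(variance + 1e-9)
print(normalized.mean(axis=1))  # each row ~0
print(normalized.var(axis=1))   # each row ~1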

Example 3: _dist_to_opt

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def _dist_to_opt(self):
    """Distance to optimum.

    Returns:
      D_t ops
    """
    dist_to_opt_ops = []
    # Running average of the norm of gradient
    self._grad_norm = tf.sqrt(self._grad_norm_squared)
    avg_op = self._moving_averager.apply([self._grad_norm,])
    dist_to_opt_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
      # Single iteration distance estimation, note here
      # self._grad_norm_avg is per variable
      self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg
    # Running average of distance
    avg_op = self._moving_averager.apply([self._d_t])
    dist_to_opt_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._dist_to_opt_avg = tf.identity(
          self._moving_averager.average(self._d_t))
      if self._sparsity_debias:
        self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)
    return dist_to_opt_ops  # D_t 
Author: tensorflow, Project: tensor2tensor, Lines: 27, Source: yellowfin.py
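
A rough standalone sketch of the distance estimate, using plain averages in place of the moving averages; the numbers are illustrative only:

import numpy as np

grad_norms = np.array([2.0, 1.5, 1.8, 1.2])      # ||g|| per step
grad_norm_avg = grad_norms.mean()                # stand-in for the moving average
grad_norm_squared_avg = (grad_norms ** 2).mean()
d_t = grad_norm_avg / grad_norm_squared_avg      # single-step distance estimate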

Example 4: _grad_sparsity

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as:
    #   minibatch sparse gradient norm * sqrt(sparsity)
    # A possible extension is to correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op 
Author: tensorflow, Project: tensor2tensor, Lines: 18, Source: yellowfin.py
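
The sparsity measure itself is simple; a NumPy sketch with made-up gradient values:

import numpy as np

g = np.array([0.0, 0.5, 0.0, 0.0, 1.2, 0.0, 0.0, 0.0, 0.0, 0.3])
sparsity = np.count_nonzero(g) / g.size              # 3 of 10 non-zero -> 0.3
dense_norm_estimate = np.linalg.norm(g) * np.sqrt(sparsity)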

Example 5: xception_exit

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def xception_exit(inputs):
  """Xception exit flow."""
  with tf.variable_scope("xception_exit"):
    x = inputs
    x_shape = x.get_shape().as_list()
    # Read the depth up front; both reshape branches below use it.
    x_depth = x_shape[3]
    if x_shape[1] is None or x_shape[2] is None:
      length_float = tf.to_float(tf.shape(x)[1])
      length_float *= tf.to_float(tf.shape(x)[2])
      spatial_dim_float = tf.sqrt(length_float)
      spatial_dim = tf.to_int32(spatial_dim_float)
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
    elif x_shape[1] != x_shape[2]:
      spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
      if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
        raise ValueError("Assumed inputs were square-able but they were "
                         "not. Shape: %s" % x_shape)
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])

    x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
    return tf.nn.relu(x) 
Author: tensorflow, Project: tensor2tensor, Lines: 23, Source: xception.py
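
The "square-able" check is plain integer arithmetic; for example (dimensions hypothetical):

import math

h, w = 6, 24                                # non-square spatial dims
spatial_dim = int(math.sqrt(float(h * w)))
assert spatial_dim * spatial_dim == h * w   # 144 == 12 * 12, reshapeable
# h, w = 6, 20 would fail: 120 is not a perfect square, so the function
# raises ValueError for such inputs.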

Example 6: _data_dep_init

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def _data_dep_init(self, inputs):
    """Data dependent initialization for eager execution."""

    with tf.variable_scope("data_dep_init"):
      # Generate data dependent init values
      activation = self.layer.activation
      self.layer.activation = None
      x_init = self.layer.call(inputs)
      m_init, v_init = tf.moments(x_init, self.norm_axes)
      scale_init = 1. / tf.sqrt(v_init + 1e-10)

    # Assign data dependent init values
    self.layer.g = self.layer.g * scale_init
    self.layer.bias = (-m_init * scale_init)
    self.layer.activation = activation
    self.initialized = True 
Author: tensorflow, Project: tensor2tensor, Lines: 18, Source: common_layers.py
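
The effect of this init is to standardize the initial pre-activations; a NumPy sketch with synthetic data:

import numpy as np

x_init = np.random.randn(128, 16) * 3.0 + 2.0   # raw pre-activations
m_init = x_init.mean(axis=0)
v_init = x_init.var(axis=0)
scale_init = 1.0 / np.sqrt(v_init + 1e-10)
standardized = (x_init - m_init) * scale_init   # == x * scale + (-m * scale)
print(standardized.mean(axis=0))  # ~0 per feature
print(standardized.std(axis=0))   # ~1 per feature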

Example 7: dense_weightnorm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def dense_weightnorm(
    name, x, n_out, x_mask, init_scale, init, dtype=tf.float32):
  """Dense layer with weight normalization."""
  n_in = common_layers.shape_list(x)[2]
  eps = tf.keras.backend.epsilon()
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    v = tf.get_variable(
        "v", [n_in, n_out], dtype,
        initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
    v = v / tf.norm(v, axis=0, keepdims=True)
    t = tf.matmul(x, v)  # [B, L, n_out]
    mean, var = moments_over_bl(t, x_mask)
    g_init = init_scale / (tf.sqrt(var) + eps)
    g = get_variable_ddi(
        "g", [n_out], g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    b = get_variable_ddi(
        "b", [n_out], -mean*g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    w = g * v
    y = tf.matmul(x, w) + b
    tf.summary.histogram("_g", g)
    return y 
Author: tensorflow, Project: tensor2tensor, Lines: 25, Source: transformer_glow_layers_ops.py
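
The parameterization w = g * v / ||v||_2 decouples direction from magnitude; a small NumPy check with random weights:

import numpy as np

v = np.random.randn(8, 4)
v = v / np.linalg.norm(v, axis=0, keepdims=True)  # unit-norm columns
g = np.array([0.5, 1.0, 2.0, 0.1])
w = g * v
print(np.linalg.norm(w, axis=0))  # each column norm equals g exactly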

Example 8: test_group_lasso_conv3d

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def test_group_lasso_conv3d(self):
    shape = [3, 3, 3]
    video = tf.zeros([2, 3, 3, 3, 1])
    net = slim.conv3d(
        video,
        5,
        shape,
        padding='VALID',
        weights_initializer=tf.glorot_normal_initializer(),
        scope='vconv1')
    conv3d_op = tf.get_default_graph().get_operation_by_name('vconv1/Conv3D')
    conv3d_weights = conv3d_op.inputs[1]

    threshold = 0.09
    flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([net.op],
                                                           threshold=threshold)
    norm = tf.sqrt(tf.reduce_mean(tf.square(conv3d_weights), [0, 1, 2, 3]))
    alive = tf.reduce_sum(tf.cast(norm > threshold, tf.float32))
    with self.session():
      flop_coeff = 2 * shape[0] * shape[1] * shape[2]
      tf.compat.v1.global_variables_initializer().run()
      self.assertAllClose(flop_reg.get_cost(), flop_coeff * alive)
      self.assertAllClose(flop_reg.get_regularization_term(),
                          flop_coeff * tf.reduce_sum(norm)) 
Author: google-research, Project: morph-net, Lines: 26, Source: flop_regularizer_test.py
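
The per-channel norm the test compares against can be written directly in NumPy; random weights stand in for the initialized conv kernel:

import numpy as np

w = np.random.randn(3, 3, 3, 1, 5) * 0.05          # [d, h, w, in, out]
norm = np.sqrt((w ** 2).mean(axis=(0, 1, 2, 3)))   # RMS per output channel
alive = (norm > 0.09).sum()                        # channels above threshold
flop_coeff = 2 * 3 * 3 * 3                         # multiply + add per tap
cost = flop_coeff * alive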

Example 9: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def __init__(self, regularizers_to_group, threshold=DEFAULT_THRESHOLD):
    """Creates an instance.

    Args:
      regularizers_to_group: A list of generic_regularizers.OpRegularizer
        objects. Their regularization_vectors (alive_vectors) are expected
        to be of the same length.
      threshold: A float. A group of activations will be considered alive
        if its L2 norm is greater than `threshold`.

    Raises:
      ValueError: regularizers_to_group is not of length at least 2.
    """
    if len(regularizers_to_group) < 2:
      raise ValueError('Groups must be of at least size 2.')
    self._regularization_vector = tf.sqrt(
        tf.add_n([
            lazy_square(r.regularization_vector)
            for r in regularizers_to_group
        ]))
    self._alive_vector = self._regularization_vector > threshold 
Author: google-research, Project: morph-net, Lines: 23, Source: grouping_regularizers.py
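
Combining two regularization vectors this way takes an elementwise L2 norm across the group; for example (values made up):

import numpy as np

r1 = np.array([0.30, 0.05, 0.40])
r2 = np.array([0.10, 0.02, 0.50])
grouped = np.sqrt(r1 ** 2 + r2 ** 2)  # elementwise L2 norm across the group
alive = grouped > 0.1                 # [ True, False,  True]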

Example 10: lazy_square

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def lazy_square(tensor):
  """Computes the square of a tensor in a lazy way.

  This function is lazy in the following sense: for
    tensor = tf.sqrt(input)
  it returns input directly (rather than computing tf.square(tensor)).

  Args:
    tensor: A `Tensor` of floats to compute the square of.

  Returns:
    The square of the input tensor.
  """
  if tensor.op.type == 'Sqrt':
    return tensor.op.inputs[0]
  else:
    return tf.square(tensor) 
Author: google-research, Project: morph-net, Lines: 19, Source: grouping_regularizers.py
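
A usage sketch in TF1 graph mode, assuming the lazy_square function above is in scope:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([4.0, 9.0])
root = tf.sqrt(x)
squared = lazy_square(root)
# Because root's op type is 'Sqrt', lazy_square returns the original input
# tensor x instead of building a Square op on top of the sqrt.
print(squared is x)  # True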

Example 11: _apply_gradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def _apply_gradients(self, grads, x, optim_state):
    """Applies gradients."""
    lr = self._lr_fn(optim_state.t)
    new_optim_state = self._State(
        t=optim_state.t + 1,
        m=[None] * len(x),
        u=[None] * len(x))
    t = tf.cast(new_optim_state.t, tf.float32)
    new_x = [None] * len(x)
    for i in range(len(x)):
      g = grads[i]
      m_old = optim_state.m[i]
      u_old = optim_state.u[i]
      new_optim_state.m[i] = self._beta1 * m_old + (1. - self._beta1) * g
      new_optim_state.u[i] = self._beta2 * u_old + (1. - self._beta2) * g * g
      m_hat = new_optim_state.m[i] / (1. - tf.pow(self._beta1, t))
      u_hat = new_optim_state.u[i] / (1. - tf.pow(self._beta2, t))
      new_x[i] = x[i] - lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)
    return new_x, new_optim_state 
Author: deepmind, Project: interval-bound-propagation, Lines: 21, Source: attacks.py
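
This is a standard bias-corrected Adam step; a plain-NumPy sketch of a single update, with illustrative hyperparameters:

import numpy as np

def adam_step(x, g, m, u, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    m = beta1 * m + (1.0 - beta1) * g        # first-moment estimate
    u = beta2 * u + (1.0 - beta2) * g * g    # second-moment estimate
    m_hat = m / (1.0 - beta1 ** t)           # bias correction
    u_hat = u / (1.0 - beta2 ** t)
    x = x - lr * m_hat / (np.sqrt(u_hat) + eps)
    return x, m, u

x, m, u = np.zeros(3), np.zeros(3), np.zeros(3)
x, m, u = adam_step(x, np.array([0.1, -0.2, 0.3]), m, u, t=1)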

Example 12: layer_norm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def layer_norm(layer_inputs, hidden_size):
  """Implements layer norm from [Ba et al. 2016] Layer Normalization.

  See eqn. 4 in (https://arxiv.org/pdf/1607.06450.pdf).

  Args:
    layer_inputs (tensor): The inputs to the layer.
      shape <float32>[batch_size, hidden_size]
    hidden_size (int): Dimensionality of the hidden layer.

  Returns:
    normalized (tensor): layer_inputs, normalized over all the hidden units in
      the layer.
      shape <float32>[batch_size, hidden_size]
  """

  mean, var = tf.nn.moments(layer_inputs, [1], keep_dims=True)
  with tf.variable_scope("layernorm", reuse=tf.AUTO_REUSE):
    gain = tf.get_variable(
        "gain", shape=[hidden_size], initializer=tf.constant_initializer(1))
    bias = tf.get_variable(
        "bias", shape=[hidden_size], initializer=tf.constant_initializer(0))

  normalized = gain * (layer_inputs - mean) / tf.sqrt(var + _EPSILON) + bias
  return normalized 
Author: google-research, Project: language, Lines: 27, Source: model_utils.py
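
A hypothetical usage sketch in TF1 graph mode, assuming the layer_norm function and _EPSILON constant above are defined:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

layer_inputs = tf.placeholder(tf.float32, shape=[None, 64])
normalized = layer_norm(layer_inputs, hidden_size=64)
# normalized has the same shape as layer_inputs, with per-example
# mean ~0 and variance ~1 before gain and bias are applied.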

Example 13: normalized_mean_square_error

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3]))
        else:
            raise ValueError("output must be a 2D or 4D tensor")
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse 
Author: ravisvi, Project: super-resolution-videos, Lines: 19, Source: cost.py
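
Worked on a tiny batch in NumPy, with made-up numbers, to show what the 2D branch computes:

import numpy as np

output = np.array([[1.0, 2.0], [0.0, 1.0]])
target = np.array([[1.0, 1.0], [0.0, 2.0]])
nmse_a = np.sqrt(((output - target) ** 2).sum(axis=1))  # per-sample L2 error
nmse_b = np.sqrt((target ** 2).sum(axis=1))             # per-sample target norm
nmse = (nmse_a / nmse_b).mean()                         # ~0.6036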

Example 14: cosine_similarity

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def cosine_similarity(v1, v2):
    """Cosine similarity [-1, 1], `wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`_.

    Parameters
    -----------
    v1, v2 : tensor of [batch_size, n_feature], with the same number of features.

    Returns
    -----------
    a tensor of [batch_size, ]
    """
    try:  # TF1.0
        cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (
            tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) *
            tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
    except AttributeError:  # TF0.12
        cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (
            tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) *
            tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
    return cost


## Regularization Functions 
Author: ravisvi, Project: super-resolution-videos, Lines: 21, Source: cost.py
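
The same computation in NumPy with toy vectors:

import numpy as np

v1 = np.array([[1.0, 0.0], [1.0, 1.0]])
v2 = np.array([[1.0, 0.0], [-1.0, 1.0]])
cos = (v1 * v2).sum(axis=1) / (
    np.linalg.norm(v1, axis=1) * np.linalg.norm(v2, axis=1))
print(cos)  # [1.0, 0.0]: identical vectors score 1, orthogonal ones score 0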

Example 15: _ensure_keep_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import sqrt [as alias]
def _ensure_keep_mask(self, x):
    if self._keep_mask is None or not self._share_mask:
      shape = tf.shape(x)
      # Calculate the stddev for the normal distribution that
      # matches the stddev of the bernoulli with p=keep_prob.
      stddev = tf.sqrt((1 - self._keep_prob) / self._keep_prob)
      self._keep_mask = tf.random_normal(shape, mean=1.0, stddev=stddev,
                                         dtype=x.dtype)
      self._keep_mask.set_shape(x.get_shape())
    return self._keep_mask 
Author: deepmind, Project: lamb, Lines: 12, Source: dropout.py
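
The stddev formula matches the first two moments of a scaled Bernoulli dropout mask; a quick NumPy sanity check, assuming keep_prob=0.8:

import numpy as np

p = 0.8
bern = np.random.binomial(1, p, size=1_000_000) / p
gauss = np.random.normal(1.0, np.sqrt((1 - p) / p), size=1_000_000)
print(bern.mean(), bern.var())    # ~1.0, ~0.25
print(gauss.mean(), gauss.var())  # ~1.0, ~0.25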


Note: The tensorflow.compat.v1.sqrt method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code belongs to the original authors. For distribution and use, refer to the License of the corresponding project; do not reproduce without permission.