

Python tensorflow.norm Method Code Examples

This article collects typical usage examples of the tensorflow.norm method in Python. If you are wondering what tensorflow.norm does, how to call it, or where it is used in practice, the curated examples below may help. You can also explore further usage examples from the tensorflow module where this method lives.


Below are 15 code examples of the tensorflow.norm method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
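
Before diving in, here is a minimal sketch of the method itself: tf.norm computes vector and matrix norms along the given axes (Euclidean/Frobenius by default). The values in the comments assume eager execution or an evaluated session:

import numpy as np
import tensorflow as tf

m = tf.constant([[3.0, 4.0],
                 [5.0, 12.0]])

frob = tf.norm(m)                         # Frobenius norm of the matrix, ~13.93
row_l2 = tf.norm(m, axis=1)               # per-row L2 norms: [5.0, 13.0]
row_inf = tf.norm(m, ord=np.inf, axis=1)  # per-row max-abs:  [4.0, 12.0]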

Example 1: perturb

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Author: StephanZheng | Project: neural-fingerprinting | Lines: 20 | Source: pgd_cw_whitebox.py
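
The snippet above assumes the attack object already holds self.grad, self.k, self.a, self.epsilon, self.rand, and a model exposing x_input/y_input. A minimal sketch of how self.grad is typically wired up for PGD, with a hypothetical toy model (all names here are illustrative, not from the original repository):

import numpy as np
import tensorflow as tf

x_input = tf.placeholder(tf.float32, [None, 784])
y_input = tf.placeholder(tf.int64, [None])
logits = tf.layers.dense(x_input, 10)
loss = tf.reduce_sum(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y_input, logits=logits))

# perturb() repeatedly evaluates this tensor to take signed gradient steps
grad = tf.gradients(loss, x_input)[0]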

Example 2: get_train_op

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def get_train_op(self, loss, clip_factor, clip, step):
        import tensorflow as tf
        optimizer = tf.train.AdamOptimizer(learning_rate=step)
        gradients, variables = zip(*optimizer.compute_gradients(loss))
        filtered_grads = []
        filtered_vars = []
        for grad, var in zip(gradients, variables):
            if grad is not None:
                filtered_grads.append(grad)
                filtered_vars.append(var)
        gradients = filtered_grads
        variables = filtered_vars
        if clip:
            gradients, _ = tf.clip_by_global_norm(gradients, clip_factor)
        grad_norm = tf.reduce_sum([tf.norm(grad) for grad in gradients])
        train_op = optimizer.apply_gradients(zip(gradients, variables))
        return optimizer, train_op, grad_norm 
Author: jet-black | Project: ppo-lstm-parallel | Lines: 19 | Source: agent.py
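
Note that grad_norm above is the sum of per-tensor L2 norms, which is not the same quantity as the global norm used by tf.clip_by_global_norm (the square root of the sum of squared norms). A small sketch contrasting the two, assuming a TF1 session:

import tensorflow as tf

g1 = tf.constant([3.0, 4.0])    # L2 norm 5
g2 = tf.constant([5.0, 12.0])   # L2 norm 13

sum_of_norms = tf.reduce_sum([tf.norm(g) for g in [g1, g2]])  # 18.0
global_norm = tf.global_norm([g1, g2])                        # sqrt(25 + 169) ~ 13.93

with tf.Session() as sess:
    print(sess.run([sum_of_norms, global_norm]))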

Example 3: LandmarkImageLayer

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def LandmarkImageLayer(Landmarks):
    
    def draw_landmarks(L):
        def draw_landmarks_helper(Point):
            intLandmark = tf.to_int32(Point)
            locations = Offsets + intLandmark
            dxdy = Point - tf.to_float(intLandmark)
            offsetsSubPix = tf.to_float(Offsets) - dxdy
            vals = 1 / (1 + tf.norm(offsetsSubPix, axis=2))
            img = tf.scatter_nd(locations, vals, shape=(IMGSIZE, IMGSIZE))
            return img
        Landmark = tf.reverse(tf.reshape(L, [-1,2]), [-1])
        # Landmark = tf.reshape(L, (-1, 2))
        Landmark = tf.clip_by_value(Landmark, HalfSize, IMGSIZE - 1 - HalfSize)
        # Ret = 1 / (tf.norm(tf.map_fn(DoIn,Landmarks),axis = 3) + 1)
        Ret = tf.map_fn(draw_landmarks_helper, Landmark)
        Ret = tf.reshape(tf.reduce_max(Ret, axis=0), [IMGSIZE, IMGSIZE, 1])
        return Ret
    return tf.map_fn(draw_landmarks, Landmarks) 
Author: junhwanjang | Project: face_landmark_dnn | Lines: 21 | Source: layers.py
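
LandmarkImageLayer relies on three names defined elsewhere in layers.py: IMGSIZE, HalfSize, and Offsets, a grid of integer offsets around each landmark. A plausible reconstruction under those assumptions (the exact values are guesses, not taken from the original file):

import tensorflow as tf

IMGSIZE = 112   # assumed heatmap size
HalfSize = 8    # assumed half-width of the patch drawn around each landmark

# Offsets of shape [2*HalfSize, 2*HalfSize, 2], matching tf.norm(..., axis=2)
# and the scatter_nd indices Offsets + intLandmark in the snippet above.
ys, xs = tf.meshgrid(tf.range(-HalfSize, HalfSize),
                     tf.range(-HalfSize, HalfSize), indexing='ij')
Offsets = tf.stack([ys, xs], axis=2)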

Example 4: layer_norm_compute

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Layer norm raw computation."""

  # Save these before they get converted to tensors by the casting below.
  # (The full source forwards them via layer_collection for K-FAC; this
  # excerpt keeps the assignment but does not use it further.)
  params = (scale, bias)

  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)

  output = norm_x * scale + bias
  return output
Author: yyht | Project: BERT | Lines: 18 | Source: common_layers.py
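
This example and the next both call cast_like, a helper from the same common_layers module. A minimal sketch of its behavior (cast the first argument to the second's dtype), hedged as an approximation rather than the exact original:

import tensorflow as tf

def cast_like(x, y):
    """Cast x to y's dtype if the two differ; otherwise return x as-is."""
    x = tf.convert_to_tensor(x)
    y = tf.convert_to_tensor(y)
    if x.dtype.base_dtype == y.dtype.base_dtype:
        return x
    return tf.cast(x, y.dtype)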

Example 5: group_norm

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494."""
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance on heights, width, channels (not groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, x_shape) * scale + bias 
Author: yyht | Project: BERT | Lines: 21 | Source: common_layers.py
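
group_norm also depends on shape_list, which returns static dimensions where they are known and falls back to dynamic ones. A sketch under that assumption:

import tensorflow as tf

def shape_list(x):
    """Shape as a Python list: static dims where known, tf.shape(x) otherwise."""
    x = tf.convert_to_tensor(x)
    static = x.get_shape().as_list()
    dynamic = tf.shape(x)
    return [dim if dim is not None else dynamic[i]
            for i, dim in enumerate(static)]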

Example 6: _add_grads_and_vars_to_summaries

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def _add_grads_and_vars_to_summaries(model_results: ModelResults):
        if model_results.grads_and_vars is not None:
            for grad, var in model_results.grads_and_vars:
                grad_name = ('gradient/' + var.name).replace(':', '_')
                model_utils.add_histogram_summary(grad_name, grad)
                grad_norm = tf.norm(grad)
                grad_norm_name = "gradient_l2_norms/scalar_" + grad_name
                model_utils.add_summary_by_name(grad_norm_name, grad_norm)
            all_grads = list(zip(*model_results.grads_and_vars))[0]
            global_grad_norm = tf.global_norm(all_grads)
            global_norm_name = "_".join(["scalar", "global_gradient_l2_norm"])
            model_utils.add_summary_by_name(global_norm_name, global_grad_norm)

        if model_results.regularization_grads_and_vars is not None:
            for grad, var in model_results.regularization_grads_and_vars:
                grad_name = ('reg_gradient/' + var.name).replace(':', '_')
                model_utils.add_histogram_summary(grad_name, grad) 
Author: audi | Project: nucleus7 | Lines: 19 | Source: model_handler.py
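
The nucleus7 model_utils helpers are not shown here; the same per-gradient and global-norm summaries can be expressed with plain tf.summary calls. A hedged equivalent sketch:

import tensorflow as tf

def add_gradient_summaries(grads_and_vars):
    """Histogram and L2-norm summary per gradient, plus the global norm."""
    for grad, var in grads_and_vars:
        name = ('gradient/' + var.name).replace(':', '_')
        tf.summary.histogram(name, grad)
        tf.summary.scalar('gradient_l2_norms/' + name, tf.norm(grad))
    all_grads = [g for g, _ in grads_and_vars]
    tf.summary.scalar('global_gradient_l2_norm', tf.global_norm(all_grads))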

Example 7: wgan_loss

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def wgan_loss(x, gz, discriminator, beta=10.0):
  """Improved Wasserstein GAN loss.

  Args:
    x: Batch of real samples.
    gz: Batch of generated samples.
    discriminator: Discriminator function.
    beta: Regularizer factor.
  Returns:
    d_loss: Discriminator loss.
    g_loss: Generator loss.
  """
  dx = discriminator(x)
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    dgz = discriminator(gz)
  batch_size = tf.shape(x)[0]
  alpha = tf.random_uniform([batch_size])
  xhat = x * alpha + gz * (1 - alpha)
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    dxhat = discriminator(xhat)
  gnorm = tf.norm(tf.gradients(dxhat, xhat)[0])
  d_loss = -tf.reduce_mean(dx - dgz - beta * tf.square(gnorm - 1))
  g_loss = -tf.reduce_mean(dgz)
  return d_loss, g_loss 
Author: vahidk | Project: TensorflowFramework | Lines: 26 | Source: loss_ops.py
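
As written, gnorm collapses the gradients of the entire batch into a single norm; the penalty in the original WGAN-GP paper is computed per sample and then averaged. A hedged sketch of that per-sample variant:

import tensorflow as tf

def gradient_penalty(dxhat, xhat, batch_size):
    """Per-sample gradient penalty, mean of (||grad||_2 - 1)^2."""
    grads = tf.gradients(dxhat, xhat)[0]
    grads_flat = tf.reshape(grads, [batch_size, -1])
    grad_norms = tf.norm(grads_flat, axis=1)   # one norm per sample
    return tf.reduce_mean(tf.square(grad_norms - 1.0))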

Example 8: test_unflatten_batch_to_2d_random

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def test_unflatten_batch_to_2d_random(self, sizes, max_rows, num_features):
    """Test unflattening with random inputs."""
    max_rows = np.max(sizes) if max_rows is None else max_rows
    output_shape = np.concatenate(
        (np.shape(sizes), (max_rows,), (num_features,)))
    total_rows = np.sum(sizes)
    data = 0.1 + np.random.uniform(size=(total_rows, num_features))

    unflattened = utils.unflatten_2d_to_batch(data, sizes, max_rows)
    flattened = tf.reshape(unflattened, (-1, num_features))
    nonzero_rows = tf.compat.v1.where(tf.norm(tensor=flattened, axis=-1))
    flattened_unpadded = tf.gather(
        params=flattened, indices=tf.squeeze(input=nonzero_rows, axis=-1))

    self.assertAllEqual(tf.shape(input=unflattened), output_shape)
    self.assertAllEqual(flattened_unpadded, data) 
Author: tensorflow | Project: graphics | Lines: 18 | Source: utils_test.py
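
The recovery trick in this test (keep only rows whose norm is nonzero) works because unflatten_2d_to_batch zero-pads each batch element up to max_rows, while the random data is bounded away from zero by the 0.1 offset. A small NumPy illustration of the assumed layout:

import numpy as np

sizes = [2, 1]   # rows belonging to each batch element
max_rows = 2
data = np.array([[1., 2.],
                 [3., 4.],
                 [5., 6.]])   # sum(sizes) = 3 rows, 2 features

# Expected unflattened shape: (len(sizes), max_rows, 2),
# zero-padded beyond each element's true row count.
expected = np.array([[[1., 2.],
                      [3., 4.]],
                     [[5., 6.],
                      [0., 0.]]])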

Example 9: test_normalized_random_uniform_initializer_is_normalized

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def test_normalized_random_uniform_initializer_is_normalized(self):
    """Tests normalized_random_uniform_initializer outputs are normalized."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()

    variable = tf.compat.v1.get_variable(
        "test_variable",
        shape=tensor_shape + [4],
        dtype=tf.float32,
        initializer=quaternion.normalized_random_uniform_initializer(),
        use_resource=False)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    value = self.evaluate(variable)
    norms = np.linalg.norm(value, axis=-1)
    ones = np.ones(tensor_shape)

    self.assertAllClose(norms, ones, rtol=1e-3) 
Author: tensorflow | Project: graphics | Lines: 19 | Source: quaternion_test.py
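
The invariant being checked, unit L2 norm along the last axis, is what you would obtain by drawing random 4-vectors and normalizing them. A tiny NumPy sketch of that property:

import numpy as np

q = np.random.uniform(-1.0, 1.0, size=(5, 4))
q /= np.linalg.norm(q, axis=-1, keepdims=True)

assert np.allclose(np.linalg.norm(q, axis=-1), 1.0)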

Example 10: generate

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def generate(self, x, **kwargs):
        """
        Return a tensor that constructs adversarial examples for the given
        input. Generate uses tf.py_func in order to operate over tensors.

        :param x: (required) A tensor with the inputs.
        :param y_target: (required) A tensor with the one-hot target labels.
        :param batch_size: The number of inputs to include in a batch and
                           process simultaneously.
        :param binary_search_steps: The number of times we perform binary
                                    search to find the optimal tradeoff-
                                    constant between norm of the perturbation
                                    and cross-entropy loss of classification.
        :param max_iterations: The maximum number of iterations.
        :param initial_const: The initial tradeoff-constant to use to tune the
                              relative importance of size of the perturbation
                              and cross-entropy loss of the classification.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        import tensorflow as tf
        from .attacks_tf import LBFGS_attack
        self.parse_params(**kwargs)

        _, nb_classes = self.get_or_guess_labels(x, kwargs)

        attack = LBFGS_attack(
            self.sess, x, self.model.get_probs(x), self.y_target,
            self.binary_search_steps, self.max_iterations, self.initial_const,
            self.clip_min, self.clip_max, nb_classes, self.batch_size)

        def lbfgs_wrap(x_val, y_val):
            return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)

        wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
        wrap.set_shape(x.get_shape())

        return wrap 
Author: StephanZheng | Project: neural-fingerprinting | Lines: 40 | Source: attacks.py
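
The wrapper pattern relies on TF1's tf.py_func, which lifts a NumPy function into the graph and, as above, needs set_shape because static shape information is lost. A minimal self-contained sketch of the same pattern:

import numpy as np
import tensorflow as tf

def numpy_square(x_val):
    return np.square(x_val).astype(np.float32)

x = tf.placeholder(tf.float32, [None, 3])
y = tf.py_func(numpy_square, [x], tf.float32)
y.set_shape(x.get_shape())   # py_func drops static shape info

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: np.ones((2, 3), np.float32)}))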

Example 11: parse_params

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def parse_params(self,
                     layer=None,
                     eps=0.3,
                     eps_iter=0.05,
                     nb_iter=10,
                     ord=np.inf,
                     clip_min=None,
                     clip_max=None,
                     **kwargs):
        """
        Take in a dictionary of parameters and applies attack-specific checks
        before saving them as attributes.

        Attack-specific parameters:

        :param layer: (required str) name of the layer to target.
        :param eps: (required float) maximum distortion of adversarial example
                    compared to original input
        :param eps_iter: (required float) step size for each attack iteration
        :param nb_iter: (required int) Number of attack iterations.
        :param ord: (optional) Order of the norm (mimics Numpy).
                    Possible values: np.inf, 1 or 2.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """

        # Save attack-specific parameters
        self.layer = layer
        self.eps = eps
        self.eps_iter = eps_iter
        self.nb_iter = nb_iter
        self.ord = ord
        self.clip_min = clip_min
        self.clip_max = clip_max

        # Check if order of the norm is acceptable given current implementation
        if self.ord not in [np.inf, 1, 2]:
            raise ValueError("Norm order must be either np.inf, 1, or 2.")

        return True 
Author: StephanZheng | Project: neural-fingerprinting | Lines: 42 | Source: attacks.py
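
The accepted orders mirror tf.norm's ord argument for vector norms. A quick sketch of all three on a single vector (expected values in comments):

import numpy as np
import tensorflow as tf

v = tf.constant([3.0, -4.0])

l1 = tf.norm(v, ord=1)         # |3| + |-4|     = 7.0
l2 = tf.norm(v, ord=2)         # sqrt(9 + 16)   = 5.0
linf = tf.norm(v, ord=np.inf)  # max(|3|, |-4|) = 4.0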

Example 12: triplet_loss

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def triplet_loss(outputs, inputs, **config):
    distance_p = tf.norm(outputs['descriptor_image'] - outputs['descriptor_p'], axis=1)
    distance_n = tf.norm(outputs['descriptor_image'] - outputs['descriptor_n'], axis=1)
    if config['loss_in']:
        loss = tf.maximum(distance_p + config['triplet_margin'] - distance_n, 0)
        if config['loss_squared']:
            loss = tf.square(loss)
    else:
        dp = tf.square(distance_p) if config['loss_squared'] else distance_p
        dn = tf.square(distance_n) if config['loss_squared'] else distance_n
        loss = dp + tf.maximum(config['triplet_margin'] - dn, 0)
    return [tf.reduce_mean(i) for i in [loss, distance_p, distance_n]] 
Author: ethz-asl | Project: hierarchical_loc | Lines: 14 | Source: layers.py
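
A hedged usage sketch with random descriptors and a hypothetical config dict, exercising the margin branch (loss_in=True):

import tensorflow as tf

outputs = {
    'descriptor_image': tf.random_normal([8, 128]),
    'descriptor_p': tf.random_normal([8, 128]),
    'descriptor_n': tf.random_normal([8, 128]),
}
config = {'loss_in': True, 'loss_squared': False, 'triplet_margin': 0.5}

loss, dist_p, dist_n = triplet_loss(outputs, inputs=None, **config)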

Example 13: rescale

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def rescale(self, U: List[Tensor], fNonUnit: int) -> List[Tensor]:
        """Puts all variance in the factor `fUpdate`-th factor.

        The method assumes that the norm of all filters is larger than 0."""
        F = len(U)

        # calculate the scale for each source
        scaleOfSources = tf.ones_like(U[0][..., 0])
        for f in range(F):
            scaleOfSources = scaleOfSources*tf.norm(U[f], axis=-1)

        for f in range(F):
            # determine rescaling constant depending on the factor number
            Uf = U[f]
            normUf = tf.norm(Uf, axis=-1)
            if f == fNonUnit:
                # put all variance into the filters of the fNonUnit-th factor
                rescaleConstant = scaleOfSources/normUf
            else:
                # normalize the filters of all other factors
                rescaleConstant = 1./normUf

            # rescaled filters
            Uf = Uf*rescaleConstant[..., None]
            U[f] = Uf

        return(U) 
Author: bethgelab | Project: decompose | Lines: 29 | Source: tensorFactorisation.py
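
The intended invariant of rescale is easy to state: afterwards, every factor except fNonUnit has unit-norm filters, and the elementwise product of filter norms across factors is unchanged. A sketch of how one might check it on an instance providing rescale, assuming two factors:

import tensorflow as tf

# Two factors, each with 3 filters of dimension 4.
U = [tf.random_normal([3, 4]), tf.random_normal([3, 4])]
product_before = tf.norm(U[0], axis=-1) * tf.norm(U[1], axis=-1)

# After U = rescale(U, fNonUnit=0) one expects, per filter:
#   tf.norm(U[1], axis=-1) ~ 1.0             (unit norm)
#   tf.norm(U[0], axis=-1) ~ product_before  (all variance in factor 0)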

Example 14: sample

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def sample(cls, parameters: Dict[str, Tensor], nSamples: Tensor) -> Tensor:
        tau0, tau1 = parameters["tau0"], parameters["tau1"]  # key aligned with pdf() below
        tau = tf.matmul(tau0[..., None], tau1[None, ...])
        norm = tf.distributions.Normal(loc=tf.zeros_like(tau),
                                       scale=1./tf.sqrt(tau1))
        r = norm.sample(sample_shape=(nSamples,))
        return(r) 
Author: bethgelab | Project: decompose | Lines: 9 | Source: cenNormalRankOneAlgorithms.py

Example 15: pdf

# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import norm [as alias]
def pdf(cls, parameters: Dict[str, Tensor], data: Tensor) -> Tensor:
        tau0, tau1 = parameters["tau0"], parameters["tau1"]
        tau = tf.matmul(tau0[..., None], tau1[None, ...])
        norm = tf.distributions.Normal(loc=tf.zeros_like(tau),
                                       scale=tf.sqrt(1./tau))
        pdf = norm.prob(value=data)
        return(pdf) 
Author: bethgelab | Project: decompose | Lines: 9 | Source: cenNormalRankOneAlgorithms.py
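
In both of the last two snippets the rank-one precision is built as an outer product of tau0 and tau1 via matmul over inserted singleton axes. A small sketch making that construction explicit:

import tensorflow as tf

tau0 = tf.constant([1.0, 2.0])        # shape (2,)
tau1 = tf.constant([3.0, 4.0, 5.0])   # shape (3,)

# (2, 1) @ (1, 3) -> (2, 3), i.e. tau[i, j] = tau0[i] * tau1[j]
tau = tf.matmul(tau0[..., None], tau1[None, ...])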


Note: The tensorflow.norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not reproduce this article without permission.