

Python tensorflow.norm Function Code Examples

This article collects typical usage examples of the tensorflow.norm function in Python. If you have been wondering what the norm function does, how to use it, or what real code that calls it looks like, the curated examples below should help.


Fifteen code examples of the norm function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.

Example 1: cosineface_losses

def cosineface_losses(embedding, labels, out_num, w_init=None, s=30., m=0.4):
    '''
    :param embedding: the input embedding vectors
    :param labels: the input labels; the shape should be e.g. (batch_size, 1)
    :param out_num: the number of output classes
    :param w_init: initializer for the weight matrix
    :param s: the scale factor, default is 30
    :param m: the margin value, default is 0.4
    :return: the final calculated output, which is fed directly into tf.nn.softmax
    '''
    with tf.variable_scope('cosineface_loss'):
        # inputs and weights norm
        embedding_norm = tf.norm(embedding, axis=1, keep_dims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keep_dims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos_theta - m
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t_m = tf.subtract(cos_t, m, name='cos_t_m')

        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        inv_mask = tf.subtract(1., mask, name='inverse_mask')

        output = tf.add(s * tf.multiply(cos_t, inv_mask), s * tf.multiply(cos_t_m, mask), name='cosineface_loss_output')
    return output
Author: xy694942097 | Project: InsightFace_TF | Lines: 26 | File: face_losses.py
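
A minimal usage sketch (my illustration, not from the original repository; the shapes, placeholder names, and TensorFlow 1.x session style are assumptions): the returned logits are meant to go straight into a softmax cross-entropy loss.

import tensorflow as tf

# Hypothetical setup: 512-d embeddings, 10 classes, batch of 32.
embeddings = tf.placeholder(tf.float32, shape=(32, 512))
labels = tf.placeholder(tf.int64, shape=(32,))

logits = cosineface_losses(embeddings, labels, out_num=10)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))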

Example 2: tf_summary

 def tf_summary(self):
     tf.summary.scalar('cost', self.cost)
     tf.summary.scalar('w_fnorm', tf.norm(self.W, ord='euclidean', axis=[-2,-1]))   # Frobenius Norm
     tf.summary.scalar('b_1norm', tf.norm(self.b, ord=1))
     tf.summary.scalar('b_2norm', tf.norm(self.b, ord=2))
     self.summary = tf.summary.merge_all()   # for saving in the epoch/iteration
     self.sw = tf.summary.FileWriter(self.result_dir, self.sess.graph)
Author: jamescfli | Project: PythonTest | Lines: 7 | File: make_logistic_regression_model.py
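
A quick sanity check (an illustrative snippet of mine, not part of the original model): with ord='euclidean' and axis=[-2, -1], tf.norm reduces over the last two dimensions and yields the Frobenius norm, i.e. the square root of the sum of squared entries.

import numpy as np
import tensorflow as tf

W = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
fro = tf.norm(W, ord='euclidean', axis=[-2, -1])   # Frobenius norm
manual = tf.sqrt(tf.reduce_sum(tf.square(W)))      # same value: sqrt(55)
with tf.Session() as sess:
    print(sess.run([fro, manual]))                 # [7.4161983, 7.4161983]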

Example 3: rothk_penalty

    def rothk_penalty(self, d_real, d_fake):
        config = self.config
        g_sample = self.gan.uniform_sample
        x = self.gan.inputs.x
        gradx = tf.gradients(d_real, [x])[0]
        gradg = tf.gradients(d_fake, [g_sample])[0]
        gradx = tf.reshape(gradx, [self.ops.shape(gradx)[0], -1])
        gradg = tf.reshape(gradg, [self.ops.shape(gradg)[0], -1])
        gradx_norm = tf.norm(gradx, axis=1, keep_dims=True)
        gradg_norm = tf.norm(gradg, axis=1, keep_dims=True)
        if int(gradx_norm.get_shape()[0]) != int(d_real.get_shape()[0]):
            print("Condensing along batch for rothk")
            gradx_norm = tf.reduce_mean(gradx_norm, axis=0)
            gradg_norm = tf.reduce_mean(gradg_norm, axis=0)
        gradx = tf.square(gradx_norm) * tf.square(1-tf.nn.sigmoid(d_real))
        gradg = tf.square(gradg_norm) * tf.square(tf.nn.sigmoid(d_fake))
        loss = gradx + gradg
        loss *= config.rothk_lambda or 1
        if config.rothk_decay:
            decay_function = config.decay_function or tf.train.exponential_decay
            decay_steps = config.decay_steps or 50000
            decay_rate = config.decay_rate or 0.9
            decay_staircase = config.decay_staircase or False
            global_step = tf.train.get_global_step()
            loss = decay_function(loss, global_step, decay_steps, decay_rate, decay_staircase)

        return loss
Author: 255BITS | Project: hyperchamber-gan | Lines: 27 | File: base_loss.py
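
For reference, this appears to implement (up to the batch averaging) the gradient-norm regularizer of Roth et al. (2017), which weights each squared gradient norm by the discriminator's confidence on the opposite class. As I read the code above:

R = \mathbb{E}_{x}\left[(1-\sigma(D(x)))^{2}\,\lVert\nabla_{x} D(x)\rVert^{2}\right] + \mathbb{E}_{z}\left[\sigma(D(G(z)))^{2}\,\lVert\nabla_{G(z)} D(G(z))\rVert^{2}\right]

where \sigma is the sigmoid; tf.norm with axis=1 supplies the per-example gradient norms.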

Example 4: p_norm

def p_norm(tensor,order):
    if type(order) in [int,float]:
        return tf.norm(tensor,ord=order)
    elif type(order) in [list,tuple]:
        return [tf.norm(tensor,ord=order_item) for order_item in order]
    else:
        raise ValueError('Unrecognized order of p_norm: %s'%str(order))
Author: liuchen11 | Project: SSDForRNN | Lines: 7 | File: norm.py
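
An illustrative call (inputs assumed): a scalar order returns a single tensor, while a list or tuple of orders returns a list of norm tensors, one per order.

import tensorflow as tf

t = tf.constant([3.0, 4.0])
l2 = p_norm(t, 2)           # single tensor: 5.0
l1_l2 = p_norm(t, [1, 2])   # list of tensors: [7.0, 5.0]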

Example 5: find_best_k

def find_best_k(X, Z):
    best_k = 1
    best_valid_loss = float("inf")
    for k in [1, 3, 5, 50]:
        sess = tf.InteractiveSession()

        dist = calculate_euclidean_distance(X, Z)
        # print(sess.run(dist, feed_dict={X: trainData, Z: testData}))
        r = calculate_responsibilities(dist, k=k)
        prediction = tf.matmul(r, casted_train_target)

        train_losses = tf.norm(trainTarget - prediction)
        valid_losses = tf.norm(validTarget - prediction)
        valid_losses = sess.run(valid_losses, feed_dict={X: trainData, Z: validData})
        test_losses = tf.norm(testTarget - prediction)
        print("Training/Validation/Testing loss for k={:d} is {:f}/{:f}/{:f}"
              .format(k, sess.run(train_losses, feed_dict={X: trainData, Z: trainData}),
                      valid_losses,
                      sess.run(test_losses, feed_dict={X: trainData, Z: testData})))

        if valid_losses < best_valid_loss:
            best_k = k
            best_valid_loss = valid_losses

    return best_k, best_valid_loss
Author: mchenchen | Project: Course-Work | Lines: 25 | File: a1_2.py
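
Note (a property of tf.norm, not a change to the code): with no axis argument, tf.norm flattens its input and returns the Euclidean norm of the whole tensor, so each "loss" above is the root of the summed squared errors over the full prediction matrix rather than a per-example mean.

import tensorflow as tf

err = tf.constant([[1.0, 2.0], [2.0, 0.0]])
loss = tf.norm(err)                    # sqrt(1 + 4 + 4) = 3.0
mse = tf.reduce_mean(tf.square(err))   # 2.25, a different quantity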

Example 6: _l1_loss

 def _l1_loss(self, hparams):
     l1_loss = tf.zeros([1], dtype=tf.float32)
     # embedding_layer l2 loss
     for param in self.embed_params:
         l1_loss = tf.add(l1_loss, tf.multiply(hparams.embed_l1, tf.norm(param, ord=1)))
     params = self.layer_params
     for param in params:
         l1_loss = tf.add(l1_loss, tf.multiply(hparams.layer_l1, tf.norm(param, ord=1)))
     return l1_loss
Author: zeroToAll | Project: tensorflow_practice | Lines: 9 | File: base_model.py
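
Worth noting (a fact about tf.norm, not part of the original model): with the default axis=None the tensor is treated as one flattened vector, so tf.norm(param, ord=1) is simply the sum of absolute entries, i.e. the usual L1 regularizer.

import tensorflow as tf

param = tf.constant([[1.0, -2.0], [3.0, -4.0]])
l1 = tf.norm(param, ord=1)            # 10.0
same = tf.reduce_sum(tf.abs(param))   # 10.0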

Example 7: apply_gradients

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [ v for _,v in grads_and_vars]
    with ops.init_scope():
        zt = [self._get_or_make_slot(v, v, "zt", self._name) for _,v in grads_and_vars]
        slots_list = []
        for name in self.optimizer.get_slot_names():
            for var in self.optimizer.variables():
                self._get_or_make_slot(var, var, "zt", "zt")
    self._prepare()

    def _name(post, s):
        ss = s.split(":")
        return ss[0] + "_" + post + "_dontsave"
    zt = [self.get_slot(v, "zt") for _,v in grads_and_vars]
    xt = [tf.Variable(v, name=_name("gigaxt",v.name)) for _,v in grads_and_vars]
    tmp = [tf.Variable(v, name=_name("gigatmp",v.name)) for _,v in grads_and_vars]
    xslots_list = []
    zslots_list = []
    tmpslots_list = []
    slots_vars = []
    for name in self.optimizer.get_slot_names():
        for var in self.optimizer.variables():
            slots_vars += [var]
            xslots_list.append(tf.Variable(var))
            zslots_list.append(self._get_or_make_slot(var, var, "zt", "zt"))
            tmpslots_list.append(tf.Variable(var, name=_name("gigaslottmp", var.name)))


    restored_vars = var_list + slots_vars
    zt_vars = zt + zslots_list
    xt_vars = xt + xslots_list
    tmp_vars = tmp + tmpslots_list
    all_grads = [ g for g, _ in grads_and_vars ]
    # store variables for resetting

    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(tmp_vars, restored_vars)]) # store tmp_vars

    with tf.get_default_graph().control_dependencies([op1]):
        op2 = self.optimizer.apply_gradients(grads_and_vars.copy(), global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op2]):
            op3 = tf.group(*[tf.assign(w, v) for w,v in zip(xt_vars, restored_vars)]) # store xt^+1 in xt_vars
            with tf.get_default_graph().control_dependencies([op3]):
                op4 = tf.group(*[tf.assign(w, v) for w,v in zip(restored_vars, zt_vars)]) # restore vars to zt (different weights)
                with tf.get_default_graph().control_dependencies([op4]):
                    op5 = self.optimizer2.apply_gradients(grads_and_vars.copy(), global_step=global_step, name=name) # zt+1
                    with tf.get_default_graph().control_dependencies([op5]):
                        zt1_xt1 = [_restored_vars - _xt1_vars for _restored_vars, _xt1_vars in zip(restored_vars, xt_vars)]
                        St1 = [tf.minimum(1.0, tf.norm(_zt1_vars-_zt_vars) / tf.norm(_zt1_xt1)) for _zt1_vars, _zt_vars, _zt1_xt1 in zip(restored_vars, zt_vars, zt1_xt1)]
                        self.gan.add_metric('st1',tf.reduce_mean(tf.add_n(St1)/len(St1)))
                        #self.gan.add_metric('xzt1',tf.norm(xt_vars[0]-zt_vars[0]))
                        nextw = [_xt_t1 + _St1 * _zt1_xt1 for _xt_t1, _St1, _zt1_xt1 in zip(xt_vars, St1, zt1_xt1)]
                        op6 = tf.group(*[tf.assign(w, v) for w,v in zip(zt_vars, restored_vars)]) # set zt+1
                        with tf.get_default_graph().control_dependencies([op6]):
                            op7 = tf.group(*[tf.assign(w, v) for w,v in zip(restored_vars, nextw)]) # set xt+1
                            with tf.get_default_graph().control_dependencies([op7]):
                                return tf.no_op()
Author: 255BITS | Project: hyperchamber-gan | Lines: 56 | File: giga_wolf_optimizer.py
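
As I read this optimizer (my interpretation of the code, not documentation from the project): each step produces two candidate iterates per variable, x_{t+1} from self.optimizer and z_{t+1} from self.optimizer2 started at the stored z_t, then damps the move toward z_{t+1}:

s_{t+1} = \min\left(1,\ \frac{\lVert z_{t+1}-z_{t}\rVert}{\lVert z_{t+1}-x_{t+1}\rVert}\right), \qquad w_{t+1} = x_{t+1} + s_{t+1}\,(z_{t+1}-x_{t+1})

where tf.norm supplies the Euclidean norms; when the two candidates are close, s_{t+1} saturates at 1 and the step reduces to z_{t+1}.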

Example 8: s_norm

def s_norm(tensor,order):
    s,U,V=tf.svd(tensor,full_matrices=False)
    result=None
    if type(order) in [int,float]:
        result=tf.norm(s,ord=order)
    elif type(order) in [list,tuple]:
        result=[tf.norm(s,ord=order_item) for order_item in order]
    else:
        raise ValueError('Unrecognized order of s_norm: %s'%str(order))
    return s,result
Author: liuchen11 | Project: SSDForRNN | Lines: 10 | File: norm.py
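
Since s holds the singular values, these are Schatten p-norms: ord=1 yields the nuclear (trace) norm and ord=2 the Frobenius norm. A hypothetical call:

import tensorflow as tf

m = tf.constant([[2.0, 0.0], [0.0, 1.0]])   # singular values: [2., 1.]
s, nuclear = s_norm(m, 1)                   # nuclear norm: 3.0
s, both = s_norm(m, [1, 2])                 # [3.0, 2.236...] = [nuclear, Frobenius]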

Example 9: __tensor_norm__

 def __tensor_norm__(self,tensor,order):
     if order in ['Si']:           # Schatten inf norm
         s,U,V=tf.svd(tensor,full_matrices=False)
         return tf.norm(s,ord=np.inf)
     elif order[0]=='S':           # Schatten norm
         s,U,V=tf.svd(tensor,full_matrices=False)
         sub_order=int(order[1:])
         return tf.norm(s,ord=sub_order)
     else:
         sub_order=int(order)
         return tf.norm(tensor,ord=sub_order)
Author: liuchen11 | Project: SSDForRNN | Lines: 11 | File: RNNs.py

Example 10: body

  def body(self, features):
    hp = self.hparams
    # pylint: disable=eval-used
    if hp.image_input_type == "image":
      image_feat = vqa_layers.image_embedding(
          features["inputs"],
          model_fn=eval(hp.image_model_fn),
          trainable=hp.train_resnet,
          is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)
    else:
      image_feat = features["inputs"]

    image_feat = common_layers.flatten4d3d(image_feat)
    image_feat = common_layers.dense(image_feat, hp.hidden_size)
    utils.collect_named_outputs("norms", "image_feat_after_proj",
                                tf.norm(image_feat, axis=-1))

    question = common_layers.flatten4d3d(features["question"])
    utils.collect_named_outputs("norms", "question_embedding",
                                tf.norm(question, axis=-1))
    (encoder_input, encoder_self_attention_bias,
     encoder_decoder_attention_bias) = prepare_image_question_encoder(
         image_feat, question, hp)

    encoder_input = tf.nn.dropout(
        encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout)

    encoder_output, _ = recurrent_transformer_decoder(
        encoder_input, None, encoder_self_attention_bias, None,
        hp, name="encoder")
    utils.collect_named_outputs(
        "norms", "encoder_output", tf.norm(encoder_output, axis=-1))

    # scale query by sqrt(hidden_size)
    query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5
    query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0)
    batch_size = common_layers.shape_list(encoder_input)[0]
    query = tf.tile(query, [batch_size, 1, 1])
    query = tf.nn.dropout(
        query, keep_prob=1.-hp.layer_prepostprocess_dropout)

    decoder_output, _ = recurrent_transformer_decoder(
        query, encoder_output, None, encoder_decoder_attention_bias,
        hp, name="decoder")
    utils.collect_named_outputs("norms", "decoder_output",
                                tf.norm(decoder_output, axis=-1))

    norm_tensors = utils.convert_collection_to_dict("norms")
    vqa_layers.summarize_tensors(norm_tensors, tag="norms/")

    # Expand dimension 1 and 2
    return tf.expand_dims(decoder_output, axis=1)
Author: qixiuai | Project: tensor2tensor | Lines: 52 | File: vqa_recurrent_self_attention.py

Example 11: image_encoder

def image_encoder(image_feat,
                  hparams,
                  name="image_encoder",
                  save_weights_to=None,
                  make_image_summary=True):
  """A stack of self attention layers."""

  x = image_feat
  image_hidden_size = hparams.image_hidden_size or hparams.hidden_size
  image_filter_size = hparams.image_filter_size or hparams.filter_size
  with tf.variable_scope(name):
    for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          y = vqa_layers.multihead_attention(
              common_layers.layer_preprocess(x, hparams),
              None,
              None,
              hparams.attention_key_channels or image_hidden_size,
              hparams.attention_value_channels or image_hidden_size,
              image_hidden_size,
              hparams.num_heads,
              hparams.attention_dropout,
              attention_type=hparams.image_self_attention_type,
              save_weights_to=save_weights_to,
              make_image_summary=make_image_summary,
              scale_dotproduct=hparams.scale_dotproduct,
          )
          utils.collect_named_outputs(
              "norms", "image_feat_self_attention_%d"%(layer),
              tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs(
              "norms", "image_feat_self_attention_postprocess_%d"%(layer),
              tf.norm(x, axis=-1))
        with tf.variable_scope("ffn"):
          y = common_layers.dense_relu_dense(
              common_layers.layer_preprocess(x, hparams),
              image_filter_size,
              image_hidden_size,
              dropout=hparams.relu_dropout,
          )
          utils.collect_named_outputs(
              "norms", "image_feat_ffn_%d"%(layer), tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs(
              "norms", "image_feat_ffn_postprocess_%d"%(layer),
              tf.norm(x, axis=-1))
    # if normalization is done in layer_preprocess, then it should also be done
    # on the output, since the output can grow very large, being the sum of
    # a whole stack of unnormalized layer outputs.
    return common_layers.layer_preprocess(x, hparams)
Author: qixiuai | Project: tensor2tensor | Lines: 52 | File: vqa_self_attention.py

Example 12: project_gradient_layer

 def project_gradient_layer(gs):
     if self.config.norm == 'softmax':
         return tf.nn.softmax(gs)
     elif self.config.norm == 'euclidean':
         return gs / (tf.sqrt(tf.reduce_sum(tf.square(gs)))+1e-8)
     elif self.config.norm == 'inf':
         return gs / (tf.norm(gs, ord=np.inf)+1e-8)
     elif self.config.norm == 'max':
         return gs / (tf.reduce_max(tf.abs(gs))+1e-8)
     elif self.config.norm == False:
         return gs
     else:
         return gs / (tf.norm(gs, ord=self.config.norm)+1e-8)
Author: 255BITS | Project: hyperchamber-gan | Lines: 13 | File: gradient_magnitude_optimizer.py
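
A small sketch of what two of the branches compute (my illustration; self.config.norm is assumed to be set by the surrounding optimizer config): each branch rescales the gradient to unit length under its norm, so only the direction survives.

import numpy as np
import tensorflow as tf

g = tf.constant([3.0, -4.0])
unit_l2 = g / (tf.sqrt(tf.reduce_sum(tf.square(g))) + 1e-8)   # 'euclidean': [0.6, -0.8]
unit_inf = g / (tf.norm(g, ord=np.inf) + 1e-8)                # 'inf': [0.75, -1.0]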

Example 13: nearest

def nearest(x, means, hparams):
  """Find the nearest means to elements in x."""
  x, means = tf.stop_gradient(x), tf.stop_gradient(means)
  x_flat = tf.reshape(x, [-1, hparams.hidden_size])
  x_norm = tf.norm(x_flat, axis=-1, keep_dims=True)
  means_norm = tf.norm(means, axis=-1, keep_dims=True)
  dist = x_norm + tf.transpose(means_norm) - 2 * tf.matmul(x_flat, means,
                                                           transpose_b=True)
  _, nearest_idx = tf.nn.top_k(- dist, k=1)
  nearest_hot = tf.one_hot(tf.squeeze(nearest_idx, axis=1), hparams.v_size)
  shape = common_layers.shape_list(x)
  shape[-1] = hparams.v_size
  nearest_hot = tf.reshape(nearest_hot, shape=shape)
  return tf.stop_gradient(nearest_hot)
Author: AranKomat | Project: tensor2tensor | Lines: 14 | File: transformer_vae.py
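
One detail worth flagging: the textbook expansion of the squared Euclidean distance is \lVert x-m\rVert^{2} = \lVert x\rVert^{2} + \lVert m\rVert^{2} - 2\langle x,m\rangle, which uses squared norms, whereas the snippet above feeds unsquared tf.norm values into that formula, so its dist is not the exact squared distance and the resulting nearest-neighbor ranking can differ. A sketch of the squared-norm variant, under that assumption (reusing x_flat and means from the function above):

import tensorflow as tf

x_norm_sq = tf.reduce_sum(tf.square(x_flat), axis=-1, keep_dims=True)      # ||x||^2, [N, 1]
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)   # ||m||^2, [K, 1]
dist = x_norm_sq + tf.transpose(means_norm_sq) \
    - 2 * tf.matmul(x_flat, means, transpose_b=True)                       # pairwise ||x - m||^2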

Example 14: _cross_l_loss

 def _cross_l_loss(self):
     """Construct L1-norm and L2-norm on cross network parameters for loss function.
     Returns:
         obj: Regular loss value on cross network parameters.
     """
     cross_l_loss = tf.zeros([1], dtype=tf.float32)
     for param in self.cross_params:
         cross_l_loss = tf.add(
             cross_l_loss, tf.multiply(self.hparams.cross_l1, tf.norm(param, ord=1))
         )
         cross_l_loss = tf.add(
             cross_l_loss, tf.multiply(self.hparams.cross_l2, tf.norm(param, ord=2))
         )
     return cross_l_loss
Author: David-Li-L | Project: recommenders | Lines: 14 | File: base_model.py

Example 15: _PerCentroidNormalization

  def _PerCentroidNormalization(self, unnormalized_vector):
    """Perform per-centroid normalization.

    Args:
      unnormalized_vector: [KxD] float tensor.

    Returns:
      per_centroid_normalized_vector: [KxD] float tensor, with normalized
        aggregated residuals. Some residuals may be all-zero.
      visual_words: Int tensor containing indices of visual words which are
        present for the set of features.
    """
    unnormalized_vector = tf.reshape(
        unnormalized_vector,
        [self._codebook_size, self._feature_dimensionality])
    per_centroid_norms = tf.norm(unnormalized_vector, axis=1)

    visual_words = tf.reshape(
        tf.where(
            tf.greater(per_centroid_norms, tf.sqrt(_NORM_SQUARED_TOLERANCE))),
        [-1])

    per_centroid_normalized_vector = tf.math.l2_normalize(
        unnormalized_vector, axis=1, epsilon=_NORM_SQUARED_TOLERANCE)

    return per_centroid_normalized_vector, visual_words
Author: rder96 | Project: models | Lines: 26 | File: feature_aggregation_extractor.py
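
The epsilon is what makes the docstring's "Some residuals may be all-zero" true (a fact about tf.math.l2_normalize, shown here with its default epsilon): it divides by sqrt(max(sum(x**2), epsilon)), so rows whose squared norm falls below the tolerance are scaled by a bounded factor instead of being blown up to unit length, and an all-zero row stays all-zero.

import tensorflow as tf

v = tf.constant([[3.0, 4.0], [0.0, 0.0]])
normalized = tf.math.l2_normalize(v, axis=1, epsilon=1e-12)
# row 0 -> [0.6, 0.8]; row 1 stays [0., 0.] since 0 / sqrt(epsilon) = 0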


Note: The tensorflow.norm examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs, with snippets selected from projects contributed by many developers. Copyright in the source code remains with the original authors; consult each project's License before distributing or reusing it, and do not reproduce this page without permission.