

Python tensorflow.div Method Code Examples

This article collects typical usage examples of the Python method tensorflow.div. If you are wondering what tensorflow.div does, how to use it, or need worked examples, the curated code samples below may help. You can also explore further usage examples of this method elsewhere in tensorflow.


The 15 code examples of tensorflow.div below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
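As a quick reference before the examples, here is a minimal sketch of tf.div itself (assuming TensorFlow 1.x graph mode; tf.div was later deprecated in favor of tf.math.divide and removed from the TF 2.x namespace):

import tensorflow as tf

a = tf.constant([10., 20., 30.])
b = tf.constant([2., 4., 8.])
quotient = tf.div(a, b)  # element-wise division; result dtype follows the inputs

with tf.Session() as sess:
    print(sess.run(quotient))  # [ 5.    5.    3.75]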

Example 1: preprocess_image

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)  # signature is (image, target_height, target_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
Author: ringringyi, Project: DOTA_models, Lines: 21, Source: lenet_preprocessing.py
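The subtract/divide pair above maps uint8 pixel values from [0, 255] into roughly [-1, 1]. A hypothetical call (the placeholder below is illustrative, not part of the original file):

raw_image = tf.placeholder(tf.uint8, [None, None, 3], name="raw_image")
processed = preprocess_image(raw_image, output_height=28, output_width=28,
                             is_training=True)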

Example 2: ExpectScaledSize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def ExpectScaledSize(self, spec, target_shape, factor=1):
    """Tests that the output of the graph of the given spec has target_shape."""
    with tf.Graph().as_default():
      with self.test_session() as sess:
        self.SetupInputs()
        # Only the placeholders are given at construction time.
        vgsl = vgslspecs.VGSLSpecs(self.ph_widths, self.ph_heights, True)
        outputs = vgsl.Build(self.ph_image, spec)
        # Compute the expected output widths from the given scale factor.
        target_widths = tf.div(self.in_widths, factor).eval()
        target_heights = tf.div(self.in_heights, factor).eval()
        # Run with the 'real' data.
        tf.global_variables_initializer().run()
        res_image, res_widths, res_heights = sess.run(
            [outputs, vgsl.GetLengths(2), vgsl.GetLengths(1)],
            feed_dict={self.ph_image: self.in_image,
                       self.ph_widths: self.in_widths,
                       self.ph_heights: self.in_heights})
        self.assertEqual(tuple(res_image.shape), target_shape)
        if target_shape[1] > 1:
          self.assertEqual(tuple(res_heights), tuple(target_heights))
        if target_shape[2] > 1:
          self.assertEqual(tuple(res_widths), tuple(target_widths)) 
Author: ringringyi, Project: DOTA_models, Lines: 25, Source: vgslspecs_test.py

Example 3: drop_path

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def drop_path(inputs, keep_prob, is_training=True, scope=None):
    """Drops out a whole example hiddenstate with the specified probability.
    """
    with tf.name_scope(scope, 'drop_path', [inputs]):
        net = inputs
        if is_training:
            batch_size = tf.shape(net)[0]
            noise_shape = [batch_size, 1, 1, 1]
            random_tensor = keep_prob
            random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
            binary_tensor = tf.floor(random_tensor)
            net = tf.div(net, keep_prob) * binary_tensor
        return net

# =========================================================================== #
# Useful methods
# =========================================================================== # 
Author: huawei-noah, Project: ghostnet, Lines: 19, Source: utils.py
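The floor trick above is the same one standard dropout uses: for u drawn from U[0, 1), floor(keep_prob + u) equals 1 with probability keep_prob, and dividing the surviving paths by keep_prob keeps the layer's expectation unchanged:

\Pr\big[\lfloor p + u \rfloor = 1\big] = p, \qquad
\mathbb{E}[\text{net}'] = p \cdot \frac{\text{net}}{p} + (1 - p) \cdot 0 = \text{net}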

Example 4: normalize_score

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def normalize_score(log_probs, sequence_lengths, length_penalty_weight):
    """Normalizes scores for beam search hypotheses by the length.
    Args:
        log_probs: The log probabilities with shape
            `[beam_width, vocab_size]`.
        sequence_lengths: The sequence lengths of all hypotheses, a tensor
            of shape `[beam_width, vocab_size]`.
        length_penalty_weight: A float value, a scalar that weights the length
            penalty. Disabled with 0.0.
    Returns:
        score: The scores normalized by the length_penalty
    """
    # Calculate the length penalty
    length_penalty = tf.div(
        (5. + tf.to_float(sequence_lengths))**length_penalty_weight,
        (5. + 1.)**length_penalty_weight)
    # NOTE: See details in https://arxiv.org/abs/1609.08144.

    # Normalize log probabilities by the length penalty
    if length_penalty_weight is None or length_penalty_weight == 0:
        # Disabled (None or 0.0): keep the raw log probabilities.
        score = log_probs
    else:
        score = log_probs / length_penalty

    return score 
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 27, Source: util.py
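Written out, this is the length normalization from the GNMT paper linked in the code, with alpha = length_penalty_weight:

lp(Y) = \frac{(5 + |Y|)^{\alpha}}{(5 + 1)^{\alpha}}, \qquad
s(Y, X) = \frac{\log P(Y \mid X)}{lp(Y)}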

Example 5: _apply_dense

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def _apply_dense(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    if var.dtype.base_dtype == tf.float16:
        eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
    else:
        eps = 1e-8

    v = self.get_slot(var, "v")
    v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))
    m = self.get_slot(var, "m")
    m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)
    # Note: the bias-corrected estimates below are computed but never used;
    # g_t is built from the raw m_t and v_t.
    v_t_hat = tf.div(v_t, 1. - beta2_t)
    m_t_hat = tf.div(m_t, 1. - beta1_t)

    g_t = tf.div(m_t, tf.sqrt(v_t) + eps)
    g_t_1 = self.get_slot(var, "g")
    g_t = g_t_1.assign(g_t)

    var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1)  # Adam would be lr_t * g_t
    return control_flow_ops.group(*[var_update, m_t, v_t, g_t]) 
Author: HyperGAN, Project: HyperGAN, Lines: 24, Source: adamirror.py
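Reading the update rule off the code (an interpretation of the snippet, not documented HyperGAN behavior): the extra g slot stores the previous step's gradient estimate, giving an optimistic two-step variant of Adam rather than the standard update:

\theta_{t+1} = \theta_t - 2\eta\, g_t + \eta\, g_{t-1}, \qquad
g_t = \frac{m_t}{\sqrt{v_t} + \epsilon}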

Example 6: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def __call__(self, step):
    # TODO(tf-mot): consider switch to divide for 1.XX also.
    if hasattr(tf, 'div'):
      divide = tf.div
    else:
      divide = tf.math.divide

    # TODO(pulkitb): Replace function with tf.polynomial_decay
    with tf.name_scope('polynomial_decay_pruning_schedule'):
      p = tf.math.minimum(
          1.0,
          tf.math.maximum(
              0.0,
              divide(
                  tf.dtypes.cast(step - self.begin_step, tf.float32),
                  self.end_step - self.begin_step)))
      sparsity = tf.math.add(
          tf.math.multiply(self.initial_sparsity - self.final_sparsity,
                           tf.math.pow(1 - p, self.power)),
          self.final_sparsity,
          name='sparsity')

    return (self._should_prune_in_step(step, self.begin_step, self.end_step,
                                       self.frequency),
            sparsity) 
Author: tensorflow, Project: model-optimization, Lines: 27, Source: pruning_schedule.py
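Expanded, the schedule interpolates from initial_sparsity s_i at begin_step t_0 to final_sparsity s_f at end_step t_n, with the progress fraction clipped to [0, 1]:

s_t = s_f + (s_i - s_f)\left(1 - \frac{t - t_0}{t_n - t_0}\right)^{p}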

Example 7: multi_view_att

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def multi_view_att(ori_memory, att_w, dec_hidden, *args):
    bsz, max_len, rnn_hsz = args

    dec_hidden = att_w(dec_hidden)  # b*f

    ori_memory_t = tf.transpose(ori_memory, perm=[2, 0, 1])  # f*b*t
    flatten_om = tf.layers.flatten(ori_memory_t)

    beta_is = tf.exp(tf.tanh(tf.matmul(dec_hidden, flatten_om)))  # b*b*t
    beta_is = tf.reshape(beta_is, [bsz, bsz, max_len])
    beta_is = tf.transpose(beta_is, perm=[2, 0, 1])  # t*b*b

    beta_i_sum = tf.reduce_sum(beta_is, axis=0, keepdims=True)
    beta_i_sum = tf.tile(beta_i_sum, [max_len, 1, 1])
    beta_is = tf.div(beta_is, beta_i_sum)

    ori_memory_t = tf.transpose(ori_memory, perm=[1, 0, 2])
    return tf.reduce_sum(tf.matmul(beta_is, ori_memory_t), axis=0) 
Author: ne7ermore, Project: deeping-flow, Lines: 20, Source: model.py

Example 8: intra_decoder_atten

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def intra_decoder_atten(w_dec, dec_hidden, dec_out):
    """
    Args:
        w_dec: w_dec_atten, size - [dec_hsz*dec_hsz]
        dec_hidden: decode hidden/time, size - [bsz*dec_hsz]
        dec_out: decode out, size - [bsz*time*dec_hsz]

    Return:
        dec_c_t: doc vector, size - [bsz*dec_hsz]
    """
    pre_hiddens = tf.transpose(dec_out, perm=[1, 0, 2])
    times = tf.shape(dec_out)[1]

    # formulation 6
    d_tts = tf.exp(tf.multiply(tf.tile(tf.expand_dims(
        tf.matmul(dec_hidden, w_dec), 0), [times, 1, 1]), pre_hiddens))

    # formulation 7
    norm_d_tt = tf.tile(tf.reduce_sum(d_tts, 0, keep_dims=True), [times, 1, 1])
    alpha_dec_tts = tf.div(d_tts, norm_d_tt)

    # formulation 8
    dec_c_t = tf.reduce_sum(tf.multiply(alpha_dec_tts, pre_hiddens), 0)

    return dec_c_t 
Author: ne7ermore, Project: deeping-flow, Lines: 27, Source: attention.py

Example 9: softmax_N

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def softmax_N(tensor, name=None):
  """Apply softmax across last dimension of a tensor.

  Args:
    tensor: Input tensor.
    name: Name for this op. If None, defaults to 'softmax_N'.

  Returns:
    A tensor with softmax-normalized values on the last dimension.
  """
  with tf.name_scope(name, 'softmax_N', [tensor]):
    exp_tensor = tf.exp(tensor)
    reduction_indices = [tensor.get_shape().ndims - 1]
    return tf.div(exp_tensor,
                  tf.reduce_sum(
                      exp_tensor, axis=reduction_indices, keep_dims=True)) 
Author: simonfqy, Project: PADME, Lines: 18, Source: model_ops.py
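This is the plain softmax along the last axis. Unlike tf.nn.softmax, it does not subtract the per-row maximum before exponentiating, so very large logits can overflow the exp:

\mathrm{softmax}(x)_i = \frac{e^{x_i}}{\sum_j e^{x_j}}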

Example 10: contrastive_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def contrastive_loss(label, feat1, feat2, margin=1.0):

	distance = tf.sqrt(1e-20+tf.reduce_sum(tf.pow(feat1-feat2, 2), 1, keep_dims=True))
	# distance_norm = tf.add(tf.sqrt(tf.reduce_sum(tf.square(feat1), 1, keep_dims=True)), tf.sqrt(tf.reduce_sum(tf.square(feat2), 1, keep_dims=True)))
	# distance = tf.div(distance, tf.stop_gradient(distance_norm+1e-10))
	distance = tf.reshape(distance, [-1], name="distance")

	input_shape_list = bert_utils.get_shape_list(feat1, expected_rank=[2])
	batch_size = input_shape_list[0]

	y = tf.cast(label, tf.float32)
	# the smaller the distance, the better for similar pairs (y = 1)
	tmp = y * tf.pow(distance, 2)
	# when distance is larger than margin, the gradient is ignored
	tmp2 = (1 - y) * tf.pow(tf.maximum(margin - distance, 0.0), 2)
	per_example_loss = (tmp + tmp2) / 2
	return per_example_loss, distance 
Author: yyht, Project: BERT, Lines: 19, Source: loss_utils.py
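This matches the usual margin-based contrastive loss (in the style of Hadsell et al., 2006), where y = 1 marks a similar pair and d is the Euclidean distance computed above:

L(y, d) = \frac{1}{2} \Big[ y\, d^2 + (1 - y)\, \max(0,\ m - d)^2 \Big]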

Example 11: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.constant(0.0, shape=[vocab_size, embedding_size]),
                trainable=trainableEmbeddings,name="W")
            self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        print(self.embedded_words1)
        # Stacked-RNN "towers", one per input side, followed by a normalized distance
        with tf.name_scope("output"):
            self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
            self.distance = tf.div(self.distance,
                                   tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                          tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y,self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),tf.rint(self.distance), name="temp_sim") #auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy=tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") 
Author: dhwajraj, Project: deep-siamese-text-similarity, Lines: 36, Source: siamese_network_semantic.py
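The tf.div in the output scope rescales the Euclidean distance by the sum of the two output norms; by the triangle inequality the result lies in [0, 1], which is what lets tf.rint act as a fixed 0.5 threshold in the accuracy block:

d(o_1, o_2) = \frac{\lVert o_1 - o_2 \rVert_2}{\lVert o_1 \rVert_2 + \lVert o_2 \rVert_2} \in [0, 1]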

Example 12: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                trainable=True,name="W")
            self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            #self.embedded_chars_expanded1 = tf.expand_dims(self.embedded_chars1, -1)
            self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)
            #self.embedded_chars_expanded2 = tf.expand_dims(self.embedded_chars2, -1)

        # BiRNN "towers", one per input side, followed by a normalized distance
        with tf.name_scope("output"):
            self.out1 = self.BiRNN(self.embedded_chars1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2 = self.BiRNN(self.embedded_chars2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
            self.distance = tf.div(self.distance,
                                   tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                          tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y,self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),tf.rint(self.distance), name="temp_sim") #auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy=tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") 
Author: dhwajraj, Project: deep-siamese-text-similarity, Lines: 38, Source: siamese_network.py

Example 13: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def call(self, seq_value_len_list, mask=None, **kwargs):
        if self.supports_masking:
            if mask is None:
                raise ValueError(
                    "When supports_masking=True, input must support masking")
            uiseq_embed_list = seq_value_len_list
            mask = tf.to_float(mask)
            user_behavior_length = tf.reduce_sum(mask, axis=-1, keep_dims=True)
            mask = tf.expand_dims(mask, axis=2)
        else:
            uiseq_embed_list, user_behavior_length = seq_value_len_list

            mask = tf.sequence_mask(user_behavior_length,
                                    self.seq_len_max, dtype=tf.float32)
            mask = tf.transpose(mask, (0, 2, 1))

        embedding_size = uiseq_embed_list.shape[-1]

        mask = tf.tile(mask, [1, 1, embedding_size])

        uiseq_embed_list *= mask
        hist = uiseq_embed_list
        if self.mode == "max":
            return tf.reduce_max(hist, 1, keep_dims=True)

        hist = tf.reduce_sum(hist, 1, keep_dims=False)

        if self.mode == "mean":
            hist = tf.div(hist, user_behavior_length+self.eps)

        hist = tf.expand_dims(hist, axis=1)
        return hist 
Author: ShenDezhou, Project: icme2019, Lines: 34, Source: sequence.py
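In "mean" mode the layer computes a masked average: padding positions are zeroed out by the mask, and tf.div divides the masked sum by the true sequence length L, with eps guarding against zero-length sequences:

\text{mean} = \frac{\sum_{t=1}^{T} m_t \odot e_t}{L + \varepsilon}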

Example 14: getCosineSimilarity

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def getCosineSimilarity(q, a):
        q1 = tf.sqrt(tf.reduce_sum(tf.multiply(q, q), 1))
        a1 = tf.sqrt(tf.reduce_sum(tf.multiply(a, a), 1))
        mul = tf.reduce_sum(tf.multiply(q, a), 1)
        cosSim = tf.div(mul, tf.multiply(q1, a1))
        return cosSim 
Author: shuaihuaiyi, Project: QA, Lines: 8, Source: qaLSTMNet.py
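This is the standard cosine similarity. Note there is no epsilon in the tf.div denominator, so an all-zero q or a yields NaN; guarding with something like tf.maximum(tf.multiply(q1, a1), 1e-12) would be a defensive variant (an assumption, not in the original code):

\cos(q, a) = \frac{\sum_i q_i a_i}{\lVert q \rVert_2\, \lVert a \rVert_2}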

Example 15: _gather_clone_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import div [as alias]
def _gather_clone_loss(clone, num_clones, regularization_losses):
  """Gather the loss for a single clone.

  Args:
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.

  Returns:
    A tensor for the total loss for the clone.  Can be None.
  """
  # The return value.
  sum_loss = None
  # Individual components of the loss that will need summaries.
  clone_loss = None
  regularization_loss = None
  # Compute and aggregate losses on the clone device.
  with tf.device(clone.device):
    all_losses = []
    clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    if clone_losses:
      clone_loss = tf.add_n(clone_losses, name='clone_loss')
      if num_clones > 1:
        clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                            name='scaled_clone_loss')
      all_losses.append(clone_loss)
    if regularization_losses:
      regularization_loss = tf.add_n(regularization_losses,
                                     name='regularization_loss')
      all_losses.append(regularization_loss)
    if all_losses:
      sum_loss = tf.add_n(all_losses)
  # Add the summaries out of the clone device block.
  if clone_loss is not None:
    tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
  if regularization_loss is not None:
    tf.summary.scalar('regularization_loss', regularization_loss)
  return sum_loss 
Author: ringringyi, Project: DOTA_models, Lines: 41, Source: model_deploy.py


Note: The tensorflow.div examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open source projects contributed by the community, and copyright remains with the original authors. For distribution and reuse, please refer to each project's License; do not reproduce without permission.