

Python tensorflow.multiply Function Code Examples

This article collects typical usage examples of the tensorflow.multiply function in Python. If you have been wondering what multiply does, how exactly to use it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the multiply function, sorted by popularity by default.
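
As a warm-up, here is a minimal sketch of tf.multiply itself (written in the same TF 1.x style as the examples below; the tensor values are invented for illustration). tf.multiply is elementwise multiplication with NumPy-style broadcasting, equivalent to the * operator on tensors, and distinct from matrix multiplication (tf.matmul):

import tensorflow as tf

a = tf.constant([[1., 2.], [3., 4.]])   # shape [2, 2]
b = tf.constant([[10., 100.]])          # shape [1, 2], broadcast over the rows of a
c = tf.multiply(a, b)                   # same as a * b

with tf.Session() as sess:
    print(sess.run(c))  # [[ 10. 200.]
                        #  [ 30. 400.]]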

Example 1: attention_mechanism_parallel

    def attention_mechanism_parallel(self,c_full,m,q,i):
        """ parallel implemtation of gate function given a list of candidate sentence, a query, and previous memory.
        Input:
           c_full: candidate fact. shape:[batch_size,story_length,hidden_size]
           m: previous memory. shape:[batch_size,hidden_size]
           q: question. shape:[batch_size,hidden_size]
        Output: a scalar score (in batch). shape:[batch_size,story_length]
        """
        q=tf.expand_dims(q,axis=1) #[batch_size,1,hidden_size]
        m=tf.expand_dims(m,axis=1) #[batch_size,1,hidden_size]

        # 1.define a large feature vector that captures a variety of similarities between input,memory and question vector: z(c,m,q)
        c_q_elementwise=tf.multiply(c_full,q)          #[batch_size,story_length,hidden_size]
        c_m_elementwise=tf.multiply(c_full,m)          #[batch_size,story_length,hidden_size]
        c_q_minus=tf.abs(tf.subtract(c_full,q))        #[batch_size,story_length,hidden_size]
        c_m_minus=tf.abs(tf.subtract(c_full,m))        #[batch_size,story_length,hidden_size]
        # c^T W q and c^T W m similarity terms
        c_w_q=self.x1Wx2_parallel(c_full,q,"c_w_q"+str(i))   #[batch_size,story_length,hidden_size]
        c_w_m=self.x1Wx2_parallel(c_full,m,"c_w_m"+str(i))   #[batch_size,story_length,hidden_size]
        q_tile=tf.tile(q,[1,self.story_length,1])     #[batch_size,story_length,hidden_size]
        m_tile=tf.tile(m,[1,self.story_length,1])     #[batch_size,story_length,hidden_size]
        z=tf.concat([c_full,m_tile,q_tile,c_q_elementwise,c_m_elementwise,c_q_minus,c_m_minus,c_w_q,c_w_m],2) #[batch_size,story_length,hidden_size*9]
        # 2. two-layer feed-forward network
        g=tf.layers.dense(z,self.hidden_size*3,activation=tf.nn.tanh)  #[batch_size,story_length,hidden_size*3]
        g=tf.layers.dense(g,1,activation=tf.nn.sigmoid)                #[batch_size,story_length,1]
        g=tf.squeeze(g,axis=2)                                         #[batch_size,story_length]
        return g
Developer: AmjadHisham, Project: text_classification, Lines: 28, Source: a8_dynamic_memory_network.py

Example 2: log_loss

def log_loss(labels, predictions, epsilon=1e-7, scope=None, weights=None):
  """Calculate log losses.

  Same as tf.losses.log_loss except that this returns the individual losses
  instead of passing them into compute_weighted_loss and returning their
  weighted mean. This is useful for eval jobs that report the mean loss. By
  returning individual losses, that mean loss can be the same regardless of
  batch size.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    weights: Weights to apply to labels.

  Returns:
    A `Tensor` representing the loss values.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`.
  """
  with tf.name_scope(scope, "log_loss", (predictions, labels)):
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = -tf.multiply(labels, tf.log(predictions + epsilon)) - tf.multiply(
        (1 - labels), tf.log(1 - predictions + epsilon))
    if weights is not None:
      losses = tf.multiply(losses, weights)

    return losses
Developer: cghawthorne, Project: magenta, Lines: 32, Source: tf_utils.py
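
A quick usage sketch (the values are invented for illustration): because log_loss returns per-element losses, taking tf.reduce_mean afterwards gives a mean that does not depend on how the evaluation data was batched:

import tensorflow as tf

labels = tf.constant([1., 0., 1.])
predictions = tf.constant([0.9, 0.2, 0.6])

losses = log_loss(labels, predictions)  # per-element losses, shape [3]
mean_loss = tf.reduce_mean(losses)

with tf.Session() as sess:
    print(sess.run(losses))     # ~[0.105 0.223 0.511]
    print(sess.run(mean_loss))  # ~0.280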

Example 3: batch_iou

def batch_iou(bboxes, bbox):
  """Compute iou of a batch of boxes with another box. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    bboxes: A batch of boxes. 2-D with shape `[B, 4]`.
    bbox: A single box. 1-D with shape `[4]`.

  Returns:
    Batch of IoUs. 1-D with shape `[B]`.
  """
  lr = tf.maximum(
    tf.minimum(bboxes[:, 3], bbox[3]) -
    tf.maximum(bboxes[:, 1], bbox[1]),
    0
  )
  tb = tf.maximum(
    tf.minimum(bboxes[:, 2], bbox[2]) -
    tf.maximum(bboxes[:, 0], bbox[0]),
    0
  )
  intersection = tf.multiply(tb, lr)
  union = tf.subtract(
    tf.multiply((bboxes[:, 3] - bboxes[:, 1]), (bboxes[:, 2] - bboxes[:, 0])) +
    tf.multiply((bbox[3] - bbox[1]), (bbox[2] - bbox[0])),
    intersection
  )
  iou = tf.div(intersection, union)
  return iou
Developer: tigercut, Project: MobileNet, Lines: 27, Source: det_utils.py
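
A usage sketch with hand-checkable numbers: a 2x2 box that overlaps the query box in a 1x1 square has IoU 1 / (4 + 4 - 1) = 1/7 ≈ 0.143, and a disjoint box scores 0:

import tensorflow as tf

bboxes = tf.constant([[0., 0., 2., 2.],    # overlaps bbox in a 1x1 square
                      [4., 4., 5., 5.]])   # no overlap
bbox = tf.constant([1., 1., 3., 3.])

with tf.Session() as sess:
    print(sess.run(batch_iou(bboxes, bbox)))  # ~[0.143 0.   ]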

Example 4: pearsoncorrelation

def pearsoncorrelation(ypred, y):
    # Pearson r = cov(ypred, y) / (std(ypred) * std(y)); the 1e-10 avoids division by zero.
    muy_ypred = tf.reduce_mean(ypred)
    muy_y = tf.reduce_mean(y)
    numerator = tf.reduce_sum(tf.multiply(ypred - muy_ypred, y - muy_y))
    denominator = tf.multiply(tf.sqrt(tf.reduce_sum(tf.square(ypred - muy_ypred))),
                              tf.sqrt(tf.reduce_sum(tf.square(y - muy_y)))) + 1e-10
    return numerator / denominator
Developer: savourylie, Project: fucos-tensorflow, Lines: 7, Source: metrics.py
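
A sanity check (values invented): Pearson correlation is invariant to positive affine transforms of the prediction, so any perfectly linear prediction should score close to ±1 (up to the 1e-10 stabilizer):

import tensorflow as tf

y = tf.constant([1., 2., 3., 4.])

with tf.Session() as sess:
    print(sess.run(pearsoncorrelation(2. * y + 5., y)))  # ~1.0
    print(sess.run(pearsoncorrelation(-y, y)))           # ~-1.0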

Example 5: task_specific_attention

 def task_specific_attention(self, inputs, output_size,
                             initializer=layers.xavier_initializer(),
                             activation_fn=tf.tanh, scope=None):
     """
     Performs task-specific attention reduction, using learned
     attention context vector (constant within task of interest).
     Args:
         inputs: Tensor of shape [batch_size, units, input_size]
             `input_size` must be static (known)
             `units` axis will be attended over (reduced from output)
             `batch_size` will be preserved
         output_size: Size of output's inner (feature) dimension
     Returns:
        outputs: Tensor of shape [batch_size, output_dim].
     """
     assert len(inputs.get_shape()) == 3 and inputs.get_shape()[-1].value is not None
     with tf.variable_scope(scope or 'attention') as scope:
         # u_w: the learned attention context vector
         attention_context_vector = tf.get_variable(name='attention_context_vector', shape=[output_size],
                                                    initializer=initializer, dtype=tf.float32)
         # fully connected layer: map h_i to u_i, shape [batch_size, units, input_size] -> [batch_size, units, output_size]
         input_projection = layers.fully_connected(inputs, output_size, activation_fn=activation_fn, scope=scope)
         # attention scores per unit; with keep_dims=True the shape is [batch_size, units, 1]
         vector_attn = tf.reduce_sum(tf.multiply(input_projection, attention_context_vector), axis=2, keep_dims=True)
         attention_weights = tf.nn.softmax(vector_attn, dim=1)
         tf.summary.histogram('attention_weights', attention_weights)
         weighted_projection = tf.multiply(inputs, attention_weights)
         outputs = tf.reduce_sum(weighted_projection, axis=1)
         return outputs  # output: [batch_size, hidden_size*2]
Developer: brucexia6116, Project: zhihu-text-classification, Lines: 29, Source: network.py

Example 6: compute_auc

 def compute_auc(tp, fn, tn, fp, name):
   """Computes the roc-auc or pr-auc based on confusion counts."""
   rec = tf.div(tp + epsilon, tp + fn + epsilon)
   if curve == 'ROC':
     fp_rate = tf.div(fp, fp + tn + epsilon)
     x = fp_rate
     y = rec
   elif curve == 'R':  # recall auc
     x = tf.linspace(1., 0., num_thresholds)
     y = rec
   else:  # curve == 'PR'.
     prec = tf.div(tp + epsilon, tp + fp + epsilon)
     x = rec
     y = prec
   if summation_method == 'trapezoidal':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   (y[:num_thresholds - 1] + y[1:]) / 2.),
       name=name)
   elif summation_method == 'minoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.minimum(y[:num_thresholds - 1], y[1:])),
       name=name)
   elif summation_method == 'majoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.maximum(y[:num_thresholds - 1], y[1:])),
       name=name)
   else:
     raise ValueError('Invalid summation_method: %s' % summation_method)
Developer: fossabot, Project: SiamFC-TensorFlow, Lines: 31, Source: track_metrics.py
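
The 'trapezoidal' branch is ordinary trapezoidal integration over the (x, y) curve points; a NumPy check with invented points (note x decreases with increasing threshold index, hence the x[:-1] - x[1:] differences):

import numpy as np

x = np.array([1.0, 0.6, 0.3, 0.0])  # e.g. FPR at increasing thresholds
y = np.array([1.0, 0.9, 0.7, 0.0])  # e.g. TPR at the same thresholds

auc = np.sum((x[:-1] - x[1:]) * (y[:-1] + y[1:]) / 2.)
print(auc)                          # 0.725
print(np.trapz(y[::-1], x[::-1]))   # same value via np.trapz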

Example 7: cell

    def cell(self,s_t,h_all,w_all,i):
        """
        parallel implementation of single time step for compute of input with memory
        :param s_t:   [batch_size,hidden_size].vector representation of current input(is a sentence).notice:hidden_size=embedding_size
        :param w_all: [batch_size,block_size,hidden_size]
        :param h_all: [batch_size,block_size,hidden_size]
        :return: new hidden state: [batch_size,block_size,hidden_size]
        """
        # 1.gate
        s_t_expand=tf.expand_dims(s_t, axis=1)       #[batch_size,1,hidden_size]
        g=tf.nn.sigmoid(tf.multiply(s_t_expand,h_all)+tf.multiply(s_t_expand,w_all))#shape:[batch_size,block_size,hidden_size]

        # 2.candidate hidden state
        # the matmuls below operate on shape [batch_size*block_size,hidden_size]
        h_candidate_part1=tf.matmul(tf.reshape(h_all,shape=(-1,self.dimension)), self.U) + tf.matmul(tf.reshape(w_all,shape=(-1,self.dimension)), self.V)+self.h_bias
        print("======>h_candidate_part1:",h_candidate_part1) #(160, 100)
        h_candidate_part1=tf.reshape(h_candidate_part1,shape=(self.batch_size,self.block_size,self.dimension)) #[batch_size,block_size,hidden_size]
        h_candidate_part2=tf.expand_dims(tf.matmul(s_t,self.W)+self.h2_bias,axis=1)              #shape:[batch_size,1,hidden_size]
        h_candidate=self.activation(h_candidate_part1+h_candidate_part2,scope="h_candidate"+str(i))   #shape:[batch_size,block_size,hidden_size]

        # 3.update hidden state
        h_all=h_all+tf.multiply(g,h_candidate) #shape:[batch_size,block_size,hidden_size]

        # 4.normalized hidden state
        h_all=tf.nn.l2_normalize(h_all,-1) #shape:[batch_size,block_size,hidden_size]
        return h_all  #shape:[batch_size,block_size,hidden_size]
Developer: AmjadHisham, Project: text_classification, Lines: 26, Source: a3_entity_network.py

Example 8: fast_rcnn_bbox_loss

def fast_rcnn_bbox_loss(fast_rcnn_bbox_pred, bbox_targets, roi_inside_weights, roi_outside_weights):
    '''
    Calculate the fast RCNN bounding box refinement loss. Measures how well 
    the fast RCNN is able to refine localization.

    lam/N_reg * sum_i(p_i^* * L_reg(t_i,t_i^*))

    lam: classification vs bbox loss balance parameter     
    N_reg: Number of anchor locations (~2500)
    p_i^*: ground truth label for anchor (loss only for positive anchors)
    L_reg: smoothL1 loss
    t_i: Parameterized prediction of bounding box
    t_i^*: Parameterized ground truth of closest bounding box
    
    TODO: rpn_inside_weights likely deprecated; might consider obliterating
    '''  
    with tf.variable_scope('fast_rcnn_bbox_loss'):
        # How far off was the prediction?
        diff = tf.multiply(roi_inside_weights, fast_rcnn_bbox_pred - bbox_targets)
        diff_sL1 = smoothL1(diff, 1.0)
        
        # Only count loss for positive anchors
        roi_bbox_reg = tf.reduce_mean(tf.reduce_sum(tf.multiply(roi_outside_weights, diff_sL1), reduction_indices=[1]))
    
        # Constant for weighting bounding box loss with classification loss
        roi_bbox_reg = cfg.TRAIN.FRCNN_BBOX_LAMBDA * roi_bbox_reg
        
    return roi_bbox_reg
Developer: zymale, Project: tf-Faster-RCNN, Lines: 28, Source: loss_functions.py
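
This function (and rpn_bbox_loss in Example 11) calls a smoothL1 helper that is not shown on this page. As an assumption about what the repo defines, here is a minimal sketch of the standard Faster R-CNN smooth L1 with a sigma parameter, matching the call sites above:

import tensorflow as tf

def smoothL1(x, sigma):
    # Elementwise smooth L1 (Huber-style) as used in Faster R-CNN:
    #   0.5 * (sigma * x)^2     if |x| < 1 / sigma^2
    #   |x| - 0.5 / sigma^2     otherwise
    sigma2 = sigma * sigma
    below = tf.cast(tf.less(tf.abs(x), 1.0 / sigma2), tf.float32)
    return below * 0.5 * sigma2 * tf.square(x) \
         + (1.0 - below) * (tf.abs(x) - 0.5 / sigma2)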

Example 9: _combine_feature

        def _combine_feature(net, feature, op=None):
            if op == "conv":
                options['stride']=[1,1]
                options['avg_pool']=[1,1]
                layers = int(args[1])
                feature = self.layer_conv(feature, [layers], options)

            if op == "linear":
                feature = self.layer_linear(feature, [args[1]], options)
                feature = self.layer_reshape(feature, [args[2]], options)

            if op == 'gru':
                tanh = tf.tanh
                #tanh = self.ops.prelu()
               # tanh = self.ops.double_sided(default_activation=tanh)
                sigmoid = tf.sigmoid
               # sigmoid = self.ops.double_sided(default_activation=sigmoid)
                def _conv(_net,name, scale=1):
                    _options = dict(options)
                    _options['activation']=None
                    _options['name']=self.ops.description+name
                    return self.layer_conv(_net, [int(args[1])//scale], _options)
                z = sigmoid(_conv(net,'z',scale=2))
                r = tf.sigmoid(_conv(net,'r',scale=2))
                th = _conv(net,'net',scale=2)
                fh = _conv(feature,'feature',scale=2)
                h = tanh(th + fh  * r)
                net = tf.multiply( (1-z), h) + tf.multiply(feature, z)

            if 'only' in options:
                return net
            if feature is not None:
                net = tf.concat([net, feature], axis=len(self.ops.shape(net))-1)
            return net
Developer: 255BITS, Project: hyperchamber-gan, Lines: 34, Source: configurable_component.py

Example 10: __call__

  def __call__(self, inputs, state, scope=None):
    with _checked_scope(self, scope or "rwa_cell", reuse=self._reuse):
      h, n, d, a_max = state

      with vs.variable_scope("u"):
        u = _linear(inputs, self._num_units, True)

      with vs.variable_scope("g"):
        g = _linear([inputs, h], self._num_units, True)

      with vs.variable_scope("a"):
        a = _linear([inputs, h], self._num_units, False) # The bias term when factored out of the numerator and denominator cancels and is unnecessary

      z = tf.multiply(u, tanh(g))

      a_newmax = tf.maximum(a_max, a)
      exp_diff = tf.exp(a_max - a_newmax)
      exp_scaled = tf.exp(a - a_newmax)

      n = tf.multiply(n, exp_diff) + tf.multiply(z, exp_scaled)  # Numerically stable update of numerator
      d = tf.multiply(d, exp_diff) + exp_scaled  # Numerically stable update of denominator
      h_new = self._activation(tf.div(n, d))

      new_state = RWACellTuple(h_new, n, d, a_newmax)

    return h_new, new_state
Developer: indiejoseph, Project: chinese-char-rnn, Lines: 26, Source: rwa_cell.py
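
The a_max bookkeeping above is the standard running-max trick for numerically stable weighted averaging: rather than accumulating sum(z·exp(a)) / sum(exp(a)) directly (where exp(a) easily overflows), both numerator and denominator are rescaled by exp(old_max - new_max) at every step. A plain NumPy illustration of the same recurrence (scores invented):

import numpy as np

a = np.array([100., 1000., 10000.])  # scores for which np.exp(a) overflows
z = np.array([1., 2., 3.])

n, d, a_max = 0.0, 0.0, -np.inf
for a_t, z_t in zip(a, z):
    a_new = max(a_max, a_t)
    n = n * np.exp(a_max - a_new) + z_t * np.exp(a_t - a_new)
    d = d * np.exp(a_max - a_new) + np.exp(a_t - a_new)
    a_max = a_new

print(n / d)  # 3.0 -- dominated by the largest-scored term, with no overflow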

Example 11: rpn_bbox_loss

def rpn_bbox_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_inside_weights, rpn_outside_weights):
    '''
    Calculate the Region Proposal Network bounding box loss. Measures how well 
    the RPN is able to propose regions by the performance of its localization.

    lam/N_reg * sum_i(p_i^* * L_reg(t_i,t_i^*))

    lam: classification vs bbox loss balance parameter     
    N_reg: Number of anchor locations (~2500)
    p_i^*: ground truth label for anchor (loss only for positive anchors)
    L_reg: smoothL1 loss
    t_i: Parameterized prediction of bounding box
    t_i^*: Parameterized ground truth of closest bounding box
    '''    
    with tf.variable_scope('rpn_bbox_loss'):
        # Transposing
        rpn_bbox_targets = tf.transpose(rpn_bbox_targets, [0,2,3,1])
        rpn_inside_weights = tf.transpose(rpn_inside_weights, [0,2,3,1])
        rpn_outside_weights = tf.transpose(rpn_outside_weights, [0,2,3,1])
        
        # How far off was the prediction?
        diff = tf.multiply(rpn_inside_weights, rpn_bbox_pred - rpn_bbox_targets)
        diff_sL1 = smoothL1(diff, 3.0)
        
        # Only count loss for positive anchors. Make sure it's a sum.
        rpn_bbox_reg = tf.reduce_sum(tf.multiply(rpn_outside_weights, diff_sL1))
    
        # Constant for weighting bounding box loss with classification loss
        rpn_bbox_reg = cfg.TRAIN.RPN_BBOX_LAMBDA * rpn_bbox_reg
    
    return rpn_bbox_reg    
Developer: zymale, Project: tf-Faster-RCNN, Lines: 31, Source: loss_functions.py

Example 12: psnr

def psnr(prediction, ground_truth, maxp=None, name='psnr'):
    """`Peek Signal to Noise Ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.

    .. math::

        PSNR = 20 \cdot \log_{10}(MAX_p) - 10 \cdot \log_{10}(MSE)

    Args:
        prediction: a :class:`tf.Tensor` representing the prediction signal.
        ground_truth: another :class:`tf.Tensor` with the same shape.
        maxp: maximum possible pixel value of the image (e.g. 255 for 8-bit images). If None, only the -10*log10(MSE) term is returned.

    Returns:
        A scalar tensor representing the PSNR.
    """

    if maxp is not None:  # guard: float(None) would raise before the None branch below
        maxp = float(maxp)

    def log10(x):
        with tf.name_scope("log10"):
            numerator = tf.log(x)
            denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
            return numerator / denominator

    mse = tf.reduce_mean(tf.square(prediction - ground_truth))
    if maxp is None:
        psnr = tf.multiply(log10(mse), -10., name=name)
    else:
        psnr = tf.multiply(log10(mse), -10.)
        psnr = tf.add(tf.multiply(20., log10(maxp)), psnr, name=name)

    return psnr
Developer: mahmoudovic, Project: MATRIXAI, Lines: 32, Source: symbolic_functions.py
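
A usage sketch with hand-checkable numbers (values invented): the MSE below is (25 + 25 + 0 + 0) / 4 = 12.5, so PSNR = 20*log10(255) - 10*log10(12.5) ≈ 37.16 dB:

import tensorflow as tf

prediction = tf.constant([[250., 0.], [128., 128.]])
ground_truth = tf.constant([[255., 5.], [128., 128.]])

with tf.Session() as sess:
    print(sess.run(psnr(prediction, ground_truth, maxp=255)))  # ~37.16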

Example 13: add_dyprune

def add_dyprune(weights):
    crate = config.crate[weights.name[:-2]] # per-layer compression-rate hyperparameter c
    prune_mask = tf.Variable(tf.ones_like(weights),name=weights.name[:-2]+'mask', trainable=False)

    # calculate mask: mean and std of the currently unpruned (masked) weights
    mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights),prune_mask)),tf.reduce_sum(prune_mask))
    var = tf.multiply(weights,prune_mask)
    var = tf.square(var)
    mean_q = tf.square(mean)*tf.reduce_sum(prune_mask)
    var = tf.reduce_sum(var) - mean_q
    var = tf.divide(var,tf.reduce_sum(prune_mask))
    var = tf.sqrt(var) # std of the masked weights: sqrt(E[w^2] - E[w]^2)
    t1_lower = (mean+var*crate)*0.25 # lower pruning threshold (hyperparameter a)
    t1_upper = (mean+var*crate)*0.45 # upper threshold (hyperparameter b)
    
    indicator_lower1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_lower)    
    indicator_upper1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_upper)
    indicator_matrix1 = tf.greater_equal(prune_mask, tf.zeros_like(weights))
    indicator_matrix1 = tf.logical_and(indicator_matrix1,indicator_lower1)
    indicator_matrix1 = tf.logical_or(indicator_matrix1,indicator_upper1)
    indicator_matrix1 = tf.to_float(indicator_matrix1)
    update = prune_mask.assign(indicator_matrix1)

    prune_fc = tf.multiply(weights, prune_mask)
    return prune_fc
Developer: Ewenwan, Project: Project, Lines: 25, Source: densenetfinalDNS.py

Example 14: streaming_f1

    def streaming_f1(self, labels, predictions, n_classes, weights=None, type='macro'):
        labels_and_predictions_by_class = [(tf.equal(labels, c), tf.equal(predictions, c)) for c in range(0, n_classes)]
        tp_by_class_val, tp_by_class_update_op = zip(*[tf.metrics.true_positives(label, prediction, weights=weights)
                                                       for label, prediction in labels_and_predictions_by_class])
        fn_by_class_val, fn_by_class_update_op = zip(*[tf.metrics.false_negatives(label, prediction, weights=weights)
                                                       for label, prediction in labels_and_predictions_by_class])
        fp_by_class_val, fp_by_class_update_op = zip(*[tf.metrics.false_positives(label, prediction, weights=weights)
                                                       for label, prediction in labels_and_predictions_by_class])

        f1_update_op = tf.group(*chain(tp_by_class_update_op, fn_by_class_update_op, fp_by_class_update_op))

        if type == 'macro':
            epsilon = [10e-6 for _ in range(n_classes)]
            f1_val = tf.multiply(2., tp_by_class_val) / (tf.reduce_sum([tf.multiply(2., tp_by_class_val),
                                                                        fp_by_class_val, fn_by_class_val, epsilon],
                                                                       axis=0))
            f1_val = tf.reduce_mean(f1_val)
        else:
            epsilon = 10e-6
            total_tp = tf.reduce_sum(tp_by_class_val)
            total_fn = tf.reduce_sum(fn_by_class_val)
            total_fp = tf.reduce_sum(fp_by_class_val)

            f1_val = tf.squeeze(tf.multiply(2., total_tp) / (tf.multiply(2., total_tp) +
                                                             total_fp + total_fn + epsilon,
                                                             ))
        return f1_val, f1_update_op
Developer: Aurora11111, Project: CloudML-Serving, Lines: 27, Source: fast_text_model.py
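
A hypothetical evaluation loop (model, eval_batches, and the placeholders are invented names): as with tf.metrics, the update op is run once per batch to accumulate the confusion counts, and the value tensor is read at the end:

import tensorflow as tf

labels_ph = tf.placeholder(tf.int64, [None])
preds_ph = tf.placeholder(tf.int64, [None])
f1_val, f1_update_op = model.streaming_f1(labels_ph, preds_ph, n_classes=3)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # tf.metrics counters are local variables
    for batch_labels, batch_preds in eval_batches:  # hypothetical data source
        sess.run(f1_update_op, {labels_ph: batch_labels, preds_ph: batch_preds})
    print(sess.run(f1_val))  # macro-averaged F1 over all batches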

Example 15: thresholding

def thresholding(inputs):
    # find the mean for each example in the batch
    mean_output = tf.reduce_mean(inputs, axis=1)

    # scale each mean based on a factor
    threshold_scalar = tf.Variable(utils.threshold_scalar, tf.float32)
    scaled_mean = tf.scalar_mul(threshold_scalar, mean_output)
    scaled_mean = tf.reshape(scaled_mean, [utils.batch_size])

    # set up per-example lower/upper bounds for the threshold
    min_thresh_for_max = tf.fill([utils.batch_size], 0.05)
    max_thresh_for_min = tf.fill([utils.batch_size], 0.15)   #0.4
    thresholds = tf.maximum(min_thresh_for_max, scaled_mean)
    thresholds = tf.minimum(max_thresh_for_min, thresholds)

    # zero values under the thresholds using bitmask
    thresholds = tf.reshape(thresholds, [128, 1, 1])  # note: hardcodes batch size 128 (cf. utils.batch_size above)

    threshold_mask = tf.cast(tf.greater(inputs, thresholds), tf.float32)
    thresholded_input = tf.multiply(inputs, threshold_mask)

    # peak picking
    # select beats by x[i-1] < x[i] > x[i+1] (local maximum)
    x_minus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=-1, axis=1)), tf.float32)
    x_plus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=1, axis=1)), tf.float32)
    output = tf.multiply(x_minus_1, x_plus_1)


    return output
Developer: nearlyeveryone, Project: bpm, Lines: 29, Source: bpm_estimator.py
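
The peak-picking step marks positions that exceed both neighbors (with tf.manip.roll wrapping around at the edges). A NumPy version of the same trick (signal invented):

import numpy as np

x = np.array([0.0, 0.3, 0.1, 0.0, 0.5, 0.2])
left  = x > np.roll(x, -1)  # greater than the next element
right = x > np.roll(x, 1)   # greater than the previous element
print((left & right).astype(float))  # [0. 1. 0. 0. 1. 0.]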


Note: The tensorflow.multiply examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects; copyright remains with their original authors, and any use or redistribution is subject to each project's license. Please do not republish without permission.