

Python tensorflow.sub Function Code Examples

This article collects typical usage examples of the tensorflow.sub function in Python. If you are wondering what sub does, how to call it, and what real code that uses it looks like, the curated examples below should help.
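Before the examples, a quick orientation (a minimal sketch, not taken from any of the projects below): tf.sub performs element-wise subtraction with the usual broadcasting rules. Note that tf.sub belongs to the pre-1.0 TensorFlow API; in TensorFlow 1.0 it was renamed tf.subtract (likewise tf.mul became tf.multiply and tf.neg became tf.negative), so all examples on this page assume an older TensorFlow release.

import tensorflow as tf  # requires TensorFlow < 1.0, where tf.sub still exists

a = tf.constant([3.0, 5.0])
b = tf.constant([1.0, 2.0])
diff = tf.sub(a, b)  # element-wise: [3-1, 5-2]

with tf.Session() as sess:
    print(sess.run(diff))  # -> [ 2.  3.]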


The following shows 15 code examples of the sub function, sorted by popularity by default.

Example 1: IoU

def IoU(bbox, gt):

    # bbox = [x, y, w, h], with (x, y) the top-left corner
    shape = [-1, 1]

    x1 = tf.maximum(tf.cast(bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 0], tf.float32), shape))
    y1 = tf.maximum(tf.cast(bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 1], tf.float32), shape))
    x2 = tf.minimum(tf.cast(bbox[2] + bbox[0], tf.float32), tf.reshape(tf.cast(gt[:, 2] + gt[:, 0], tf.float32), shape))
    y2 = tf.minimum(tf.cast(bbox[3] + bbox[1], tf.float32), tf.reshape(tf.cast(gt[:, 3] + gt[:, 1], tf.float32), shape))

    # Clamp the intersection extents at 0 so that non-overlapping boxes yield
    # an IoU of 0 (two negative extents would otherwise multiply into a
    # spurious positive intersection area)
    inter_w = tf.maximum(tf.sub(x2, x1), 0.0)
    inter_h = tf.maximum(tf.sub(y2, y1), 0.0)

    inter = tf.cast(inter_w * inter_h, tf.float32)

    bounding_box = tf.cast(tf.mul(bbox[2], bbox[3]), tf.float32)
    ground_truth = tf.reshape(tf.cast(tf.mul(gt[:, 2], gt[:, 3]), tf.float32), shape)

    # IoU = intersection / union; with the clamp above this lies in [0, 1]
    iou = inter / (bounding_box + ground_truth - inter)

    return iou
Developer: Johannes-brahms, Project: Yolo, Lines: 35, Source: utils.py
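A hypothetical usage sketch for the function above (the boxes are my own illustration, not from the Yolo project): IoU broadcasts one predicted box against a batch of ground-truth boxes and returns an [N, 1] column of IoU values.

bbox = tf.constant([10.0, 10.0, 20.0, 20.0])   # one predicted box: x, y, w, h
gt = tf.constant([[10.0, 10.0, 20.0, 20.0],    # identical box   -> IoU 1.0
                  [15.0, 15.0, 20.0, 20.0],    # partial overlap -> IoU ~0.39
                  [100.0, 100.0, 5.0, 5.0]])   # no overlap      -> IoU 0.0

with tf.Session() as sess:
    print(sess.run(IoU(bbox, gt)))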

Example 2: __init__

    def __init__(self, num_features, num_output, l2_reg_lambda=0.0, neg_output=False):
        self.input_x = tf.placeholder(tf.float32, [None, num_features], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_output], name="input_y")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        with tf.name_scope("softmax"):
            filter_shape = [num_features, num_output]
            W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[num_output]))

            self.raw_scores = tf.nn.xw_plus_b(self.input_x, W, b, name="scores")
            if neg_output:
                # ELU allows negative outputs
                self.scores = tf.nn.elu(self.raw_scores, name="elu")
            else:
                # ReLU restricts outputs to be non-negative
                self.scores = tf.nn.relu(self.raw_scores, name="relu")


            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)

        with tf.name_scope("loss"):
            self.losses = tf.square(tf.sub(self.scores, self.input_y))
            self.avgloss = tf.reduce_mean(tf.abs(tf.sub(self.scores, self.input_y)))
            self.loss = tf.reduce_mean(self.losses) + l2_reg_lambda * l2_loss
Developer: bgshin, Project: cnntweets, Lines: 27, Source: regression.py

Example 3: lossFunction

def lossFunction(logits, labels, scale_factor):
    print "TrackNet:  building loss function..."
    logit_trans, logit_rot = tf.split(1, 2, logits)
    label_trans, label_rot = tf.split(1, 2, labels)
    trans_loss = tf.nn.l2_loss(tf.sub(logit_trans, label_trans))
    # rotation loss, scaled relative to the translation loss
    rot_loss = tf.mul(scale_factor, tf.nn.l2_loss(tf.sub(logit_rot, label_rot)))
    return tf.add(trans_loss, rot_loss)
Developer: qenops, Project: RGBDAugmentedReality, Lines: 7, Source: trackNet.py

Example 4: r_loss

def r_loss(communities=2, group_size=10, seed=None, p=0.4, q=0.05, r=1.0, projection_dim=2):
    """Tests whether the loss decreases when backpropagating through a very simple function."""
    B = np.asarray(balanced_stochastic_blockmodel(communities, group_size, p, q, seed)).astype(np.double)
    B = tf.cast(B, tf.float64)
    Diag = tf.diag(tf.reduce_sum(B, 0))
    Diag = tf.cast(Diag, tf.float64)

    r = tf.cast(r, tf.float64)

    # Bethe Hessian: BH(r) = (r^2 - 1) I - r B + D
    BH = (tf.square(r) - 1) * tf.diag(tf.ones(shape=[communities * group_size], dtype=tf.float64)) - tf.mul(r, B) + Diag

    with tf.Session() as sess:
        eigenval, eigenvec = tf.self_adjoint_eig(BH)
        eigenvec_proj = tf.slice(eigenvec, [0, 0], [communities * group_size, projection_dim])

        true_assignment_a = tf.concat(0, [-1 * tf.ones([group_size], dtype=tf.float64),
                                          tf.ones([group_size], dtype=tf.float64)])
        true_assignment_b = -1 * true_assignment_a
        true_assignment_a = tf.expand_dims(true_assignment_a, 1)
        true_assignment_b = tf.expand_dims(true_assignment_b, 1)

        # Project each candidate assignment onto the eigenvector subspace
        projected_a = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_a)
        projected_b = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_b)

        # Take the smaller of the two reconstruction errors, since the
        # community labeling is only defined up to a global sign flip
        loss = tf.minimum(tf.reduce_sum(tf.square(tf.sub(projected_a, true_assignment_a))),
                          tf.reduce_sum(tf.square(tf.sub(projected_b, true_assignment_b))))

        d = sess.run(loss)
    return d
Developer: lishali, Project: clusternet, Lines: 32, Source: error_bars_loss.py

Example 5: convert_to_one

def convert_to_one(bbox, width, height, S):

    x, y, w, h = bbox

    x = tf.cast(x, tf.float32)
    y = tf.cast(y, tf.float32)
    w = tf.cast(w, tf.float32)
    h = tf.cast(h, tf.float32)

    global_center_x = tf.mul(tf.add(tf.mul(x, 2), w), 0.5)
    global_center_y = tf.mul(tf.add(tf.mul(y, 2), h), 0.5)

    w = tf.div(w, width)
    h = tf.div(h, height)

    cell_w = tf.cast(tf.div(tf.cast(width, tf.int32), S), tf.float32)
    cell_h = tf.cast(tf.div(tf.cast(height, tf.int32), S), tf.float32)

    cell_coord_x = tf.cast(tf.cast(tf.div(global_center_x, cell_w), tf.int32), tf.float32)
    cell_coord_y = tf.cast(tf.cast(tf.div(global_center_y, cell_h), tf.int32), tf.float32)

    offset_x = tf.div(tf.sub(global_center_x, tf.mul(cell_coord_x, cell_w)), cell_w)
    offset_y = tf.div(tf.sub(global_center_y, tf.mul(cell_coord_y, cell_h)), cell_h)

    assert offset_x.dtype == tf.float32 and \
            offset_y.dtype == tf.float32 and \
            w.dtype == tf.float32 and \
            h.dtype == tf.float32

    bbox = [offset_x, offset_y, w, h]

    return bbox
Developer: Johannes-brahms, Project: Yolo, Lines: 32, Source: yolo_utils.py
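A worked numeric check for convert_to_one (the image size, grid size, and box are illustrative assumptions): on a 448x448 image with an S = 7 grid, each cell is 64 px, so a box with center (125, 190) falls into cell (1, 2).

bbox = (100.0, 150.0, 50.0, 80.0)   # x, y, w, h in pixels; center = (125, 190)

with tf.Session() as sess:
    print(sess.run(convert_to_one(bbox, 448.0, 448.0, 7)))
    # -> approximately [0.953, 0.969, 0.112, 0.179]:
    #    offset_x = (125 - 1*64)/64, offset_y = (190 - 2*64)/64,
    #    w = 50/448, h = 80/448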

Example 6: _build_loss

    def _build_loss(self):

        with tf.variable_scope("loss"):

            # Compute the target y_j = r_j + (1 - terminal_j) * discount * best_qvalue
            self.tf_discount = tf.constant(self.discount)
            self.qtarget = tf.add(self.pl_rewards, tf.mul(1.0-self.pl_terminals, tf.mul(self.tf_discount, self.pl_qtargets)))

            # Select Q-values for given actions
            self.actions_one_hot = tf.one_hot(self.pl_actions, self.num_actions, 1.0, 0.0)
            self.qvalue_pred = tf.reduce_sum(tf.mul(self.qvalues, self.actions_one_hot), reduction_indices=1)

            # Difference between target and predicted Q-network output
            self.delta = tf.sub(self.qtarget, self.qvalue_pred)

            if self.clip_delta > 0:
                # Perform clipping of the error term, default clipping is to (-1, +1) range
                self.quadratic_part = tf.minimum(tf.abs(self.delta), tf.constant(self.clip_delta))
                self.linear_part    = tf.sub(tf.abs(self.delta), self.quadratic_part)
                self.delta_square   = tf.mul(tf.constant(0.5), tf.square(self.quadratic_part)) + (self.clip_delta*self.linear_part)
                #self.delta_clipped = tf.clip_by_value(self.delta, -1.0*self.clip_delta, self.clip_delta)
                #self.delta_square  = tf.square(self.delta_clipped)
            else:
                # No error clipping
                self.delta_square  = tf.square(self.delta)

        # Actual loss
        if self.batch_accumulator == "sum":
            self.loss = tf.reduce_sum(self.delta_square)
        else:
            self.loss = tf.reduce_mean(self.delta_square)

        # Running average of the loss for TensorBoard
        self.loss_moving_avg    = tf.train.ExponentialMovingAverage(decay=0.999)
        self.loss_moving_avg_op = self.loss_moving_avg.apply([self.loss])
Developer: tomrunia, Project: DeepReinforcementLearning-Atari, Lines: 35, Source: qnetwork.py
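The clipping branch above is a Huber-style loss: quadratic while |delta| <= clip_delta and linear beyond it, which keeps gradients bounded for large TD errors. A standalone numeric check (my own illustration, with clip_delta = 1):

delta = tf.constant([0.5, 2.5])
clip_delta = 1.0
quadratic_part = tf.minimum(tf.abs(delta), clip_delta)
linear_part = tf.sub(tf.abs(delta), quadratic_part)
delta_square = tf.mul(0.5, tf.square(quadratic_part)) + clip_delta * linear_part

with tf.Session() as sess:
    print(sess.run(delta_square))  # -> [0.125, 2.0]: 0.5*0.5^2 inside the range, 0.5 + 1*1.5 outside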

Example 7: __init__

    def __init__(self, inputX, C=None, hidden_dims=[300,150,300], lambda1=0.01, lambda2=0.01, activation='tanh', \
                weight_init='uniform', noise=None, learning_rate=0.1, optimizer='Adam'):

        self.noise = noise
        n_sample, n_feat = inputX.shape

        # M, the total number of layers once the output layer is appended
        # below, must be even; equivalently, the number of hidden layers
        # must be odd, so that there is a well-defined middle layer
        assert len(hidden_dims) % 2 == 1

        # Append the output layer (same dimensionality as the input, so the
        # network can reconstruct X)
        hidden_dims.append(n_feat)

        # self.depth = len(dims)

        # inputX is a concrete data array, not a TensorFlow symbolic variable
        self.inputX = inputX

        if C is None:
            # Transpose the matrix first, and get the whole matrix of C
            self.inputC = sparseCoefRecovery(inputX.T)
        else:
            self.inputC = C

        self.C = tf.placeholder(dtype=tf.float32, shape=[None, None], name='C')

        self.hidden_layers = []
        self.X = self._add_noise(tf.placeholder(dtype=tf.float32, shape=[None, n_feat], name='X'))

        input_hidden = self.X
        weights, biases = init_layer_weight(hidden_dims, inputX, weight_init)

        # J3 regularization term
        J3_list = []
        for init_w, init_b in zip(weights, biases):
            self.hidden_layers.append(DenseLayer(input_hidden, init_w, init_b, activation=activation))
            input_hidden = self.hidden_layers[-1].output
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].w)))
            J3_list.append(tf.reduce_mean(tf.square(self.hidden_layers[-1].b)))

        J3 = lambda2 * tf.add_n(J3_list)

        self.H_M = self.hidden_layers[-1].output
        # H(M/2): the output of the middle layer
        self.H_M_2 = self.hidden_layers[(len(hidden_dims)-1)/2].output

        # calculate loss J1
        # J1 = tf.nn.l2_loss(tf.sub(self.X, self.H_M))

        J1 = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(self.X, self.H_M))))

        # calculate loss J2
        J2 = lambda1 * tf.sqrt(tf.reduce_mean(tf.square(tf.sub(tf.transpose(self.H_M_2), \
                                     tf.matmul(tf.transpose(self.H_M_2), self.C)))))

        self.cost = J1 + J2 + J3

        self.optimizer = optimize(self.cost, learning_rate, optimizer)
Developer: tonyabracadabra, Project: Deep-Subspace-Clustering, Lines: 57, Source: dsc.py

Example 8: metric_single

def metric_single(training, test, scale_frac, scales):
    """Calculates the distance between a training and test instance."""
    if scale_frac == 0:
        distance = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(training, test)), reduction_indices=1, keep_dims=True))
    else:
        distance = tf.sqrt(
            tf.reduce_sum(tf.square(tf.div(tf.sub(training, test), scales)), reduction_indices=1, keep_dims=True)
        )
    return distance
Developer: AidanGG, Project: tensorflow_tmva, Lines: 9, Source: knn.py

Example 9: binary_cross_entropy

def binary_cross_entropy(prediction, target):
    """
    let o=prediction, t=target
    -(t*log(o) + (1-t)*log(1-o))
    
    Adds a small (1e-12) value to the logarithms to avoid log(0)
    """
    op1 = tf.mul(target, tf.log(prediction + 1e-12))
    op2 = tf.mul(tf.sub(1., target), tf.log(tf.sub(1., prediction) + 1e-12))
    return tf.neg(tf.add(op1, op2))
Developer: CellProfiling, Project: AutomaticProteinLocalization, Lines: 10, Source: tensordnn.py
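A hypothetical usage sketch: binary_cross_entropy returns the element-wise loss, so a scalar training objective still needs a reduction on top.

prediction = tf.constant([0.9, 0.1])
target = tf.constant([1.0, 0.0])
loss = tf.reduce_mean(binary_cross_entropy(prediction, target))

with tf.Session() as sess:
    print(sess.run(loss))  # -> ~0.105, i.e. -log(0.9) for both elements, averaged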

Example 10: comU

def comU(a, b, tag=2):

    fea = []
    fea.append(cosine_distance(a, b))
    # Euclidean distance
    fea.append(tf.sqrt(tf.reduce_sum(tf.square(tf.sub(a, b)), axis=1)))
    if tag == 2:
        # Chebyshev (max absolute difference) distance
        fea.append(tf.reduce_max(tf.abs(tf.sub(a, b)), axis=1))
    return tf.pack(fea, axis=1)
Developer: QuickyFinger, Project: Attention-Based-Multi-Perspective-Convolutional-Neural-Networks-for-Textual-Similarity-Measurement, Lines: 10, Source: train.py

Example 11: norm

def norm(name, input_layer):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    scale = tf.Variable(tf.random_uniform([1]), name="scale")  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([1]), name="offset")
    return tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
Developer: fgeorg, Project: texture-networks, Lines: 11, Source: texture_network.py

Example 12: tf_2d_normal

def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
    # Eqs. 24 and 25 of http://arxiv.org/abs/1308.0850 (Graves, 2013):
    # density of a bivariate Gaussian with means (mu1, mu2),
    # standard deviations (s1, s2) and correlation rho
    norm1 = tf.sub(x1, mu1)
    norm2 = tf.sub(x2, mu2)
    s1s2 = tf.mul(s1, s2)
    z = tf.square(tf.div(norm1, s1)) + tf.square(tf.div(norm2, s2)) - 2 * tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
    negRho = 1 - tf.square(rho)
    result = tf.exp(tf.div(-z, 2 * negRho))
    denom = 2 * np.pi * tf.mul(s1s2, tf.sqrt(negRho))
    result = tf.div(result, denom)
    return result
Developer: DanialBahrami, Project: write-rnn-tensorflow, Lines: 11, Source: model.py

Example 13: tf_2d_normal

def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
  # Inspired by hardmaru's implementation on GitHub (see Example 12)
  norm1 = tf.sub(x1, mu1)
  norm2 = tf.sub(x2, mu2)
  s1s2 = tf.mul(s1, s2)
  z = tf.square(tf.div(norm1, s1))+tf.square(tf.div(norm2, s2))-2*tf.div(tf.mul(rho, tf.mul(norm1, norm2)), s1s2)
  negRho = 1-tf.square(rho)
  result = tf.exp(tf.div(-z,2*negRho))
  denom = 2*np.pi*tf.mul(s1s2, tf.sqrt(negRho))
  result = tf.div(result, denom)
  return result
Developer: RobRomijnders, Project: attention, Lines: 11, Source: attention_main_gauss.py
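A sanity check that applies to both versions of tf_2d_normal (my own illustration): at the mean the exponent z is 0, so the density reduces to the bivariate Gaussian peak 1 / (2*pi*s1*s2*sqrt(1 - rho^2)).

with tf.Session() as sess:
    peak = tf_2d_normal(0., 0., 0., 0., 1., 1., 0.)
    print(sess.run(peak))  # -> ~0.159 = 1/(2*pi) for unit sigmas and rho = 0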

Example 14: spatial_batch_norm

def spatial_batch_norm(input_layer, name='spatial_batch_norm'):
    """
    Batch-normalizes the layer as in http://arxiv.org/abs/1502.03167
    This is important since it allows the different scales to talk to each other when they get joined.
    """
    mean, variance = tf.nn.moments(input_layer, [0, 1, 2])
    variance_epsilon = 0.01  # TODO: Check what this value should be
    inv = tf.rsqrt(variance + variance_epsilon)
    num_channels = input_layer.get_shape().as_list()[3]  # TODO: Clean this up
    scale = tf.Variable(tf.random_uniform([num_channels]), name='scale')  # TODO: How should these initialize?
    offset = tf.Variable(tf.random_uniform([num_channels]), name='offset')
    return_val = tf.sub(tf.mul(tf.mul(scale, inv), tf.sub(input_layer, mean)), offset, name=name)
    return return_val
Developer: ProofByConstruction, Project: texture-networks, Lines: 13, Source: network_helpers.py

Example 15: loss_with_step

    def loss_with_step(self):
        margin = 5.0
        labels_t = self.y_
        labels_f = tf.sub(1.0, self.y_, name="1-yi")          # labels_f = 1 - labels
        eucd2 = tf.pow(tf.sub(self.o1, self.o2), 2)
        eucd2 = tf.reduce_sum(eucd2, 1)
        eucd = tf.sqrt(eucd2 + 1e-6, name="eucd")
        C = tf.constant(margin, name="C")
        # Similar pairs (y_ = 1) are penalized by their distance; dissimilar
        # pairs (y_ = 0) are penalized only when closer than the margin C
        pos = tf.mul(labels_t, eucd, name="y_x_eucd")
        neg = tf.mul(labels_f, tf.maximum(0.0, tf.sub(C, eucd)), name="Ny_C-eucd")
        losses = tf.add(pos, neg, name="losses")
        loss = tf.reduce_mean(losses, name="loss")
        return loss
Developer: koosyong, Project: siamese_tf_mnist, Lines: 13, Source: inference.py


Note: The tensorflow.sub examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or distribution should follow the corresponding project's license. Do not reproduce without permission.