

Python tensor.and_ function code examples

This article collects typical usage examples of the Python function theano.tensor.and_. If you are wondering what and_ does, how to call it, or what real uses of and_ look like, the hand-picked code examples below should help.


Below are 15 code examples of the and_ function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
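Before the examples, a minimal self-contained sketch of what T.and_ does may help (the variable names and values are illustrative only): it is an elementwise AND, which behaves as a logical AND when both inputs are 0/1 integer tensors, as in most of the examples below.

import numpy as np
import theano
import theano.tensor as T

a = T.ivector('a')   # e.g. a 0/1 indicator of "label == k"
b = T.ivector('b')   # e.g. a 0/1 indicator of "prediction == k"
both = T.and_(a, b)  # 1 where both indicators are 1, else 0

f = theano.function([a, b], [both, both.sum()])
print(f(np.array([1, 0, 1, 0], dtype='int32'),
        np.array([1, 1, 0, 0], dtype='int32')))  # elementwise AND is [1, 0, 0, 0]; the sum is 1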

Example 1: getRpRnTpTnForTrain0OrVal1

    def getRpRnTpTnForTrain0OrVal1(self, y, training0OrValidation1):
        # The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
        # Order in the list is the natural order of the classes (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
        # param y: y = T.itensor4('y'). Dimensions [batchSize, r, c, z]
        
        yPredToUse = self.y_pred_train if training0OrValidation1 == 0 else self.y_pred_val
        checkDimsOfYpredAndYEqual(y, yPredToUse, "training" if training0OrValidation1 == 0 else "validation")
        
        returnedListWithNumberOfRpRnTpTnForEachClass = []
        
        for class_i in xrange(0, self._numberOfOutputClasses) :
            #Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            tensorOneAtRealPos = T.eq(y, class_i)
            tensorOneAtRealNeg = T.neq(y, class_i)

            tensorOneAtPredictedPos = T.eq(yPredToUse, class_i)
            tensorOneAtPredictedNeg = T.neq(yPredToUse, class_i)
            tensorOneAtTruePos = T.and_(tensorOneAtRealPos, tensorOneAtPredictedPos)
            tensorOneAtTrueNeg = T.and_(tensorOneAtRealNeg, tensorOneAtPredictedNeg)
                    
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtRealPos) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtRealNeg) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtTruePos) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtTrueNeg) )
            
        return returnedListWithNumberOfRpRnTpTnForEachClass
Developer: alonshmilo, Project: MedicalData_jce, Lines of code: 26, Source file: cnnLayerTypes.py

Example 2: ber

 def ber(self, y):
     tp = T.and_(T.eq(y, 1), T.eq(self.y_pred, 1)).sum()
     tn = T.and_(T.eq(y, 0), T.eq(self.y_pred, 0)).sum()
     fp = T.and_(T.eq(y, 0), T.eq(self.y_pred, 1)).sum()
     fn = T.and_(T.eq(y, 1), T.eq(self.y_pred, 0)).sum()
     ber = 0.5 * (T.true_div(fp, tp + fp) + T.true_div(fn, tn + fn))
     return ber
Developer: IraKorshunova, Project: CNN, Lines of code: 7, Source file: logreg_layer.py
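The same tp/tn/fp/fn counting pattern can be sketched outside a layer class; this is a minimal, hedged version in which y and y_pred are free symbolic vectors rather than attributes of self:

import theano
import theano.tensor as T

y = T.ivector('y')            # true labels in {0, 1}
y_pred = T.ivector('y_pred')  # predicted labels in {0, 1}

tp = T.and_(T.eq(y, 1), T.eq(y_pred, 1)).sum()  # true positives
tn = T.and_(T.eq(y, 0), T.eq(y_pred, 0)).sum()  # true negatives
fp = T.and_(T.eq(y, 0), T.eq(y_pred, 1)).sum()  # false positives
fn = T.and_(T.eq(y, 1), T.eq(y_pred, 0)).sum()  # false negatives

counts = theano.function([y, y_pred], [tp, tn, fp, fn])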

Example 3: multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1

    def multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1(self, y, training0OrValidation1):
        """
        The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
        Order in the list is the natural order of the classes (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
        """
        returnedListWithNumberOfRpRnPpPnForEachClass = []

        for class_i in xrange(0, self.numberOfOutputClasses) :
            # Number of Real Positives, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            vectorOneAtRealPositives = T.eq(y, class_i)
            vectorOneAtRealNegatives = T.neq(y, class_i)

            if training0OrValidation1 == 0 : # training
                yPredToUse = self.y_pred
            else : # validation
                yPredToUse = self.y_pred_inference

            vectorOneAtPredictedPositives = T.eq(yPredToUse, class_i)
            vectorOneAtPredictedNegatives = T.neq(yPredToUse, class_i)
            vectorOneAtTruePredictedPositives = T.and_(vectorOneAtRealPositives, vectorOneAtPredictedPositives)
            vectorOneAtTruePredictedNegatives = T.and_(vectorOneAtRealNegatives, vectorOneAtPredictedNegatives)

            returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtRealPositives) )
            returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtRealNegatives) )
            returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtTruePredictedPositives) )
            returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtTruePredictedNegatives) )

        return returnedListWithNumberOfRpRnPpPnForEachClass
Developer: pliu007, Project: deepmedic, Lines of code: 28, Source file: cnnLayerTypes.py

Example 4: one_run

def one_run(my_x, my_y, my_z,
            my_u, my_v, my_w,
            my_weight,
            my_heat, my_albedo, my_microns_per_shell):

    # move
    random = rng.uniform(low=0.00003, high=1.)
    t = -T.log(random)

    x_moved = my_x + my_u*t
    y_moved = my_y + my_v*t
    z_moved = my_z + my_w*t

    # absorb
    shell = T.cast(T.sqrt(T.sqr(x_moved) + T.sqr(y_moved) + T.sqr(z_moved))
                   * my_microns_per_shell, 'int32')
    shell = T.clip(shell, 0, SHELL_MAX-1)

    new_weight = my_weight * my_albedo

    # new direction
    xi1 = rng.uniform(low=-1., high=1.)
    xi2 = rng.uniform(low=-1., high=1.)
    xi_norm = T.sqrt(T.sqr(xi1) + T.sqr(xi2))

    t_xi = rng.uniform(low=0.000000001, high=1.)

    # rescale xi12 to fit t_xi as norm
    xi1 = xi1/xi_norm * T.sqr(t_xi)
    xi2 = xi2/xi_norm * T.sqr(t_xi)

    u_new_direction = 2. * t_xi - 1.
    v_new_direction = xi1 * T.sqrt((1. - T.sqr(u_new_direction)) / t_xi)
    w_new_direction = xi2 * T.sqrt((1. - T.sqr(u_new_direction)) / t_xi)

    # roulette
    weight_for_starting_roulette = 0.001
    CHANCE = 0.1
    partakes_roulette = T.switch(T.lt(new_weight, weight_for_starting_roulette),
                                 1,
                                 0)
    roulette = rng.uniform(low=0., high=1.)
    loses_roulette = T.gt(roulette, CHANCE)
    # if roulette decides to terminate the photon: set weight to 0
    weight_after_roulette = ifelse(T.and_(partakes_roulette, loses_roulette),
                                     0.,
                                     new_weight)
    # if partakes in roulette but does not get terminated
    weight_after_roulette = ifelse(T.and_(partakes_roulette, T.invert(loses_roulette)),
                                     weight_after_roulette / CHANCE,
                                     weight_after_roulette)

    new_heat = (1.0 - my_albedo) * my_weight
    heat_i = my_heat[shell]

    return (x_moved, y_moved, z_moved,\
           u_new_direction, v_new_direction, w_new_direction,\
           weight_after_roulette),\
           OrderedDict({my_heat: T.inc_subtensor(heat_i, new_heat)})
Developer: 151706061, Project: MITK, Lines of code: 59, Source file: monte_carlo_single_photon.py

Example 5: confusion_matrix

 def confusion_matrix(self, y):
     """
     Returns confusion matrix
     """
     tp = T.and_(T.eq(y, 1), T.eq(self.y_pred, 1)).sum()
     tn = T.and_(T.eq(y, 0), T.eq(self.y_pred, 0)).sum()
     fp = T.and_(T.eq(y, 0), T.eq(self.y_pred, 1)).sum()
     fn = T.and_(T.eq(y, 1), T.eq(self.y_pred, 0)).sum()
     return [tp, tn, fp, fn]
Developer: IraKorshunova, Project: DataMiningProject, Lines of code: 9, Source file: softmax_layer.py

Example 6: theano_digitize

def theano_digitize(x, bins):
    """
    Equivalent to numpy digitize.

    Parameters
    ----------
    x : Theano tensor or array_like
        The array or matrix to be digitized
    bins : array_like
        The bins with which x should be digitized

    Returns
    -------
    A Theano tensor
        The indices of the bins to which each value in input array belongs.
    """
    binned = T.zeros_like(x) + len(bins)
    for i in range(len(bins)):
        bin_edge = bins[i]
        if i == 0:
            binned = T.switch(T.lt(x, bin_edge), i, binned)
        else:
            ineq = T.and_(T.ge(x, bins[i - 1]), T.lt(x, bin_edge))
            binned = T.switch(ineq, i, binned)
    binned = T.switch(T.isnan(x), len(bins), binned)
    return binned
Developer: eglxiang, Project: xnn, Lines of code: 26, Source file: utils.py
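As a quick sanity check, theano_digitize can be compiled and compared against numpy.digitize. This is a hedged sketch assuming the theano_digitize definition above is in scope; the bin edges and input values are made up for illustration:

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
bins = np.array([0.0, 1.0, 2.0], dtype='float32')
digitize = theano.function([x], theano_digitize(x, bins))

values = np.array([-0.5, 0.3, 1.5, 2.5], dtype='float32')
print(digitize(values))            # bin indices 0, 1, 2, 3 (returned in a float tensor)
print(np.digitize(values, bins))   # [0 1 2 3]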

Example 7: errorReport

 def errorReport(self, y, n):
     # compute error rate by class
     # check if y has same dimension of y_pred
     if y.ndim != self.y_pred.ndim:
         raise TypeError('y should have the same shape as self.y_pred',
             ('y', y.type, 'y_pred', self.y_pred.type))
     # check if y is of the correct datatype
     if y.dtype.startswith('int'):
         c = numpy.zeros((self.n_out, self.n_out + 1), dtype=numpy.int64)
         counts = T.as_tensor_variable(c)
         classVector = numpy.zeros(n)
         for i in xrange(self.n_out):
             othersVector = numpy.zeros(n)
             for j in xrange(self.n_out):
                 counts = theano.tensor.basic.set_subtensor(
                     counts[i, j],
                     T.sum(T.and_(T.eq(self.y_pred, othersVector),
                                  T.eq(y, classVector))))
                 othersVector = othersVector + 1
             counts = theano.tensor.basic.set_subtensor(
                 counts[i, self.n_out],
                 T.sum(T.eq(y, classVector)))
             classVector = classVector + 1
         return counts
     else:
         raise NotImplementedError()
Developer: chagge, Project: DeepLearning, Lines of code: 26, Source file: logistic_sgd.py

Example 8: logp_loss3

	def logp_loss3(self, x, y, fake_label, neg_label, pos_ratio = 0.5): # adopt maxout for negatives
		# pos_ratio is the weight given to positive examples (0.5 means equal 1:1 weighting)

		print "adopt positives weight ............. " + str(pos_ratio)
		y = y.dimshuffle((1,0))
		inx = x.dimshuffle((1,0))
		fake_mask = T.neq(y, fake_label)
		y = y*fake_mask

		pos_mask = T.and_(fake_mask, T.le(y, neg_label-1))*pos_ratio
		neg_mask = T.ge(y, neg_label)*(1- pos_ratio)


		pos_score, neg_score = self.structure2(inx,False)
		maxneg = T.max(neg_score, axis = -1)

		scores = T.concatenate((pos_score, maxneg.dimshuffle((0,1,'x'))), axis = 2)

		d3shape = scores.shape

		#seq*batch , label
		scores = scores.reshape((d3shape[0]*d3shape[1],  d3shape[2]))
		pro = T.nnet.softmax(scores)

		_logp = T.nnet.categorical_crossentropy(pro, y.flatten())

		_logp = _logp.reshape(fake_mask.shape)

		loss = (T.sum(_logp*pos_mask)+ T.sum(_logp*neg_mask))/ (T.sum(pos_mask)+T.sum(neg_mask))
		pos_loss = T.sum(_logp*pos_mask)
		neg_loss = T.sum(_logp*neg_mask)


		return loss, pos_loss, neg_loss
Developer: mswellhao, Project: active_NER, Lines of code: 35, Source file: token_model.py

Example 9: asimov_errors

 def asimov_errors(self, y):
     # check if y has same dimension of y_pred
     if y.ndim != self.logRegressionLayer.y_pred.ndim:
         raise TypeError(
             'y should have the same shape as self.y_pred',
             ('y', y.type, 'y_pred', self.y_pred.type)
         )
     # check if y is of the correct datatype
     if y.dtype.startswith('int'):
         S = T.sum(T.eq(y,1))
         B = T.sum(T.eq(y,0))#*10000 # TODO: cross-section scaling
         s = T.sum(T.and_(T.eq(y,1),T.eq(self.logRegressionLayer.y_pred,1)))
         b = T.sum(T.and_(T.eq(y,0),T.eq(self.logRegressionLayer.y_pred,1)))#*10000 TODO: cross-section scaling
         return(S,B,s,b)
         # represents a mistake in prediction
     else:
         raise NotImplementedError()
Developer: jannickep, Project: HiggsChallenge, Lines of code: 17, Source file: mlp.py

Example 10: incomplete_beta

def incomplete_beta(a, b, value):
    '''Incomplete beta implementation
    Power series and continued fraction expansions chosen for best numerical
    convergence across the board based on inputs.
    '''
    machep = tt.constant(np.MachAr().eps, dtype='float64')
    one = tt.constant(1, dtype='float64')
    w = one - value

    ps = incomplete_beta_ps(a, b, value)

    flip = tt.gt(value, (a / (a + b)))
    aa, bb = a, b
    a = tt.switch(flip, bb, aa)
    b = tt.switch(flip, aa, bb)
    xc = tt.switch(flip, value, w)
    x = tt.switch(flip, w, value)

    tps = incomplete_beta_ps(a, b, x)
    tps = tt.switch(tt.le(tps, machep), one - machep, one - tps)

    # Choose which continued fraction expansion for best convergence.
    small = tt.lt(x * (a + b - 2.0) - (a - one), 0.0)
    cfe = incomplete_beta_cfe(a, b, x, small)
    w = tt.switch(small, cfe, cfe / xc)

    # Direct incomplete beta accounting for flipped a, b.
    t = tt.exp(
        a * tt.log(x) + b * tt.log(xc) +
        gammaln(a + b) - gammaln(a) - gammaln(b) +
        tt.log(w / a)
    )

    t = tt.switch(
        flip,
        tt.switch(tt.le(t, machep), one - machep, one - t),
        t
    )
    return tt.switch(
        tt.and_(flip, tt.and_(tt.le((b * x), one), tt.le(x, 0.95))),
        tps,
        tt.switch(
            tt.and_(tt.le(b * value, one), tt.le(value, 0.95)),
            ps,
            t))
Developer: alexander-belikov, Project: pymc3, Lines of code: 45, Source file: dist_math.py

Example 11: jaccard_similarity

def jaccard_similarity(y_true, y_predicted):
    """
    y_true: tensor ({1, 0})
    y_predicted: tensor ({1, 0})
    note - we round predicted because float probabilities would not work
    """
    y_predicted = T.round(y_predicted).astype(theano.config.floatX)
    either_nonzero = T.or_(T.neq(y_true, 0), T.neq(y_predicted, 0))
    return T.and_(T.neq(y_true, y_predicted), either_nonzero).sum(axis=-1, dtype=theano.config.floatX) / either_nonzero.sum(axis=-1, dtype=theano.config.floatX)
Developer: fdoperezi, Project: santander, Lines of code: 9, Source file: classification.py
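A hedged compile-and-evaluate sketch, assuming the jaccard_similarity definition above is in scope (the toy arrays are illustrative). For these inputs the rounded prediction is [1, 1, 0, 0], which disagrees with the target at 2 of the 3 positions where either vector is non-zero:

import numpy as np
import theano
import theano.tensor as T

y_true = T.matrix('y_true')
y_predicted = T.matrix('y_predicted')
score = theano.function([y_true, y_predicted],
                        jaccard_similarity(y_true, y_predicted))

a = np.array([[1, 0, 1, 0]], dtype=theano.config.floatX)
b = np.array([[0.9, 0.8, 0.1, 0.2]], dtype=theano.config.floatX)
print(score(a, b))  # -> approximately [0.6667]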

Example 12: dtw

 def dtw(i, q_p, b_p, Q, D, inf):
   i0 = T.eq(i, 0)
   # inf = T.cast(1e10,'float32') * T.cast(T.switch(T.eq(self.n,0), T.switch(T.eq(i,0), 0, 1), 1), 'float32')
   penalty = T.switch(T.and_(T.neg(n0), i0), big, T.constant(0.0, 'float32'))
   loop = T.constant(0.0, 'float32') + q_p
   forward = T.constant(0.0, 'float32') + T.switch(T.or_(n0, i0), 0, Q[i - 1])
   opt = T.stack([loop, forward])
   k_out = T.cast(T.argmin(opt, axis=0), 'int32')
   return opt[k_out, T.arange(opt.shape[1])] + D[i] + penalty, k_out
Developer: atuxhe, Project: returnn, Lines of code: 9, Source file: RecurrentTransform.py

Example 13: in_transit

    def in_transit(self, t, r=0.0, texp=None):
        """Get a list of timestamps that are in transit

        Args:
            t (vector): A vector of timestamps to be evaluated.
            r (Optional): The radii of the planets.
            texp (Optional[float]): The exposure time.

        Returns:
            The indices of the timestamps that are in transit.

        """

        z = tt.zeros_like(self.a)
        r = tt.as_tensor_variable(r) + z
        R = self.r_star + z

        # Wrap the times into time since transit
        hp = 0.5 * self.period
        dt = tt.mod(self._warp_times(t) - self.t0 + hp, self.period) - hp

        if self.ecc is None:
            # Equation 14 from Winn (2010)
            k = r / R
            arg = tt.square(1 + k) - tt.square(self.b)
            factor = R / (self.a * self.sin_incl)
            hdur = hp * tt.arcsin(factor * tt.sqrt(arg)) / np.pi
            t_start = -hdur
            t_end = hdur
            flag = z

        else:
            M_contact = self.contact_points_op(
                self.a, self.ecc, self.cos_omega, self.sin_omega,
                self.cos_incl + z, self.sin_incl + z, R + r)
            flag = M_contact[2]

            t_start = (M_contact[0] - self.M0) / self.n
            t_start = tt.mod(t_start + hp, self.period) - hp
            t_end = (M_contact[1] - self.M0) / self.n
            t_end = tt.mod(t_end + hp, self.period) - hp

            t_start = tt.switch(tt.gt(t_start, 0.0),
                                t_start - self.period, t_start)
            t_end = tt.switch(tt.lt(t_end, 0.0),
                              t_end + self.period, t_end)

        if texp is not None:
            t_start -= 0.5*texp
            t_end += 0.5*texp

        mask = tt.any(tt.and_(dt >= t_start, dt <= t_end), axis=-1)
        result = ifelse(tt.all(tt.eq(flag, 0)),
                        tt.arange(t.size)[mask],
                        tt.arange(t.size))

        return result
Developer: dfm, Project: exoplanet, Lines of code: 57, Source file: keplerian.py

Example 14: objective

def objective(y_true, y_pred, P, Q, alpha=0., beta=0.15, dbeta=0., gamma=0.01, gamma1=-1., poos=0.23, eps=1e-6):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''

    beta = np.float32(beta)
    dbeta = np.float32(dbeta)
    gamma = np.float32(gamma)
    poos = np.float32(poos)
    eps = np.float32(eps)

    # scale preds so that the class probas of each sample sum to 1
    y_pred += eps
    y_pred /= y_pred.sum(axis=-1, keepdims=True)

    y_true = T.cast(y_true.flatten(), 'int64')
    y1 = T.and_(T.gt(y_true, 0), T.le(y_true, Q))  # in-set
    y0 = T.or_(T.eq(y_true, 0), T.gt(y_true, Q))  # out-of-set or unlabeled
    y0sum = y0.sum() + eps  # number of oos
    y1sum = y1.sum() + eps  # number of in-set
    # we want to reduce the cross-entropy of the labeled data
    # convert all oos/unlabeled to label=0
    cost0 = T.nnet.categorical_crossentropy(y_pred, T.switch(y_true <= Q, y_true, 0))
    cost0 = T.dot(y1, cost0) / y1sum  # average cost per labeled example

    if alpha:
        cost1 = T.nnet.categorical_crossentropy(y_pred, y_pred)
        cost1 = T.dot(y0, cost1) / y0sum  # average cost per out-of-set/unlabeled example
        cost0 += alpha*cost1

    # we want to increase the average entropy in each batch
    # average over batch
    if beta:
        y_pred_avg0 = T.dot(y0, y_pred) / y0sum
        y_pred_avg0 = T.clip(y_pred_avg0, eps, np.float32(1) - eps)
        y_pred_avg0 /= y_pred_avg0.sum(axis=-1, keepdims=True)
        cost2 = T.nnet.categorical_crossentropy(y_pred_avg0.reshape((1,-1)), P-dbeta)[0] # [None,:]
        cost2 = T.switch(y0sum > 0.5, cost2, 0.)  # ignore cost2 if no samples
        cost0 += beta*cost2

    # binary classifier score
    if gamma:
        y_pred0 = T.clip(y_pred[:,0], eps, np.float32(1) - eps)
        if gamma1 < 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot(np.float32(1)-poos*y0.T,T.log(np.float32(1)-y_pred0))
            cost3 /= y_pred.shape[0]
            cost0 += gamma*cost3
        elif gamma1 > 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0,T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost31 = - T.dot(y1, T.log(np.float32(1)-y_pred0))
            cost31 /= y1sum
            cost0 += gamma*cost3 + gamma1*cost31
        else:  # gamma1 == 0.
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0, T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost0 += gamma*cost3
    return cost0
Developer: fulldecent, Project: LRE, Lines of code: 57, Source file: ladder.py

Example 15: masked_categorical_accuracy

def masked_categorical_accuracy(y_true, y_pred, mask):

    y_true = K.argmax(y_true, axis=-1)
    y_pred = K.argmax(y_pred, axis=-1)

    error = K.equal(y_true, y_pred)

    mask_template = T.and_(T.neq(y_true, mask), T.neq(y_true, 0)).nonzero()

    return K.mean(error[mask_template])
Developer: rudaoshi, Project: neuralmachines, Lines of code: 10, Source file: sequence_tagging_keras.py


Note: The theano.tensor.and_ examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are excerpted from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.