当前位置: 首页>>代码示例>>Python>>正文


Python tensor.where方法代码示例

本文整理汇总了Python中theano.tensor.where方法的典型用法代码示例。如果您正苦于以下问题:Python tensor.where方法的具体用法?Python tensor.where怎么用?Python tensor.where使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在theano.tensor的用法示例。


在下文中一共展示了tensor.where方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: get_output_for

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def get_output_for(self, inputs, **kwargs):
    """Zero out embedding vectors at padding positions.

    inputs[0] is a batch of embedding indices where index 0 marks
    padding, e.g.::

        [[11, 21, 43, 0, 0],
         [234, 543, 0, 0, 0],
         ...]

    inputs[1] holds the corresponding embedding vectors, one per index
    (padding rows may contain arbitrary values).

    Returns inputs[1] with every padding position multiplied by zero.
    """
    # 1.0 where the index is a real token, 0.0 where it is padding (index 0).
    keep_mask = T.where(T.eq(inputs[0], 0), np.float32(0.0), np.float32(1.0))
    # Add a trailing broadcast axis so the (batch, seq) mask scales every
    # component of the embedding vectors.
    return keep_mask.dimshuffle((0, 1, 'x')) * inputs[1]
开发者ID:UKPLab,项目名称:semeval2017-scienceie,代码行数:23,代码来源:siamese_cbowUtils.py

示例2: errors

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        :raises TypeError: if ``y`` and ``self.class_prediction`` differ
                  in number of dimensions
        :raises NotImplementedError: if ``y`` is not an integer tensor
        """

        # check if y has same dimension of y_pred
        if y.ndim != self.class_prediction.ndim:
            raise TypeError('y should have the same shape as self.class_prediction',
                ('y', y.type, 'class_prediction', self.class_prediction.type))
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.class_prediction, y))
        else:
            # Previously this used a Python-2 ``print`` statement plus a bare
            # NotImplementedError, losing the diagnostic; put the reason in
            # the exception instead.
            raise NotImplementedError(
                'errors() only supports integer label vectors, got dtype %s'
                % y.dtype)
开发者ID:GUR9000,项目名称:Deep_MRI_brain_extraction,代码行数:24,代码来源:NN_ConvLayer_2D.py

示例3: P

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def P(self, lat, lon):
        """Compute the pixelization matrix, no filters or illumination."""
        # Convert the (lat, lon) points into Cartesian coordinates.
        x, y, z = self.latlon_to_xyz(lat, lon)

        # Evaluate the polynomial basis at those points, truncated to the
        # first (ydeg + 1)^2 columns.
        basis = self.pT(x, y, z)[:, : (self.ydeg + 1) ** 2]

        # Change of basis into the spherical harmonic (Ylm) representation.
        result = ts.dot(basis, self.A1)

        # NOTE: The factor of `pi` ensures the correct normalization.
        # This is *different* from the derivation in the paper, but it's
        # due to the fact that in starry we normalize the spherical
        # harmonics in a slightly strange way (they're normalized so that
        # the integral of Y_{0,0} over the unit sphere is 4, not 4pi).
        # This is useful for thermal light maps, where the flux from a map
        # with Y_{0,0} = 1 is *unity*. But it messes up things for reflected
        # light maps, so we need to account for that here.
        if self._reflected:
            result = result * np.pi

        # We're done
        return result
开发者ID:rodluger,项目名称:starry,代码行数:26,代码来源:core.py

示例4: rv

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def rv(
        self, theta, xo, yo, zo, ro, inc, obl, y, u, veq, alpha, tau, delta
    ):
        """Compute the observed radial velocity anomaly."""
        # Flux through the RV filter: the velocity-weighted intensity.
        rv_filter = self.compute_rv_filter(inc, obl, veq, alpha)
        weighted_flux = self.flux(
            theta, xo, yo, zo, ro, inc, obl, y, u, rv_filter, alpha, tau, delta
        )

        # Total (unweighted) flux: same call with a trivial filter whose
        # only non-zero entry is pi in the first slot.
        trivial_filter = tt.set_subtensor(tt.zeros_like(rv_filter)[0], np.pi)
        total_flux = self.flux(
            theta, xo, yo, zo, ro, inc, obl, y, u, trivial_filter,
            alpha, tau, delta
        )

        # Invert the total flux, mapping divisions by zero to zero.
        inv_flux = tt.ones((1,)) / total_flux
        inv_flux = tt.where(tt.isinf(inv_flux), 0.0, inv_flux)

        # The RV signal is the ratio of weighted to total flux.
        return weighted_flux * inv_flux
开发者ID:rodluger,项目名称:starry,代码行数:23,代码来源:core.py

示例5: focal_loss_fixed

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def focal_loss_fixed(self, y_true, y_pred):
        """Focal loss: -sum(alpha * (1 - pt)^gamma * log(pt)), where
        pt = y_pred where y_true == 1 and (1 - y_pred) elsewhere.

        Reads ``self.alpha`` and ``self.gamma``.

        :raises NotImplementedError: for Keras backends other than
            tensorflow or theano (previously the function silently
            returned ``None`` in that case).
        """
        backend = K.backend()
        if backend == "tensorflow":
            import tensorflow as tf
            pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
        elif backend == "theano":
            import theano.tensor as T
            pt = T.where(T.eq(y_true, 1), y_pred, 1 - y_pred)
        else:
            raise NotImplementedError(
                "focal_loss_fixed: unsupported Keras backend %r" % backend)
        return -K.sum(self.alpha * K.pow(1. - pt, self.gamma) * K.log(pt))
开发者ID:batikim09,项目名称:LIVE_SER,代码行数:11,代码来源:custom_cost.py

示例6: compute_moll_grid

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def compute_moll_grid(self, res):
        """Compute the polynomial basis on a Mollweide grid.

        Builds a regular (y, x) grid covering the Mollweide ellipse at
        resolution ``res``, inverts the projection to (lat, lon), and
        returns a tuple:

          * a (2, N) tensor stacking the latitude and the longitude
            (shifted by 1.5*pi) of every grid point;
          * a (3, N) tensor of the corresponding Cartesian points,
            rotated onto the sky frame.

        Off-ellipse grid points are mapped to NaN.
        """
        # See NOTE on tt.mgrid bug in `compute_ortho_grid`
        dx = 2 * np.sqrt(2) / (res - 0.01)
        # Grid spans the Mollweide bounding box: |y| <= sqrt(2),
        # |x| <= 2*sqrt(2), with x twice as coarse as y.
        y, x = tt.mgrid[
            -np.sqrt(2) : np.sqrt(2) : dx,
            -2 * np.sqrt(2) : 2 * np.sqrt(2) : 2 * dx,
        ]

        # Make points off-grid nan
        a = np.sqrt(2)      # semi-minor axis of the Mollweide ellipse
        b = 2 * np.sqrt(2)  # semi-major axis
        y = tt.where((y / a) ** 2 + (x / b) ** 2 <= 1, y, np.nan)

        # Invert the projection: auxiliary angle theta, then lat/lon.
        # https://en.wikipedia.org/wiki/Mollweide_projection
        theta = tt.arcsin(y / np.sqrt(2))
        lat = tt.arcsin((2 * theta + tt.sin(2 * theta)) / np.pi)
        lon0 = 3 * np.pi / 2  # central meridian offset
        lon = lon0 + np.pi * x / (2 * np.sqrt(2) * tt.cos(theta))

        # Back to Cartesian, this time on the *sky*
        x = tt.reshape(tt.cos(lat) * tt.cos(lon), [1, -1])
        y = tt.reshape(tt.cos(lat) * tt.sin(lon), [1, -1])
        z = tt.reshape(tt.sin(lat), [1, -1])
        # Rotate by -pi/2 about the x-axis to move the points into the
        # sky frame.
        R = self.RAxisAngle(tt.as_tensor_variable([1.0, 0.0, 0.0]), -np.pi / 2)
        return (
            tt.concatenate(
                (
                    tt.reshape(lat, (1, -1)),
                    # Undo the lon0 = 1.5*pi shift applied above.
                    tt.reshape(lon - 1.5 * np.pi, (1, -1)),
                )
            ),
            tt.dot(R, tt.concatenate((x, y, z))),
        )
开发者ID:rodluger,项目名称:starry,代码行数:36,代码来源:core.py

示例7: negative_log_likelihood_classwise_masking

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def negative_log_likelihood_classwise_masking(self, y, mask_class_labeled, mask_class_not_present):
        """
        Negative log-likelihood with per-class masking.

        todo: test.

        :param y: true classes (as integer value): (batchsize, x, y)
        :param mask_class_labeled: matrix (batchsize, num_classes), allowed
                    values 0 or 1; all ones gives the ordinary nll, all
                    zeroes is an invalid state. A zero for one class
                    indicates that this class may be present but is not
                    labeled as such.
        :param mask_class_not_present: (batchsize, num_classes): similar to
                    mask_class_labeled, but a 1 indicates that a class is
                    CERTAINLY NOT PRESENT in the batch.

        Values of -1 in y count as "absolutely not labeled / ignore
        predictions"; this has PRIORITY over anything else (including
        mask_class_not_present).
        """
        # Add broadcast axes so all tensors line up with the prediction
        # shape (batchsize, num_classes, x, y).
        y                      = y.dimshuffle(0, 'x', 1, 2)                        #(batchsize, 1, x, y)
        mask_class_labeled     = mask_class_labeled.dimshuffle(0, 1, 'x', 'x')     #(batchsize, num_classes,1 ,1)
        mask_class_not_present = mask_class_not_present.dimshuffle(0, 1, 'x', 'x') #(batchsize, num_classes,1 ,1)
        global_loss_mask = (y != -1) #apply to overall loss after everything is calculated; marks positions 
        
        
        pred = self.class_probabilities_realshape # (batchsize, num_classes, x, y)
        # Clamp the -1 "ignore" labels to 0 so they are valid indices;
        # global_loss_mask zeroes their contribution later.
        mod_y = T.where(y<0,0,y)
        
        #dirty hack: compute "standard" nll when most predictive weight is put on classes which are in fact labeled
        votes_not_for_unlabeled = T.where( T.sum(pred*mask_class_labeled,axis=1)>=0.5, 1, 0 ).dimshuffle(0,'x',1,2)

        # could also add '* mask_class_labeled' inside, but this should not change anything , provided there is no logical conflict between y and mask_class_labeled !
        # NOTE(review): the advanced indexing `[:, mod_y]` looks intended to
        # pick each pixel's true-class log-probability, but with mod_y of
        # shape (batchsize, 1, x, y) it broadcasts along axis 1 instead —
        # verify this selects what is intended before relying on it.
        nll = -T.mean((T.log(pred) * votes_not_for_unlabeled * global_loss_mask)[:,mod_y]) #standard loss part -> increase p(correct_prediction); thus disabled if the "correct" class is not known
        
        # penalize predictions: sign is a plus! (yes: '+')
        # remove <global_loss_mask> if <mask_class_not_present> should override 'unlabeled' areas.
        nll += T.mean(T.log(pred) * mask_class_not_present * global_loss_mask) 
        
        return nll
        
# NOTE(review): dead draft implementation kept below for reference.
#        no_cls   = T.alloc(np.int16(255), 1,1,1,1)
#        no_cls_ix = T.eq(no_cls,y) # (bs,x,y) tensor, 1 where y==255
#        # true if y==255 AND for outputneurons of (generally) labelled classes,
#        # i.e. at positions all where those classes are NOT appearing in the        data
#        no_cls_ix = no_cls_ix.dimshuffle * mask_class_labeled.dimshuffle(0, 1, 'x', 'x')
#        no_cls_ix = no_cls_ix.nonzero() # selects the output neurons of        negatively labelled pixels
#        
#        ix       =       T.arange(self.class_probabilities.shape[1]).dimshuffle('x', 0, 'x','x')  # (1,4,1, 1 )
#        select   = T.eq(ix,y).nonzero() # selects the output neurons of        positively labelled pixels
#        
#        push_up  = -T.log(self.class_probabilities)[select]
#        push_dn  =  T.log(self.class_probabilities)[no_cls_ix] / mask_class_labeled.sum(axis=1).dimshuffle(0, 'x', 'x', 'x')
#        nll_inst = push_up + push_dn
#        nll      = T.mean(nll_inst) 
开发者ID:GUR9000,项目名称:Deep_MRI_brain_extraction,代码行数:47,代码来源:NN_ConvLayer_2D.py

示例8: __init__

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def __init__(self, rng, rstream, x, y, setting): # add cost

        """
        Construct the MLP model: a 3x28x28 input, flattened, then dense
        layers of 1000, 1000, 1000 and 10 units with optional per-layer
        L2 regularization hyper-parameters.

        Arguments:
            rng, rstream         - random streams
            x, y                 - symbolic input batch and label vector
            setting              - configuration; reads .regL2 and .cost

        """
        self.paramsEle = []
        self.paramsHyper = []
        self.layers = [ll.InputLayer((None, 3, 28, 28))]
        self.layers.append(ll.ReshapeLayer(self.layers[-1], (None, 3*28*28)))
        penalty = 0.
        # Track whether any symbolic term was added to `penalty` (see NOTE
        # below on why we cannot just compare `penalty != 0.`).
        has_penalty = False
        for num in [1000, 1000, 1000, 10]: # TODO: refactor it later
            self.layers.append(DenseLayerWithReg(setting, self.layers[-1], num_units=num))
            self.paramsEle += self.layers[-1].W
            self.paramsEle += self.layers[-1].b
            if setting.regL2 is not None:
                tempL2 = self.layers[-1].L2 * T.sqr(self.layers[-1].W)
                penalty += T.sum(tempL2)
                has_penalty = True
                self.paramsHyper += self.layers[-1].L2

        self.y = self.layers[-1].output
        self.prediction = T.argmax(self.y, axis=1)
        # BUG FIX: `self.guessLabel` was read below (classError) but never
        # assigned anywhere in this constructor, which raised
        # AttributeError; alias it to the argmax prediction.
        self.guessLabel = self.prediction
        # NOTE: once an L2 term is added, `penalty` is a symbolic tensor and
        # `penalty != 0.` would itself be symbolic, whose truth value is
        # undefined — use the explicit Python-level flag instead.
        self.penalty = penalty if has_penalty else T.constant(0.)

        def stable(x, stabilize=True):
            # Replace NaN/Inf entries by a large finite constant so the
            # cost cannot blow up during optimization.
            if stabilize:
                x = T.where(T.isnan(x), 1000., x)
                x = T.where(T.isinf(x), 1000., x)
            return x

        if setting.cost == 'categorical_crossentropy':
            def costFun1(y, label):
                # Per-example negative log-likelihood of the true class.
                return stable(-T.log(y[T.arange(label.shape[0]), label]),
                              stabilize=True)
        else:
            raise NotImplementedError


        def costFunT1(*args, **kwargs):
            return T.mean(costFun1(*args, **kwargs))

        # cost function
        self.trainCost = costFunT1(self.y, y)
        self.classError = T.mean(T.cast(T.neq(self.guessLabel, y), 'float32'))
开发者ID:bigaidream-projects,项目名称:drmad,代码行数:50,代码来源:mlp.py

示例9: unweighted_intensity

# 需要导入模块: from theano import tensor [as 别名]
# 或者: from theano.tensor import where [as 别名]
def unweighted_intensity(
        self, lat, lon, y, u, f, theta, alpha, tau, delta, ld
    ):
        """
        Compute the intensity in the absence of an illumination source
        (i.e., the albedo).

        :param lat, lon: points at which to evaluate the intensity
        :param y: spherical harmonic coefficients of the map
        :param u: limb darkening coefficients
        :param f: filter coefficients
        :param theta: rotational phase(s) for the differential rotation
        :param alpha, tau, delta: differential rotation parameters
        :param ld: symbolic boolean; if true, apply the limb darkening
            filter ``u``, otherwise a neutral filter (see below)

        """
        # Get the Cartesian points
        xpt, ypt, zpt = self.latlon_to_xyz(lat, lon)

        # Compute the polynomial basis at the point
        pT = self.pT(xpt, ypt, zpt)

        # Apply the differential rotation operator
        if self.nw is None:
            # Single-map case: rotate the coefficient vector for each
            # theta and flatten back to one dimension.
            y = tt.reshape(
                self.tensordotD(
                    tt.reshape(y, (1, -1)),
                    tt.reshape(theta, (-1,)),
                    alpha,
                    tau,
                    delta,
                ),
                (-1,),
            )
        else:
            # Multi-column case (nw columns in y): rotate every column by
            # the same theta.  NOTE(review): presumably nw is the number
            # of wavelength bins — confirm against the class definition.
            y = tt.transpose(
                self.tensordotD(
                    tt.transpose(y),
                    tt.ones(self.nw) * theta,
                    alpha,
                    tau,
                    delta,
                )
            )

        # Transform the map to the polynomial basis
        A1y = ts.dot(self.A1, y)

        # Apply the filter
        if self.filter:
            # u0 is a neutral limb-darkening vector (only its first entry,
            # -1, is non-zero); the symbolic `ld` flag selects at run time
            # between the real coefficients `u` and this neutral filter.
            u0 = tt.zeros_like(u)
            u0 = tt.set_subtensor(u0[0], -1.0)
            A1y = ifelse(
                ld, tt.dot(self.F(u, f), A1y), tt.dot(self.F(u0, f), A1y)
            )

        # Dot the polynomial into the basis.
        # NOTE: The factor of `pi` ensures the correct normalization.
        # This is *different* from the derivation in the paper, but it's
        # due to the fact that the in starry we normalize the spherical
        # harmonics in a slightly strange way (they're normalized so that
        # the integral of Y_{0,0} over the unit sphere is 4, not 4pi).
        # This is useful for thermal light maps, where the flux from a map
        # with Y_{0,0} = 1 is *unity*. But it messes up things for reflected
        # light maps, so we need to account for that here.
        return np.pi * tt.dot(pT, A1y) 
开发者ID:rodluger,项目名称:starry,代码行数:60,代码来源:core.py


注:本文中的theano.tensor.where方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。