

Python tensor.mul Method Code Examples

This article collects typical usage examples of the theano.tensor.mul method in Python. If you are wondering what tensor.mul does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the theano.tensor module.


The following shows 15 code examples of the tensor.mul method, sorted by popularity by default.
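Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what tensor.mul does: it is Theano's elementwise multiplication Op, equivalent to the * operator, and it accepts two or more arguments.

import numpy as np
import theano
import theano.tensor as T

a = T.matrix('a')
b = T.matrix('b')
prod = T.mul(a, b)                 # elementwise product, same graph as a * b
f = theano.function([a, b], prod)

x = np.array([[1., 2.], [3., 4.]], dtype=theano.config.floatX)
y = np.array([[10., 20.], [30., 40.]], dtype=theano.config.floatX)
print(f(x, y))
# [[  10.   40.]
#  [  90.  160.]]

# T.mul is variadic: T.mul(a, b, a) multiplies all of its arguments elementwise.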

Example 1: errors4one

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def errors4one(self, z, out, weight=None, distLabelType='12C'):
    distBins = config.distCutoffs[distLabelType]
    label8 = DistanceUtils.LabelsOfOneDistance(config.ContactDefinition, distBins)
    label15 = DistanceUtils.LabelsOfOneDistance(config.InteractionLimit, distBins)

    z3C = T.cast( T.ge(z, label8), 'int32') + T.cast( T.ge(z, label15), 'int32')
    o3C = T.cast( T.ge(out, label8), 'int32') + T.cast( T.ge(out, label15), 'int32')

    if weight is not None:
        err = T.sum( T.mul(weight, T.neq(o3C, z3C) ) ) * 1. / T.sum(weight)
    else:
        err = T.mean( T.neq(o3C, z3C) )

    ## err is a scalar; convert it to a tensor with ndim=1
    return T.stack([err])

    ## this function returns a vector of errors, the size of this vector is equal to the sum of ValueDims for all the responses 
Author: j3xugit, Project: RaptorX-Contact, Lines: 19, Source: Model4DistancePrediction.py
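The weighted branch above illustrates a pattern that recurs in several later examples: T.mul applies per-example weights to a 0/1 error vector, and dividing by the weight sum yields a weighted error rate. Below is a minimal standalone sketch with made-up numbers; it is not part of the RaptorX-Contact code.

import numpy as np
import theano
import theano.tensor as T

pred = T.ivector('pred')
truth = T.ivector('truth')
w = T.vector('w')

## weighted 0/1 error: sum(w * [pred != truth]) / sum(w)
werr = T.sum(T.mul(w, T.neq(pred, truth))) / T.sum(w)
f = theano.function([pred, truth, w], werr)

print(f(np.array([0, 1, 2], dtype='int32'),
        np.array([0, 2, 2], dtype='int32'),
        np.array([1., 3., 1.], dtype=theano.config.floatX)))
## 3 / 5 = 0.6: only the second example is wrong, and it carries weight 3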

Example 2: ConvByPattern

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def ConvByPattern(x, patterns, mask=None):
    W = np.transpose(patterns, (3, 0, 1, 2))
    out2 = T.nnet.conv2d(x.dimshuffle(0, 3, 1, 2), W, filter_shape=W.shape, border_mode='half')
    if mask is not None:
        ## mask has shape (batchSize, #rows_to_be_masked, nCols)

        ## a subtensor of out2 along the horiz direction
        out2_sub_horiz = out2[:, :, :mask.shape[1], :]
        mask_horiz = mask.dimshuffle(0, 'x', 1, 2)
        out3 = T.set_subtensor(out2_sub_horiz, T.mul(out2_sub_horiz, mask_horiz) )

        ## a subtensor of out3 along the vertical direction
        out3_sub_vertical = out3[:, :, :, :mask.shape[1] ]
        mask_vertical = mask.dimshuffle(0, 'x', 2, 1)
        y = T.set_subtensor(out3_sub_vertical, T.mul(out3_sub_vertical, mask_vertical) )
    else:
        y = out2

    y = y.dimshuffle(0, 2, 3, 1)

    return y/np.prod(patterns.shape[1:3]) 
Author: j3xugit, Project: RaptorX-Contact, Lines: 23, Source: utils.py

Example 3: rbf_kernel

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def rbf_kernel(X):

    XY = T.dot(X, X.T)
    x2 = T.sum(X**2, axis=1).dimshuffle(0, 'x')
    X2e = T.repeat(x2, X.shape[0], axis=1)
    H = X2e +  X2e.T - 2. * XY

    V = H.flatten()
    # median distance
    h = T.switch(T.eq((V.shape[0] % 2), 0),
        # if even vector
        T.mean(T.sort(V)[ ((V.shape[0] // 2) - 1) : ((V.shape[0] // 2) + 1) ]),
        # if odd vector
        T.sort(V)[V.shape[0] // 2])

    h = T.sqrt(.5 * h / T.log(H.shape[0].astype('float32') + 1.)) 
    
    # compute the rbf kernel
    kxy = T.exp(-H / (h ** 2) / 2.0)

    dxkxy = -T.dot(kxy, X)
    sumkxy = T.sum(kxy, axis=1).dimshuffle(0, 'x')
    dxkxy = T.add(dxkxy, T.mul(X, sumkxy)) / (h ** 2)

    return kxy, dxkxy 
Author: DartML, Project: SteinGAN, Lines: 27, Source: rbm_adv.py

Example 4: sequence_iteration

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def sequence_iteration(self, output, mask,use_dropout=0,dropout_value=0.5):

        dot_product = T.dot(output , self.t_w_out)

        net_o = T.add( dot_product , self.t_b_out )

        ex_net = T.exp(net_o)
        sum_net = T.sum(ex_net, axis=2, keepdims=True)
        softmax_o = ex_net / sum_net


        mask = T.addbroadcast(mask, 2)  # TODO: is this necessary?
        output = T.mul(mask, softmax_o)   + T.mul( (1. - mask) , 1e-6 )

        return output #result


######                     Linear Layer
######################################## 
Author: JoergFranke, Project: recnet, Lines: 21, Source: output_layer.py

Example 5: t_forward_step

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def t_forward_step(self,mask, rzup_in_sig, h_pre, u_rz, u_up, t_n_out): #u_r, u_z,



        signal_act = self.activation
        gate_act = self.sigmoid()

        preact = T.dot( h_pre, u_rz)


        r = gate_act( T.add( rzup_in_sig[:, 0:t_n_out] , preact[:, 0:t_n_out] )) #T.dot( h_pre, u_r) ) )
        z = gate_act( T.add( rzup_in_sig[:, t_n_out:2 * t_n_out] , preact[:, t_n_out:2 * t_n_out] )) #T.dot(h_pre, u_z) ))

        h_update = signal_act( T.add( rzup_in_sig[:, 2*t_n_out:3*t_n_out] , T.dot( T.mul( h_pre, r), u_up) ))

        h_new = T.add( (1.-z) * h_update , z * h_pre )

        mask = T.addbroadcast(mask, 1)
        out_sig =  T.add( mask * h_new   , (1. - mask) * h_pre )

        return out_sig 
Author: JoergFranke, Project: recnet, Lines: 23, Source: recurrent_layer.py

Example 6: test_local_sigm_times_exp

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def test_local_sigm_times_exp(self):
        """
        Test the `local_sigm_times_exp` optimization.
        exp(x) * sigm(-x) -> sigm(x)
        exp(-x) * sigm(x) -> sigm(-x)
        """
        def match(func, ops):
            # print [node.op.scalar_op for node in func.maker.fgraph.toposort()]
            assert [node.op for node in func.maker.fgraph.toposort()] == ops
        m = self.get_mode(excluding=['local_elemwise_fusion', 'inplace'])
        x, y = tensor.vectors('x', 'y')

        f = theano.function([x], sigmoid(-x) * tensor.exp(x), mode=m)
        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        match(f, [sigmoid])

        f = theano.function([x], sigmoid(x) * tensor.exp(-x), mode=m)
        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        match(f, [tensor.neg, sigmoid])

        f = theano.function([x], -(-(-(sigmoid(x)))) * tensor.exp(-x), mode=m)
        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        match(f, [tensor.neg, sigmoid, tensor.neg])

        f = theano.function(
                [x, y],
                (sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *
                 tensor.exp(x * y) * tensor.exp(y)),
                mode=m)
        assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
        match(f, [sigmoid, tensor.mul, tensor.neg, tensor.exp, sigmoid,
                  tensor.mul]) 
Author: muhanzhang, Project: D-VAE, Lines: 34, Source: test_sigm.py

Example 7: negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def negative_log_likelihood(self, y, sampleWeight=None):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
                \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # start-snippet-2
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.

        if sampleWeight is not None:
            return -T.sum(T.mul(sampleWeight, T.log(self.p_y_given_x)[T.arange(y.shape[0]), y] ) )/T.sum(sampleWeight)
        else:
            return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

        # end-snippet-2 
Author: j3xugit, Project: RaptorX-Contact, Lines: 38, Source: LogReg.py

Example 8: errors

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def errors(self, y, sampleWeight=None):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """

        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            if sampleWeight is not None:
                return T.sum( T.mul(sampleWeight, T.neq(self.y_pred, y) ) ) * 1. / T.sum(sampleWeight)
            else:
                return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()

    ## T.bincount is a weird function. Its return value has the same type as the dtype of the elements in the array to be counted.
    ##calculate the classification errors for each of the three categories 
Author: j3xugit, Project: RaptorX-Contact, Lines: 31, Source: LogReg.py

Example 9: errorsBreakdown

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def errorsBreakdown(self, y):

    ## truth shall be cast to at least int32
    def breakDown3C(pred=None, truth=None):
        labelcount = T.bincount(truth, minlength=3)
        err = T.neq(pred, truth)
        truth_with_wrong_pred = truth[err.nonzero()]
        errcount = T.bincount(truth_with_wrong_pred, minlength=3)

        ## use 0.0001 to avoid division by 0
        return T.mul(errcount, 1. / (labelcount + 0.0001))

    if self.n_out == 3:
        truth = T.cast(y, 'int32')
        return breakDown3C(self.y_pred, truth)

    if self.n_out == 12:
        ## convert the 12-label system to the 3-label system
        ## 0, 1, 2, 3 to 0; 4, 5, 6, 7, 8, 9, 10 to 1; and 11 to 2
        y1 = T.zeros_like(y)
        y2 = T.gt(y, 3)
        y3 = T.gt(y, 10)
        truth = T.cast(y1 + y2 + y3, 'int32')

        pred1 = T.zeros_like(self.y_pred)
        pred2 = T.gt(self.y_pred, 3)
        pred3 = T.gt(self.y_pred, 10)
        ## map the prediction to the 3-label system in the same way as the truth
        pred = T.cast(pred1 + pred2 + pred3, 'int32')

        return breakDown3C(pred, truth)

    else:
        print('this function only works when n_out is either 3 or 12')
        sys.exit(-1)

    ## calculate the confusion matrix of the prediction 
Author: j3xugit, Project: RaptorX-Contact, Lines: 38, Source: LogReg.py

Example 10: ExpandBy4dPattern

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def ExpandBy4dPattern(x, patterns):
    ##patterns has shape (numPatterns, nPatternRows, nPatternCols, numLabels)
    ##each element is between 0 and 1 and the sum of the vector patterns[i, j, k, :] is equal to 1
    pshape = patterns.shape

    ## y1 has shape (batchSize, nRows * pshape[1], nCols * pshape[2], pshape[0])
    y1 = MyRepeat(x, (pshape[1], pshape[2]), axes=[1, 2])
    expandedPatterns = T.tile(patterns, (1, x.shape[1], x.shape[2], 1) ).dimshuffle('x', 1, 2, 0, 3)

    ylist = []
    for i in xrange(pshape[3]):
        y2 = T.mul( y1, expandedPatterns[:, :, :, :, i] )
        y3 = T.sum( y2, axis=3, keepdims=True)
        ylist.append(y3)
    return T.concatenate( ylist, axis=3) 
Author: j3xugit, Project: RaptorX-Contact, Lines: 17, Source: utils.py

Example 11: errors

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def errors(self, y, sampleWeight=None):
		assert (y.ndim == 2)
		err_sqr = T.sqr( y - self.y_pred )
		if sampleWeight is None:
			return T.sqrt(T.mean(err_sqr, axis=0 ) )

		assert (sampleWeight.ndim == 2)
		if self.n_variables == 1:
			weight = sampleWeight
		else:
			weight = T.concatenate( [ sampleWeight, sampleWeight], axis=1 )
		return T.sqrt( T.sum(T.mul( err_sqr, weight ), axis=0)/ T.sum(sampleWeight) )

	## y has shape (batchSize, n_variables), sampleWeight shall have shape (batchSize, 1) instead of (batchSize,) 
Author: j3xugit, Project: RaptorX-Contact, Lines: 16, Source: NN4Normal.py

Example 12: NLL

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def NLL(self, y, sampleWeight=None):
        ###Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.

        if sampleWeight is not None:
            return -T.sum(T.mul(sampleWeight, T.log(self.p_y_given_x)[T.arange(y.shape[0]), y] ) )/T.sum(sampleWeight)
        else:
            return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) 
Author: j3xugit, Project: RaptorX-Contact, Lines: 9, Source: NN4LogReg.py

Example 13: errors

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def errors(self, y, sampleWeight=None):
	assert (y.ndim == 2)
	err = T.neq(self.y_pred, y)
	if sampleWeight is None:
		return T.mean(err, axis=0)

	assert (sampleWeight.ndim == 2)
	return T.sum( T.mul(err, sampleWeight), axis=0)/T.sum(sampleWeight)

    ## this function returns a scalar 
Author: j3xugit, Project: RaptorX-Contact, Lines: 12, Source: NN4LogReg.py

Example 14: test_local_sigm_times_exp

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def test_local_sigm_times_exp(self):
        """
        Test the `local_sigm_times_exp` optimization.
        exp(x) * sigm(-x) -> sigm(x)
        exp(-x) * sigm(x) -> sigm(-x)
        """
        def match(func, ops):
            # print [node.op.scalar_op for node in func.maker.fgraph.toposort()]
            assert [node.op for node in func.maker.fgraph.toposort()] == ops
        m = self.get_mode(excluding=['local_elemwise_fusion', 'inplace'])
        x, y = tensor.vectors('x', 'y')

        f = theano.function([x], sigmoid(-x) * tensor.exp(x), mode=m)
        match(f, [sigmoid])

        f = theano.function([x], sigmoid(x) * tensor.exp(-x), mode=m)
        match(f, [tensor.neg, sigmoid])

        f = theano.function([x], -(-(-(sigmoid(x)))) * tensor.exp(-x), mode=m)
        match(f, [tensor.neg, sigmoid, tensor.neg])

        f = theano.function(
                [x, y],
                (sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *
                 tensor.exp(x * y) * tensor.exp(y)),
                mode=m)
        match(f, [sigmoid, tensor.mul, tensor.neg, tensor.exp, sigmoid,
                  tensor.mul]) 
Author: rizar, Project: attention-lvcsr, Lines: 30, Source: test_sigm.py

Example 15: SlopeLinInv

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import mul [as alias]
def SlopeLinInv(slope):
    """
    Truncated linear unit
    :param slope: slope of negative quadrant
    :return: x if x > 0 else x*slope
    """
    import theano.tensor as T

    def inner(x):
        return T.switch(T.gt(x, 0), x, T.mul(x, slope))
    return inner 
Author: moberweger, Project: deep-prior, Lines: 13, Source: helpers.py
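A short usage sketch for this last example, assuming the SlopeLinInv definition above is in scope (the input values are made up):

import numpy as np
import theano
import theano.tensor as T

act = SlopeLinInv(0.1)                 # slope 0.1 for negative inputs
x = T.vector('x')
f = theano.function([x], act(x))

print(f(np.array([-2., 0.5, 3.], dtype=theano.config.floatX)))
# [-0.2  0.5  3. ]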


Note: The theano.tensor.mul examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, so please consult each project's license before distributing or reusing it. Do not reproduce this article without permission.