

Python tensor.tensordot Method Code Examples

This article collects typical usage examples of the theano.tensor.tensordot method in Python. If you are wondering exactly how to use tensor.tensordot, what it does, or where to find examples of it in practice, the curated code examples below may help. You can also explore further usage examples from its home module, theano.tensor.


The following presents 15 code examples of the tensor.tensordot method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
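Before the examples, a minimal, self-contained sketch of the basic call may be useful; it is not taken from any of the projects below, and the variable names are illustrative only. T.tensordot generalizes T.dot by contracting an arbitrary pair (or pairs) of axes, mirroring numpy.tensordot:

# A minimal sketch, assuming a working Theano installation.
import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')  # symbolic input, shape (batch_size, 4)
W = theano.shared(np.ones((4, 3), dtype=theano.config.floatX), name='W')

# contract axis 1 of x with axis 0 of W, exactly as in numpy.tensordot:
y = T.tensordot(x, W, axes=[[1], [0]])  # result shape: (batch_size, 3)

f = theano.function([x], y)
print(f(np.ones((2, 4), dtype=theano.config.floatX)).shape)  # -> (2, 3)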

Example 1: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def get_output_for(self, input, init=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)
        
        activation = T.tensordot(input, self.W, [[1], [0]])
        abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
                    + 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))

        if init:
            mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
            abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
            self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
        
        f = T.sum(T.exp(-abs_dif),axis=2)

        if init:
            mf = T.mean(f,axis=0)
            f -= mf.dimshuffle('x',0)
            self.init_updates.append((self.b, -mf))
        else:
            f += self.b.dimshuffle('x',0)

        return T.concatenate([input, f], axis=1) 
Developer: djsutherland | Project: opt-mmd | Lines of code: 27 | Source: nn.py
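This appears to be the minibatch-discrimination layer popularized by Salimans et al.'s Improved GAN code: the tensordot contracts the feature axis of the 2-D input with the first axis of a 3-D weight tensor, and the pairwise L1 distances between the resulting per-sample activations are appended to the features. A shape-only NumPy sketch with made-up sizes:

# Hypothetical sizes, for shape intuition only.
import numpy as np
a = np.random.randn(8, 16)         # input: (batch, num_features)
W = np.random.randn(16, 32, 4)     # weights: (num_features, num_kernels, dim)
act = np.tensordot(a, W, axes=[[1], [0]])
print(act.shape)                   # (8, 32, 4) == (batch, num_kernels, dim)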

Example 2: output

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def output(self, input=None, dropout_active=True, *args, **kwargs): # use the 'dropout_active' keyword argument to disable dropout at test time; it is on by default.
        if input is None:
            input = self.input_layer.output(dropout_active=dropout_active, *args, **kwargs)
        
        if dropout_active and (self.dropout > 0.):
            retain_prob = 1 - self.dropout
            if self.dropout_tied:
                # tie the dropout masks across entire feature maps, so broadcast across the feature maps.
                mask = srng.binomial((input.shape[0], input.shape[1]), p=retain_prob, dtype='int32').astype('float32').dimshuffle(0, 1, 'x', 'x')
            else:
                mask = srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32')
            # apply the input mask and rescale the input accordingly; this makes rescaling the weights at test time unnecessary.
            input = input / retain_prob * mask

        prod = T.tensordot(input, self.W, [[1], [0]]) # this has shape (batch_size, width, height, out_maps)
        prod = prod.dimshuffle(0, 3, 1, 2) # move the feature maps to the 1st axis, where they were in the input
        return self.nonlinearity(prod + self.b.dimshuffle('x', 0, 'x', 'x')) 
Developer: benanne | Project: kaggle-galaxies | Lines of code: 20 | Source: layers.py
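Here the tensordot acts as a 1x1 convolution: assuming self.W has shape (in_maps, out_maps), as the shape comment implies, it contracts the channel axis with the weight matrix, leaving the new feature maps on the last axis until the dimshuffle moves them back. A NumPy shape sketch with assumed sizes:

# Assumed sizes, for shape intuition only.
import numpy as np
x = np.random.randn(2, 3, 5, 5)            # (batch, in_maps, width, height)
W = np.random.randn(3, 8)                  # (in_maps, out_maps)
prod = np.tensordot(x, W, axes=[[1], [0]])
print(prod.shape)                          # (2, 5, 5, 8) == (batch, width, height, out_maps)
prod = prod.transpose(0, 3, 1, 2)          # feature maps back to axis 1
print(prod.shape)                          # (2, 8, 5, 5)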

Example 3: grad

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def grad(self, inputs, cost_grad):
        """
        In defining the gradient, the Finite Fourier Transform is viewed as
        a complex-differentiable function of a complex variable
        """
        a = inputs[0]
        n = inputs[1]
        axis = inputs[2]
        grad = cost_grad[0]
        if not isinstance(axis, tensor.TensorConstant):
            raise NotImplementedError('%s: gradient is currently implemented'
                                      ' only for axis being a Theano constant'
                                      % self.__class__.__name__)
        axis = int(axis.data)
        # notice that the number of actual elements in wrto is independent of
        # possible padding or truncation:
        elem = tensor.arange(0, tensor.shape(a)[axis], 1)
        # accounts for padding:
        freq = tensor.arange(0, n, 1)
        outer = tensor.outer(freq, elem)
        pow_outer = tensor.exp(((-2 * math.pi * 1j) * outer) / (1. * n))
        res = tensor.tensordot(grad, pow_outer, (axis, 0))

        # This would be simpler but not implemented by theano:
        # res = tensor.switch(tensor.lt(n, tensor.shape(a)[axis]),
        # tensor.set_subtensor(res[...,n::], 0, False, False), res)

        # Instead we resort to that to account for truncation:
        flip_shape = list(numpy.arange(0, a.ndim)[::-1])
        res = res.dimshuffle(flip_shape)
        res = tensor.switch(tensor.lt(n, tensor.shape(a)[axis]),
                            tensor.set_subtensor(res[n::, ], 0, False, False),
                            res)
        res = res.dimshuffle(flip_shape)

        # ensures that the gradient shape conforms to the input shape:
        out_shape = list(numpy.arange(0, axis)) + [a.ndim - 1] +\
            list(numpy.arange(axis, a.ndim - 1))
        res = res.dimshuffle(*out_shape)
        return [res, None, None] 
Developer: muhanzhang | Project: D-VAE | Lines of code: 42 | Source: fourier.py
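To see why this gradient is just another tensordot, note that the length-n finite Fourier transform along the chosen axis is a linear map (in LaTeX notation):

$\hat{a}_k = \sum_m a_m \, e^{-2\pi i k m / n}, \qquad \frac{\partial \hat{a}_k}{\partial a_m} = e^{-2\pi i k m / n}$

pow_outer above is exactly this matrix of complex exponentials, so contracting the upstream gradient with it propagates the gradient back through the transform; the switch/set_subtensor pair then zeroes the rows that a truncating transform (n smaller than the input length along the axis) never saw.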

Example 4: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def __init__(self, input, n_in, n_out):
	## input has shape (batchSize, seqLen, n_in)
	## input shall be a binary tensor, each row has only one 1
	
	self.n_in = n_in
	self.n_out = n_out
	self.input = input

	value_bound = np.sqrt(6. / (n_in * n_in + n_out))
	W_values = np.asarray(np.random.uniform(low=-value_bound, high=value_bound, size=(n_in, n_in, n_out)), dtype=theano.config.floatX)
	self.W = theano.shared(value=W_values, name='EmbeddingLayer_W', borrow=True)


	## out1 shall have shape (batchSize, seqLen, n_in, n_out)
	out1 = T.tensordot(input, self.W, axes=1)

	##out2 has shape(batchSize, n_out, seqLen, n_in)
	out2 = out1.dimshuffle(0, 3, 1, 2)

	##input2 has shape(batchSize, n_in, seqLen)
	input2 = input.dimshuffle(0,2,1)

	##out3 shall have shape (batchSize, n_out, seqLen, seqLen)
	out3 = T.batched_tensordot(out2, input2, axes=1)

	##output has shape (batchSize, seqLen, seqLen, n_out)
	self.output = out3.dimshuffle(0, 2, 3, 1)

	self.params = [self.W]
	self.paramL1 = abs(self.W).sum()
	self.paramL2 = (self.W**2).sum()
	##self.pcenters = (self.W.sum(axis=[0, 1])**2 ).sum()
	self.pcenters = (self.W.mean(axis=[0, 1])**2 ).sum() 
Developer: j3xugit | Project: RaptorX-Contact | Lines of code: 35 | Source: EmbeddingLayer.py
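The one-hot input makes each tensordot an embedding lookup: position i selects a slice of W, and the batched_tensordot with the transposed input pairs positions i and j, producing a pairwise feature tensor. A NumPy re-enactment with assumed small sizes (T.batched_tensordot is mimicked here with einsum):

# Assumed sizes, for shape intuition only.
import numpy as np
batch, seqLen, n_in, n_out = 2, 5, 4, 3
x = np.zeros((batch, seqLen, n_in)); x[..., 0] = 1        # one-hot rows
W = np.random.randn(n_in, n_in, n_out)
out1 = np.tensordot(x, W, axes=1)                         # (batch, seqLen, n_in, n_out)
out2 = out1.transpose(0, 3, 1, 2)                         # (batch, n_out, seqLen, n_in)
x2 = x.transpose(0, 2, 1)                                 # (batch, n_in, seqLen)
out3 = np.einsum('boli,bij->bolj', out2, x2)              # (batch, n_out, seqLen, seqLen)
print(out3.transpose(0, 2, 3, 1).shape)                   # (2, 5, 5, 3)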

Example 5: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def get_output_for(self, input, init=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)
        
        activation = T.tensordot(input, self.W, [[1], [0]])
        abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
                    + 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))

        if init:
            mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
            abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
            self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
        
        f = T.sum(T.exp(-abs_dif),axis=2)

        if init:
            mf = T.mean(f,axis=0)
            f -= mf.dimshuffle('x',0)
            self.init_updates.append((self.b, -mf))
        else:
            f += self.b.dimshuffle('x',0)

        return T.concatenate([input, f], axis=1)

# Input Mixture of Gaussian Layer 
Developer: val-iisc | Project: deligan | Lines of code: 29 | Source: nn.py

Example 6: __call__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def __call__(self, X, w_temp, m_temp):
        # input dimensions
        # X:      (nb_samples, input_dim)
        # w_temp: (nb_samples, memory_dim)
        # m_temp: (nb_samples, memory_dim, memory_width) ::tensor_memory

        key   = dot(X, self.W_key, self.b_key)  # (nb_samples, memory_width)
        lock  = dot(m_temp, self.W_lock)        # (nb_samples, memory_dim, memory_width)
        shift = self.softmax(
            dot(X, self.W_shift, self.b_shift))  # (nb_samples, shift_width)

        beta = self.softplus(dot(X, self.W_beta, self.b_beta))[:, None]  # (nb_samples, x)
        gamma = self.softplus(dot(X, self.W_gama, self.b_gama)) + 1.  # (nb_samples,)
        gamma = gamma[:, None]  # (nb_samples, x)
        g = self.sigmoid(dot(X, self.W_g, self.b_g))[:, None]  # (nb_samples, x)

        signal = [key, shift, beta, gamma, g]

        energy = T.sum(key[:, None, :] * lock, axis=2)
        # energy = T.tensordot(key[:, None, :] + lock, self.v, [2, 0])
        w_c    = self.softmax(beta * energy)
        # w_c = self.softmax(
        #     beta * cosine_sim2d(key, m_temp))  # (nb_samples, memory_dim) //content-based addressing
        w_g = g * w_c + (1 - g) * w_temp  # (nb_samples, memory_dim) //history interpolation
        w_s = shift_convolve2d(w_g, shift, self.shift_conv)  # (nb_samples, memory_dim) //convolutional shift
        w_p = w_s ** gamma  # (nb_samples, memory_dim) //sharpening
        w_t = w_p / T.sum(w_p, axis=1)[:, None]  # (nb_samples, memory_dim)
        return w_t 
Developer: memray | Project: seq2seq-keyphrase | Lines of code: 30 | Source: ntm_minibatch.py
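The inline comments trace the standard Neural Turing Machine addressing pipeline; restated compactly in LaTeX notation, following the code above:

$w^c = \mathrm{softmax}(\beta\, e),\quad e_j = \sum_i k_i M_{ji}$   (content-based addressing)
$w^g = g\, w^c + (1 - g)\, w_{t-1}$                                 (interpolation with history)
$w^s = \mathrm{shift\_convolve}(w^g, s)$                            (convolutional shift)
$w_t = (w^s)^{\gamma} \big/ \sum_j (w^s_j)^{\gamma}$                (sharpening)

The commented-out tensordot line is an alternative, additive (Bahdanau-style) energy; the active code uses the multiplicative form T.sum(key[:, None, :] * lock, axis=2).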

Example 7: _get_pi_var

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def _get_pi_var(self):
        w_var, lambda_var = self.param_vars
        A_split_var, b_split_var, _ = self._get_A_b_c_split_vars()

        A_var = T.tensordot(A_split_var, w_var / self.repeats, axes=(0, 0)) + T.diag(lambda_var)
        B_var = T.tensordot(b_split_var, w_var / self.repeats, axes=(0, 0))
        pi_var = T.batched_tensordot(T.nlinalg.matrix_inverse(A_var), B_var, axes=(2, 1))  # preprocessed units
        return pi_var 
Developer: alexlee-gk | Project: visual_dynamics | Lines of code: 10 | Source: servoing_policy.py

Example 8: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def get_output_for(self, inputs, **kwargs):
        """
        Compute this layer's output function given a symbolic input variable.

        Parameters
        ----------
        :param inputs: list of theano.TensorType
            `inputs[0]` should always be the symbolic input variable.  When
            this layer has a mask input (i.e. was instantiated with
            `mask_input != None`, indicating that the lengths of sequences in
            each batch vary), `inputs` should have length 2, where `inputs[1]`
            is the `mask`.  The `mask` should be supplied as a Theano variable
            denoting whether each time step in each sequence in the batch is
            part of the sequence or not.  `mask` should be a matrix of shape
            ``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
            (length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
            of sequence i)``.
        :return: theano.TensorType
            Symbolic output variable.
        """
        input = inputs[0]
        mask = None
        if self.mask_incoming_index > 0:
            mask = inputs[self.mask_incoming_index]

        # compute out by tensor dot ([batch, length, input] * [input, num_label, num_label]
        # the shape of out should be [batch, length, num_label, num_label]
        out = T.tensordot(input, self.W, axes=[[2], [0]])

        if self.b is not None:
            b_shuffled = self.b.dimshuffle('x', 'x', 0, 1)
            out = out + b_shuffled

        if mask is not None:
            mask_shuffled = mask.dimshuffle(0, 1, 'x', 'x')
            out = out * mask_shuffled
        return out 
Developer: XuezheMax | Project: LasagneNLP | Lines of code: 39 | Source: crf.py
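The docstring explains the mask; the tensordot itself turns per-token features into pairwise label potentials for the CRF, as the shape comment in the code says. A NumPy shape sketch with assumed sizes:

# Assumed sizes, for shape intuition only.
import numpy as np
batch, length, input_dim, num_label = 2, 7, 10, 5
x = np.random.randn(batch, length, input_dim)
W = np.random.randn(input_dim, num_label, num_label)
out = np.tensordot(x, W, axes=[[2], [0]])
print(out.shape)   # (2, 7, 5, 5) == (batch, length, num_label, num_label)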

Example 9: _outter

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def _outter(self, t1, t2):
        return tensor.tensordot(t1, t2, axes=([], [])) 
Developer: AgnezIO | Project: agnez | Lines of code: 4 | Source: gaborfitting.py
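With empty axes lists, nothing is contracted, so this tensordot is a plain outer product: the result's shape is the concatenation of the two input shapes. The same semantics in NumPy:

import numpy as np
t1 = np.random.randn(3, 4)
t2 = np.random.randn(5)
print(np.tensordot(t1, t2, axes=([], [])).shape)   # (3, 4, 5)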

Example 10: sym_mask_logdensity_estimator_intermediate

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def sym_mask_logdensity_estimator_intermediate(self, x, mask):
        non_linearity_name = self.parameters["nonlinearity"].get_name()
        assert(non_linearity_name == "sigmoid" or non_linearity_name == "RLU")
        x = x.T  # BxD
        mask = mask.T  # BxD
        output_mask = constantX(1) - mask  # BxD
        D = constantX(self.n_visible)
        d = mask.sum(1)  # d is the 1-based index of the dimension whose value to infer (not the size of the context)
        masked_input = x * mask  # BxD
        h = self.nonlinearity(T.dot(masked_input, self.W1) + T.dot(mask, self.Wflags) + self.b1)  # BxH
        for l in xrange(self.n_layers - 1):
            h = self.nonlinearity(T.dot(h, self.Ws[l]) + self.bs[l])  # BxH
        z_alpha = T.tensordot(h, self.V_alpha, [[1], [1]]) + T.shape_padleft(self.b_alpha)
        z_mu = T.tensordot(h, self.V_mu, [[1], [1]]) + T.shape_padleft(self.b_mu)
        z_sigma = T.tensordot(h, self.V_sigma, [[1], [1]]) + T.shape_padleft(self.b_sigma)
        temp = T.exp(z_alpha)  # + 1e-6
        # temp += T.shape_padright(temp.sum(2)/1e-3)
        Alpha = temp / T.shape_padright(temp.sum(2))  # BxDxC
        Mu = z_mu  # BxDxC
        Sigma = T.exp(z_sigma)  # + 1e-6 #BxDxC

        # Alpha = Alpha * T.shape_padright(output_mask) + T.shape_padright(mask)
        # Mu = Mu * T.shape_padright(output_mask)
        # Sigma = Sigma * T.shape_padright(output_mask) + T.shape_padright(mask)
        # Phi = -constantX(0.5) * T.sqr((Mu - T.shape_padright(x*output_mask)) / Sigma) - T.log(Sigma) - constantX(0.5 * np.log(2*np.pi)) #BxDxC

        Phi = -constantX(0.5) * T.sqr((Mu - T.shape_padright(x)) / Sigma) - T.log(Sigma) - constantX(0.5 * np.log(2 * np.pi))  # BxDxC
        logdensity = (log_sum_exp(Phi + T.log(Alpha), axis=2) * output_mask).sum(1) * D / (D - d)
        return (logdensity, z_alpha, z_mu, z_sigma, Alpha, Mu, Sigma, h) 
Developer: MarcCote | Project: NADE | Lines of code: 31 | Source: OrderlessMoGNADE.py
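The three z_* tensordots share one pattern: the hidden axis of h (BxH) is contracted with axis 1 of a parameter tensor (DxHxC), yielding the BxDxC mixture parameters annotated in the comments. A NumPy sketch with assumed sizes:

# Assumed sizes, for shape intuition only.
import numpy as np
B, H, D, C = 4, 16, 6, 3
h = np.random.randn(B, H)
V_alpha = np.random.randn(D, H, C)
z_alpha = np.tensordot(h, V_alpha, axes=[[1], [1]])
print(z_alpha.shape)   # (4, 6, 3) == (B, D, C)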

Example 11: sym_masked_neg_loglikelihood_gradient

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def sym_masked_neg_loglikelihood_gradient(self, x, mask):
        """ x is a matrix of column datapoints (DxB) D = n_visible, Bfloat = batch size """
        logdensity, z_alpha, z_mu, z_sigma, Alpha, Mu, Sigma, h = self.sym_mask_logdensity_estimator_intermediate(x, mask)

#        nnz = output_mask.sum(0)
#        sparsity_multiplier = T.shape_padright(T.shape_padleft((B+1e-6)/(nnz+1e-6)))

#        wPhi = T.maximum(Phi + T.log(Alpha), constantX(-100.0)) #BxDxC
#        lp_current = log_sum_exp(wPhi, axis = 2) * output_mask #BxD
#        lp_current_sum = (lp_current.sum(1) * D / (D-d)).sum() #1

        loglikelihood = logdensity.mean(dtype=floatX)
        loss = -loglikelihood

        dp_dz_alpha = T.grad(loss, z_alpha)  # BxDxC
        gb_alpha = dp_dz_alpha.sum(0)  # DxC
        gV_alpha = T.tensordot(h.T, dp_dz_alpha, [[1], [0]]).dimshuffle((1, 0, 2))  # DxHxC

        dp_dz_mu = T.grad(loss, z_mu)  # BxDxC
        dp_dz_mu = dp_dz_mu * Sigma  # Heuristic
        gb_mu = dp_dz_mu.sum(0)  # DxC
        gV_mu = T.tensordot(h.T, dp_dz_mu, [[1], [0]]).dimshuffle((1, 0, 2))  # DxHxC

        dp_dz_sigma = T.grad(loss, z_sigma)  # BxDxC
        gb_sigma = dp_dz_sigma.sum(0)  # DxC
        gV_sigma = T.tensordot(h.T, dp_dz_sigma, [[1], [0]]).dimshuffle((1, 0, 2))  # DxHxC

        if self.n_layers > 1:
            gWs, gbs, gW1, gWflags, gb1 = T.grad(loss, [self.Ws, self.bs, self.W1, self.Wflags, self.b1])
            gradients = {"V_alpha":gV_alpha, "b_alpha":gb_alpha, "V_mu":gV_mu, "b_mu":gb_mu, "V_sigma":gV_sigma, "b_sigma":gb_sigma, "Ws":gWs, "bs":gbs, "W1":gW1, "b1":gb1, "Wflags":gWflags}
        else:
            gW1, gWflags, gb1 = T.grad(loss, [self.W1, self.Wflags, self.b1])
            gradients = {"V_alpha":gV_alpha, "b_alpha":gb_alpha, "V_mu":gV_mu, "b_mu":gb_mu, "V_sigma":gV_sigma, "b_sigma":gb_sigma, "W1":gW1, "b1":gb1, "Wflags":gWflags}
        # Gradients
        return (loss, gradients) 
Developer: MarcCote | Project: NADE | Lines of code: 37 | Source: OrderlessMoGNADE.py

Example 12: rot_filters

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def rot_filters(self, theta):
        fsize = self.filter_size[0]
        ind = T.as_tensor_variable(np.indices((fsize, fsize)) - (fsize - 1.0) / 2.0)
        rotate = T.stack(T.cos(theta), -T.sin(theta), T.sin(theta), T.cos(theta)).reshape((2, 2))
        ind_rot = T.tensordot(rotate, ind, axes=((0, 0))) + (fsize - 1.0) / 2.0
        transy = T.clip(ind_rot[0], 0, fsize - 1 - .00001)
        transx = T.clip(ind_rot[1], 0, fsize - 1 - .00001)
        vert = T.iround(transy)
        horz = T.iround(transx)
        return self.W[:, :, vert, horz]
Developer: SBU-BMI | Project: u24_lymphocyte | Lines of code: 12 | Source: rotconv.py
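The tensordot here applies a 2x2 rotation matrix to the 2xKxK grid of filter coordinates; the rotated, clipped, and rounded coordinates then index into W, giving nearest-neighbour rotated filters. A NumPy sketch with an assumed filter size:

# Assumed filter size, for illustration only.
import numpy as np
fsize = 3
ind = np.indices((fsize, fsize)) - (fsize - 1.0) / 2.0   # (2, 3, 3), centred coordinates
theta = np.pi / 4
rotate = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
ind_rot = np.tensordot(rotate, ind, axes=(0, 0)) + (fsize - 1.0) / 2.0
print(ind_rot.shape)   # (2, 3, 3): rotated pixel coordinates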

Example 13: output

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def output(self, left_input, right_input):
        """
        Param:
        -----------

        left_input: theano.tensor.row
            embedding for left hand side input

        right_input: theano.tensor.row
            embedding for right hand side input

        Return:
        -----------
        The output embedding
        """
        assert left_input.ndim <= 2
        assert right_input.ndim <= 2


        # if left_input or right_input is a 1-D array,
        # make it a 2-D row
        if left_input.ndim == 1:
            left_input = left_input.dimshuffle('x', 0)

        if right_input.ndim == 1:
            right_input = right_input.dimshuffle('x', 0)
            
        concat_vec = T.concatenate(
            [left_input, right_input],
            axis = 1
        )
        
        result = T.tanh(T.dot(concat_vec, T.tensordot(self.V, concat_vec.T, [2, 0])) + T.dot(self.W, concat_vec.T))
        return result.flatten() 
Developer: xiaohan2012 | Project: twitter-sent-dnn | Lines of code: 36 | Source: recnn_train.py

Example 14: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def get_output_for(self, input, **kwargs):
        feature2d = input[0]
        feature1d = input[1]
        feature1d_h = feature1d.dimshuffle(0, 1, 2, 'x')
        feature1d_h = T.tensordot(feature1d_h, self.one, [[3], [0]])
        feature1d_v = feature1d_h.dimshuffle(0, 1, 3, 2)

        return T.concatenate([feature2d, feature1d_h, feature1d_v], axis = 1) 
Developer: largelymfs | Project: deepcontact | Lines of code: 10 | Source: layers.py
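Judging from the contracted axes, self.one must be a ones tensor of shape (1, L), with L the sequence length; the tensordot then tiles each 1-D feature row into an LxL map that can be concatenated with the 2-D features. A NumPy sketch under that assumption:

# self.one's shape is an assumption inferred from the axes; illustration only.
import numpy as np
batch, channels, L = 2, 3, 5
f1d = np.random.randn(batch, channels, L)
one = np.ones((1, L))                                   # assumed shape of self.one
h = np.tensordot(f1d[..., None], one, axes=[[3], [0]])
print(h.shape)                                          # (2, 3, 5, 5): each row repeats the 1-D features
v = h.transpose(0, 1, 3, 2)                             # the 'vertical' copy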

Example 15: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tensordot [as alias]
def get_output_for(self, input, init=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)
        
        activation = T.tensordot(input, self.W, [[1], [0]])
        abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
                    + 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))

        if init:
            mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
            abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
            self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
        
        f = T.sum(T.exp(-abs_dif),axis=2)

        if init:
            mf = T.mean(f,axis=0)
            f -= mf.dimshuffle('x',0)
            self.init_updates.append((self.b, -mf))
        else:
            f += self.b.dimshuffle('x',0)

        return T.concatenate([input, f], axis=1)  

# Convenience function to define an inception-style block 
Developer: ajbrock | Project: Neural-Photo-Editor | Lines of code: 29 | Source: layers.py


Note: The theano.tensor.tensordot method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.