

Python tensor.mean Function Code Examples

This article collects typical usage examples of the theano.tensor.mean function in Python. If you have been wondering what exactly the mean function does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


The following presents 15 code examples of the mean function, sorted by popularity by default.
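
For readers new to T.mean itself, here is a minimal standalone sketch (written for this article, not drawn from any of the projects below) showing what it computes and how the axis argument works:

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')                      # symbolic 2-D input
mean_all = T.mean(x)                    # scalar: mean over every element
mean_cols = T.mean(x, axis=0)           # vector: one mean per column

f = theano.function([x], [mean_all, mean_cols])
total, per_col = f(np.arange(6.0).reshape(2, 3))
print(total)    # 2.5
print(per_col)  # [ 1.5  2.5  3.5]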

Example 1: get_cost_updates

    def get_cost_updates(self, contraction_level, learning_rate, cost_measure="cross_entropy"):
        """ This function computes the cost and the updates for one trainng
        step of the cA """

        y = self.get_hidden_values(self.x)
        z = self.get_reconstructed_input(y)
        J = self.get_jacobian(y, self.W)

        if cost_measure=="cross_entropy":
            #self.L_rec = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
            self.L_rec = T.mean(- T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z),axis=1))
        elif cost_measure=="euclidean":
            self.L_rec = T.mean(T.sum((self.x-z)**2,axis=1)) 
            
        # Average the squared Frobenius norm of the Jacobian over the minibatch
        self.L_jacob = T.mean(T.sum(J ** 2) / self.n_batchsize)
        
        cost = self.L_rec + contraction_level * self.L_jacob

        # compute the gradients of the cost of the `cA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = []
        for param, gparam in zip(self.params, gparams):
            updates.append((param, param - learning_rate * gparam))

        return (cost, updates)
Developer: LazyXuan, Project: DECRES, Lines: 28, Source: cA.py
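
Usage note: get_cost_updates returns a symbolic (cost, updates) pair meant to be compiled into a Theano training function. The sketch below shows that wiring; the cA constructor signature, the random training data, and the loop bounds are assumptions modeled on the Deep Learning Tutorials, not part of the file above.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
index = T.lscalar('index')
batch_size = 20
# assumed: the training set lives in a shared variable, tutorial-style
train_set_x = theano.shared(
    np.random.rand(1000, 784).astype(theano.config.floatX))

ca = cA(numpy_rng=np.random.RandomState(123), input=x,   # assumed signature
        n_visible=784, n_hidden=500, n_batchsize=batch_size)
cost, updates = ca.get_cost_updates(contraction_level=0.1, learning_rate=0.01)

train_ca = theano.function(
    [index], cost, updates=updates,
    givens={x: train_set_x[index * batch_size:(index + 1) * batch_size]})

for epoch in range(15):
    epoch_cost = np.mean([train_ca(i) for i in range(1000 // batch_size)])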

Example 2: _test_layer_stats

 def _test_layer_stats(self, layer_output):
     """
     DESCRIPTION:
         This method is called every batch whereby the examples from test or valid set 
         is pass through, the final result will be the mean of all the results from all 
         the batches in an epoch from the test set or valid set.
     PARAM:
         layer_output: the output from the layer
     RETURN:
         A list of tuples of [('name_a', var_a), ('name_b', var_b)] whereby var is scalar 
     """
     
     w_len = T.sqrt((self.W ** 2).sum(axis=0))
     max_length = T.max(w_len)
     mean_length = T.mean(w_len)
     min_length = T.min(w_len)
     
     return [('max_col_length', max_length),
             ('mean_col_length', mean_length),
             ('min_col_length', min_length), 
             ('output_max', T.max(layer_output)),
             ('output_mean', T.mean(layer_output)), 
             ('output_min', T.min(layer_output)),
             ('max_W', T.max(self.W)),
             ('mean_W', T.mean(self.W)),
             ('min_W', T.min(self.W)),
             ('max_b', T.max(self.b)),
             ('mean_b', T.mean(self.b)),
             ('min_b', T.min(self.b))]
Developer: hycis, Project: smartNN, Lines: 29, Source: layer.py
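
The column-length statistics reduce to a short Theano recipe; this self-contained check (variable names chosen for this article) reproduces the max/mean/min pattern on a random weight matrix:

import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.random.randn(100, 10), name='W')
w_len = T.sqrt((W ** 2).sum(axis=0))   # L2 norm of each of the 10 columns
stats = theano.function([], [T.max(w_len), T.mean(w_len), T.min(w_len)])
print(stats())   # three scalars: longest, average, shortest column norm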

Example 3: get_lossfun

    def get_lossfun(self, l1, l2):
        """
        Generate a loss function
        
        The default one is mean negative log-likelihood
        
        :param l1: weight of L1 term, None for no L1 term
        :param l2: weight of L2 term, None for no L2 term
        """

        if self.ff_net.layers[-1].activation_name == 'softmax':
            # minimize the mean negative log-likelihood
            log_p = T.log(self.ff_net.get_learning_passthrough(self.x))
            q = -T.mean(log_p[T.arange(self.y.shape[0]), self.y])
        else:
            # minimize the mean squared error
            q = T.mean((self.ff_net.get_learning_passthrough(self.x) - self.y) ** 2)

        try:
            if l1 is not None:
                q = q + self.ff_net.l1 * l1
        except AttributeError:
            pass

        try:
            if l2 is not None:
                q = q + self.ff_net.l2 * l2
        except AttributeError:
            pass

        return q
Developer: piotrmaslanka, Project: nnetsys, Lines: 35, Source: teacher.py
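
The softmax branch relies on advanced indexing to pick out each example's log-probability of its correct class before averaging. The trick is easiest to see in plain numpy (a worked toy example, not from the project):

import numpy as np

log_p = np.log(np.array([[0.7, 0.2, 0.1],     # predicted class probabilities,
                         [0.1, 0.8, 0.1]]))   # one row per example
y = np.array([0, 1])                          # correct label of each example
picked = log_p[np.arange(2), y]               # [log 0.7, log 0.8]
print(-picked.mean())                         # mean NLL, about 0.2899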

Example 4: get_monitoring_channels

    def get_monitoring_channels(self, V):

        try:
            self.compile_mode()

            rval = {}

            #from_ip = self.inference_procedure.get_monitoring_channels(V, self)

            #rval.update(from_ip)

            if self.monitor_params:
                for param in self.get_params():
                    rval[param.name + '_min'] = full_min(param)
                    rval[param.name + '_mean'] = T.mean(param)
                    rval[param.name + '_max'] = full_max(param)

                    if 'W' in param.name:
                        norms = theano_norms(param)

                        rval[param.name + '_norms_min'] = T.min(norms)
                        rval[param.name + '_norms_mean'] = T.mean(norms)
                        rval[param.name + '_norms_max'] = T.max(norms)

            new_rval = {}
            for key in rval:
                new_rval[self.monitoring_channel_prefix+key] = rval[key]

            rval = new_rval

            return rval
        finally:
            self.deploy_mode()
Developer: JasonBens, Project: pylearn, Lines: 33, Source: dbm.py

Example 5: batchnorm

def batchnorm(X, rescale=None, reshift=None, u=None, s=None, e=1e-8):
    """
    batchnorm with support for not using scale and shift parameters
    as well as inference values (u and s) and partial batchnorm (via a)
    will detect and use convolutional or fully connected version
    """
    g = rescale
    b = reshift
    if X.ndim == 4:
        if u is not None and s is not None:
            # use normalization params given a priori
            b_u = u.dimshuffle('x', 0, 'x', 'x')
            b_s = s.dimshuffle('x', 0, 'x', 'x')
        else:
            # compute normalization params from input
            b_u = T.mean(X, axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            b_s = T.mean(T.sqr(X - b_u), axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
        # batch normalize
        X = (X - b_u) / T.sqrt(b_s + e)
        if g is not None and b is not None:
            # apply rescale and reshift
            X = X*T.exp(0.2*g.dimshuffle('x', 0, 'x', 'x')) + b.dimshuffle('x', 0, 'x', 'x')
    elif X.ndim == 2:
        if u is None and s is None:
            # compute normalization params from input
            u = T.mean(X, axis=0)
            s = T.mean(T.sqr(X - u), axis=0)
        # batch normalize
        X = (X - u) / T.sqrt(s + e)
        if g is not None and b is not None:
            # apply rescale and reshift
            X = X*T.exp(0.2*g) + b
    else:
        raise NotImplementedError
    return X
Developer: Philip-Bachman, Project: Sequential-Generation, Lines: 35, Source: NetLayers.py
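
A quick numpy sanity check of the 2-D branch (an illustration written for this article, not part of the original file): after normalization, every feature column should have roughly zero mean and unit variance.

import numpy as np

X = np.random.randn(64, 8) * 3.0 + 5.0   # features with mean 5, std 3
u = X.mean(axis=0)
s = ((X - u) ** 2).mean(axis=0)          # biased per-column variance
Xn = (X - u) / np.sqrt(s + 1e-8)
print(Xn.mean(axis=0))   # ~0 in every column
print(Xn.var(axis=0))    # ~1 in every column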

Example 6: add_regularization

    def add_regularization(self, layer):
        regularization = 0

        if self._recon_strategy == 'forward':
            input_x = layer.x
            recon_x = layer.reconstruct_x()

            input_y = layer.y
            recon_y = layer.reconstruct_y()

            regularization += Tensor.mean((abs(input_x - recon_x)).sum(axis=1, dtype=Tensor.config.floatX))
            regularization += Tensor.mean((abs(input_y - recon_y)).sum(axis=1, dtype=Tensor.config.floatX))
        elif self._recon_strategy == 'backward':
            input_x = layer.x
            recon_x = Tensor.dot(layer.output_forward_x,
                                 layer.Wx.T)

            input_y = layer.y
            recon_y = Tensor.dot(layer.output_forward_y,
                                 layer.Wy.T)

            regularization += Tensor.mean((abs(input_x - recon_x)).sum(axis=1, dtype=Tensor.config.floatX))
            regularization += Tensor.mean((abs(input_y - recon_y)).sum(axis=1, dtype=Tensor.config.floatX))

        return regularization
Developer: aviveise, Project: double_encoder, Lines: 25, Source: reconstruction_regularization.py

Example 7: cost_updates

	def cost_updates(self,lr,data,k=1):
		ph_activation_scores = T.dot(data,self.W) + self.h_bias
		ph_activation_probs, ph_samples, ph_updates  = self.h.sample(ph_activation_scores)

		chain_start = ph_samples

		[nv_activation_scores,nv_activation_probs,nv_samples,\
		 nh_activation_scores,nh_activation_probs,nh_samples], updates = \
		theano.scan(
				 self.gibbs_hvh,
				 outputs_info = [None,None,None,None,None,chain_start],
				 n_steps      = k
			)
		chain_end = nv_samples[-1]
		cost = T.mean(self.free_energy(data))\
				- T.mean(self.free_energy(chain_end))\
				 + self.regularisation()

		gparams = T.grad(cost,self.tunables,consider_constant=[chain_end])

		alpha = T.cast(self.momentum,dtype=theano.config.floatX)
		updates = [
				( param, param - ( alpha * prev_chg + gparam * lr ) )
		   		for gparam,param,prev_chg in zip(gparams,self.tunables,self.deltas)
		   ] + [
				( prev_chg, alpha * prev_chg + gparam * lr )
				for prev_chg,gparam in zip(self.deltas,gparams)
		   ]# + ph_updates + nv_updates + nh_updates

		monitoring_cost = self.reconstruction_cost(updates,nv_activation_scores[-1],data)

		return monitoring_cost,updates
Developer: parasitew, Project: python-dbn, Lines: 32, Source: rbm.py
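
The two update lists implement gradient descent with momentum: each parameter moves by the previous change scaled by alpha plus a fresh gradient step, and the same quantity becomes the new stored change. Stripped of Theano, the rule is (a plain-numpy illustration with made-up numbers):

import numpy as np

alpha, lr = 0.9, 0.01
param = np.zeros(3)
prev_chg = np.zeros(3)
for _ in range(5):
    gparam = np.array([1.0, -2.0, 0.5])    # stand-in for T.grad's output
    step = alpha * prev_chg + gparam * lr  # same expression as both updates
    param = param - step
    prev_chg = step
print(param)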

Example 8: __init__

    def __init__(self, fin, f1, nin1, f2, nin2, f3, nin3, expand, h1, outputs,
                 lr, C, pDropConv=0.2, pDropHidden=0.5):
        # Hyperparameters
        self.lr = lr
        self.C = C
        self.pDropConv = pDropConv
        self.pDropHidden = pDropHidden
        # Collect all parameters to be optimized (connection weights and biases) in lists
        self.params = []
        self.paramsNIN = []
        self.paramsConv = []
        # Convolutional layers: w=(feature maps in this layer, feature maps in previous layer, kernel rows, kernel cols), b=(feature maps in this layer)
        self.paramsNIN.append(layerNINParams((f1, fin, nin1, 3, 3), expand))
        self.paramsNIN.append(layerNINParams((f2, f1 * expand, nin2, 3, 3), expand))
        self.paramsNIN.append(layerNINParams((f3, f2 * expand, nin3, 3, 3), expand))
        # Global average pooling layers
        self.paramsConv.append(layerConvParams((h1, f3 * expand, 1, 1)))
        self.paramsConv.append(layerConvParams((outputs, h1, 1, 1)))
        self.params = self.paramsNIN + self.paramsConv

        # Define Theano symbolic variables and build Theano expressions
        self.X = T.tensor4('X')
        self.Y = T.matrix('Y')
        # Cost function on the training set
        YDropProb = model(self.X, self.params, pDropConv, pDropHidden)
        self.trNeqs = basicUtils.neqs(YDropProb, self.Y)
        trCrossEntropy = categorical_crossentropy(YDropProb, self.Y)
        self.trCost = T.mean(trCrossEntropy) + C * basicUtils.regularizer(flatten(self.params))

        # Cost function on the validation/test set
        YFullProb = model(self.X, self.params, 0., 0.)
        self.vateNeqs = basicUtils.neqs(YFullProb, self.Y)
        self.YPred = T.argmax(YFullProb, axis=1)
        vateCrossEntropy = categorical_crossentropy(YFullProb, self.Y)
        self.vateCost = T.mean(vateCrossEntropy) + C * basicUtils.regularizer(flatten(self.params))
Developer: ifenghao, Project: myDeepLearning, Lines: 35, Source: rowfccolfcv1.py

Example 9: forward

    def forward(self,input_org,train=True,update_batch_stat=True,finetune=False):
        print "Layer/BatchNormalization"
        ldim,cdim,rdim = self._internal_shape(input_org)
        input = input_org.reshape((ldim,cdim,rdim))
        if (train):
            mean = T.mean(input, axis=(0, 2), keepdims=True )
            var = T.mean((input-mean)**2, axis=(0, 2), keepdims=True)

            if(update_batch_stat):
                finetune_N = theano.clone(self.finetune_N, share_inputs=False)
                if(finetune):
                    finetune_N.default_update = finetune_N+1
                    ratio = T.cast(1-1.0/(finetune_N+1),theano.config.floatX)
                else:
                    finetune_N.default_update = 0
                    ratio = self.moving_avg_ratio
                m = ldim*rdim
                scale = T.cast(m/(m-1.0),theano.config.floatX)
                est_mean = theano.clone(self.est_mean, share_inputs=False)
                est_var = theano.clone(self.est_var, share_inputs=False)
                est_mean.default_update = T.cast(ratio*self.est_mean + (1-ratio)*mean,theano.config.floatX)
                est_var.default_update = T.cast(ratio*self.est_var + (1-ratio)*scale*var,theano.config.floatX)
                mean += 0 * est_mean
                var += 0 * est_var
            output = self._pbc(self.gamma) * (input - self._pbc(mean)) \
                     / T.sqrt(1e-6+self._pbc(var)) + self._pbc(self.beta)

        else:
            output = self._pbc(self.gamma) * (input - self._pbc(self.est_mean)) \
                     / T.sqrt(1e-6+self._pbc(self.est_var)) + self._pbc(self.beta)

        return output.reshape(input_org.shape)
Developer: ilovecv, Project: vat, Lines: 32, Source: batch_normalization.py
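
The est_mean/est_var default updates implement an exponential moving average of the minibatch statistics. Stripped of the Theano machinery, the update rule is just the following (a plain-numpy illustration with made-up numbers):

import numpy as np

ratio = 0.9          # plays the role of moving_avg_ratio above
est_mean = 0.0
for _ in range(200):
    batch = np.random.randn(32) + 2.0    # minibatch whose true mean is 2
    est_mean = ratio * est_mean + (1 - ratio) * batch.mean()
print(est_mean)      # converges toward 2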

Example 10: test_minres_with_jacobi

def test_minres_with_jacobi():
    vv = theano.shared(v, name='v')
    gg = theano.shared(g, name='g')
    hh = theano.shared(h, name='h')
    dw = T.dot(v.T,g) / M
    dv = T.dot(g.T,h) / M
    da = T.mean(v, axis=0)
    db = T.mean(g, axis=0)
    dc = T.mean(h, axis=0)
   
    Ldiag_terms = natural.generic_compute_L_diag([vv,gg,hh])
    Ms = [Ldiag_term + 0.1 for Ldiag_term in Ldiag_terms]

    newgrads = minres.minres(
            lambda xw, xv, xa, xb, xc: natural.compute_Lx(vv,gg,hh,xw,xv,xa,xb,xc),
            [dw, dv, da, db, dc],
            rtol=1e-5,
            damp = 0.,
            maxiter = 10000,
            Ms = Ms,
            profile=0)[0]

    f = theano.function([], newgrads)
    [new_dw, new_dv, new_da, new_db, new_dc] = f()
    numpy.testing.assert_almost_equal(Linv_x_w, new_dw, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_v, new_dv, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_a, new_da, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_b, new_db, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_c, new_dc, decimal=1)
Developer: gdesjardins, Project: DBM, Lines: 29, Source: test_natural.py

Example 11: test_linearcg

def test_linearcg():
    vv = theano.shared(v, name='v')
    gg = theano.shared(g, name='g')
    hh = theano.shared(h, name='h')
    dw = T.dot(v.T,g) / M
    dv = T.dot(g.T,h) / M
    da = T.mean(v, axis=0)
    db = T.mean(g, axis=0)
    dc = T.mean(h, axis=0)

    newgrads = lincg.linear_cg(
            lambda xw, xv, xa, xb, xc: natural.compute_Lx(vv,gg,hh,xw,xv,xa,xb,xc),
            [dw, dv, da, db, dc],
            rtol=1e-5,
            maxiter = 30,
            damp = 0.,
            floatX = floatX,
            profile=0)

    f = theano.function([], newgrads)
    [new_dw, new_dv, new_da, new_db, new_dc] = f()
    numpy.testing.assert_almost_equal(Linv_x_w, new_dw, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_v, new_dv, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_a, new_da, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_b, new_db, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_c, new_dc, decimal=1)
Developer: gdesjardins, Project: DBM, Lines: 26, Source: test_natural.py

Example 12: test_minres_with_xinit

def test_minres_with_xinit():
    rng = numpy.random.RandomState(123412)

    vv = theano.shared(v, name='v')
    gg = theano.shared(g, name='g')
    hh = theano.shared(h, name='h')
    dw = T.dot(v.T,g) / M
    dv = T.dot(g.T,h) / M
    da = T.mean(v, axis=0)
    db = T.mean(g, axis=0)
    dc = T.mean(h, axis=0)
  
    xinit = [ rng.rand(N0,N1),
              rng.rand(N1,N2),
              rng.rand(N0),
              rng.rand(N1),
              rng.rand(N2)]
    xinit = [xi.astype(floatX) for xi in xinit]

    newgrads = minres.minres(
            lambda xw, xv, xa, xb, xc: natural.compute_Lx(vv,gg,hh,xw,xv,xa,xb,xc),
            [dw, dv, da, db, dc],
            rtol=1e-5,
            damp = 0.,
            maxiter = 10000,
            xinit = xinit,
            profile=0)[0]

    f = theano.function([], newgrads)
    [new_dw, new_dv, new_da, new_db, new_dc] = f()
    numpy.testing.assert_almost_equal(Linv_x_w, new_dw, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_v, new_dv, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_a, new_da, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_b, new_db, decimal=1)
    numpy.testing.assert_almost_equal(Linv_x_c, new_dc, decimal=1)
Developer: gdesjardins, Project: DBM, Lines: 35, Source: test_natural.py

Example 13: plotUpdate

    def plotUpdate(self,updates):
        '''
        >>>get the update info (max/min/mean) of each layer's parameters
        >>>type updates: dict
        >>>param updates: update dictionary
        '''
        maxdict=T.zeros(shape=(self.deep*2+1,))
        mindict=T.zeros(shape=(self.deep*2+1,))
        meandict=T.zeros(shape=(self.deep*2+1,))
        
        for i in xrange(self.deep):
            updw=updates[self.layers[i].w]-self.layers[i].w
            maxdict=T.set_subtensor(maxdict[2*i],T.max(updw))
            mindict=T.set_subtensor(mindict[2*i],T.min(updw))
            meandict=T.set_subtensor(meandict[2*i],T.mean(updw))
            updb=updates[self.layers[i].b]-self.layers[i].b
            maxdict=T.set_subtensor(maxdict[2*i+1],T.max(updb))
            mindict=T.set_subtensor(mindict[2*i+1],T.min(updb))
            meandict=T.set_subtensor(meandict[2*i+1],T.mean(updb))

        updw=updates[self.classifier.w]-self.classifier.w
        maxdict=T.set_subtensor(maxdict[self.deep*2],T.max(updw))
        mindict=T.set_subtensor(mindict[self.deep*2],T.min(updw))
        meandict=T.set_subtensor(meandict[self.deep*2],T.mean(updw))
        return [maxdict,mindict,meandict]
Developer: wolfhu, Project: RCNNSentence, Lines: 25, Source: dcnnModel.py
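
T.set_subtensor is the symbolic counterpart of in-place item assignment (t[i] = v); the method above uses it to fill one statistics slot per layer. A minimal illustration:

import theano.tensor as T

stats = T.zeros(shape=(3,))
stats = T.set_subtensor(stats[0], 7.0)   # symbolic version of stats[0] = 7.0
print(stats.eval())                      # [ 7.  0.  0.]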

Example 14: negative_log_likelihood

    def negative_log_likelihood(self, y):
        """ Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \ell(\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
                \log P(Y=y^{(i)} \mid x^{(i)}, W, b)

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """

        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        if self.is_binary:
            # binary targets: average the log-probabilities over all elements
            return -T.mean(T.log(self.p_y_given_x))
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
Developer: caglar, Project: prmlp, Lines: 29, Source: prmlp.py

Example 15: stddev_bias

def stddev_bias(x, eps, axis=0):
    mu = T.mean(x + eps, axis=axis)
    mu.name = "std_mean"
    var = T.mean((x - mu)**2 + eps)
    var.name = "std_variance"
    stddev = T.sqrt(var)
    return stddev
Developer: LeonBai, Project: lisa_emotiw-1, Lines: 7, Source: utils.py
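
To see what stddev_bias computes, it can be compiled directly (a small check written for this article, assuming the function above is in scope). Note that var averages the squared deviations from the per-column means over all elements, so with eps=0 the result is close to numpy's plain standard deviation whenever the column means nearly coincide:

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')
f = theano.function([x], stddev_bias(x, eps=0.0))
data = np.random.randn(50, 4)
print(f(data))       # average within-column deviation
print(data.std())    # close, since the column means are all near zero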


Note: the theano.tensor.mean examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.