

Python tensor.outer Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.outer. If you are wondering what exactly outer does, how to call it, or what real-world uses of it look like, the curated code examples below should help.


The following shows 15 code examples of the outer function, sorted by popularity by default.
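Before the examples, a minimal standalone sketch of what theano.tensor.outer computes (assuming a working Theano installation): the outer product of two vectors, i.e. out[i, j] = x[i] * y[j], with result shape (len(x), len(y)).

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
y = T.vector('y')
f = theano.function([x, y], T.outer(x, y))

# out[i, j] == x[i] * y[j]; result shape is (len(x), len(y))
print(f(np.array([1., 2., 3.]), np.array([10., 20.])))
# [[ 10.  20.]
#  [ 20.  40.]
#  [ 30.  60.]]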

Example 1: step

  def step(self, i_t, x_t, z_t, att_p, y_p, c_p, *other_args):
    # See Unit.scan() for seqs.
    # args: seqs (x_t = unit.xc, z_t, i_t), outputs (# unit.n_act, y_p, c_p, ...), non_seqs (none)
    other_outputs = []
    #att_p = theano.printing.Print('att in lstms', attrs=['__str__'])(att_p)
    if self.recurrent_transform:
      state_vars = other_args[:len(self.recurrent_transform.state_vars)]
      self.recurrent_transform.set_sorted_state_vars(state_vars)
      z_r, r_updates = self.recurrent_transform.step(y_p)
      z_t += z_r
      for v in self.recurrent_transform.get_sorted_state_vars():
        other_outputs += [r_updates[v]]
    maxatt = att_p.repeat(z_t.shape[1]).reshape((z_t.shape[0],z_t.shape[1]))#.dimshuffle(1,0)
    #maxatt = theano.printing.Print('maxatt',attrs=['__str__','shape'])(maxatt)
    z_t = T.switch(maxatt>0,z_t,z_t + T.dot(y_p, self.W_re))
    #z_t += T.dot(y_p, self.W_re)
    #z_t = theano.printing.Print('z_t lstms',attrs=['shape'])(z_t)

    partition = z_t.shape[1] // 4
    ingate = T.nnet.sigmoid(z_t[:,:partition])
    forgetgate = ((T.nnet.sigmoid(z_t[:,partition:2*partition])).T * (1.-att_p)).T
    outgate = T.nnet.sigmoid(z_t[:,2*partition:3*partition])
    input = T.tanh(z_t[:,3*partition:4*partition])
    #c_t = ((forgetgate * c_p + ingate * input).T * (1.-T.max(att_p,axis=-1))).T
    c_t = forgetgate * c_p + ingate * input
    y_t = outgate * T.tanh(c_t)
    i_output = T.outer(i_t, self.o_output)
    i_h = T.outer(i_t, self.o_h)
    # return: next outputs (# unit.n_act, y_t, c_t, ...)
    return (y_t * i_output, c_t * i_h + c_p * (1 - i_h)) + tuple(other_outputs)
Author: rwth-i6 | Project: returnn | Lines: 30 | Source: NetworkRecurrentLayer.py
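A note on the T.outer(i_t, ...) idiom above (shapes here are illustrative assumptions, not taken from the project): i_t acts as a per-sequence 0/1 activity flag and o_output as a vector of ones over the output units, so the outer product tiles the flag into a (batch, n_out) mask that zeroes out the outputs of finished sequences. Example 8 below uses the same trick. A NumPy stand-in:

import numpy as np

i_t = np.array([1., 1., 0.])          # per-sequence "still active" flags, shape (batch,)
o_output = np.ones(4)                 # ones over the output units, shape (n_out,)
i_output = np.outer(i_t, o_output)    # (batch, n_out): row k is all ones iff i_t[k] == 1

y_t = np.random.randn(3, 4)
print(y_t * i_output)                 # rows belonging to finished sequences become zero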

Example 2: full

 def full(self, X, Xs=None):
     X, Xs = self._slice(X, Xs)
     scf_x = self.scaling_func(X, self.args)
     if Xs is None:
         return tt.outer(scf_x, scf_x) * self.cov_func(X)
     else:
         scf_xs = self.scaling_func(Xs, self.args)
         return tt.outer(scf_x, scf_xs) * self.cov_func(X, Xs)
Author: aloctavodia | Project: pymc3 | Lines: 8 | Source: cov.py
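The outer products above implement pointwise kernel scaling: K'[i, j] = s(x_i) * K[i, j] * s(x_j), where s is the scaling function. A NumPy stand-in with a toy kernel and toy scaling (not pymc3's own):

import numpy as np

X = np.linspace(0., 1., 5)[:, None]
K = np.exp(-0.5 * (X - X.T) ** 2)   # toy squared-exponential kernel
s = 1.0 + X.ravel()                 # toy scaling function s(x)
K_scaled = np.outer(s, s) * K       # equivalent to diag(s) @ K @ diag(s)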

Example 3: contrastive_divergence_1

 def contrastive_divergence_1(self, v1):
     '''Determine the weight updates according to CD-1'''
     h1 = self.sample_h_given_v(v1)
     v2 = self.sample_v_given_h(h1)
     h2p = self.propup(v2)
     return (T.outer(v1, h1) - T.outer(v2, h2p),
             v1 - v2,
             h1 - h2p)
Author: malie | Project: theano-rbm-on-word-tuples | Lines: 8 | Source: rbm1.py
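For a single training vector, the CD-1 statistics reduce to outer products, so the weight update is dW proportional to outer(v1, h1) - outer(v2, h2p), with shape (n_visible, n_hidden). A shape-level NumPy sketch (the snippet's sampling helpers are not reproduced):

import numpy as np

n_visible, n_hidden = 6, 3
v1 = np.random.binomial(1, 0.5, n_visible).astype(float)   # data sample
h1 = np.random.binomial(1, 0.5, n_hidden).astype(float)    # sampled hidden units
v2 = np.random.binomial(1, 0.5, n_visible).astype(float)   # reconstruction
h2p = np.random.rand(n_hidden)                             # hidden probabilities

dW = np.outer(v1, h1) - np.outer(v2, h2p)
assert dW.shape == (n_visible, n_hidden)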

Example 4: image_step_val

def image_step_val(Imat, htm1mat, ctm1mat, 
                   Wcnn, Wxi, Whi, bi, Wxf, Whf, bf, 
                   Wxc, Whc, bc, Wxo, Who, bo, Why, by, forbatch):
    xtmat = theano.dot(Imat, Wcnn)
    itmat = sigma(theano.dot(xtmat,Wxi) + theano.dot(htm1mat,Whi) + T.outer(forbatch,bi) )
    ftmat = sigma(theano.dot(xtmat,Wxf) + theano.dot(htm1mat,Whf) + T.outer(forbatch,bf) )
    ctmat = ftmat * ctm1mat + itmat*act(theano.dot(xtmat,Wxc)+theano.dot(htm1mat,Whc)+T.outer(forbatch,bc) )
    otmat = sigma(theano.dot(xtmat,Wxo) + theano.dot(htm1mat,Who) + T.outer(forbatch,bo) )
    htmat = otmat * act(ctmat)
#    yt = T.concatenate([addzero,tempyt],axis=0)
    return htmat, ctmat    
Author: lizuyao2010 | Project: lstm_theano | Lines: 11 | Source: ggmodeladam.py
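T.outer(forbatch, b) here tiles a bias vector across the batch: assuming forbatch is a ones vector of length batch_size (an assumption about the caller, consistent with the name), outer(ones(batch_size), b) yields a (batch_size, n_hidden) matrix with b in every row. NumPy stand-in:

import numpy as np

batch_size, n_hidden = 3, 4
forbatch = np.ones(batch_size)
b = np.arange(n_hidden, dtype=float)
print(np.outer(forbatch, b))   # every row equals b; plain broadcasting (z + b) does the same today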

Example 5: psb

def psb(inverse_hessian, weight_delta, gradient_delta, **options):
    gradient_delta_t = gradient_delta.T
    param = weight_delta - inverse_hessian.dot(gradient_delta)

    divider = (1. / T.dot(gradient_delta, gradient_delta))
    param1 = T.outer(param, gradient_delta) + T.outer(gradient_delta, param)
    param2 = (
        T.dot(gradient_delta, param) *
        T.outer(gradient_delta, gradient_delta_t)
    )

    return inverse_hessian + param1 * divider - param2 * divider ** 2
Author: EdwardBetts | Project: neupy | Lines: 12 | Source: quasi_newton.py
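A quick numeric check of the update above (a NumPy stand-in, not part of neupy): the Powell-Symmetric-Broyden update must satisfy the secant condition H_new @ dg == dw.

import numpy as np

n = 4
H = np.eye(n)                 # inverse_hessian
dw = np.random.randn(n)       # weight_delta
dg = np.random.randn(n)       # gradient_delta

p = dw - H @ dg
denom = dg @ dg
H_new = (H
         + (np.outer(p, dg) + np.outer(dg, p)) / denom
         - (dg @ p) * np.outer(dg, dg) / denom ** 2)
assert np.allclose(H_new @ dg, dw)   # secant condition holds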

Example 6: train

def train():
    train_set, valid_set, test_set = loadData()
    x,y = train_set
    m,n_input = x.shape
    width = 28
    height = 28
    n_hidden = 49
    
    learning_rate = .1
    
    #set up shared variables
    W = theano.shared(numpy.random.uniform(-4 * numpy.sqrt(6. / (n_hidden + n_input)),4 * numpy.sqrt(6. / (n_hidden + n_input)),(n_hidden,width*height)),name="W")
    b_v = theano.shared(numpy.zeros((width*height,)),name="b_v")
    b_h = theano.shared(numpy.zeros((n_hidden,)),name="b_h")
    
    theano_rng = T.shared_randomstreams.RandomStreams(numpy.random.randint(2 ** 30))
    
    v_input = T.fvector("v_input")
    
    #1. sample hidden units
    h_prob = T.nnet.sigmoid(T.dot(v_input,W.T)+b_h)
    h_sample = theano_rng.binomial(size=(n_hidden,), n=1, p=h_prob)
    #2. calculate positive gradient
    g_p = T.outer(v_input,h_sample)
    #3. make reconstruction
    v_prob_reconstruction = T.nnet.sigmoid(T.dot(h_sample,W)+b_v)
    v_reconstruction = theano_rng.binomial(size=(n_input,), n=1, p=v_prob_reconstruction)
    h_prob_reconstruction = T.nnet.sigmoid(T.dot(v_reconstruction,W.T)+b_h)
    h_reconstruction = theano_rng.binomial(size=(n_hidden,), n=1, p=h_prob_reconstruction)
    #4. calculate negative gradient
    g_n = T.outer(v_reconstruction,h_reconstruction)
    #FUNCTIONS FOR TESTING
    #f_h_prob = theano.function(inputs=[v_input,],outputs=[h_prob,])
    #f_h_sample = theano.function(inputs=[v_input,],outputs=[h_sample,])
    #f_g_p = theano.function(inputs=[v_input,],outputs=[g_p,])
    #f_v_prob_reconstruction = theano.function(inputs=[v_input,],outputs=[v_prob_reconstruction,])
    #f_v_reconstruction = theano.function(inputs=[v_input,],outputs=[v_reconstruction,])
    #f_h_prob_reconstruction = theano.function(inputs=[v_input,],outputs=[h_prob_reconstruction,])
    #f_h_reconstruction = theano.function(inputs=[v_input,],outputs=[h_reconstruction,])
    #f_g_n = theano.function(inputs=[v_input,],outputs=[g_n,])
    
    learn = theano.function(inputs=[v_input,],updates=[(W,W+learning_rate*(g_p-g_n).T)])
    
    for i in range(300001):
        if i > 0:
            if i%10000 == 0:
                print "Epcoh: ",i
                display_weights(W,width,height,i)
        learn(x[i%m,:])
    
    with open('weights.pkl', 'wb') as output:
        pickle.dump(W.get_value(), output, pickle.HIGHEST_PROTOCOL)
Author: gajduk | Project: deep-learning | Lines: 52 | Source: restricted_boltzman_machine.py
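One shape detail worth noting in the update rule above: W is stored as (n_hidden, n_visible), while the gradients g_p and g_n built with T.outer(v, h) are (n_visible, n_hidden), hence the .T in the update. NumPy check:

import numpy as np

n_visible, n_hidden = 784, 49
W = np.zeros((n_hidden, n_visible))
g = np.outer(np.random.rand(n_visible), np.random.rand(n_hidden))   # (n_visible, n_hidden)
assert (W + 0.1 * g.T).shape == W.shape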

Example 7: times_reflection

        def times_reflection(input, n_hidden, reflection):
            input_re = input[:, :n_hidden]
            input_im = input[:, n_hidden:]
            reflect_re = reflection[n_hidden:]
            reflect_im = reflection[:n_hidden]
            
            vstarv = (reflect_re**2 + reflect_im**2).sum()
            input_re_reflect = input_re - 2 / vstarv * (T.outer(T.dot(input_re, reflect_re), reflect_re) +
                                                        T.outer(T.dot(input_im, reflect_im), reflect_im))
            input_im_reflect = input_im - 2 / vstarv * (-T.outer(T.dot(input_re, reflect_im), reflect_im) +
                                                        T.outer(T.dot(input_im, reflect_re), reflect_re))

            return T.concatenate([input_re_reflect, input_im_reflect], axis=1)      
Author: amarshah | Project: theano_fun | Lines: 13 | Source: complex_RNN_scale.py
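For reference, the textbook complex Householder reflection that this kind of split real/imaginary code implements: H v = v - 2 u (u* v) / (u* u), which is unitary and therefore norm-preserving. A standalone NumPy sketch in native complex arithmetic (not a line-for-line translation of the snippet above):

import numpy as np

n = 5
u = np.random.randn(n) + 1j * np.random.randn(n)   # reflection vector
v = np.random.randn(n) + 1j * np.random.randn(n)   # one input row

v_reflected = v - 2 * u * (np.conj(u) @ v) / (np.conj(u) @ u)
assert np.isclose(np.linalg.norm(v_reflected), np.linalg.norm(v))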

Example 8: _step

 def _step(x_t, i_t, c_tm1, y_tm1):
   #z_t = T.dot(x_t, W) + T.dot(y_tm1, V_h) + b
   z_t = x_t + T.dot(y_tm1, V_h)
   partition = z_t.shape[1] // 4
   ingate = T.nnet.sigmoid(z_t[:,:partition])
   forgetgate = T.nnet.sigmoid(z_t[:,partition:2*partition])
   outgate = T.nnet.sigmoid(z_t[:,2*partition:3*partition])
   input = T.tanh(z_t[:,3*partition:4*partition])
   c_t = forgetgate * c_tm1 + ingate * input
   y_t = outgate * T.tanh(c_t)
   i_output = T.outer(i_t, o_output)
   i_h = T.outer(i_t, o_h)
   return c_t * i_h + c_tm1 * (1 - i_h), y_t * i_output
Author: rwth-i6 | Project: returnn | Lines: 13 | Source: test_FastLSTMLayer.py

Example 9: __init__

 def __init__(self, C, D):
     self.W = theano.shared(np.ones((C,D), dtype='float32'))
     t_M = T.matrix('M', dtype='float32')
     t_vM = T.vector('M', dtype='float32')
     t_Y = T.vector('Y', dtype='float32')
     t_I = T.vector('I', dtype='float32')
     t_s = T.vector('s', dtype='float32')
     t_eps = T.scalar('epsilon', dtype='float32')
     self.input_integration = theano.function(
         [t_Y],
         T.dot(T.log(self.W),t_Y),
         allow_input_downcast=True
         )
     self.M_summation = theano.function(
         [t_M],
         T.sum(t_M, axis=0),
         allow_input_downcast=True
         )
     self.recurrent_softmax = theano.function(
         [t_I,t_vM],
         t_vM*T.exp(t_I)/T.sum(t_vM*T.exp(t_I)),
         allow_input_downcast=True
         )
     self.weight_update = theano.function(
         [t_Y,t_s,t_eps],
         self.W,
         updates={
             self.W:
             self.W + t_eps*(T.outer(t_s,t_Y) - t_s[:,np.newaxis]*self.W)
             },
         allow_input_downcast=True
         )
     self.epsilon = None
     self._Y = None
     self._s = None
Author: dennisforster | Project: NeSi | Lines: 35 | Source: poisson_theano.py
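The weight update used here (and again in Examples 10 and 11), W += eps * (outer(s, Y) - s[:, None] * W), rewrites row-wise as W[i] += eps * s[i] * (Y - W[i]): each row of W moves toward the current input Y at a rate proportional to its soft assignment s[i]. NumPy check of that equivalence:

import numpy as np

C, D = 3, 5
W = np.ones((C, D))
Y = np.random.rand(D)
s = np.array([0.7, 0.2, 0.1])   # soft assignments, summing to 1
eps = 0.1

W_new = W + eps * (np.outer(s, Y) - s[:, None] * W)
assert np.allclose(W_new, W + eps * s[:, None] * (Y - W))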

Example 10: __init__

 def __init__(self, C, D, use_unlabeled):
     self.W = theano.shared(np.ones((C,D), dtype='float32'))
     t_eps = T.scalar('epsilon', dtype='float32')
     t_Y = T.vector('Y', dtype='float32')
     t_s = T.vector('s', dtype='float32')
     self.activation_unlabeled = theano.function(
         [t_Y],
         T.sum(t_Y*self.W/T.sum(self.W, axis=0), axis=1),
         allow_input_downcast=True
         )
     self.activation_normalization = theano.function(
         [t_s],
         t_s/T.sum(t_s),
         allow_input_downcast=True
         )
     self.weight_update = theano.function(
         [t_Y,t_s,t_eps],
         self.W,
         updates={
             self.W:
             self.W + t_eps*(T.outer(t_s,t_Y) - t_s[:,np.newaxis]*self.W)
             },
         allow_input_downcast=True
         )
     self.epsilon = None
     self._Y = None
     self._s = None
     self._delta = np.eye(C, dtype='float32')
     self._C = C
     self._use_unlabeled = use_unlabeled
     self._skipupdate = False
Author: dennisforster | Project: NeSi | Lines: 31 | Source: mixturemodel_ssl_theano.py

Example 11: learningstep_m1

    def learningstep_m1(self, Y, L, M, W, epsilon):
        """Perform a single learning step.

        This is a faster learning step for the case of
        mini-batch-size = 1.

        Keyword arguments:
        the keyword arguments must be the same as given in
        self.input_parameters(mode) for mode='train'.
        """
        # Input integration:
        I = T.dot(T.log(W),Y)
        # recurrent term:
        vM = theano.ifelse.ifelse(
            T.eq(L,-1), # if no label is provided
            T.sum(M, axis=0),
            M[L,:]
            )
        # numeric trick to prevent overflow in the exp-function:
        max_exponent = 88. - T.log(I.shape[0]).astype('float32')
        scale = theano.ifelse.ifelse(T.gt(I[T.argmax(I)], max_exponent),
            I[T.argmax(I)] - max_exponent, 0.)
        # activation: recurrent softmax with overflow protection
        s = vM*T.exp(I-scale)/T.sum(vM*T.exp(I-scale))
        s.name = 's_%d.%d[t]'%(self._nmultilayer,self._nlayer)
        # weight update
        W_new = W + epsilon*(T.outer(s,Y) - s[:,np.newaxis]*W)
        W_new.name = 'W_%d.%d[t]'%(self._nmultilayer,self._nlayer)
        return s, W_new
Author: smajida | Project: NeSi | Lines: 29 | Source: poisson_theano_scan.py
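The "numeric trick" above exists because float32 exp() overflows just past exp(88) (the float32 maximum is about 3.4e38). Subtracting a shared scale from every logit leaves the normalized activation unchanged, because the shift cancels in the division by the sum. Standalone NumPy sketch:

import numpy as np

I = np.array([500., 499., 100.], dtype=np.float32)   # raw exp() would overflow
vM = np.ones_like(I)

max_exponent = 88. - np.log(I.shape[0])
scale = max(I.max() - max_exponent, 0.)
s = vM * np.exp(I - scale) / np.sum(vM * np.exp(I - scale))
print(s, s.sum())   # finite values, summing to 1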

Example 12: one_iter

 def one_iter(W_i, V_i, b_i, a, v_lt_i, p_lt_i, log_likelihood):
     h_i = self.sigmoid(a)
     p_i = self.sigmoid(T.dot(h_i, V_i) + b_i)
     v_i = 1. * (theano_rng.uniform([num_samples]) <= p_i)
     log_likelihood += v_i * T.log(p_i) + (1 - v_i) * T.log(1 - p_i)
     a += T.outer(v_i, W_i)
     return a, v_i, p_i, log_likelihood
Author: pombredanne | Project: research | Lines: 7 | Source: nade.py
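Each scan step here folds one visible unit into the hidden pre-activations: a += outer(v_i, W_i), where v_i holds that unit's sampled values across samples and W_i is the unit's weight row. Accumulated over all D units this equals a0 + V @ W, which a small NumPy loop confirms:

import numpy as np

num_samples, D, H = 4, 6, 3
V = np.random.binomial(1, 0.5, (num_samples, D)).astype(float)
W = np.random.randn(D, H)

a = np.zeros((num_samples, H))
for i in range(D):
    a += np.outer(V[:, i], W[i])   # one scan step
assert np.allclose(a, V @ W)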

Example 13: grad

    def grad(self, inputs, output_gradients):
        """
        Reverse-mode gradient updates for matrix solve operation c = A \ b.

        Symbolic expression for updates taken from [1]_.

        References
        ----------
        ..[1] M. B. Giles, "An extended collection of matrix derivative results
          for forward and reverse mode automatic differentiation",
          http://eprints.maths.ox.ac.uk/1079/

        """
        A, b = inputs
        c = self(A, b)
        c_bar = output_gradients[0]
        trans_map = {
            'lower_triangular': 'upper_triangular',
            'upper_triangular': 'lower_triangular'
        }
        trans_solve_op = Solve(
            # update A_structure and lower to account for a transpose operation
            A_structure=trans_map.get(self.A_structure, self.A_structure),
            lower=not self.lower
        )
        b_bar = trans_solve_op(A.T, c_bar)
        # force outer product if vector second input
        A_bar = -tensor.outer(b_bar, c) if c.ndim == 1 else -b_bar.dot(c.T)
        if self.A_structure == 'lower_triangular':
            A_bar = tensor.tril(A_bar)
        elif self.A_structure == 'upper_triangular':
            A_bar = tensor.triu(A_bar)
        return [A_bar, b_bar]
Author: HapeMask | Project: Theano | Lines: 33 | Source: slinalg.py
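A finite-difference sanity check of those reverse-mode formulas for the vector case c = solve(A, b), in plain NumPy and independent of Theano: b_bar = solve(A.T, c_bar) and A_bar = -outer(b_bar, c).

import numpy as np

n = 4
A = np.random.randn(n, n) + n * np.eye(n)   # keep A well conditioned
b = np.random.randn(n)
c_bar = np.random.randn(n)                  # upstream gradient on c

c = np.linalg.solve(A, b)
b_bar = np.linalg.solve(A.T, c_bar)
A_bar = -np.outer(b_bar, c)

eps = 1e-6                                  # perturb one entry of A
E = np.zeros_like(A); E[1, 2] = eps
fd = c_bar @ (np.linalg.solve(A + E, b) - c) / eps
assert np.isclose(fd, A_bar[1, 2], rtol=1e-3)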

Example 14: get_square_norm_gradients_scan

def get_square_norm_gradients_scan(D_by_layer, cost, accum = 0):

    # This returns a theano variable that will be of shape (minibatch_size, ).
    # It will contain, for each training example, the associated square-norm of the total gradient.
    # If you take the element-wise square-root afterwards, you will get
    # the associated 2-norms, which is what you want for importance sampling.

    for (layer_name, D) in D_by_layer.items():

        backprop_output = tensor.grad(cost, D['output'])

        if 'weight' in D:
            A = D['input']
            B = backprop_output
            S, _ =  theano.scan(fn=lambda A, B: tensor.sqr(tensor.outer(A,B)).sum(),
                                        sequences=[A,B])
            accum = accum + S

        if 'bias' in D:

            B = backprop_output
            S, _ =  theano.scan(fn=lambda B: tensor.sqr(B).sum(),
                                        sequences=[B])
            accum = accum + S
        
    return accum
Author: chinnadhurai | Project: ImportanceSamplingSGD | Lines: 26 | Source: verifying_grad_square_norm_formula.py
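A useful identity for this code: ||outer(a, b)||_F^2 = ||a||^2 * ||b||^2, so the per-example scan over tensor.sqr(tensor.outer(A, B)).sum() could be replaced by a product of row-wise squared norms, avoiding scan entirely. NumPy check:

import numpy as np

A = np.random.randn(8, 5)   # layer inputs, one row per example
B = np.random.randn(8, 3)   # backpropagated outputs, one row per example

via_outer = np.array([np.square(np.outer(a, b)).sum() for a, b in zip(A, B)])
via_norms = np.square(A).sum(axis=1) * np.square(B).sum(axis=1)
assert np.allclose(via_outer, via_norms)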

Example 15: compute_psi1

def compute_psi1(lls, lsf, xmean, xvar, z):

    if xmean.ndim == 1:
        xmean = xmean[ None, : ]

    ls = T.exp(lls)
    sf = T.exp(lsf)
    lspxvar = ls + xvar
    constterm1 = ls / lspxvar
    constterm2 = T.prod(T.sqrt(constterm1), 1)
    r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1), T.ones_like(z[ : , 0 : 1 ])) \
        - np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)
    psi1 = sf * T.outer(constterm2, T.ones_like(z[ : , 0 : 1 ])) * T.exp(-np.float32(0.5) * r2_psi1)

    return psi1
Author: nair-p | Project: sdvae | Lines: 16 | Source: gauss.py
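The r2_psi1 term above assembles the scaled squared distance sum_d (xmean[i,d] - z[j,d])^2 / (ls[d] + xvar[i,d]) via the expansion x^2 - 2xz + z^2, with T.outer(..., ones) tiling the row-wise x^2 term across the inducing points z. NumPy check of the identity:

import numpy as np

n, m, D = 4, 3, 2
xmean = np.random.randn(n, D)
lspxvar = 1.0 + np.random.rand(n, D)   # plays the role of ls + xvar
z = np.random.randn(m, D)

r2 = (np.outer((xmean ** 2 / lspxvar).sum(1), np.ones(m))
      - 2.0 * (xmean / lspxvar) @ z.T
      + (1.0 / lspxvar) @ (z.T ** 2))
direct = ((xmean[:, None, :] - z[None, :, :]) ** 2 / lspxvar[:, None, :]).sum(-1)
assert np.allclose(r2, direct)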


Note: the theano.tensor.outer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not repost without permission.