

Python tensor.stack Function Code Examples

This article collects and summarizes typical usage examples of the theano.tensor.stack function in Python. If you are struggling with questions such as how exactly to use the stack function, or what it is used for in practice, the curated code examples here may help.


The following presents 15 code examples of the stack function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
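
Before the examples, a minimal sketch of what T.stack itself does may help: it joins a list of symbolic variables along a new leading axis (the variable names below are illustrative, not taken from any example).

import theano
import theano.tensor as T

a, b = T.vector('a'), T.vector('b')
s = T.stack([a, b])                # new leading axis: s has shape (2, len(a))
f = theano.function([a, b], s)
f([1., 2.], [3., 4.])              # -> array([[1., 2.], [3., 4.]])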

Example 1: stack_and_shared

def stack_and_shared(_input):
    """
    This will take a list of input variables, turn them into theano shared
    variables, and return them stacked in a single tensor.

    :param _input: list of input variables
    :type _input: list, object, or None

    :return: symbolic tensor of the stacked input variables, or None
    :rtype: Tensor or None
    """
    if _input is None:
        return None
    elif isinstance(_input, list):
        shared_ins = []
        for _in in _input:
            try:
                shared_ins.append(theano.shared(_in))
            except TypeError:
                # not something theano.shared can wrap; keep the variable as-is
                shared_ins.append(_in)
        return T.stack(shared_ins)
    else:
        try:
            _output = [theano.shared(_input)]
        except TypeError:
            _output = [_input]
        return T.stack(_output)
Developer: chagge, Project: OpenDeep, Lines: 27, Source: misc.py
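
A usage sketch for this helper (the inputs are hypothetical): a list of NumPy arrays is wrapped in shared variables and stacked along a new first axis.

import numpy as np

stacked = stack_and_shared([np.ones(3), np.zeros(3)])
# stacked is a symbolic (2, 3) tensor; stacked.eval() -> [[1., 1., 1.], [0., 0., 0.]]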

Example 2: _step

        def _step(x_, h_, c_, pred_, prob_):
            h_a = []
            c_a = []
            for it in range(self.n_levels):
                preact = T.dot(h_[it], self.U[it])
                preact += T.dot(x_, self.W[it]) + self.b[it]

                i = T.nnet.sigmoid(_slice(preact, 0, self.n_dim))  # input gate
                f = T.nnet.sigmoid(_slice(preact, 1, self.n_dim))  # forget gate
                o = T.nnet.sigmoid(_slice(preact, 2, self.n_dim))  # output gate
                c = T.tanh(_slice(preact, 3, self.n_dim))          # candidate cell state

                c = f * c_[it] + i * c
                h = o * T.tanh(c)

                h_a.append(h)
                c_a.append(c)

                x_ = h

            q = T.dot(h, self.L) + self.b0
            prob = T.nnet.softmax(q)
            pred = T.argmax(prob, axis=1)

            return T.stack(h_a).squeeze(), T.stack(c_a).squeeze(), pred, prob
Developer: velicue, Project: char-rnn-theano, Lines: 25, Source: lstm.py
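
The _slice helper is not shown in this excerpt. Its definition is an assumption here, but the conventional Theano LSTM-tutorial helper matches the 0-3 offsets used above:

def _slice(m, n, dim):
    # n-th dim-wide block of the concatenated i/f/o/c gate pre-activations
    return m[:, n * dim:(n + 1) * dim]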

Example 3: predict_K

    def predict_K(self, x, z, params):
        # s_mean, s_x for computing mean from s_x
        Ks = []
        Ks_new = []
        offset = 0
        for kern, slice_k in zip(self.kernels, self.slices):
            params_k = params[offset: offset + kern.n_params]
            K_k, K_new_k = kern.predict_K(
                x[:, slice_k], z[:, slice_k], params_k)
            Ks.append(K_k)
            Ks_new.append(K_new_k)
            offset += kern.n_params

        log_weights = TT.concatenate((np.asarray([0]),
                                      params[offset:offset + self.n_my_params]))
        weights = TT.exp(log_weights) / TT.exp(log_weights).sum()

        if len(self.kernels) == 1:
            return Ks[0], Ks_new[0]
        else:
            # XXX: log_K, should be logadd here (#11)
            wK = TT.sum(
                weights[:, None, None] * TT.stack(*Ks), axis=0)
            wK_new = TT.sum(
                weights[:, None, None] * TT.stack(*Ks_new), axis=0)
            return wK, wK_new
Developer: gopal-m, Project: hyperopt-gpsmbo, Lines: 26, Source: kernels.py
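
Prepending a constant 0 to the log-weights and then exponentiating and normalizing pins the first kernel's weight and is exactly a softmax; a hedged equivalent formulation:

# equivalent (sketch): softmax expects a 2-D input, so add and drop a leading axis
weights = TT.nnet.softmax(log_weights.dimshuffle('x', 0))[0]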

Example 4: tangent2ambient

    def tangent2ambient(self, X, Z):
        U = tensor.stack((X.U.dot(Z.M) + Z.Up, X.U), 0).reshape((-1, X.U.shape[1]))
        # U = np.hstack((X.U.dot(Z.M) + Z.Up, X.U))
        S = tensor.eye(2 * self._k)
        V = tensor.stack((X.V, Z.Vp), 1).reshape((X.V.shape[0], -1))
        # V = np.vstack((X.V, Z.Vp))
        return ManifoldElementShared.from_vars((U, S, V), shape=(self._m, self._n), r=self._k)
Developer: Nehoroshiy, Project: theano_manifold, Lines: 7, Source: fixed_rank.py
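
The stack-then-reshape idiom in this example emulates NumPy concatenation, with the axis argument selecting which kind; a minimal sketch using the same tensor alias (shapes must be compatible, as with NumPy):

A, B = tensor.matrix('A'), tensor.matrix('B')
vstacked = tensor.stack((A, B), 0).reshape((-1, A.shape[1]))  # rows of A, then rows of B (np.vstack)
hstacked = tensor.stack((A, B), 1).reshape((A.shape[0], -1))  # columns side by side (np.hstack)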

Example 5: stack_and_shared

def stack_and_shared(input):
    """
    This will take a list of input variables, turn them into theano shared variables, and return them stacked
    in a single tensor.

    Parameters
    ----------
    input : list or object
        List of input variables to stack into a single shared tensor.

    Returns
    -------
    tensor
        Symbolic tensor of the input variables stacked, or None if input was None.
    """
    if input is None:
        return None
    elif isinstance(input, list):
        shared_ins = []
        for _in in input:
            try:
                shared_ins.append(theano.shared(_in))
            except TypeError:
                # not something theano.shared can wrap; keep the variable as-is
                shared_ins.append(_in)
        return T.stack(shared_ins)
    else:
        try:
            _output = [theano.shared(input)]
        except TypeError:
            _output = [input]
        return T.stack(_output)
Developer: JediKoder, Project: OpenDeep, Lines: 31, Source: misc.py

Example 6: finetune_cost_updates

    def finetune_cost_updates(self, center, mu, learning_rate):
        """ This function computes the cost and the updates. """

        # note: we sum over the size of a datapoint; if we are using
        #       minibatches, L will be a vector, with one entry per
        #       example in the minibatch
        network_output = self.get_output()
        temp = T.pow(center - network_output, 2)

        L = T.sum(temp, axis=1)
        # add the network reconstruction error
        z = self.get_network_reconst()
        reconst_err = T.sum(T.pow(self.x - z, 2), axis=1)
        L = self.beta * L + self.lbd * reconst_err

        cost1 = T.mean(L)
        cost2 = self.lbd * T.mean(reconst_err)
        cost3 = cost1 - cost2

        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost1, self.params)
        # generate the list of updates (momentum SGD)
        updates = []
        grad_values = []
        param_norm = []
        for param, delta, gparam in zip(self.params, self.delta, gparams):
            updates.append((delta, mu * delta - learning_rate * gparam))
            updates.append((param, param + mu * mu * delta - (1 + mu) * learning_rate * gparam))
            grad_values.append(gparam.norm(L=2))
            param_norm.append(param.norm(L=2))

        grad_ = T.stack(*grad_values)
        param_ = T.stack(*param_norm)
        return ((cost1, cost2, cost3, grad_, param_), updates)
Developer: WenjunJiang, Project: DCN, Lines: 35, Source: multi_layer_km.py
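
T.stack(*grad_values) uses the older varargs calling form; newer Theano releases prefer a list argument with an optional axis, so an equivalent (hedged) modern spelling is:

grad_ = T.stack(grad_values)    # list form; stacks along a new axis 0
param_ = T.stack(param_norm)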

Example 7: func

    def func(chol_vec, delta):
        # build a 2x2 lower-triangular Cholesky factor from a 3-vector
        chol = tt.stack([
            tt.stack([tt.exp(0.1 * chol_vec[0]), 0]),
            tt.stack([chol_vec[1], 2 * tt.exp(chol_vec[2])]),
        ])
        cov = tt.dot(chol, chol.T)
        return MvNormalLogp()(cov, delta)
Developer: aloctavodia, Project: pymc3, Lines: 7, Source: test_dist_math.py
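
For intuition, a NumPy sketch of the Cholesky factor this builds and the covariance it induces (the input values are hypothetical):

import numpy as np

chol_vec = np.array([0.0, 0.5, 0.0])    # hypothetical input
chol = np.array([[np.exp(0.1 * chol_vec[0]), 0.0],
                 [chol_vec[1], 2 * np.exp(chol_vec[2])]])
cov = chol @ chol.T                     # [[1.0, 0.5], [0.5, 4.25]]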

Example 8: generate

    def generate(self, h_, c_, x_):
        h_a = []
        c_a = []
        for it in range(self.n_levels):
            preact = T.dot(x_, self.W[it])
            preact += T.dot(h_[it], self.U[it]) + self.b[it]

            i = T.nnet.sigmoid(self.slice(preact, 0, self.n_dim))  # input gate
            f = T.nnet.sigmoid(self.slice(preact, 1, self.n_dim))  # forget gate
            o = T.nnet.sigmoid(self.slice(preact, 2, self.n_dim))  # output gate
            c = T.tanh(self.slice(preact, 3, self.n_dim))          # candidate cell state

            c = f * c_[it] + i * c
            h = o * T.tanh(c)

            h_a.append(h)
            c_a.append(c)

            x_ = h

        q = T.dot(h, self.L) + self.b0
        # mask = T.concatenate([T.alloc(np_floatX(1.), q.shape[0] - 1), T.alloc(np_floatX(0.), 1)])
        prob = T.nnet.softmax(q / 1)

        return prob, T.stack(h_a).squeeze(), T.stack(c_a)[0].squeeze()
Developer: velicue, Project: char-rnn-theano, Lines: 25, Source: lstm.py
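
The division q / 1 is a softmax temperature fixed at 1. A hedged variant with an explicit temperature (the value below is illustrative, not from the source):

temperature = 0.8                         # < 1 sharpens, > 1 flattens the distribution
prob = T.nnet.softmax(q / temperature)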

Example 9: _setOutputs

	def _setOutputs(self) :
		inps = []
		for l in self.network.inConnections[self] :
			inps.append(l.outputs)

		self.outputs = tt.stack(inps).reshape((-1, self.nbChannels, self.height, self.width))
		self.testOutputs = tt.stack(inps).reshape((-1, self.nbChannels, self.height, self.width))
Developer: BenJamesbabala, Project: Mariana, Lines: 7, Source: convolution.py

Example 10: retr

    def retr(self, X, Z, t=None):
        if t is None:
            t = 1.0
        # tensor.nlinalg.QRFull is an Op class; the callable wrapper is qr
        Qu, Ru = tensor.nlinalg.qr(Z.Up)

        # we need an rq decomposition here
        Qv, Rv = tensor.nlinalg.qr(Z.Vp[::-1].T)
        Rv = Rv.T[::-1]
        Rv = Rv[:, ::-1]  # symbolic tensors do not support in-place slice assignment
        Qv = Qv.T[::-1]

        # now we have an rq decomposition (Rv @ Qv = Z.Vp)
        # Rv, Qv = rq(Z.Vp, mode='economic')

        zero_block = tensor.zeros((Ru.shape[0], Rv.shape[1]))
        block_mat = tensor.stack(
            (
                tensor.stack((X.S + t * Z.M, t * Rv), 1).reshape((Rv.shape[0], -1)),
                tensor.stack((t * Ru, zero_block), 1).reshape((Ru.shape[0], -1))
            )
        ).reshape((-1, Ru.shape[1] + Rv.shape[1]))

        Ut, St, Vt = tensor.nlinalg.svd(block_mat, full_matrices=False)

        U = tensor.stack((X.U, Qu), 1).reshape((Qu.shape[0], -1)).dot(Ut[:, :self._k])
        V = Vt[:self._k, :].dot(tensor.stack((X.V, Qv), 0).reshape((-1, Qv.shape[1])))
        # add some machinery eps to get a slightly perturbed element of a manifold
        # even if we have some zeros in S
        S = tensor.diag(St[:self._k]) + tensor.diag(np.spacing(1) * tensor.ones(self._k))
        return ManifoldElementShared.from_vars((U, S, V), shape=(self._m, self._n), r=self._k)
Developer: Nehoroshiy, Project: theano_manifold, Lines: 31, Source: fixed_rank.py
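
The reversed-transpose QR calls above are a standard trick for obtaining an RQ decomposition from a QR routine. A NumPy sanity check of the trick (square case assumed):

import numpy as np

A = np.random.randn(4, 4)
Q, R = np.linalg.qr(A[::-1].T)
R = R.T[::-1]
R = R[:, ::-1]
Q = Q.T[::-1]
assert np.allclose(R @ Q, A)  # R upper triangular, Q orthogonal: A = R @ Q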

Example 11: _batch_vectorization

    def _batch_vectorization(self, **args):
        fun_in = args["fun"]
        symbolic_X_list = args["symbolic_X_list"]
        if "symbolic_c_inp_list" in args and "t" in args:
            t = args["t"]
            symbolic_c_inp_list = args["symbolic_c_inp_list"]
            fun = lambda x, y: fun_in(x, y, t)
        elif "symbolic_c_inp_list" in args and "t" not in args:
            symbolic_c_inp_list = args["symbolic_c_inp_list"]
            fun = fun_in
        elif "symbolic_c_inp_list" not in args and "t" in args:
            t = args["t"]
            symbolic_c_inp_list = []
            fun = lambda x, y: fun_in(x, t)
        else:
            # fallback: neither extra inputs nor t were given
            symbolic_c_inp_list = []
            fun = fun_in

        fun_list = []
        for i in np.arange(self.number_of_rollouts):
            symbolic_X_list_i = [a[i] for a in symbolic_X_list]
            symbolic_c_inp_list_i = [a[i] for a in symbolic_c_inp_list]
            # apply fun to the i-th rollout slice (the original passed the full
            # list here, leaving symbolic_c_inp_list_i unused)
            out_list = fun(symbolic_X_list_i, symbolic_c_inp_list_i)
            fun_list.append(out_list)
        if type(fun_list[0]) != list:
            return T.stack(fun_list, axis=0)
        else:
            ziped_list = [list(a) for a in zip(*fun_list)]
            return [T.stack(a, axis=0) for a in ziped_list]
Developer: DoTha, Project: ParallelPice, Lines: 26, Source: define_Theano_Control_Problem.py
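
Stripped of the argument plumbing, _batch_vectorization implements a map-then-stack pattern over the rollout axis. A minimal sketch of that pattern (names and sizes are illustrative):

xs = T.matrix('xs')                     # (number_of_rollouts, dim)
rows = [xs[i] * 2.0 for i in range(3)]  # per-rollout results, 3 rollouts assumed
batched = T.stack(rows, axis=0)         # reassembled as a (3, dim) tensor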

Example 12: tangent2ambient

    def tangent2ambient(self, X, Z):
        U = tensor.stack((X.U.dot(Z.M) + Z.Up, X.U), 0).reshape((-1, X.U.shape[1]))
        # U = np.hstack((X.U.dot(Z.M) + Z.Up, X.U))
        S = tensor.eye(2 * self._k)
        V = tensor.stack((X.V, Z.Vp), 1).reshape((X.V.shape[0], -1))
        # V = np.vstack((X.V, Z.Vp))
        return (U, S, V)
Developer: Nehoroshiy, Project: theano_manifold, Lines: 7, Source: fixed_rank_splitted.py

Example 13: retr

    def retr(self, X, Z, t=None):
        U, S, V = X
        Up, M, Vp = Z
        if t is None:
            t = 1.0
        Qu, Ru = tensor.nlinalg.qr(Up)

        # we need rq decomposition here
        Qv, Rv = tensor.nlinalg.qr(Vp[::-1].T)
        Rv = Rv.T[::-1]
        Rv = Rv[:, ::-1]
        Qv = Qv.T[::-1]

        # now we have rq decomposition (Rv @ Qv = Z.Vp)
        #Rv, Qv = rq(Z.Vp, mode='economic')


        zero_block = tensor.zeros((Ru.shape[0], Rv.shape[1]))
        block_mat = tensor.stack(
            (
                tensor.stack((S + t * M, t * Rv), 1).reshape((Rv.shape[0], -1)),
                tensor.stack((t * Ru, zero_block), 1).reshape((Ru.shape[0], -1))
            )
        ).reshape((-1, Ru.shape[1] + Rv.shape[1]))

        Ut, St, Vt = tensor.nlinalg.svd(block_mat, full_matrices=False)

        U_res = tensor.stack((U, Qu), 1).reshape((Qu.shape[0], -1)).dot(Ut[:, :self._k])
        V_res = Vt[:self._k, :].dot(tensor.stack((V, Qv), 0).reshape((-1, Qv.shape[1])))
        # add some machinery eps to get a slightly perturbed element of a manifold
        # even if we have some zeros in S
        S_res = tensor.diag(St[:self._k]) + tensor.diag(np.spacing(1) * tensor.ones(self._k))
        return (U_res, S_res, V_res)
Developer: Nehoroshiy, Project: theano_manifold, Lines: 33, Source: fixed_rank_splitted.py

Example 14: _for_step

    def _for_step(self,
                  xi_t, xf_t, xo_t, xc_t, mask_t,
                  h_tm1, c_tm1,
                  context, context_mask, context_att_trans,
                  hist_h, hist_h_att_trans,
                  b_u):

        # context: (batch_size, context_size, context_dim)

        # (batch_size, att_layer1_dim)
        h_tm1_att_trans = T.dot(h_tm1, self.att_h_W1)

        # (batch_size, context_size, att_layer1_dim)
        att_hidden = T.tanh(context_att_trans + h_tm1_att_trans[:, None, :])

        # (batch_size, context_size, 1)
        att_raw = T.dot(att_hidden, self.att_W2) + self.att_b2

        # (batch_size, context_size)
        ctx_att = T.exp(att_raw).reshape((att_raw.shape[0], att_raw.shape[1]))

        if context_mask is not None:
            # symbolic tensors cannot be truth-tested; guard on None instead
            ctx_att = ctx_att * context_mask

        ctx_att = ctx_att / T.sum(ctx_att, axis=-1, keepdims=True)

        # (batch_size, context_dim)
        ctx_vec = T.sum(context * ctx_att[:, :, None], axis=1)

        ##### attention over history #####

        if hist_h:
            hist_h = T.stack(hist_h).dimshuffle((1, 0, 2))
            hist_h_att_trans = T.stack(hist_h_att_trans).dimshuffle((1, 0, 2))
            h_tm1_hatt_trans = T.dot(h_tm1, self.hatt_h_W1)

            hatt_hidden = T.tanh(hist_h_att_trans + h_tm1_hatt_trans[:, None, :])
            hatt_raw = T.dot(hatt_hidden, self.hatt_W2) + self.hatt_b2
            hatt_raw = hatt_raw.flatten(2)
            h_att_weights = T.nnet.softmax(hatt_raw)

            # (batch_size, output_dim)
            h_ctx_vec = T.sum(hist_h * h_att_weights[:, :, None], axis=1)
        else:
            h_ctx_vec = T.zeros_like(h_tm1)

        ##### attention over history #####

        i_t = self.inner_activation(xi_t + T.dot(h_tm1 * b_u[0], self.U_i) + T.dot(ctx_vec, self.C_i) + T.dot(h_ctx_vec, self.H_i))
        f_t = self.inner_activation(xf_t + T.dot(h_tm1 * b_u[1], self.U_f) + T.dot(ctx_vec, self.C_f) + T.dot(h_ctx_vec, self.H_f))
        c_t = f_t * c_tm1 + i_t * self.activation(xc_t + T.dot(h_tm1 * b_u[2], self.U_c) + T.dot(ctx_vec, self.C_c) + T.dot(h_ctx_vec, self.H_c))
        o_t = self.inner_activation(xo_t + T.dot(h_tm1 * b_u[3], self.U_o) + T.dot(ctx_vec, self.C_o) + T.dot(h_ctx_vec, self.H_o))
        h_t = o_t * self.activation(c_t)

        h_t = (1 - mask_t) * h_tm1 + mask_t * h_t
        c_t = (1 - mask_t) * c_tm1 + mask_t * c_t

        # ctx_vec = theano.printing.Print('ctx_vec')(ctx_vec)

        return h_t, c_t, ctx_vec
Developer: chubbymaggie, Project: NL2code, Lines: 60, Source: components.py
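
When no mask is applied, the exp-and-renormalize steps above compute exactly a row-wise softmax, so a hedged equivalent for the unmasked case is:

# unmasked case only (a sketch): att_raw is (batch_size, context_size, 1)
ctx_att = T.nnet.softmax(att_raw.flatten(2))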

Example 15: forward_prop_step_stack

        def forward_prop_step_stack(x_t, masks, h_prevs, c_prevs, stack_prevs, ptrs_to_top_prevs):
            # determine, for all layers, if this input was a push/pop
            is_push, is_pop = map_push_pop(x_t, self.PUSH, self.POP)
            is_null = get_is_null(x_t, self.NULL)

            nonsymbolic_hs = []
            nonsymbolic_cs = []
            nonsymbolic_stacks = []
            nonsymbolic_ptrs_to_tops = []

            h = x_t
            for i, layer in enumerate(self.layers):
                h, c, stack, ptrs_to_top = layer.forward_prop_stack(h, h_prevs[i, :, :], c_prevs[i, :, :], stack_prevs[i, :, :, :], ptrs_to_top_prevs[i, :, :, :], is_push, is_pop, is_null)
                h = h * masks[:, :, i] / self.dropout  # inverted dropout for scaling

                nonsymbolic_hs.append(h)
                nonsymbolic_cs.append(c)
                nonsymbolic_stacks.append(stack)
                nonsymbolic_ptrs_to_tops.append(ptrs_to_top)
            
            h_s = T.stack(nonsymbolic_hs)
            c_s = T.stack(nonsymbolic_cs)
            stack_s = T.stack(nonsymbolic_stacks)
            ptrs_to_top_s = T.stack(nonsymbolic_ptrs_to_tops)

            o_t = self.W_hy.dot(h)
            
            return o_t, h_s, c_s, stack_s, ptrs_to_top_s
Developer: the1mane1event, Project: vprnn, Lines: 28, Source: NWLSTM_Net.py
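
Example 15 divides h by self.dropout under the inverted-dropout convention, which implies self.dropout holds the keep probability. A hypothetical way to sample masks of the shape the code indexes (masks[:, :, i] against a (batch, hidden) h); all names and sizes below are assumptions:

import theano
from theano.tensor.shared_randomstreams import RandomStreams

batch_size, hidden_dim, n_layers = 32, 256, 2   # hypothetical sizes
keep_prob = 0.8                                 # plays the role of self.dropout above
srng = RandomStreams(seed=1234)
masks = srng.binomial(size=(batch_size, hidden_dim, n_layers),
                      p=keep_prob, dtype=theano.config.floatX)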


Note: The theano.tensor.stack function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not repost without permission.