

Python tensor.repeat Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.repeat from open-source projects. If you are unsure what repeat does, how to call it, or want to see how it is used in real code, the hand-picked examples below should help.


Below are 15 code examples of the repeat function, sorted by popularity by default. You can upvote the examples you find useful; your votes help the site recommend better Python code examples.
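
Before the project excerpts, here is a minimal, self-contained sketch (not taken from any of the projects below; the variable names are illustrative and it assumes a working Theano installation) showing what theano.tensor.repeat does: like numpy.repeat, it repeats the elements of a tensor along a chosen axis, which is exactly how the unpooling/upsampling examples below use it.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')  # symbolic 2-D input
# repeat each row twice (axis=0) and each column three times (axis=1)
y = T.repeat(T.repeat(x, 2, axis=0), 3, axis=1)
f = theano.function([x], y)

a = np.arange(4, dtype=theano.config.floatX).reshape(2, 2)
print(f(a).shape)  # (4, 6): every entry is expanded into a 2x3 block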

Example 1: keep_max

def keep_max(input, theta, k, sent_mask):
    sig_input = T.nnet.sigmoid(T.dot(input, theta))
    sent_mask = sent_mask.dimshuffle(0, 'x', 1, 'x')
    sig_input = sig_input * sent_mask
    #sig_input = T.dot(input, theta)
    if k == 0:
        result = input * T.addbroadcast(sig_input, 3)
        return result, sig_input

    # get the sorted idx
    sort_idx = T.argsort(sig_input, axis=2)
    k_max_ids = sort_idx[:,:,-k:,:]
    dim0, dim1, dim2, dim3 = k_max_ids.shape
    batchids = T.repeat(T.arange(dim0), dim1*dim2*dim3)
    mapids = T.repeat(T.arange(dim1), dim2*dim3).reshape((1, dim2*dim3))
    mapids = T.repeat(mapids, dim0, axis=0).flatten()
    rowids = k_max_ids.flatten()
    colids = T.arange(dim3).reshape((1, dim3))
    colids = T.repeat(colids, dim0*dim1*dim2, axis=0).flatten()
    sig_mask = T.zeros_like(sig_input)
    choosed = sig_input[batchids, mapids, rowids, colids]
    sig_mask = T.set_subtensor(sig_mask[batchids, mapids, rowids, colids], 1)
    input_mask = sig_mask * sig_input
    result = input * T.addbroadcast(input_mask, 3)
    return result, sig_input
Author: Tskatom, Project: Protest_Event_Encoder, Lines of code: 25, Source file: DLBE_Event_Advance.py

Example 2: __theano__unpool

    def __theano__unpool(self, inp, us, dim=None, issequence=False):

        # Determine the dimensionality of convolution (2 or 3?)
        if dim is None:
            dim = 3 if not issequence and len(us) == 3 and inp.ndim == 5 else 2

        # Reshape 2D sequential data if required
        # Log input shape
        inpshape = inp.shape
        reallyissequential = issequence and inp.ndim == 5
        if issequence:
            if reallyissequential:
                # Reshape
                inp = inp.reshape((inpshape[0] * inpshape[1], inpshape[2], inpshape[3], inpshape[4]), ndim=4)
                us = us[0:2]
            else:
                warn("Expected 5D sequential output, but got 4D non-sequential instead.")

        if dim == 2:
            y = T.repeat(T.repeat(inp, us[0], axis=2), us[1], axis=3)
        elif dim == 3:
            y = T.repeat(T.repeat(T.repeat(inp, us[0], axis=3), us[1], axis=4), us[2], axis=1)
        else:
            raise NotImplementedError("Upsampling is implemented in 2D and 3D.")

        if issequence and reallyissequential:
            # Reshape sequential data (note the spatial size has grown by the upsampling factor)
            y = y.reshape((inpshape[0], inpshape[1], inpshape[2], us[0] * inpshape[3], us[1] * inpshape[4]), ndim=5)

        return y
Author: abailoni, Project: greedy_CNN, Lines of code: 30, Source file: backend.py

Example 3: simple_upsample3d

def simple_upsample3d(inpt, up_factor):
    inpt = T.repeat(inpt, up_factor[0], axis=3)
    inpt = T.repeat(inpt, up_factor[1], axis=4)
    inpt = T.repeat(inpt, up_factor[2], axis=1)
    #rep = [1, up_factor[2], 1, up_factor[0], up_factor[1]]
    #inpt = T.tile(inpt, rep, ndim=5)
    return inpt
Author: jhzhou1111, Project: CNNbasedMedicalSegmentation, Lines of code: 7, Source file: basic.py

Example 4: run

    def run(self, h):
        channels = self.channels#images.shape[1]
        if not self.test:
            gx,gy,dx,dy,s2,g = self.get_params(h)
        else:
            gx,gy,dx,dy,s2,g = self.get_params_test(h)

        w = self.w_transform.run(h)

        w = w.reshape((self.batch_size*self.channels, self.N, self.N))


        muX = gx.dimshuffle([0,'x']) + dx.dimshuffle([0,'x']) * (T.arange(self.N).astype(theano.config.floatX) - self.N/2 - 0.5)
        muY = gy.dimshuffle([0,'x']) + dy.dimshuffle([0,'x']) * (T.arange(self.N).astype(theano.config.floatX) - self.N/2 - 0.5)

        a = T.arange(self.width).astype(theano.config.floatX)
        b = T.arange(self.height).astype(theano.config.floatX)

        Fx = T.exp(-(a-muX.dimshuffle([0,1,'x']))**2 / 2. / s2.dimshuffle([0,'x','x'])**2)
        Fy = T.exp(-(b-muY.dimshuffle([0,1,'x']))**2 / 2. / s2.dimshuffle([0,'x','x'])**2)

        Fx = Fx / (Fx.sum(axis=-1).dimshuffle([0,1,'x']) + 1e-4)
        Fy = Fy / (Fy.sum(axis=-1).dimshuffle([0,1,'x']) + 1e-4)

        self.Fx = T.repeat(Fx, channels, axis=0)
        self.Fy = T.repeat(Fy, channels, axis=0)

        self.fint = self.batched_dot(self.Fy.transpose((0,2,1)), w)

        self.fim = self.batched_dot(self.fint, self.Fx).reshape((self.batch_size, self.channels*self.width*self.height))

        return 1./g * self.fim, (gx, gy, dx, dy, self.fint)
Author: piergiaj, Project: generate-image, Lines of code: 32, Source file: write_layer.py

Example 5: neglog_2d

def neglog_2d(output, target):
    i = T.arange(target.shape[0]).reshape((target.shape[0], 1))
    i = T.repeat(i, target.shape[1], axis=1).flatten()
    j = T.arange(target.shape[1]).reshape((1, target.shape[1]))
    j = T.repeat(j, target.shape[0], axis=0).flatten()
    k = target.flatten()
    return -T.mean(T.log(output)[i, j, k])
Author: Cysu, Project: dlearn, Lines of code: 7, Source file: costfuncs.py

Example 6: run

    def run(self, images, h):#, error_images, h):
        channels = self.channels#images.shape[1]
        if not self.test:
            gx,gy,dx,dy,s2,g = self.get_params(h)
        else:
            gx,gy,dx,dy,s2,g = self.get_params_test(h)

        # how to handle variable sized input images? (mask??)
        I = images.reshape((self.batch_size*self.channels, self.height, self.width))

        muX = gx.dimshuffle([0,'x']) + dx.dimshuffle([0,'x']) * (T.arange(self.N).astype(theano.config.floatX) - self.N/2 - 0.5)
        muY = gy.dimshuffle([0,'x']) + dy.dimshuffle([0,'x']) * (T.arange(self.N).astype(theano.config.floatX) - self.N/2 - 0.5)

        a = T.arange(self.width).astype(theano.config.floatX)
        b = T.arange(self.height).astype(theano.config.floatX)

        Fx = T.exp(-(a-muX.dimshuffle([0,1,'x']))**2 / 2. / s2.dimshuffle([0,'x','x'])**2)
        Fy = T.exp(-(b-muY.dimshuffle([0,1,'x']))**2 / 2. / s2.dimshuffle([0,'x','x'])**2)

        Fx = Fx / (Fx.sum(axis=-1).dimshuffle([0,1,'x']) + 1e-4)
        Fy = Fy / (Fy.sum(axis=-1).dimshuffle([0,1,'x']) + 1e-4)

        self.Fx = T.repeat(Fx, channels, axis=0)
        self.Fy = T.repeat(Fy, channels, axis=0)

        self.fint = self.batched_dot(self.Fy, I)
#        self.efint = T.dot(self.Fx, error_images)
        self.fim = self.batched_dot(self.fint, self.Fx.transpose([0,2,1])).reshape(
            (self.batch_size, self.channels*self.N*self.N))
#        self.feim = T.dot(self.efint, self.Fy.transpose([0,2,1])).reshape(
#            (self.batch_size, channels,self.N,self.N))
        return g * self.fim, (gx, gy, dx, dy, self.fint)#$T.concatenate([self.fim, self.feim], axis=1)
Author: piergiaj, Project: generate-image, Lines of code: 32, Source file: read_layer.py

Example 7: output

    def output(self, train):
        X = self.get_input(train) # shape: (nb_samples, time (padded with zeros at the end), input_dim)
        # new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension
        X = X.dimshuffle((1, 0, 2))

        xf = self.activation(T.dot(X, self.W_if) + self.b_if)
        xb = self.activation(T.dot(X, self.W_ib) + self.b_ib)
        b_o=self.b_o
        b_on= T.repeat(T.repeat(b_o.reshape((1,self.output_dim)),X.shape[0],axis=0).reshape((1,X.shape[0],self.output_dim)),X.shape[1],axis=0)
        # Iterate forward over the first dimension of the x array (=time).
        outputs_f, updates_f = theano.scan(
            self._step,  # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
            sequences=xf,  # tensors to iterate over, inputs to _step
            # initialization of the output. Input to _step with default tap=-1.
            outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),
            non_sequences=[self.W_ff,self.b_f],  # static inputs to _step
            truncate_gradient=self.truncate_gradient
        )
        # Iterate backward over the first dimension of the x array (=time).
        outputs_b, updates_b = theano.scan(
            self._step,  # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
            sequences=xb,  # tensors to iterate over, inputs to _step
            # initialization of the output. Input to _step with default tap=-1.
            outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),
            non_sequences=[self.W_bb,self.b_b],  # static inputs to _step
            truncate_gradient=self.truncate_gradient,
            go_backwards=True  # Iterate backwards through time
        )
        #return outputs_f.dimshuffle((1, 0, 2))
        if self.return_sequences:
            return T.add(T.tensordot(T.add(outputs_f.dimshuffle((1, 0, 2)), outputs_b[::-1].dimshuffle((1,0,2))),self.W_o,[[2],[0]]),b_on)
        return T.concatenate((outputs_f[-1], outputs_b[0]))
Author: CVML, Project: CRCN, Lines of code: 32, Source file: recurrent.py

Example 8: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        # mask = self.get_padded_shuffled_mask(train, X, pad=0)
        mask = self.get_input_mask(train=train)
        ind = T.switch(T.eq(mask[:, -1], 1.), mask.shape[-1], T.argmin(mask, axis=-1)).astype('int32').ravel()
        max_time = T.max(ind)
        X = X.dimshuffle((1, 0, 2))
        Y = T.dot(X, self.W) + self.b
        # h0 = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
        h0 = T.repeat(self.h_m1, X.shape[1], axis=0)
        c0 = T.repeat(self.c_m1, X.shape[1], axis=0)

        [outputs, _], updates = theano.scan(
            self._step,
            sequences=Y,
            outputs_info=[h0, c0],
            non_sequences=[self.R], n_steps=max_time,
            truncate_gradient=self.truncate_gradient, strict=True,
            allow_gc=theano.config.scan.allow_gc)

        res = T.concatenate([h0.dimshuffle('x', 0, 1), outputs], axis=0).dimshuffle((1, 0, 2))
        if self.return_sequences:
            return res
        #return outputs[-1]
        return res[T.arange(mask.shape[0], dtype='int32'), ind]
Author: chenych11, Project: keras, Lines of code: 25, Source file: recurrent.py

Example 9: fprop

    def fprop(self, X):
        x, x_hat, z = X
        batch_size, num_channel, height, width = self.input_shape
        x = x.reshape((batch_size*num_channel, height, width))
        x_hat = x_hat.reshape((batch_size*num_channel, height, width))

        centey = z[:, 0]
        centex = z[:, 1]
        logdel = z[:, 2]
        logsig = z[:, 3]
        loggam = z[:, 4]

        centy = 0.5 * (self.input_shape[2] + 1) * (centey + 1)
        centx = 0.5 * (self.input_shape[3] + 1) * (centex + 1)
        delta = T.exp(logdel)
        delta = (max(self.input_shape[2], self.input_shape[3]) - 1) * delta /\
                 (max(self.glimpse_shape[2], self.glimpse_shape[3]) - 1)
        sigma = T.exp(0.5 * logsig)
        gamma = T.exp(loggam).dimshuffle(0, 'x')

        Fy, Fx = self.filter_bank(centx, centy, delta, sigma)
        if num_channel > 1:
            Fx = T.repeat(Fx, num_channel, axis=0)
            Fy = T.repeat(Fy, num_channel, axis=0)

        x = batched_dot(batched_dot(Fy, x), Fx.transpose(0, 2, 1))
        x_hat = batched_dot(batched_dot(Fy, x_hat), Fx.transpose(0, 2, 1))
        reshape_shape = (batch_size,
                         num_channel*self.glimpse_shape[2]*self.glimpse_shape[3])
        return gamma * T.concatenate([x.reshape(reshape_shape), x_hat.reshape(reshape_shape)], axis=1)
Author: Beronx86, Project: cle, Lines of code: 30, Source file: draw.py

Example 10: create_prediction

 def create_prediction(self):  # run a single prediction pass
     gfs=self.gfs
     pm25in=self.pm25in
     # initial (first) forward pass
     x=T.concatenate([gfs[:,0],gfs[:,1],gfs[:,2],pm25in[:,0],pm25in[:,1],self.cnt[:,:,0]],axis=1)
     if self.celltype==RNN:
         init_hiddens = [(T.repeat(T.shape_padleft(create_shared(layer.hidden_size, name="RNN.initial_hidden_state")),
                                   x.shape[0], axis=0)
                          if x.ndim > 1 else create_shared(layer.hidden_size, name="RNN.initial_hidden_state"))
                         if hasattr(layer, 'initial_hidden_state') else None
                         for layer in self.model.layers]
     if self.celltype==LSTM:
         init_hiddens = [(T.repeat(T.shape_padleft(create_shared(layer.hidden_size * 2, name="LSTM.initial_hidden_state")),
                                   x.shape[0], axis=0)
                          if x.ndim > 1 else create_shared(layer.hidden_size * 2, name="LSTM.initial_hidden_state"))
                         if hasattr(layer, 'initial_hidden_state') else None
                         for layer in self.model.layers]
     self.layerstatus=self.model.forward(x,init_hiddens)
     #results.shape?40*1
     self.results=self.layerstatus[-1]
     if self.steps > 1:
         self.layerstatus=self.model.forward(T.concatenate([gfs[:,1],gfs[:,2],gfs[:,3],pm25in[:,1],self.results,self.cnt[:,:,1]],axis=1),self.layerstatus)
         self.results=T.concatenate([self.results,self.layerstatus[-1]],axis=1)      
         # then run the remaining steps-2 forward passes
         for i in xrange(2,self.steps):
             self.layerstatus=self.model.forward(T.concatenate([gfs[:,i],gfs[:,i+1],gfs[:,i+2],T.shape_padright(self.results[:,i-2]),T.shape_padright(self.results[:,i-1]),self.cnt[:,:,i]],axis=1),self.layerstatus)
             #need T.shape_padright???
             self.results=T.concatenate([self.results,self.layerstatus[-1]],axis=1)
     return self.results
Author: subercui, Project: RNN_pm25, Lines of code: 29, Source file: Pm25RNN_ZEROINIT.py

Example 11: keep_max

def keep_max(input, theta, k):
    """
    :type input: theano.tensor.tensor4
    :param input: the input data
                
    :type theta: theano.tensor.matrix
    :param theta: the parameter for sigmoid function
                            
    :type k: int 
    :param k: the number k used to define top k sentence to remain
    """
    sig_input = T.nnet.sigmoid(T.dot(input, theta))
    if k == 0: # using all the sentences
        result = input * T.addbroadcast(sig_input, 3)
        return result, sig_input

    # get the sorted idx
    sort_idx = T.argsort(sig_input, axis=2)
    k_max_ids = sort_idx[:,:,-k:,:]
    dim0, dim1, dim2, dim3 = k_max_ids.shape
    batchids = T.repeat(T.arange(dim0), dim1*dim2*dim3)
    mapids = T.repeat(T.arange(dim1), dim2*dim3).reshape((1, dim2*dim3))
    mapids = T.repeat(mapids, dim0, axis=0).flatten()
    rowids = k_max_ids.flatten()
    colids = T.arange(dim3).reshape((1, dim3))
    colids = T.repeat(colids, dim0*dim1*dim2, axis=0).flatten()
    # construct masked data
    sig_mask = T.zeros_like(sig_input)
    choosed = sig_input[batchids, mapids, rowids, colids]
    sig_mask = T.set_subtensor(sig_mask[batchids, mapids, rowids, colids], 1)

    input_mask = sig_mask * sig_input
    result = input * T.addbroadcast(input_mask, 3)
    return result, sig_input
Author: Tskatom, Project: Protest_Event_Encoder, Lines of code: 34, Source file: MIL_SIG_cnn.py

Example 12: construct_graph_ref

    def construct_graph_ref(self, args, x, length, popstats=None):

        p = self.allocate_parameters(args)

        if args.baseline:
            def bn(x, gammas, betas):
                return x + betas
        else:
            def bn(x, gammas, betas):
                mean, var = x.mean(axis=0, keepdims=True), x.var(axis=0, keepdims=True)
                # if only
                mean.tag.batchstat, var.tag.batchstat = True, True
                #var = T.maximum(var, args.epsilon)
                var = var + args.epsilon
                return (x - mean) / T.sqrt(var) * gammas + betas

        def stepfn(x, dummy_h, dummy_c, h, c):
            # a_mean, b_mean, c_mean,
            # a_var, b_var, c_var):

            a_mean, b_mean, c_mean = 0, 0, 0
            a_var, b_var, c_var = 0, 0, 0

            atilde = T.dot(h, p.Wa)
            btilde = x
            a_normal = bn(atilde, p.a_gammas, p.ab_betas)
            b_normal = bn(btilde, p.b_gammas, 0)
            ab = a_normal + b_normal
            g, f, i, o = [fn(ab[:, j * args.num_hidden:(j + 1) * args.num_hidden])
                          for j, fn in enumerate([self.activation] + 3 * [T.nnet.sigmoid])]
            c = dummy_c + f * c + i * g
            c_normal = bn(c, p.c_gammas, p.c_betas)
            h = dummy_h + o * self.activation(c_normal)
            return h, c, atilde, btilde, c_normal



        xtilde = T.dot(x, p.Wx)

        if args.noise:
            # prime h with white noise
            Trng = MRG_RandomStreams()
            h_prime = Trng.normal((xtilde.shape[1], args.num_hidden), std=args.noise)
        elif args.summarize:
            # prime h with mean of example
            h_prime = x.mean(axis=[0, 2])[:, None]
        else:
            h_prime = 0

        dummy_states = dict(h=T.zeros((xtilde.shape[0], xtilde.shape[1], args.num_hidden)),
                            c=T.zeros((xtilde.shape[0], xtilde.shape[1], args.num_hidden)))

        [h, c, atilde, btilde, htilde], _ = theano.scan(
            stepfn,
            sequences=[xtilde, dummy_states["h"], dummy_states["c"]],
            outputs_info=[T.repeat(p.h0[None, :], xtilde.shape[1], axis=0) + h_prime,
                          T.repeat(p.c0[None, :], xtilde.shape[1], axis=0),
                          None, None, None])
        return dict(h=h, c=c,
                    atilde=atilde, btilde=btilde, htilde=htilde), [], dummy_states, popstats
Author: EricDoug, Project: recurrent-batch-normalization, Lines of code: 60, Source file: sequential_mnist.py

Example 13: initial_states

 def initial_states(self, batch_size, *args, **kwargs):
     return [
         tensor.repeat(self.initial_state_[None, :], batch_size, 0),
         tensor.repeat(self.initial_cells[None, :], batch_size, 0),
         tensor.repeat(self.initial_location[None, :], batch_size, 0),
         tensor.repeat(self.initial_scale[None, :], batch_size, 0),
     ]
Author: mohammadpz, Project: LSTM-Attention, Lines of code: 7, Source file: LSTM_attention_model.py

Example 14: initial_state_with_taps

 def initial_state_with_taps(self, num=None):
     if num is not None:
         cell = T.repeat(self.default_cell, num, axis=0)
         output = T.repeat(self.default_output, num, axis=0)
     else:
         cell = self.default_cell
         output = self.default_output
     return dict(initial=output, taps=[-1]), dict(initial=cell, taps=[-1])
Author: songjun54cm, Project: ml_idiot, Lines of code: 8, Source file: LSTM.py

Example 15: softmax

def softmax(y):
    # numerically stable softmax over the last axis of a 3-D tensor
    y_max = T.max(y, axis=2)
    y_max_rep = y_max.reshape((y_max.shape[0], y_max.shape[1], 1))
    y_opt = y - T.repeat(y_max_rep, y.shape[2], axis=2)
    y_sum = T.sum(T.exp(y_opt), axis=2)
    y_reshape = y_sum.reshape((y_sum.shape[0], y_sum.shape[1], 1))
    a = T.exp(y_opt) / T.repeat(y_reshape, y.shape[2], axis=2)
    return a
Author: BinbinBian, Project: MCTest-2, Lines of code: 8, Source file: func.py


Note: The theano.tensor.repeat examples above were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not reproduce without permission.