

Python extra_ops.repeat Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.extra_ops.repeat. If you are wondering what repeat does, how to call it, or what real-world uses look like, the curated examples below should help.


The following presents 15 code examples of the repeat function, listed by default in order of popularity.
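
Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; all names are illustrative) showing that theano.tensor.extra_ops.repeat mirrors numpy.repeat: each element along the given axis is repeated the requested number of times.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.extra_ops import repeat

x = T.matrix('x')
# Repeat every element twice along axis 1 (columns)
f = theano.function([x], repeat(x, 2, axis=1))

a = np.arange(4).reshape(2, 2).astype(theano.config.floatX)
print(f(a))                     # [[0. 0. 1. 1.], [2. 2. 3. 3.]]
print(np.repeat(a, 2, axis=1))  # identical result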

Example 1: reverseConv

    def reverseConv(self, activations, img_shape, flipped_filter, dim2=1):

        # Reverse the max-pooling step first
        self.zp = activations.reshape((self.output.shape[0] * self.output.shape[1] * self.output.shape[2], self.output.shape[3]))
        lengthen = repeat(activations, self.poolsize[0], axis=2)
        self.lengthen = repeat(lengthen, self.poolsize[1], axis=3)
        self.w_shape = self.W.shape
        self.changed_W = self.W.dimshuffle(1, 0, 2, 3)

        # Reverse the convolutional step: convolve with channel-swapped, flipped filters
        rev_conv_out = conv.conv2d(input=self.lengthen, filters=self.changed_W[:, :, ::-1, ::-1],
                                   filter_shape=flipped_filter, image_shape=img_shape, border_mode='full')

        # Convert the 'full' output back to 'same'
        s1 = numpy.floor((self.filter_shape[2] - 1) / 2.0).astype(int)
        e1 = numpy.ceil((self.filter_shape[2] - 1) / 2.0).astype(int)

        # The time axis must stay 'same' in both directions; the frequency axis
        # is 'valid' on the forward pass and 'full' on the backward pass.

        if dim2:  # convert the frequency axis from 'full' to 'valid'
            s2 = numpy.floor((self.filter_shape[3] - 1) / 2.0).astype(int)
            e2 = numpy.ceil((self.filter_shape[3] - 1) / 2.0).astype(int)
            if s1 == e1:
                rev_conv_out = rev_conv_out[:, :, :, s2:-e2]
            else:
                rev_conv_out = rev_conv_out[:, :, s1:-e1, s2:-e2]
        else:
            rev_conv_out = rev_conv_out[:, :, s1:-e1, :]

        self.reverseOutput = rev_conv_out
Developer: sl3368, Project: DeepBirdBrain, Lines: 29, Source: layer_classes.py
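
The 'full'-to-'same' trimming above rests on a small piece of arithmetic: a full convolution of a length-n signal with a length-k filter yields n + k - 1 outputs, so dropping floor((k-1)/2) samples from the front and ceil((k-1)/2) from the back recovers exactly n. A quick NumPy check of this identity (a standalone sketch, not project code):

import numpy as np

n, k = 10, 4
full = np.convolve(np.ones(n), np.ones(k), mode='full')  # n + k - 1 = 13 outputs
s = int(np.floor((k - 1) / 2.0))
e = int(np.ceil((k - 1) / 2.0))
same = full[s:len(full) - e]   # avoids full[s:-e], which breaks when e == 0
assert same.shape[0] == n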

Example 2: test_repeatOp

    def test_repeatOp(self):
        for ndim in range(3):
            x = T.TensorType(config.floatX, [False] * ndim)()
            a = np.random.random((10, ) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in tensor.discrete_dtypes:
                    r_var = T.scalar(dtype=dtype)
                    r = np.asarray(3, dtype=dtype)
                    if dtype in self.numpy_unsupported_dtypes:
                        self.assertRaises(TypeError,
                                repeat, x, r_var, axis=axis)
                    else:
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))

                        r_var = T.vector(dtype=dtype)
                        if axis is None:
                            r = np.random.random_integers(
                                    5, size=a.size).astype(dtype)
                        else:
                            r = np.random.random_integers(
                                    5, size=(10,)).astype(dtype)

                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))
Developer: 317070, Project: Theano, Lines: 30, Source: test_extra_ops.py

Example 3: output

    def output(self, input, n_batch=None):
        ###--- Unpool

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(input, self.poolsize[0], axis=2), self.poolsize[1], axis=3) * self.mask

        image_shape = list(self.image_shape)
        if n_batch is not None:
            image_shape[0] = n_batch

        ###--- Unpool + conv
        # convolve input feature maps with filters
        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=unpool_out,
                kerns=self.W,
                subsample=(1,1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type')  

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
Developer: codeaudit, Project: mmdgm, Lines: 34, Source: UnpoolConvNon_DNN_DNN.py
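
The nested Textra.repeat calls above implement nearest-neighbour unpooling: each activation is tiled across a poolsize[0] x poolsize[1] block. A minimal sketch of the pattern under assumed shapes (the mask multiplication from the example is omitted):

import numpy as np
import theano
import theano.tensor as T
import theano.tensor.extra_ops as Textra

x = T.tensor4('x')  # (batch, channels, height, width)
unpool = Textra.repeat(Textra.repeat(x, 2, axis=2), 2, axis=3)
f = theano.function([x], unpool)

a = np.arange(4, dtype=theano.config.floatX).reshape(1, 1, 2, 2)
print(f(a).shape)  # (1, 1, 4, 4): each input value now fills a 2x2 block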

Example 4: drop_output

    def drop_output(self, input, drop=0, rng=None, p=0.5, n_batch=None):
        ###--- Unpool

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(input, self.poolsize[0], axis=2), self.poolsize[1], axis=3) * self.mask

        image_shape = list(self.image_shape)
        if n_batch is not None:
            image_shape[0] = n_batch

        ###--- Unpool + conv
        # convolve input feature maps with filters
        if self.border_mode == 'valid':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='valid'
            )
        elif self.border_mode == 'same':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
            padding_w = theano.shared((self.filter_shape[2] - 1) / 2)
            padding_h = theano.shared((self.filter_shape[3] - 1) / 2)
            conv_out = conv_out[:,:,padding_w:-padding_w,padding_h:-padding_h]
        elif self.border_mode == 'full':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
        else:
            raise Exception('Unknown conv type')

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        output= (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
        droppedOutput = nonlinearity.dropout(rng, output, p)
        return T.switch(T.neq(drop, 0), droppedOutput, output)
Developer: codeaudit, Project: mmdgm, Lines: 59, Source: UnpoolConvNon.py

Example 5: test_repeatOp

    def test_repeatOp(self):
        for ndim in [1, 3]:
            x = T.TensorType(config.floatX, [False] * ndim)()
            a = np.random.random((10, ) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in tensor.integer_dtypes:
                    r_var = T.scalar(dtype=dtype)
                    r = np.asarray(3, dtype=dtype)
                    if (dtype == 'uint64' or
                            (dtype in self.numpy_unsupported_dtypes and
                                r_var.ndim == 1)):
                        self.assertRaises(TypeError, repeat, x, r_var, axis=axis)
                    else:
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))

                        r_var = T.vector(dtype=dtype)
                        if axis is None:
                            r = np.random.randint(
                                1, 6, size=a.size).astype(dtype)
                        else:
                            r = np.random.randint(
                                1, 6, size=(10,)).astype(dtype)

                        if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
                            self.assertRaises(TypeError,
                                              repeat, x, r_var, axis=axis)
                        else:
                            f = theano.function([x, r_var],
                                                repeat(x, r_var, axis=axis))
                            assert np.allclose(np.repeat(a, r, axis=axis),
                                               f(a, r))

                        # check the case where r is a list holding a single integer, e.g. [3]
                        r = np.random.randint(
                            1, 11, size=()).astype(dtype) + 2
                        f = theano.function([x],
                                            repeat(x, [r], axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a))
                        assert not np.any([isinstance(n.op, RepeatOp)
                                           for n in f.maker.fgraph.toposort()])

                        # check the case where r is a Theano TensorType whose broadcastable pattern is (True,)
                        r_var = theano.tensor.TensorType(broadcastable=(True,),
                                                         dtype=dtype)()
                        r = np.random.randint(1, 6, size=(1,)).astype(dtype)
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r[0], axis=axis),
                                           f(a, r))
                        assert not np.any([isinstance(n.op, RepeatOp)
                                           for n in f.maker.fgraph.toposort()])
Developer: Thrandis, Project: Theano, Lines: 56, Source: test_extra_ops.py

Example 6: step

    def step(time_idx, lstm_hidden):
        M_pad = repeat(P.memory_init.dimshuffle((0, 'x', 1)), lstm_hidden.shape[1], axis=1)
        M_curr_temp = T.concatenate([M_pad, lstm_hidden[:time_idx, :, :]], axis=0)
        M_curr = M_curr_temp.transpose((1, 0, 2))
        input_curr = lstm_hidden[time_idx, :, :]

        weight_prev = T.zeros([input_curr.shape[0], time_idx + 1])
        weight_inter = weight_prev

        for head in heads:
            weight_inter, att_w_inter, key = build_head_curr(
                weight_inter, M_curr, head, input_curr)

        weight_curr = weight_inter
        entropy_temp = -1 * (weight_curr * T.log(weight_curr))
        entropy = T.sum(entropy_temp, axis=1)

        key_normalize = T.nnet.softmax(key)
        key_entropy_temp = -1 * (key_normalize * T.log(key_normalize))
        key_entropy = T.sum(key_entropy_temp, axis=1)

        att_w_curr = att_w_inter

        att_M_curr = att_w_curr.dimshuffle(0, 'x', 1) * M_curr
        read_curr = build_read(att_M_curr, weight_curr)
        output = controller(input_curr, read_curr)

        return output, entropy, key_entropy
Developer: darongliu, Project: Lstm_Turing_LM, Lines: 28, Source: model.py

Example 7: output

    def output(self, dropout_active=False):
        X = self.embedded()
        out, _ = theano.scan(self.op.step,
                             sequences=[X],
                             outputs_info=[repeat(self.op.id, X.shape[1], axis=0)])
        return out[-1]
Developer: gchrupala, Project: imaginet, Lines: 7, Source: layers.py

Example 8: step

    def step(time_idx, lstm_hidden):
        M_pad = repeat(P.memory_init.dimshuffle((0, 'x', 1)), lstm_hidden.shape[1], axis=1)
        M_curr_temp = T.concatenate([M_pad, lstm_hidden[:time_idx, :, :]], axis=0)
        M_curr = M_curr_temp.transpose((1, 0, 2))
        input_curr = lstm_hidden[time_idx, :, :]

        weight_prev = T.zeros([input_curr.shape[0], time_idx + 1])
        weight_inter = weight_prev

        for head in heads:
            weight_inter, att_w_inter = build_head_curr(
                weight_inter, M_curr, head, input_curr)

        weight_curr = weight_inter
        pad_matrix = T.zeros((input_curr.shape[0], lstm_hidden.shape[0] - weight_curr.shape[1]), dtype='float32')
        weight_pad = T.concatenate([weight_curr, pad_matrix], axis=1)
        entropy_temp = -1 * (weight_curr * T.log(weight_curr))
        entropy = T.sum(entropy_temp, axis=1)
        att_w_curr = att_w_inter

        att_M_curr = att_w_curr.dimshuffle(0, 'x', 1) * M_curr
        read_curr = build_read(att_M_curr, weight_curr)
        output = controller(input_curr, read_curr)

        return output, entropy, weight_pad
Developer: darongliu, Project: Lstm_Turing_LM, Lines: 25, Source: model.py

Example 9: drop_output

    def drop_output(self, input, drop=0, rng=None, p=0.5, n_batch=None):
        ###--- Unpool

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(input, self.poolsize[0], axis=2), self.poolsize[1], axis=3) * self.mask

        image_shape = list(self.image_shape)
        if n_batch is not None:
            image_shape[0] = n_batch

        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=unpool_out,
                kerns=self.W,
                subsample=(1,1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type')
        
        if self.cnorm:
            print 'cnorm size', self.filter_shape[0]/8+1
            conv_out=ContrastCrossChannels.ContrastCrossChannels(input=conv_out, n=self.filter_shape[0]/8+1)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        output= (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
        droppedOutput = nonlinearity.dropout(rng, output, p)
        return T.switch(T.neq(drop, 0), droppedOutput, output)
Developer: codeaudit, Project: mmdgm, Lines: 38, Source: UnpoolConvNon_GauInit_DNN.py

Example 10: step

    def step(time_idx, lstm_hidden, input_hidden, weighted_mem):  # lstm_hidden is used to generate the weights
        M_pad = repeat(P.memory_init.dimshuffle((0, 'x', 1)), lstm_hidden.shape[1], axis=1)
        weighted_M_pad = repeat(P.weighted_memory_init.dimshuffle((0, 'x', 1)), lstm_hidden.shape[1], axis=1)

        M_curr_temp = T.concatenate([M_pad, lstm_hidden[:time_idx, :, :]], axis=0)
        weighted_M_curr_temp = T.concatenate([weighted_M_pad, weighted_mem[:time_idx, :, :]], axis=0)

        M_curr = M_curr_temp.transpose((1, 0, 2))
        weighted_M_curr = weighted_M_curr_temp.transpose((1, 0, 2))
        input_curr = input_hidden[time_idx, :, :]

        weight_prev = T.zeros([input_curr.shape[0], time_idx + 1])
        weight_inter = weight_prev

        for head in heads:
            weight_inter = build_head_curr(
                weight_inter, M_curr, head, input_curr)

        weight_curr = weight_inter

        read_curr = build_read(weighted_M_curr, weight_curr)
        output = controller(input_curr, read_curr)

        return output
Developer: darongliu, Project: Lstm_Turing_LM, Lines: 24, Source: model.py

Example 11: output

    def output(self, dropout_active=False):
        X = self.l_in.output(dropout_active=dropout_active)
        if self.p_drop > 0. and dropout_active:
            X = dropout(X, self.p_drop)
        x_in = T.dot(X, self.w_in) + self.b_in
        out, _ = theano.scan(self.step,
                             sequences=[x_in],
                             outputs_info=[repeat(self.h0, x_in.shape[1], axis=0)],
                             non_sequences=[self.w_rec],
                             truncate_gradient=self.truncate_gradient)
        if self.seq_output:
            return out
        else:
            return out[-1]
Developer: gotomypc, Project: Passage, Lines: 15, Source: layers.py
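
Examples 7 and 11 (and the breze examples that follow) share one idiom: a single initial hidden state is replicated across the batch with repeat so that theano.scan's outputs_info matches the per-step (batch, hidden) shape. A hedged, self-contained sketch of that idiom (h0, the tanh step, and all shapes are illustrative, not from either project):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.extra_ops import repeat

n_hidden = 8
h0 = theano.shared(np.zeros((1, n_hidden), dtype=theano.config.floatX))
X = T.tensor3('X')  # (time, batch, n_hidden); scan iterates over axis 0

# One copy of h0 per sample in the batch: (1, n_hidden) -> (batch, n_hidden)
init = repeat(h0, X.shape[1], axis=0)

def step(x_t, h_tm1):
    return T.tanh(x_t + h_tm1)

out, _ = theano.scan(step, sequences=X, outputs_info=[init])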

Example 12: fawn_recurrent

def fawn_recurrent(
    inpt_mean, inpt_var, weights_mean, weights_var,
    f,
    initial_mean, initial_var):

    f_transfer = lookup(f, transfer_)
    def step(inpt_mean, inpt_var, him_m1, hiv_m1, hom_m1, hov_m1):
        wm, wv = weights_mean, weights_var

        pres_mean = T.dot(inpt_mean, wm)
        pres_var = (T.dot(inpt_mean ** 2, wv)
                    + T.dot(inpt_var, wm ** 2)
                    + T.dot(inpt_var, wv)
                    )

        post_mean, post_var = f_transfer(pres_mean, pres_var)
        return pres_mean, pres_var, post_mean, post_var


    if initial_mean.ndim == 1:
        initial_mean = repeat(
            initial_mean.dimshuffle('x', 0), inpt_mean.shape[1], axis=0)
    if initial_var.ndim == 1:
        initial_var = repeat(
            initial_var.dimshuffle('x', 0), inpt_mean.shape[1], axis=0)

    (hidden_in_mean_rec, hidden_in_var_rec, hidden_mean_rec, hidden_var_rec), _ = theano.scan(
        step,
        sequences=[inpt_mean, inpt_var],
        outputs_info=[T.zeros_like(inpt_mean[0]),
                      T.zeros_like(inpt_mean[0]),
                      initial_mean,
                      initial_var])

    return (hidden_in_mean_rec, hidden_in_var_rec,
            hidden_mean_rec, hidden_var_rec)
Developer: Wiebke, Project: breze, Lines: 36, Source: sequential.py

Example 13: recurrent_layer_stateful

def recurrent_layer_stateful(hidden_inpt, hidden_to_hidden, f, initial_hidden):
    def step(x, s_m1, hi_tm1, h_tm1):
        hi = T.dot(h_tm1, hidden_to_hidden)
        hi += x
        s, h = f(s_m1, hi)
        return s, hi, h

    initial_hidden_b = repeat(
        initial_hidden.dimshuffle('x', 0), hidden_inpt.shape[1], axis=0)

    (states, hidden_in_rec, hidden_rec), _ = theano.scan(
        step,
        sequences=hidden_inpt,
        outputs_info=[
            T.zeros_like(initial_hidden_b),
            T.zeros_like(hidden_inpt[0]),
            initial_hidden_b])

    return states, hidden_in_rec, hidden_rec
Developer: Wiebke, Project: breze, Lines: 19, Source: rnn.py

Example 14: recurrent_layer

def recurrent_layer(hidden_inpt, hidden_to_hidden, f, initial_hidden):
    def step(x, hi_tm1):
        h_tm1 = f(hi_tm1)
        hi = T.dot(h_tm1, hidden_to_hidden) + x
        return hi

    # Modify the initial hidden state to obtain several copies of
    # it, one per sample.
    initial_hidden_b = repeat(initial_hidden, hidden_inpt.shape[1], axis=0)
    initial_hidden_b = initial_hidden_b.reshape(
        (hidden_inpt.shape[1], hidden_inpt.shape[2]))

    hidden_in_rec, _ = theano.scan(
        step,
        sequences=hidden_inpt,
        outputs_info=[initial_hidden_b])

    hidden_rec = f(hidden_in_rec)

    return hidden_in_rec, hidden_rec
Developer: ddofer, Project: breze, Lines: 20, Source: rnn.py

Example 15: output

    def output(self, pool=True):
        X = self.input
        if self.backward:
            # flip along second axis
            X = X[:, ::-1]
            self.mask = self.mask[:, ::-1]
        # shuffle dimension so scan over axis 1
        X = X.dimshuffle(1, 0, 2)
        if self.mask is not None:
            mask = self.mask.dimshuffle(1, 0)
            seq_input = [mask, X]
            step = self.step_masked
        else:
            seq_input = [X]
            step = self.step
        out, _ = theano.scan(
            step,
            sequences=seq_input,
            outputs_info=[repeat(self.h0, X.shape[1], axis=0)],
            non_sequences=[self.u_z, self.u_r, self.u_h],
            truncate_gradient=self.truncate_gradient
        )
        # shuffle dimension back
        out = out.dimshuffle(1, 0, 2)
        if pool:
            if self.mask is not None:
                out = (out * self.mask[:, :, None]).sum(axis=1)
                out = out / self.mask.sum(axis=1)[:, None]
                return out
            return T.mean(out, axis=1)
        elif self.seq_output:
            if self.mask is not None:
                return out * self.mask[:, :, None]
            else:
                return out
        else:
            return out[-1]
Developer: csong27, Project: NgramNeuralNetworks, Lines: 37, Source: recurrent_layer.py


Note: The theano.tensor.extra_ops.repeat examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's license before using or redistributing the code, and do not repost without permission.