

Python RandomStreams.binomial method code examples

This article collects typical usage examples of the Python method theano.tensor.shared_randomstreams.RandomStreams.binomial. If you are wondering what RandomStreams.binomial does, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also browse further usage examples of the enclosing class, theano.tensor.shared_randomstreams.RandomStreams.


The sections below present 15 code examples of the RandomStreams.binomial method, ordered by popularity by default.
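Before the examples, here is a minimal, self-contained sketch of the most common use of RandomStreams.binomial: drawing a Bernoulli mask for dropout. The variable names and the 0.8 keep probability are illustrative only and do not come from any of the quoted projects.

import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1234)      # symbolic RNG with a fixed seed
x = T.matrix('x')                    # activations to be masked
keep_prob = 0.8                      # probability of keeping a unit

# n=1 gives Bernoulli draws; size=x.shape makes the mask match x elementwise.
mask = srng.binomial(n=1, p=keep_prob, size=x.shape,
                     dtype=theano.config.floatX)
dropped = x * mask                   # zero out units where the mask is 0

f = theano.function([x], dropped)
sample = f(numpy.ones((2, 3), dtype=theano.config.floatX))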

Example 1: test_binomial_vector

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
    def test_binomial_vector(self):
        random = RandomStreams(utt.fetch_seed())
        n = tensor.lvector()
        prob = tensor.vector()
        out = random.binomial(n=n, p=prob)
        assert out.ndim == 1
        f = function([n, prob], out)

        n_val = [1, 2, 3]
        prob_val = numpy.asarray([.1, .2, .3], dtype=config.floatX)
        seed_gen = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(n_val, prob_val)
        numpy_val0 = numpy_rng.binomial(n=n_val, p=prob_val)
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        val1 = f(n_val[:-1], prob_val[:-1])
        numpy_val1 = numpy_rng.binomial(n=n_val[:-1], p=prob_val[:-1])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = function([n, prob], random.binomial(n=n, p=prob, size=(3,)))
        val2 = g(n_val, prob_val)
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy_rng.binomial(n=n_val, p=prob_val, size=(3,))
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, n_val[:-1], prob_val[:-1])
Developer: ChinaQuants, Project: Theano, Lines: 32, Source: test_shared_randomstreams.py

Example 2: theano_sentence_prediction

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
    def theano_sentence_prediction(self, Sentence, Chars, WordLengths):

        input_lstm_res_f = self.input_lstm_forward_layer.function(Sentence, Chars, WordLengths)
        input_lstm_res_b = self.input_lstm_backward_layer.function(Sentence, Chars, WordLengths)
        input_combined = T.concatenate((input_lstm_res_f, input_lstm_res_b), axis=1)

        #Make pairwise features. This is really just "tensor product with concatenation instead of multiplication". Is there a command for that?
        full_matrix, _ = theano.scan(fn=self.__pairwise_features,
                                  outputs_info=None,
                                  sequences=input_combined,
                                  non_sequences=[input_combined, Sentence.shape[0]])

        if len(self.lstm_layers) > 0 and self.lstm_layers[0].training:
            srng = RandomStreams(seed=12345)
            full_matrix = T.switch(srng.binomial(size=(Sentence.shape[0], Sentence.shape[0]+1, self.hidden_dimension*4), p=0.5), full_matrix, 0)
        else:
            full_matrix = 0.5 * full_matrix

        full_matrix = self.transition_layer.function(full_matrix)
            
        for layer in self.lstm_layers:
            if layer.training:
                print("hah-train")
                full_matrix = T.switch(srng.binomial(size=(Sentence.shape[0], Sentence.shape[0]+1, self.hidden_dimension*4), p=0.5), full_matrix, 0)
            else:
                print("heh-notrain")
                full_matrix = 0.5 * full_matrix
            
            
            full_matrix = layer.function(full_matrix)
        
        final_matrix = self.output_convolution.function(full_matrix)

        return T.nnet.softmax(final_matrix)
Developer: MichSchli, Project: Speciale, Lines: 36, Source: fourway_lstm_both.py

Example 3: theano_sentence_prediction

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
    def theano_sentence_prediction(self, Vs):
        #Make pairwise features. This is really just "tensor product with concatenation instead of multiplication". Is there a command for that?
        pairwise_vs, _ = theano.scan(fn=self.__pairwise_features,
                                  outputs_info=None,
                                  sequences=Vs,
                                  non_sequences=[Vs, Vs.shape[0]])

        if self.input_lstm_layer.training:
            srng = RandomStreams(seed=12345)
        
        full_matrix = self.input_lstm_layer.function(pairwise_vs)

        for layer in self.lstm_layers:            
            if self.input_lstm_layer.training:
                print("hah-train")
                full_matrix = T.switch(srng.binomial(size=(Vs.shape[0], Vs.shape[0]+1, self.hidden_dimension*4), p=0.5), full_matrix, 0)
            else:
                print("heh-notrain")
                full_matrix = 0.5 * full_matrix
            
            full_matrix = layer.function(full_matrix)

        if self.input_lstm_layer.training:
            print("hah-train")
            full_matrix = T.switch(srng.binomial(size=(Vs.shape[0], Vs.shape[0]+1, self.hidden_dimension*4), p=0.5), full_matrix, 0)
        else:
            print("heh-notrain")
            full_matrix = 0.5 * full_matrix
            
        final_matrix = self.output_convolution.function(full_matrix)

        return T.nnet.softmax(final_matrix)
Developer: MichSchli, Project: Speciale, Lines: 34, Source: corner_lstm.py

Example 4: Dropout

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
class Dropout(Layer):
    def __init__(self, p):
        self.name = self.__class__.__name__
        self.train_flag = True
        self.p = p
        self.srng = RandomStreams(seed=np.random.randint(920927))

    def f(self, inputs, is_train):
        out = []
        if isinstance(self.p, list):
            if len(self.p) != len(inputs):
                raise ValueError(self.name + ': The number of inputs must equal the number of probabilities when p is a list.')

        for i in xrange(len(inputs)):
            x = inputs[i]
            p = self.p[i] if isinstance(self.p, list) else self.p 
            if p > 0.:
                retain_p = 1. - p
                if is_train:
                    x *= self.srng.binomial(x.shape, p=retain_p, dtype=theano.config.floatX) 
                else:
                    x *= retain_p 
                out.append(x)
            else:
                out.append(x)
        return out 
Developer: yangli625, Project: ReId_theano, Lines: 28, Source: Layer_new.py

Example 5: _dropout_from_layer

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
    def _dropout_from_layer(self, layer):

        stream = RandomStreams(self.numpy_range.randint(999999))

        mask = stream.binomial(size=layer.shape, n=1, p=(1-self._p), dtype=theano.config.floatX)

        return layer * Tensor.cast(mask, theano.config.floatX)
Developer: aviveise, Project: double_encoder, Lines: 9, Source: symmetric_dropout_hidden_layer.py

Example 6: CTC_train

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
 def CTC_train(self):
     CTC_LOSSs = T.cast(T.mean(self.CTC_LOSS(), axis=0), "float32")
     train_data_d = []
     train_data_m = []
     train_data_m_s = [] 
     learning_rate = T.scalar()
     decay = T.scalar()
     seed = np.random.randint(10e6)
     rng = RandomStreams(seed=seed)
     grad_rate = 0.8
     for data in self.train_data:
         data_d = rng.binomial((1,), p=grad_rate, dtype="float32")[0]*T.grad(CTC_LOSSs, data)
         train_data_d.append(data_d)
         data_m_s = theano.shared(np.zeros(data.get_value().shape).astype(np.float32))
         train_data_m_s.append(data_m_s)
         data_m = data_m_s*decay + (1-decay)*data_d**2
         train_data_m.append(data_m)
     #self.grad_test = theano.function([self.X, self.Y], train_data_d[-4])
     #self.data_d_print = theano.function([self.X,self.Y],train_data_d[0][0])
     #upd = [(d,d-learning_rate*d_d)for d,d_d in zip(self.train_data,train_data_d)]
     upd = [(d, d-learning_rate*d_d/T.sqrt(d_m+1e-4))for d,d_d,d_m in zip(self.train_data,train_data_d,train_data_m)]
     upd1 = [(d_m_s, decay*d_m_s+(1-decay)*d_d**2) for d_m_s,d_d in zip(train_data_m_s,train_data_d)]
     upd +=upd1    
     #self.test = theano.function([self.X,self.Y],train_data_d[0])
     self.sgd_train = theano.function([self.X, self.Y, learning_rate, decay],
                                      [],
                                      updates = upd
                                      )
Developer: nightinwhite, Project: Theano-NN_Starter, Lines: 30, Source: Layer.py

Example 7: __init__

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
    def __init__(self,X,mask,shape,is_train=1,p=0.5,state_pre=None):
        prefix="GRU"
        self.in_size,self.hidden_size=shape

        self.W_xr=theano.shared(value=np.asarray((np.random.randn(self.in_size,self.hidden_size) * 0.1),dtype=theano.config.floatX),
                                name=prefix+'_W_xr')
        self.W_hr=theano.shared(value=np.asarray((np.random.randn(self.hidden_size,self.hidden_size) * 0.1),dtype=theano.config.floatX),
                                name=prefix+'_W_hr')
        self.b_r=theano.shared(value=np.asarray(np.zeros(self.hidden_size),dtype=theano.config.floatX),
                               name=prefix+'_b_r')

        self.W_xz=theano.shared(value=np.asarray((np.random.randn(self.in_size,self.hidden_size) * 0.1),dtype=theano.config.floatX),
                                name=prefix+'_W_xz')
        self.W_hz=theano.shared(value=np.asarray((np.random.randn(self.hidden_size,self.hidden_size) * 0.1),dtype=theano.config.floatX),
                                name=prefix+'_W_hz')
        self.b_z=theano.shared(value=np.asarray(np.zeros(self.hidden_size),dtype=theano.config.floatX),
                               name=prefix+'_b_z')

        self.W_xh=theano.shared(value=np.asarray((np.random.randn(self.in_size,self.hidden_size) * 0.1),dtype=theano.config.floatX),
                                name=prefix+'_W_xh')
        self.W_hh=theano.shared(value=np.asarray((np.random.randn(self.hidden_size,self.hidden_size) * 0.1),dtype=theano.config.floatX),
                                name=prefix+'_W_hh')
        self.b_h=theano.shared(value=np.asarray(np.zeros(self.hidden_size),dtype=theano.config.floatX),
                               name=prefix+'_b_h')

        self.X=X
        self.mask=mask


        batch_size=self.X.shape[1]
        if state_pre is None:
            state_pre=T.zeros((batch_size,self.hidden_size),dtype=theano.config.floatX)

        def _step(x,m,h_tm1):
            r=T.nnet.sigmoid(T.dot(x,self.W_xr) + T.dot(h_tm1,self.W_hr) +self.b_r)
            z=T.nnet.sigmoid(T.dot(x,self.W_xz) + T.dot(h_tm1,self.W_hz) +self.b_z)

            gh=T.tanh(T.dot(x , self.W_xh) + T.dot(r * h_tm1 , self.W_hh) + self.b_h)

            h_t=z * h_tm1 + (T.ones_like(z) - z) * gh

            h_t = h_t * m[:,None]

            return h_t

        h,_=theano.scan(fn=_step,
                        sequences=[self.X,self.mask],
                        outputs_info=state_pre)
        self.h=h
        if p>0:
            trng=RandomStreams(12345)
            drop_mask=trng.binomial(n=1,p=1-p,size=h.shape,dtype=theano.config.floatX)
            self.activation=T.switch(T.eq(is_train,1),h*drop_mask,h*(1-p))
        else:
            self.activation=T.switch(T.eq(is_train,1),h,h)

        self.params=[self.W_xr,self.W_hr,self.b_r,
                     self.W_xz,self.W_hz,self.b_z,
                     self.W_xh,self.W_hh,self.b_h]
Developer: jiangnanHugo, Project: seq2seq, Lines: 61, Source: gru.py

Example 8: __init__

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
  def __init__(self, input, filter_shape, corruption_level = 0.1, 
               shared_W = None, shared_b = None, image_shape = None, 
               poolsize = (2,2)):

    theano_rng = RandomStreams()
    
    fan_in = numpy.prod(filter_shape[1:])
    fan_out = filter_shape[0] * numpy.prod(filter_shape[2:])

    center = theano.shared(value = 1, name="center")
    scale = theano.shared(value = 2, name="scale")

    if shared_W is not None and shared_b is not None:
        self.W = shared_W
        self.b = shared_b
    else:
        initial_W = numpy.asarray( numpy.random.uniform(
              low = -numpy.sqrt(6./(fan_in+fan_out)),
              high = numpy.sqrt(6./(fan_in+fan_out)),
              size = filter_shape), dtype = theano.config.floatX)
        initial_b = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.W = theano.shared(value = initial_W, name = "W")
        self.b = theano.shared(value = initial_b, name = "b")
    
 
    initial_b_prime= numpy.zeros((filter_shape[1],),dtype=theano.config.floatX)

    self.b_prime = theano.shared(value = initial_b_prime, name = "b_prime")
 
    self.x = input

    self.tilde_x = theano_rng.binomial( self.x.shape, 1, 1 - corruption_level,dtype=theano.config.floatX) * self.x

    conv1_out = conv.conv2d(self.tilde_x, self.W, filter_shape=filter_shape,
                            image_shape=image_shape, border_mode='valid')
    
    self.y = T.tanh(conv1_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    
    da_filter_shape = [ filter_shape[1], filter_shape[0], 
                        filter_shape[2], filter_shape[3] ]
    initial_W_prime =  numpy.asarray( numpy.random.uniform( \
              low = -numpy.sqrt(6./(fan_in+fan_out)), \
              high = numpy.sqrt(6./(fan_in+fan_out)), \
              size = da_filter_shape), dtype = theano.config.floatX)
    self.W_prime = theano.shared(value = initial_W_prime, name = "W_prime")

    conv2_out = conv.conv2d(self.y, self.W_prime,
                            filter_shape = da_filter_shape,
                            border_mode='full')

    self.z =  (T.tanh(conv2_out + self.b_prime.dimshuffle('x', 0, 'x', 'x'))+center) / scale

    scaled_x = (self.x + center) / scale

    self.L = - T.sum( scaled_x*T.log(self.z) + (1-scaled_x)*T.log(1-self.z), axis=1 )

    self.cost = T.mean(self.L)

    self.params = [ self.W, self.b, self.b_prime ] 
Developer: sauravbiswasiupr, Project: image_transformations, Lines: 61, Source: stacked_convolutional_dae.py

Example 9: dropout

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
def dropout(random_state, X, keep_prob=0.5):
    if keep_prob > 0. and keep_prob < 1.:
        seed = random_state.randint(2 ** 30)
        srng = RandomStreams(seed)
        mask = srng.binomial(n=1, p=keep_prob, size=X.shape,
                             dtype=theano.config.floatX)
        return X * mask
    return X
Developer: youralien, Project: minet, Lines: 10, Source: minet.py

Example 10: dropout

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
def dropout(rng, x, p=0.5):
    """ Zero-out random values in x with probability p using rng """
    if p > 0. and p < 1.:
        seed = rng.randint(2 ** 30)
        srng = RandomStreams(seed)
        mask = srng.binomial(n=1, p=1.-p, size=x.shape, dtype=theano.config.floatX)
        return x * mask
    return x
Developer: BinbinBian, Project: deep-qa, Lines: 10, Source: nn_layers.py
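Examples 9 and 10 apply the random mask only at training time and leave rescaling to the caller. The sketch below shows one way such a helper can be wired into a combined train/test graph with T.switch; the is_train flag and the (1 - p) rescaling at test time mirror the pattern of Examples 7 and 11 rather than anything in the quoted projects.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

def dropout(rng, x, p=0.5):
    # Same idea as Example 10: keep each value with probability 1 - p.
    if 0. < p < 1.:
        srng = RandomStreams(rng.randint(2 ** 30))
        mask = srng.binomial(n=1, p=1. - p, size=x.shape,
                             dtype=theano.config.floatX)
        return x * mask
    return x

rng = np.random.RandomState(0)
x = T.matrix('x')
is_train = T.iscalar('is_train')    # 1 while training, 0 at test time
p = 0.5

# During training, multiply by the Bernoulli mask; at test time, rescale by
# (1 - p) so the expected value of each activation stays unchanged.
y = T.switch(T.eq(is_train, 1), dropout(rng, x, p), (1. - p) * x)
f = theano.function([x, is_train], y)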

Example 11: __init__

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
    def __init__(self, rng, x, n_in, n_h, p, training, rnn_batch_training=False):
        """ This is to initialise a standard RNN hidden unit

        :param rng: random state, fixed seed for reproducible results
        :param x: input data to current layer
        :param n_in: dimension of input data
        :param n_h: number of hidden units/blocks
        :param p: the probability of dropout
        :param training: a binary value to indicate training or testing (for dropout training)
        """
        self.input = x

        if p > 0.0:
            if training==1:
                srng = RandomStreams(seed=123456)
                self.input = T.switch(srng.binomial(size=x.shape,p=p), x, 0)
            else:
                self.input = (1-p) * x

        self.n_in = int(n_in)
        self.n_h  = int(n_h)

        self.rnn_batch_training = rnn_batch_training

        # random initialisation
        Wx_value = np.asarray(rng.normal(0.0, 1.0/np.sqrt(n_in), size=(n_in, n_h)), dtype=config.floatX)
        Wh_value = np.asarray(rng.normal(0.0, 1.0/np.sqrt(n_h), size=(n_h, n_h)), dtype=config.floatX)

        # Input gate weights
        self.W_xi = theano.shared(value=Wx_value, name='W_xi')
        self.W_hi = theano.shared(value=Wh_value, name='W_hi')

        # bias
        self.b_i = theano.shared(value=np.zeros((n_h, ), dtype=config.floatX), name='b_i')


        # initial value of hidden and cell state
        if self.rnn_batch_training:
            self.h0 = theano.shared(value=np.zeros((1, n_h), dtype = config.floatX), name = 'h0')
            self.c0 = theano.shared(value=np.zeros((1, n_h), dtype = config.floatX), name = 'c0')

            self.h0 = T.repeat(self.h0, x.shape[1], 0)
            self.c0 = T.repeat(self.c0, x.shape[1], 0)
        else:
            self.h0 = theano.shared(value=np.zeros((n_h, ), dtype = config.floatX), name = 'h0')
            self.c0 = theano.shared(value=np.zeros((n_h, ), dtype = config.floatX), name = 'c0')


        self.Wix = T.dot(self.input, self.W_xi)

        [self.h, self.c], _ = theano.scan(self.recurrent_as_activation_function, sequences = [self.Wix],
                                                                      outputs_info = [self.h0, self.c0])

        self.output = self.h

        self.params = [self.W_xi, self.W_hi, self.b_i]

        self.L2_cost = (self.W_xi ** 2).sum() + (self.W_hi ** 2).sum()
Developer: CSTR-Edinburgh, Project: merlin, Lines: 60, Source: gating.py

Example 12: test_binomial_from_uniform_cpu

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
    def test_binomial_from_uniform_cpu(self):
        #Test using numpy
        rng = np.random.RandomState(42)
        probs = rng.rand(10)

        seed = 1337
        nb_samples = 1000000
        rng = np.random.RandomState(seed)
        success1 = np.zeros(len(probs))
        for i in range(nb_samples):
            success1 += rng.binomial(n=1, p=probs)

        rng = np.random.RandomState(seed)
        success2 = np.zeros(len(probs))
        for i in range(nb_samples):
            success2 += (rng.rand(len(probs)) < probs).astype('int')

        success1 = success1 / nb_samples
        success2 = success2 / nb_samples

        assert_array_almost_equal(success1, success2)

        #Test using Theano's default RandomStreams
        theano_rng = RandomStreams(1337)
        rng_bin = theano_rng.binomial(size=probs.shape, n=1, p=probs, dtype=theano.config.floatX)
        success1 = np.zeros(len(probs))
        for i in range(nb_samples):
            success1 += rng_bin.eval()

        theano_rng = RandomStreams(1337)
        rng_bin = theano_rng.uniform(size=probs.shape, dtype=theano.config.floatX) < probs
        success2 = np.zeros(len(probs))
        for i in range(nb_samples):
            success2 += rng_bin.eval()

        assert_array_almost_equal(success1/nb_samples, success2/nb_samples)

        #Test using Theano's sandbox MRG RandomStreams
        theano_rng = MRG_RandomStreams(1337)
        success1 = theano_rng.binomial(size=probs.shape, n=1, p=probs, dtype=theano.config.floatX)

        theano_rng = MRG_RandomStreams(1337)
        success2 = theano_rng.uniform(size=probs.shape, dtype=theano.config.floatX) < probs

        assert_array_equal(success1.eval(), success2.eval())
Developer: MarcCote, Project: iRBM, Lines: 47, Source: test_rbm.py

Example 13: DropoutHiddenLayer

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
class DropoutHiddenLayer(HiddenLayer):
	def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh,
			adv_activation_method = None, pool_size = 1,pnorm_order=1,dropout_factor=0.5):
		super(DropoutHiddenLayer, self).__init__(rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
				activation=activation, adv_activation_method = adv_activation_method, pool_size = pool_size,pnorm_order=pnorm_order)
		self.theano_rng = RandomStreams(rng.randint(2 ** 30))
		dropout_prob = self.theano_rng.binomial(n=1, p=1-dropout_factor, size=self.output.shape,
			dtype=theano.config.floatX)	
		self.dropout_output = dropout_prob * self.output
Developer: jomkuriakose, Project: python-dnn, Lines: 11, Source: mlp.py

Example 14: DenoisingAutoEncoder

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
class DenoisingAutoEncoder(object):
    def __init__(self, n_visible, n_hidden, weights=None, hidden_bias=None, visible_bias=None, random_on_gpu=False,
                 seed=69, activation=T.nnet.sigmoid):
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        if random_on_gpu:
            self.t_rng = GPU_RandomStreams(seed)
        else:
            self.t_rng = RandomStreams(seed)

        if not weights:
            weights = np.asarray(
                np.random.normal(
                    scale=0.01,
                    size=(self.n_visible, self.n_hidden)),
                dtype=theano.config.floatX)
        self.ts_weights = theano.shared(value=weights, name='W', borrow=True)

        if not hidden_bias:
            hidden_bias = np.zeros(n_hidden, dtype=theano.config.floatX)

        self.ts_hidden_bias = theano.shared(value=hidden_bias, name='hb', borrow=True)

        if not visible_bias:
            visible_bias = np.zeros(n_visible, dtype=theano.config.floatX)

        self.ts_visible_bias = theano.shared(value=visible_bias, name='vb', borrow=True)

        self.x = T.matrix(name='x')

        self.activation = activation

        self.params = [self.ts_weights, self.ts_hidden_bias, self.ts_visible_bias]

    def get_corrupted_input(self, x, corruption_level):
        return self.t_rng.binomial(size=x.shape, n=1, p=1 - corruption_level) * x

    def hidden_values(self, x):
        return self.activation(T.dot(x, self.ts_weights) + self.ts_hidden_bias)

    def reconstruction(self, hidden):
        return self.activation(T.dot(hidden, self.ts_weights.T) + self.ts_visible_bias)

    def get_cost_updates(self, corruption_level, learning_rate):
        corrupted_input = self.get_corrupted_input(self.x, corruption_level)
        hidden = self.hidden_values(corrupted_input)
        reconstruction = self.reconstruction(hidden)
        loss = -T.sum(self.x * T.log(reconstruction) + (1 - self.x) * T.log(1 - reconstruction), axis=1)
        cost = T.mean(loss)
        gparams = T.grad(cost, self.params)

        updates = []
        for param, gparam in zip(self.params, gparams):
            updates.append((param, param - learning_rate * gparam))
        return cost, updates
Developer: Craftinity, Project: DA-AGH, Lines: 58, Source: autoencoders.py

Example 15: DropoutConvLayer

# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import binomial [as alias]
class DropoutConvLayer(ConvLayer):
	def __init__(self, numpy_rng, input, input_shape, filter_shape, poolsize, activation, 
			W=None, b=None, border_mode = 'valid', use_fast = False,dropout_factor=0.5):
		super(DropoutConvLayer, self).__init__(numpy_rng=numpy_rng, input=input, input_shape=input_shape,
				filter_shape=filter_shape,poolsize=poolsize,activation=activation, W=W, b=b,
				border_mode=border_mode,use_fast=use_fast)
		self.theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
		dropout_prob = self.theano_rng.binomial(n=1, p=1-dropout_factor, size=self.output.shape,
			dtype=theano.config.floatX)	
		self.dropout_output = dropout_prob * self.output
Developer: IITM-DONLAB, Project: python-dnn, Lines: 12, Source: cnn.py


Note: The theano.tensor.shared_randomstreams.RandomStreams.binomial examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce this article without permission.