

Python RandomStreams.random_integers Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.shared_randomstreams.RandomStreams.random_integers. If you have been wondering what exactly RandomStreams.random_integers does and how to call it, the curated code examples below should help. You can also explore further usage examples of theano.tensor.shared_randomstreams.RandomStreams, the class this method belongs to.


The following presents 15 code examples of the RandomStreams.random_integers method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
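Before turning to the examples, here is a minimal self-contained sketch of the call pattern they all share (my own illustration, assuming a legacy Theano installation; the printed values depend on the seed). Note that, as Examples 1 and 6 below verify against numpy, random_integers samples from the closed interval [low, high]:

import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1234)                    # shared random state
draw = srng.random_integers((5,), low=0, high=9)   # 5 integers in [0, 9], inclusive
f = theano.function([], draw)

print(f())  # one draw of 5 integers
print(f())  # a different draw: the shared state advances on every call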

Example 1: test_random_integers_vector

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def test_random_integers_vector(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.lvector()
        high = tensor.lvector()
        out = random.random_integers(low=low, high=high)
        assert out.ndim == 1
        f = function([low, high], out)

        low_val = [100, 200, 300]
        high_val = [110, 220, 330]
        seed_gen = numpy.random.RandomState(utt.fetch_seed())
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

        # Arguments of size (3,)
        val0 = f(low_val, high_val)
        numpy_val0 = numpy.asarray([numpy_rng.randint(low=lv, high=hv+1)
            for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val0 == numpy_val0)

        # arguments of size (2,)
        val1 = f(low_val[:-1], high_val[:-1])
        numpy_val1 = numpy.asarray([numpy_rng.randint(low=lv, high=hv+1)
            for lv, hv in zip(low_val[:-1], high_val[:-1])])
        assert numpy.all(val1 == numpy_val1)

        # Specifying the size explicitly
        g = function([low, high], random.random_integers(low=low, high=high, size=(3,)))
        val2 = g(low_val, high_val)
        numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
        numpy_val2 = numpy.asarray([numpy_rng.randint(low=lv, high=hv+1)
            for lv, hv in zip(low_val, high_val)])
        assert numpy.all(val2 == numpy_val2)
        self.assertRaises(ValueError, g, low_val[:-1], high_val[:-1])
Developer: ChinaQuants, Project: Theano, Lines: 35, Source: test_shared_randomstreams.py

Example 2: cost_from_X_wrong

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def cost_from_X_wrong(self, data):
        X, Y = data
        theano_rng = RandomStreams(seed = self.rng.randint(2 ** 15))
        noise = theano_rng.random_integers(size = (X.shape[0] * self.k,), low=0, high = self.dict_size - 1)
        p_n = 1. / self.dict_size

        pos = T.nnet.sigmoid(self.delta(data) - T.log(self.k * p_n))
        neg = T.nnet.sigmoid(self.delta((T.tile(X, (self.k, 1)), noise)) - T.log(self.k * p_n))
        neg = neg.reshape((X.shape[0], self.k))

        rval = -T.log(pos) - T.log(1 - neg).sum(axis=1)
        return rval.mean()
Developer: Sandy4321, Project: lisa_intern, Lines: 14, Source: __init__.py

Example 3: score

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def score(self, Y, Y_hat):
        # TODO fix me later when using IndexSpace

        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        state_below, = owner.inputs
        assert state_below.ndim == 2

        # TODO make this more generic like above
        state_below = state_below.owner.inputs[0].owner.inputs[0]

        Y = T.argmax(Y, axis = 1)
        k = self.num_noise_samples

        if self.noise_prob is None:
            theano_rng = RandomStreams(seed = self.mlp.rng.randint(2 ** 15))
            noise = theano_rng.random_integers(size = (state_below.shape[0], self.num_noise_samples,), low=0, high = self.n_classes - 1)
            p_n = 1. / self.n_classes
            p_w = T.nnet.sigmoid((state_below * self.W[:, Y].T).sum(axis=1) + self.b[Y])
            p_x = T.nnet.sigmoid((T.concatenate([state_below] * k) * self.W[:, noise.flatten()].T).sum(axis=1) + self.b[noise.flatten()])
            # TODO is this reshape necessary?
            p_x = p_x.reshape((state_below.shape[0], k))

            #pos = k * p_n / (p_w + k * p_n) * T.log(p_w)
            #neg = (p_x / (p_x + k * p_n) * T.log(p_x)).sum(axis=1)
        else:
            #import ipdb
            #ipdb.set_trace()
            theano_rng = MRG_RandomStreams(max(self.mlp.rng.randint(2 ** 15), 1))
            assert self.mlp.batch_size is not None
            noise = theano_rng.multinomial(pvals = np.tile(self.noise_prob.get_value(), (k * self.mlp.batch_size, 1)))
            noise = T.argmax(noise, axis = 1)
            p_n = self.noise_prob
            p_w = T.nnet.sigmoid((state_below * self.W[:, Y].T).sum(axis=1) + self.b[Y])
            p_x = T.nnet.sigmoid((T.concatenate([state_below] * k) * self.W[:, noise.flatten()].T).sum(axis=1) + self.b[noise.flatten()])
            p_x = p_x.reshape((state_below.shape[0], k))

            pos = k * p_n[Y] / (p_w + k * p_n[Y]) * T.log(p_w)
            neg = (p_x / (p_x + k * p_n[noise].reshape(p_x.shape)) * T.log(p_x)).sum(axis=1)


        #return -(pos - neg).mean()
        return p_w, p_x
Developer: Sandy4321, Project: lisa_intern, Lines: 53, Source: __init__.py

Example 4: test_dtype

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def test_dtype(self):
        random = RandomStreams(utt.fetch_seed())
        low = tensor.lscalar()
        high = tensor.lscalar()
        out = random.random_integers(low=low, high=high, size=(20,), dtype='int8')
        assert out.dtype == 'int8'
        f = function([low, high], out)

        val0 = f(0, 9)
        assert val0.dtype == 'int8'

        val1 = f(255, 257)
        assert val1.dtype == 'int8'
        assert numpy.all(abs(val1) <= 1)
Developer: ChinaQuants, Project: Theano, Lines: 16, Source: test_shared_randomstreams.py

Example 5: OneHotDistribution

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
class OneHotDistribution(Distribution):
    """Randomly samples from a distribution of one-hot vectors."""

    def __init__(self, space, rng=None):
        super(OneHotDistribution, self).__init__(space)

        self.dim = space.get_total_dimension()
        self.formatter = OneHotFormatter(self.dim, dtype=space.dtype)

        self.rng = RandomStreams() if rng is None else rng

    def sample(self, n):
        idxs = self.rng.random_integers((n, 1), low=0, high=self.dim - 1)
        return self.formatter.theano_expr(idxs, mode='concatenate')
Developer: HyoungWooPark, Project: adversarial, Lines: 16, Source: distributions.py
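For intuition, the sampling that OneHotDistribution performs can be written in plain NumPy (an illustrative sketch of my own, not code from the adversarial project); numpy's randint excludes its upper bound, so randint(0, dim) corresponds to random_integers(low=0, high=dim - 1):

import numpy as np

def sample_one_hot(n, dim, rng):
    idxs = rng.randint(0, dim, size=n)     # row indices in [0, dim - 1]
    out = np.zeros((n, dim), dtype=np.float32)
    out[np.arange(n), idxs] = 1.0          # scatter a single 1 per row
    return out

print(sample_one_hot(3, 5, np.random.RandomState(0)))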

Example 6: test_random_integers

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def test_random_integers(self):
        """Test that RandomStreams.random_integers generates the same results as numpy"""
        # Check over two calls to see if the random state is correctly updated.
        random = RandomStreams(utt.fetch_seed())
        fn = function([], random.random_integers((20, 20), -5, 5))
        fn_val0 = fn()
        fn_val1 = fn()

        rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
        rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit
        numpy_val0 = rng.random_integers(-5, 5, size=(20,20))
        numpy_val1 = rng.random_integers(-5, 5, size=(20,20))

        assert numpy.all(fn_val0 == numpy_val0)
        assert numpy.all(fn_val1 == numpy_val1)
Developer: HaniAlmousli, Project: Theano, Lines: 17, Source: test_shared_randomstreams.py

Example 7: get_gradients

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def get_gradients(self, model, data, ** kwargs):

        space,  sources = self.get_data_specs(model)
        space.validate(data)
        X, Y = data


        theano_rng = RandomStreams(seed = model.rng.randint(2 ** 15))
        noise = theano_rng.random_integers(size = (X.shape[0] * model.k,), low=0, high = model.dict_size - 1)


        delta = model.delta(data)
        p = model.score(X, Y)
        params = model.get_params()

        pos_ = T.jacobian(model.score(X, Y), params, disconnected_inputs='ignore')
        pos_coeff = 1 - T.nnet.sigmoid(model.delta(data))
        pos = []
        for param in pos_:
            axes = [0]
            axes.extend(['x' for item in range(param.ndim - 1)])
            pos.append(pos_coeff.dimshuffle(axes) * param)
        del pos_, pos_coeff

        noise_x = T.tile(X, (model.k, 1))
        neg_ = T.jacobian(model.score(noise_x, noise), params, disconnected_inputs='ignore')
        neg_coeff = T.nnet.sigmoid(model.delta((noise_x, noise)))
        neg = []
        for param in neg_:
            axes = [0]
            axes.extend(['x' for item in range(param.ndim - 1)])
            tmp = neg_coeff.dimshuffle(axes) * param
            new_shape = [X.shape[0], model.k]
            new_shape.extend([tmp.shape[i] for i in range(1, tmp.ndim)])
            neg.append(tmp.reshape(new_shape).sum(axis=1))
        del neg_, neg_coeff


        grads = [(pos_ - neg_).mean(axis=0) for pos_, neg_ in zip(pos, neg)]
        gradients = OrderedDict(izip(params, grads))
        updates = OrderedDict()

        return gradients, updates
Developer: Sandy4321, Project: lisa_intern, Lines: 45, Source: cost.py

Example 8: cost_

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def cost_(self, Y, Y_hat):
        # TODO fix me later when using IndexSpace

        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        state_below, = owner.inputs
        assert state_below.ndim == 2

        # TODO make this more generic like above
        state_below = state_below.owner.inputs[0].owner.inputs[0]

        #import ipdb
        #ipdb.set_trace()
        Y = T.argmax(Y, axis = 1)
        #Y = Y.astype('uint32')
        theano_rng = RandomStreams(seed = self.mlp.rng.randint(2 ** 15))
        noise = theano_rng.random_integers(size = (state_below.shape[0], self.num_noise_samples,), low=0, high = self.n_classes - 1)
        k = self.num_noise_samples
        p_n = 1. / self.n_classes

        pos = T.nnet.sigmoid((state_below * self.W[:, Y].T).sum(axis=1) + self.b[Y] - T.log(k * p_n))
        neg = T.nnet.sigmoid((T.concatenate([state_below] * k) * self.W[:, noise.flatten()].T).sum(axis=1) + self.b[noise.flatten()] - T.log(k * p_n))
        # TODO is this reshape necessary?
        neg = neg.reshape((state_below.shape[0], k)).sum(axis=1)


        rval =  -T.log(pos) - T.log(1 - neg)
        return rval.mean()
Developer: Sandy4321, Project: lisa_intern, Lines: 38, Source: __init__.py

Example 9: RandomStreams

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams


floatX = theano.config.floatX

vocabularySize = 10
embeddingSize = 10
contextSize = 2
samples = 10
wordIndices = T.ivector('wordIndices')

defaultEmbeddings = np.arange(0, vocabularySize * embeddingSize).reshape((vocabularySize, embeddingSize)).astype(floatX)

embeddings = theano.shared(defaultEmbeddings, name='embeddings', borrow=True)

random = RandomStreams(seed=234)
negativeSampleIndices = random.random_integers((contextSize * samples,), 0, vocabularySize - 1)

indicies = T.concatenate([wordIndices, negativeSampleIndices])
indicies = indicies.reshape((samples + 1, contextSize))

output = embeddings[indicies]
output = output.mean(axis=1)

getEmbeddings = theano.function(
    inputs=[wordIndices],
    outputs=output
)

print getEmbeddings(range(0, contextSize))
Developer: yuriyfilonov, Project: nplm, Lines: 33, Source: run.py

Example 10: AverageSGM

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
class AverageSGM(object):

    ''' A toy example showing the usage of `extheano.NodeDescriptor` and
    `extheano.jit`.
    This class performs the stochastic gradient method (SGM) to find the
    average of given data.

    Usage:
        >> data = np.arange(1000)
        >> a = AverageSGM(data)
        >> for _ in xrange(10000): a.calc_loss_with_onestep_SGM()
        >> est = a.get_estimation()
    '''

    # node descriptors for shared variables
    # whole data of which we will compute the average
    data = extheano.NodeDescriptor()
    # estimate of the average
    mu = extheano.NodeDescriptor()
    # learning rate (will be discounted as SGM goes on)
    lrate = extheano.NodeDescriptor()

    def __init__(self, data, batch_size=10, init_val=0., lrate=0.05,
                 degree=0.75, seed=None):
        '''Set parameters for the SGM

        :param data:        array-like with its dimension one
        :param batch_size:  size of the mini batch in integer
        :param init_val:    initial guess of the average in float
        :param lrate:       initial learning rate in float
        :param degree:      degree of learning rate decreasing in float
        :param seed:        seed for RNG in integer
        '''

        # pure-python variables (assumed to be invariant until recompilation)
        self.batch_size = batch_size
        self.n_batches = len(data) / batch_size
        self.degree = degree
        self.init_lrate = lrate

        # initialize the nodes
        self.data = theano.shared(data.astype(float), 'data', borrow=True)
        self.mu = theano.shared(float(init_val), 'mu')
        self.lrate = theano.shared(float(lrate), 'lrate')

        # shared random streams
        self.rng = RandomStreams(seed)

    def quadratic_loss(self, minibatch):
        '''Get the quadratic loss against the given input'''
        return ((minibatch - self.mu) ** 2).mean()

    def gradient_descent(self, loss, lrate):
        '''Perform one step of the gradient descent on the given loss

        Note that you can update `self.mu` with the normal assignment
        operation since it is a descriptor.
        '''
        # calculate the gradient
        grad = -T.grad(loss, self.mu)
        # update the estimation
        self.mu = self.mu + lrate * grad

    def next_lrate(self, lr):
        '''Return the discounted learning rate

        The learning rate will be proportional to the number of iterations with
        minus `self.degree` on the exponent.
        '''
        time = (self.init_lrate / lr) ** (1. / self.degree)
        ratio = (1. - 1. / (1. + time)) ** self.degree
        return lr * ratio

    # With the decorator `@extheano.jit`, you can compile your theano-function
    # 'just in time'. Use `@extheano.jit.parse` instead if it has arguments with
    # default values.
    @extheano.jit.parse
    def calc_loss_with_onestep_SGM(self, scale=1.):
        '''Calculate the quadratic loss and perform one step of the SGM
        '''
        # assign a random batch to the input
        batch_start = self.batch_size * \
            self.rng.random_integers(low=0, high=self.n_batches - 1)
        batch_stop = batch_start + self.batch_size
        minibatch = self.data[batch_start: batch_stop]

        # perform SGM and discount the learning rate
        loss = self.quadratic_loss(minibatch)
        self.gradient_descent(loss, self.lrate * scale)
        self.lrate = self.next_lrate(self.lrate)
        return loss

    @extheano.jit
    def set_estimation(self, val):
        '''Set the estimation of the average'''
        self.mu = T.cast(val, theano.config.floatX)

    @extheano.jit
    def get_estimation(self):
        '''Get the estimation of the average'''
#......... part of the code omitted here .........
Developer: koheimiya, Project: extheano, Lines: 103, Source: test_tutorial.py
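As an aside, the discounting performed by next_lrate above has a closed form: after t updates the learning rate equals init_lrate * (1 + t) ** (-degree). A quick pure-Python check of that claim (my own sketch, not part of extheano):

init_lrate, degree = 0.05, 0.75

def next_lrate(lr):
    # same recurrence as AverageSGM.next_lrate
    time = (init_lrate / lr) ** (1.0 / degree)
    ratio = (1.0 - 1.0 / (1.0 + time)) ** degree
    return lr * ratio

lr = init_lrate
for t in range(1, 6):
    lr = next_lrate(lr)
    assert abs(lr - init_lrate * (1.0 + t) ** -degree) < 1e-12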

Example 11: DropModality

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
class DropModality(Layer):
	'''
	drop a modality altogether
	'''
	def __init__(self, input_shapes = [], **kwargs):
		self.trng = RandomStreams(seed=np.random.randint(10e6))
		self.params = []
		self.input_shapes = input_shapes

	def set_prev_shape(self, input_shapes):
		self.input_shapes = input_shapes

	def get_output(self, train=False):

		X = self.get_input(train)

		full = T.ones_like(X)
		masks = [full]

		for i in xrange(len(self.input_shapes)):
			mask = T.ones_like(X)
			idx = 0
			for j in xrange(len(self.input_shapes)):
				if i == j:
					# number of dimensions of the input: len() works for shape
					# tuples, scalar widths fall back to a single dimension
					try:
						ndim = len(self.input_shapes[0])
					except TypeError:
						ndim = 1
					if ndim == 3:
						mask = T.set_subtensor(mask[:,:,idx : idx+ self.input_shapes[j]], 0)
					elif ndim == 2:
						mask = T.set_subtensor(mask[:,idx : idx+ self.input_shapes[j]], 0)
					elif ndim == 1:
						mask = T.set_subtensor(mask[idx : idx+ self.input_shapes[j]], 0)
					else:
						raise NotImplementedError()
				idx =  idx + self.input_shapes[j]
			masks += [mask]
		masked = T.stack(masks)

		if train:
			index  = self.trng.random_integers(size=(1,),low = 0, high = len(masks)-1)[0]
		else:
			index = 0
		masked_output = X * masked[index]
		return masked_output

	def get_masked(self, train=False):
		X = self.get_input(train)

		full = T.ones_like(X)
		masks = [full]

		for i in xrange(len(self.input_shapes)):
			mask = T.ones_like(X)
			idx = 0
			for j in xrange(len(self.input_shapes)):
				if i == j:
					mask = T.set_subtensor(mask[:,:,idx : idx+ self.input_shapes[j]], 0)
				idx =  idx + self.input_shapes[j]
			masks += [mask]
		masked = T.stack(masks)
		index  = self.trng.random_integers(size=(1,),low = 0, high = len(masks)-1)[0]

		return masked, index

	def get_input_shapes(self):
		return self.input_shapes

	def get_config(self):
		config = {"name": self.__class__.__name__,
				  "input_shapes" : self.input_shapes
				  }
		base_config = super(DropModality, self).get_config()
		return dict(list(base_config.items()) + list(config.items()))
Developer: hongyuanzhu, Project: keras, Lines: 78, Source: dropmodality.py

Example 12: LDmodel

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]

#......... part of the code omitted here .........
		sterm=-T.mean(T.sum(T.abs_((s2_samps-s_pred)/self.b),axis=1)) - T.sum(T.log(self.b))
		
		#xterm1=-T.mean(T.sum((x1_recons-T.reshape(x1,(self.nx,1)))**2,axis=0)/(2.0*self.xvar**2))
		xterm2=-T.mean(T.sum((x2_recons-T.reshape(x2,(self.nx,1)))**2,axis=0)/(2.0*self.xvar**2))
		
		#energy = hterm1 + xterm1 + hterm2 + xterm2 + sterm -T.sum(T.sum(self.A**2))
		#energy = hterm1 + xterm2 + sterm 
		energy = xterm2 + sterm 
		
		learning_params=[self.params[i] for i in range(len(self.params)) if self.rel_lrates[i]!=0.0]
		learning_rel_lrates=[self.rel_lrates[i] for i in range(len(self.params)) if self.rel_lrates[i]!=0.0]
		gparams=T.grad(energy, learning_params, consider_constant=[s1_samps, s2_samps])
		
		# constructs the update dictionary
		for gparam, param, rel_lr in zip(gparams, learning_params, learning_rel_lrates):
			#gnat=T.dot(param, T.dot(param.T,param))
			if param==self.M:
				#I do this so the derivative of M doesn't depend on the sparsity parameters
				updates[param] = T.cast(param + gparam*T.reshape(self.b,(1,self.ns))*lrate*rel_lr,'float32')
			elif param==self.b:
				updates[param] = T.cast(param + gparam*T.reshape(1.0/self.b,(1,self.ns))*lrate*rel_lr,'float32')
			else:
				updates[param] = T.cast(param + gparam*lrate*rel_lr,'float32')
		
		return energy, updates
		
	
	def get_ESS(self):
		
		return 1.0/T.sum(self.weights_now**2)
	
	
	def resample(self):
		
		updates={}
		#samp=self.theano_rng.multinomial(size=self.weights_now.shape,pvals=self.weights_now)
		idxs=self.sample_multinomial(self.weights_now,3)
		#idxs=T.cast(T.sum(samp*self.idx_mat,axis=1),'int32')
		s_samps=self.s_now[idxs]
		updates[self.s_now]=s_samps
		updates[self.weights_now]=T.cast(T.ones_like(self.weights_now)/T.cast(self.npcl,'float32'),'float32') #dtype paranoia
		
		return updates
	
	
	def simulate_step(self, s):
		
		s=T.reshape(s,(1,self.ns))
		
		sp=self.get_prediction(s)
		
		xp=T.dot(self.W, sp.T)
		
		return T.cast(sp,'float32'), T.cast(xp,'float32')
		
	
	def simulate_forward(self, n_steps):
		
		
		s0=T.sum(self.s_now*T.reshape(self.weights_now,(self.npcl,1)),axis=0)
		s0=T.reshape(s0,(1,self.ns))
		[sp, xp], updates = theano.scan(fn=self.simulate_step,
										outputs_info=[s0, None],
										n_steps=n_steps)
		
		return sp, xp, updates
	
	
	def multinomial_step(self,samp,weights):
		
		u=self.theano_rng.uniform(size=self.weights_now.shape)
		i=self.theano_rng.random_integers(size=self.weights_now.shape, low=0, high=self.npcl-1)
		Wnow=weights[samp]
		Wstep=weights[i]
		probs=Wstep/Wnow
		out=T.switch(u<probs, i, samp)
		return out
	
	
	def sample_multinomial(self,weights,nsteps):
		
		#this function samples from a multinomial distribution using
		#the Metropolis method as in [Murray, Lee, Jacob 2013]
		#weights are unnormalized
		#this is biased for small nsteps, but could be faster than the
		#native theano multinomial sampler and the use of unnormalized
		#weights improves numerical stability
		samp0=self.init_multi_samp
		samps, updates = theano.scan(fn=self.multinomial_step,
										outputs_info=[samp0],
										non_sequences=[weights],
										n_steps=nsteps)
		
		return samps[-1]
	
	
	def set_rel_lrates(self, new_rel_lrates):
		updates={}
		updates[self.rel_lrates]=new_rel_lrates
		return updates
Developer: float650, Project: Video-Dynamics, Lines: 104, Source: LDmodel_3.py
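The multinomial_step / sample_multinomial pair above implements the Metropolis multinomial sampler of [Murray, Lee, Jacob 2013]: each chain proposes a uniformly random index and accepts it with probability weights[proposal] / weights[current], so unnormalized weights can be used directly. The same idea in plain NumPy (my own sketch for illustration):

import numpy as np

def metropolis_multinomial(weights, n_chains, n_steps, rng):
    samp = rng.randint(len(weights), size=n_chains)    # arbitrary initial indices
    for _ in range(n_steps):
        i = rng.randint(len(weights), size=n_chains)   # uniform proposals
        u = rng.uniform(size=n_chains)
        samp = np.where(u < weights[i] / weights[samp], i, samp)
    return samp

rng = np.random.RandomState(0)
w = np.array([1.0, 2.0, 4.0, 1.0])                     # unnormalized weights
counts = np.bincount(metropolis_multinomial(w, 100000, 50, rng), minlength=len(w))
print(counts / counts.sum())   # approaches w / w.sum() as n_steps grows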

Example 13: __init__

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
    def __init__(self, numargs, embed_size, pred_vocab_size, arg_vocab_size, initial_pred_rep=None, initial_arg_rep = None, margin = 5, lr=0.01, activation=T.nnet.sigmoid):
        numpy_rng = numpy.random.RandomState(12345)
        theano_rng = RandomStreams(54321)
        self.lr = lr
        #margin = 5
        # Initializing predicate representations
        if initial_pred_rep is not None:
            num_preds, pred_dim = initial_pred_rep.shape
            assert pred_vocab_size == num_preds, "Initial predicate representation is not the same size as pred_vocab_size"
            assert embed_size == pred_dim, "Initial predicate representation does not have the same dimensionality as embed_size"
        else:
            initial_pred_rep_range = 4 * numpy.sqrt(6. / (pred_vocab_size + embed_size))
            initial_pred_rep = numpy.asarray(numpy_rng.uniform(low = -initial_pred_rep_range, high = initial_pred_rep_range, size = (pred_vocab_size, embed_size)))
            
        self.pred_rep = theano.shared(value=initial_pred_rep, name='P')
        
        # Initializing argument representations
        if initial_arg_rep is not None:
            arg_rep_len, arg_dim = initial_arg_rep.shape
            assert arg_vocab_size == arg_rep_len, "Initial argument representation is not the same size as arg_vocab_size"
            assert embed_size == arg_dim, "Initial argument representation does not have the same dimensionality as embed_size"
        else:
            initial_arg_rep_range = 4 * numpy.sqrt(6. / (arg_vocab_size + embed_size))
            initial_arg_rep = numpy.asarray(numpy_rng.uniform(low = -initial_arg_rep_range, high = initial_arg_rep_range, size = (arg_vocab_size, embed_size)))
            
        self.arg_rep = theano.shared(value=initial_arg_rep, name='A')
        
        # Initialize scorer
        scorer_dim = embed_size * (numargs + 1) # Predicate is +1
        initial_scorer_range = 4 * numpy.sqrt(6. / scorer_dim)
        initial_scorer = numpy.asarray(numpy_rng.uniform(low = -initial_scorer_range, high = initial_scorer_range, size = scorer_dim))
        self.scorer = theano.shared(value=initial_scorer, name='s')
        
        # Initialize indicator
        indicator_dim = embed_size * (numargs + 1) # Predicate is +1
        initial_indicator_range = 4 * numpy.sqrt(6. / (indicator_dim + numargs))
        initial_indicator = numpy.asarray(numpy_rng.uniform(low = -initial_indicator_range, high = initial_indicator_range, size = (indicator_dim, numargs)))
        self.indicator = theano.shared(value=initial_indicator, name='I')
        
        # Define symbolic pred-arg
        self.pred_ind = T.iscalar('p')
        self.arg_inds = T.iscalars(numargs)
        pred = self.pred_rep[self.pred_ind].reshape((1, embed_size))
        args = self.arg_rep[self.arg_inds].reshape((1, embed_size * numargs))
        pred_arg = activation(T.concatenate([pred, args], axis=1))
        
        # Define symbolic rand pred-arg for training scorer
        rand_pred_ind = theano_rng.random_integers(low=0, high=pred_vocab_size-1)
        rand_arg_inds = theano_rng.random_integers([1, numargs], low=0, high=arg_vocab_size-1)
        rand_pred = self.pred_rep[rand_pred_ind].reshape((1, embed_size))
        rand_args = self.arg_rep[rand_arg_inds].reshape((1, embed_size * numargs))
        rand_pred_arg = activation(T.concatenate([rand_pred, rand_args], axis=1))

        # Define symbolic pred_rand-arg for training indicator
        pred_rand_arg = activation(T.concatenate([pred, rand_args], axis=1))

        # Define scores and loss
        self.corr_score = T.sum(T.dot(pred_arg, self.scorer))
        rand_score = T.sum(T.dot(rand_pred_arg, self.scorer))
        self.margin_loss = T.maximum(0, margin - self.corr_score + rand_score)
        
        # Define indicator values and loss
        orig_ind_labels = T.constant(numpy.zeros(numargs))
        self.indicator_pred = T.nnet.sigmoid(T.dot(pred_arg, self.indicator))
        rand_ind_labels = T.constant(numpy.ones(numargs))
        rand_indicator_pred = T.nnet.sigmoid(T.dot(pred_rand_arg, self.indicator))
        self.indicator_loss = T.mean((self.indicator_pred - orig_ind_labels) ** 2) + T.mean((rand_indicator_pred - rand_ind_labels) ** 2)
        
        # Define params and inputs
        self.score_params = [self.pred_rep, self.arg_rep, self.scorer]
        self.indicator_params = [self.pred_rep, self.arg_rep, self.indicator]
        self.score_ind_inputs = [self.pred_ind] + list(self.arg_inds)
Developer: pdasigi, Project: spscorer, Lines: 74, Source: sp_scorer.py

Example 14: __init__

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
class Neural_network_layer:
    '''Represents the units within a layer and the units'
       activations and dropout functions.
    '''
    
    def __init__(self, size, activation_function, dropout_type, dropout, dropout_decay, batch_size, frequency):
        
        
        self.drop_count = 0
        self.size = size  
        self.frequency = frequency
        self.dropout = dropout    
        self.dropout_init = dropout    
        self.dropout_decay = dropout_decay  
        self.dropout_type = dropout_type    
        self.rdm = RandomStreams(seed=1234)  
        self.batch_size = batch_size   
        self.sample_range = 100000       
        self.create_dropout_sample_functions()  
        self.activation_crossvalidation = activation_function 
        self.activation_function = self.set_dropout(dropout, activation_function)
        self.activation_derivative = lambda X: g(T.mul(X, (1.0 - X)))   
        self.activation_tracker = self.set_activation_tracker(activation_function)             
        
        pass
    
    
    def set_dropout(self, dropout, activation_function):
        action_with_drop = None
        if dropout > 0:
            action_with_drop = lambda X: T.mul(activation_function(X),self.dropout_function)            
            self.activation_cv_dropout = lambda X: T.mul(activation_function(X),self.dropout_function_cv)
        else:
            action_with_drop = activation_function
            self.activation_cv_dropout = activation_function
            
        return action_with_drop
     
    def set_activation_tracker(self, activation_function): 
        '''Sets a tracker function that logs the activations that exceed 0.75.
        '''
        if activation_function == Activation_function.sigmoid:
            activation_tracker = lambda X: T.gt(activation_function(X),0.75)
        else:
            activation_tracker = None
        return activation_tracker
    
    def create_dropout_sample_functions(self, reset = False):
        '''Creates functions of sample vectors which can be indexed with random
           integers to create a pseudo random sample for dropout. This greatly
           speeds up sampling as no new samples have to be created.
        '''
        if reset:
            self.dropout = self.dropout_init
            print 'Reset dropout to ' + str(self.dropout)
        
        self.dropout_function = None
        sample_function = None
        if self.dropout > 0:
            if self.dropout_type == Dropout.drop_activation:
                if reset:
                    self.bino_sample_vector.set_value(np.matrix(np.float32(
                                        np.random.binomial(1,1-self.dropout,(10000000,1)))),
                                        borrow=True) 
                else:
                    self.bino_sample_vector = shared(np.matrix(np.float32(
                                            np.random.binomial(1,1-self.dropout,(10000000,1)))),
                                            'float32', borrow=True) 
            
                sample_function = lambda rand: g(T.reshape(self.bino_sample_vector[rand:rand + (self.batch_size*self.size)],(self.batch_size,self.size)))
                sample_function_cv = lambda rand: g(T.reshape(self.bino_sample_vector[rand:rand + (4200*self.size)],(4200,self.size)))
                self.dropout_function = sample_function(self.rdm.random_integers(low=0, high=self.sample_range))  
                self.dropout_function_cv = sample_function_cv(self.rdm.random_integers(low=0, high=self.sample_range))  
             
                
    def handle_dropout_decay(self, epoch):
        '''Handles automatically the dropout decay by decreasing the dropout by
           the given amount after the given number of epochs.
        '''
        if self.dropout_function and self.frequency[self.drop_count] > 0 and epoch % self.frequency[self.drop_count] == 0  and epoch > 0:
            print 'Setting dropout from  '  + str(self.dropout)  + ' to ' + str(np.float32(self.dropout*(1-self.dropout_decay[self.drop_count])))   
            
            self.dropout = np.float32(self.dropout*(1-self.dropout_decay[self.drop_count]))       
            
            if self.dropout_type == Dropout.drop_activation:
                self.bino_sample_vector.set_value(np.matrix(np.float32( 
                                        np.random.binomial(1,1-self.dropout,(10000000,1)))),
                                        borrow=True) 
            self.drop_count += 1   
            if self.drop_count > len(self.dropout_decay)-1:
                self.drop_count -= 1
Developer: ANB2, Project: MachineLearning, Lines: 93, Source: neural_network_layer.py
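The precomputed-pool trick in create_dropout_sample_functions (draw one very large binomial vector once, then slice it at a random offset instead of resampling every batch) looks like this in plain NumPy (an illustrative sketch of my own, with a smaller pool than the original's 10,000,000 entries):

import numpy as np

rng = np.random.RandomState(1234)
dropout, pool_size, sample_range = 0.2, 1000000, 100000
pool = rng.binomial(1, 1 - dropout, size=pool_size).astype(np.float32)

def dropout_mask(batch_size, size):
    # like rdm.random_integers(low=0, high=sample_range) in the layer above
    start = rng.randint(0, sample_range + 1)
    return pool[start:start + batch_size * size].reshape(batch_size, size)

print(dropout_mask(128, 784).mean())   # roughly 1 - dropout = 0.8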

Example 15: __init__

# Required module import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import random_integers [as alias]
	def __init__(self, train_x, train_y, valid_x, valid_y, test_x, test_y, batchSize):
		rng = numpy.random.RandomState(42)

		self.train_x = theano.shared(train_x.astype('float32'))
		self.train_y = theano.shared(train_y.astype('int32'))
		self.valid_x = theano.shared(valid_x.astype('float32')).reshape((valid_x.shape[0],1,28,28))
		self.valid_y = theano.shared(valid_y.astype('int32'))
		self.test_x = theano.shared(test_x.astype('float32')).reshape((test_x.shape[0],1,28,28))
		self.test_y = theano.shared(test_y.astype('int32'))

		x = T.matrix()
		y = T.ivector()
		index = T.lscalar()
		learningRate = T.scalar()
		L1_reg = 0.0
		L2_reg = 0.0

		random_stream = RandomStreams(seed=420)
		indices = random_stream.random_integers((batchSize,), low=0, high=train_x.shape[0]-1)
		x = self.train_x.take(indices, axis=0)
		y = self.train_y.take(indices, axis=0)

		layer0Input = x.reshape((batchSize,1,28,28))

		layer0 = ConvPoolLayer(
			rng=rng,
			input=layer0Input,
			filter_shape=(64,1,3,3),
			image_shape=(None,1,28,28),
			poolsize=(2,2)
		)

		layer1 = ConvPoolLayer(
			rng=rng,
			input=layer0.output,
			filter_shape=(128,64,3,3),
			image_shape=(None,64,13,13),
			poolsize=(2,2)
		)

		layer1Out = layer1.output.flatten(2)

		layer2 = HiddenLayer(
			rng=rng,
			input=layer1Out,
			n_in=128*5*5,
			n_out=512,
			activation=relu
		)

		layer3 = LogisticRegression(
			rng=rng,
			input=layer2.output,
			n_in=layer2.n_out,
			n_out=10
		)

		L1 = abs(layer0.W).sum() + abs(layer1.W).sum() + abs(layer2.W).sum() + abs(layer3.W).sum()
		L2 = (layer0.W**2).sum() + (layer1.W**2).sum() + (layer2.W**2).sum() + (layer3.W**2).sum()
		cost = layer3.negative_log_likelihood(y) + L1_reg * L1 + L2_reg * L2

		self.test_model = theano.function(
			[index],
			layer3.errors(y),
			givens={
				layer0Input: self.test_x[index * 1000:(index+1)*1000,:,:,:],
				y: self.test_y[index * 1000:(index+1)*1000]
			}
		)

		self.validate_model = theano.function(
			[index],
			[layer3.errors(y), cost],
			givens={
				layer0Input: self.valid_x[index * 1000:(index+1)*1000,:,:,:],
				y: self.valid_y[index * 1000:(index+1)*1000]
			}
		)
		
		self.forward = theano.function([layer0Input], [layer3.p_y_given_x])
		
		self.params = layer3.params + layer2.params + layer1.params + layer0.params
		updates = self.rmsProp(cost, self.params, 0.7, 0.01, learningRate)
		self.train_model = theano.function(
			[learningRate],
			cost,
			updates=updates
		)
Developer: Levoila, Project: INF4215, Lines: 90, Source: run.py


Note: The theano.tensor.shared_randomstreams.RandomStreams.random_integers method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.