当前位置: 首页>>代码示例>>Python>>正文


Python RandomStreams.seed方法代码示例

本文整理汇总了Python中theano.tensor.shared_randomstreams.RandomStreams.seed方法的典型用法代码示例。如果您正苦于以下问题:Python RandomStreams.seed方法的具体用法?Python RandomStreams.seed怎么用?Python RandomStreams.seed使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在theano.tensor.shared_randomstreams.RandomStreams的用法示例。


在下文中一共展示了RandomStreams.seed方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_setitem

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
    def test_setitem(self):
        """Assigning a fresh RandomState via __setitem__ must take control of
        the stream: subsequent draws must match numpy draws from an
        identically-seeded RandomState, even after an unrelated seed() call."""
        srng = RandomStreams(234)
        sample = srng.uniform((2, 2))
        draw = function([], sample, updates=srng.updates())

        # this seed should be overridden by the explicit assignment below
        srng.seed(888)

        reference = numpy.random.RandomState(utt.fetch_seed())
        srng[sample.rng] = numpy.random.RandomState(utt.fetch_seed())

        theano_first = draw()
        theano_second = draw()
        expected_first = reference.uniform(size=(2, 2))
        expected_second = reference.uniform(size=(2, 2))
        assert numpy.allclose(theano_first, expected_first)
        assert numpy.allclose(theano_second, expected_second)
开发者ID:ChinaQuants,项目名称:Theano,代码行数:19,代码来源:test_shared_randomstreams.py

示例2: test_seed_fn

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
    def test_seed_fn(self):
        """Seeding the whole RandomStreams must reproduce numpy draws from a
        RandomState seeded with the same derived per-stream seed."""
        srng = RandomStreams(234)
        draw = function([], srng.uniform((2, 2)), updates=srng.updates())

        srng.seed(utt.fetch_seed())

        theano_first = draw()
        theano_second = draw()

        # RandomStreams derives each member stream's seed as a randint(2**30)
        # from a RandomState built on the master seed — mirror that here.
        derived_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2 ** 30)
        reference = numpy.random.RandomState(int(derived_seed))  # int() keeps 32-bit platforms happy

        assert numpy.allclose(theano_first, reference.uniform(size=(2, 2)))
        assert numpy.allclose(theano_second, reference.uniform(size=(2, 2)))
开发者ID:ChinaQuants,项目名称:Theano,代码行数:21,代码来源:test_shared_randomstreams.py

示例3: test_examples_9

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
    def test_examples_9(self):
        """Tutorial walk-through: default updates vs. no_default_updates,
        per-variable and stream-wide seeding, and rolling a stream's state
        back to replay earlier draws. Call order is load-bearing: every
        compiled-function call below advances (or deliberately does not
        advance) some shared rng."""
        from theano.tensor.shared_randomstreams import RandomStreams
        stream = RandomStreams(seed=234)
        unif_rv = stream.uniform((2, 2))
        norm_rv = stream.normal((2, 2))
        sample_unif = function([], unif_rv)
        # no_default_updates leaves norm_rv's rng untouched between calls
        sample_norm = function([], norm_rv, no_default_updates=True)
        cancel_out = function([], unif_rv + unif_rv - 2 * unif_rv)

        u0 = sample_unif()
        u1 = sample_unif()  # rng advanced, so a different draw
        assert numpy.all(u0 != u1)

        n0 = sample_norm()  # different numbers from u0 and u1
        n1 = sample_norm()  # rng not updated, so the same numbers repeat

        assert numpy.all(n0 == n1)
        assert numpy.all(n0 != u0)
        assert numpy.all(n0 != u1)

        cancel_out = function([], unif_rv + unif_rv - 2 * unif_rv)
        assert numpy.allclose(cancel_out(), [[0., 0.], [0., 0.]])

        unif_state = unif_rv.rng.get_value(borrow=True)  # rng backing unif_rv
        unif_state.seed(89234)                           # reseed it in place
        unif_rv.rng.set_value(unif_state, borrow=True)   # write it back

        stream.seed(902340)  # gives each variable's rng its own derived seed
        saved_state = unif_rv.rng.get_value().get_state()
        cancel_out()           # advances unif_rv's rng even though output is ~0
        first = sample_unif()
        rolled_back = unif_rv.rng.get_value(borrow=True)
        rolled_back.set_state(saved_state)
        unif_rv.rng.set_value(rolled_back, borrow=True)
        second = sample_unif()  # replays the draw cancel_out consumed
        third = sample_unif()   # now reproduces `first`
        assert numpy.all(first != second)
        assert numpy.all(first == third)
开发者ID:AI-Cdrone,项目名称:Theano,代码行数:42,代码来源:test_tutorial.py

示例4: LocalNoiseEBM

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]

#.........这里部分代码省略.........
        #self.norm_c_func = function([X], T.sum(T.sqr(corrupted),axis=1).mean())
        #self.norm_d_func = function([X], T.sum(T.sqr(X),axis=1).mean())

        grads = [ T.grad(obj,param) for param in self.params ]

        learn_inputs = [ ipt for ipt in inputs ]
        learn_inputs.append(alpha)

        self.learn_func = function(learn_inputs, updates =
                [ (param, param - alpha * grad) for (param,grad)
                    in zip(self.params, grads) ] , name='learn_func')

        if self.energy_function != 'mse autoencoder':
            self.recons_func = function([X], self.gibbs_step_exp(X) , name = 'recons_func')
        #

        post_existing_names = dir(self)

        self.names_to_del = [ name for name in post_existing_names if name not in pre_existing_names]

    def learn(self, dataset, batch_size):
        """Draw minibatch(es) from `dataset` and take one learning step.

        One batch is drawn normally; a second is added when the model is
        configured to use different examples for the two objective terms
        (self.different_examples is truthy)."""
        n_draws = 1 + self.different_examples
        batches = [dataset.get_batch_design(batch_size) for _ in xrange(n_draws)]
        self.learn_mini_batch(batches)


    def recons_func(self, x):
        """Reconstruct a design matrix row by row.

        Applies self.gibbs_step_exp to each example (row) of `x` and returns
        the stacked results as a new array of the same shape."""
        reconstructed = N.zeros(x.shape)
        # iterating a 2D array yields row views, equivalent to x[i, :]
        for row_idx, example in enumerate(x):
            reconstructed[row_idx, :] = self.gibbs_step_exp(example)

        return reconstructed


    def print_suite(self, dataset, batch_size, batches,  things_to_print):
        self.theano_rng.seed(5)

        tracker =  {}

        for thing in things_to_print:
            tracker[thing[0]] = []

        for i in xrange(batches):
            x = dataset.get_batch_design(batch_size)
            assert x.shape == (batch_size, self.nvis)

            if self.different_examples:
                inputs = [ x , dataset.get_batch_design(batch_size) ]
            else:
                inputs = [ x ]

            for thing in things_to_print:
                tracker[thing[0]].append(thing[1](*inputs))

        for thing in things_to_print:
            print thing[0] + ': '+str(N.asarray(tracker[thing[0]]).mean())
        #
    #

    def record_monitoring_error(self, dataset, batch_size, batches):
        assert self.error_record_mode == self.ERROR_RECORD_MODE_MONITORING

        print 'noise variance (before norm rescaling): '+str(self.noise_var.get_value())

        #always use the same seed for monitoring error
        self.theano_rng.seed(5)

        errors = []
开发者ID:cc13ny,项目名称:galatea,代码行数:70,代码来源:local_noise_ebm.py

示例5: Network

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
class Network(object):

    ''' Core neural network class that forms the basis for all further implementations (e.g.
        MultilayerNet, Autoencoder, etc). Contains basic functions for propagating data forward
        and backwards through the network, as well as fitting the weights to data'''

    def __init__(self, d=None, k=None, num_hids=None, activs=None, loss_terms=None, **loss_params):
        ''' Builds the network architecture: layer sizes, activation
        functions, loss configuration, and a seeded theano random stream.

        Parameters:
        -----------
        param: d - number of input features
        type: int

        param: k - number of output units
        type: int

        param: num_hids - hidden layer sizes (output size comes from k)
        type: list of ints

        param: activs - activation name per non-input layer; each must be one
                        of 'sigmoid', 'tanh', 'reLU', 'softmax'
        type: list of strings

        param: loss_terms - components of the loss function (defaults to [None])
        type: list, optional

        param: loss_params - extra keyword parameters for the loss terms
        type: dict
        '''
        # FIX: the default was the mutable literal [None]; a mutable default
        # is shared across all calls, so use a None sentinel and build a
        # fresh list here instead (same effective default for callers).
        if loss_terms is None:
            loss_terms = [None]

        # Number of units in the output layer determined by k, so not explicitly specified in
        # num_hids. still need to check that there's one less hidden layer than number of activation
        # functions
        assert len(num_hids) + 1 == len(activs)

        self.num_nodes = [d] + num_hids + [k]

        # total parameter count — needed mainly for gradient checking
        self.num_params = 0
        for n_in, n_out in zip(self.num_nodes[:-1], self.num_nodes[1:]):
            self.num_params += (n_in + 1) * n_out  # +1 accounts for the bias row

        # resolve activation-name strings to the actual functions
        self.activs = [None] * len(activs)
        for idx, activ in enumerate(activs):
            if activ == 'sigmoid':
                self.activs[idx] = na.sigmoid
            elif activ == 'tanh':
                self.activs[idx] = na.tanh
            elif activ == 'reLU':
                self.activs[idx] = na.reLU
            elif activ == 'softmax':
                self.activs[idx] = na.softmax
            else:
                sys.exit(ne.activ_err())

        self.loss_terms = loss_terms
        self.loss_params = loss_params
        # theano shared random stream, seeded randomly here; set_weights can
        # re-seed it deterministically later via its `seed` parameter
        self.srng = RandomStreams()
        self.srng.seed(np.random.randint(99999))

    def set_weights(self, wts=None, bs=None, init_method=None, scale_factor=None, seed=None):
        ''' Initializes the weights and biases of the neural network

        Parameters:
        -----------
        param: wts - weights
        type: np.ndarray, optional

        param: bs - biases
        type: np.ndarray, optional

        param: init_method - calls some pre-specified weight initialization routines
        type: string

        param: scale_factor - additional hyperparameter for weight initialization
        type: float, optional

        param: seed - seeds the random number generator
        type: int, optional
        '''
        if seed is not None:
            # make both numpy and the theano stream deterministic
            np.random.seed(seed=seed)
            self.srng.seed(seed)

        if wts is None and bs is None:
            n_layers = len(self.num_nodes) - 1
            wts = [None] * n_layers
            bs = [None] * n_layers

            layer_pairs = zip(self.num_nodes[:-1], self.num_nodes[1:])
            if init_method == 'gauss':
                # zero-mean gaussians scaled by 1/sqrt(fan_out)
                for idx, (fan_in, fan_out) in enumerate(layer_pairs):
                    wts[idx] = scale_factor * 1. / \
                        np.sqrt(fan_out) * np.random.randn(fan_in, fan_out)
                    bs[idx] = np.zeros(fan_out)
            elif init_method == 'fan-io':
                # uniform in [-v, v] with v from the fan-in/fan-out heuristic
                for idx, (fan_in, fan_out) in enumerate(layer_pairs):
                    bound = scale_factor * np.sqrt(6. / (fan_in + fan_out + 1))
                    wts[idx] = 2.0 * bound * np.random.rand(fan_in, fan_out) - bound
                    bs[idx] = np.zeros(fan_out)
            else:
                sys.exit(ne.weight_error())
        else:
            # weights were handed in, typically from unsupervised
            # pre-training; both lists must be supplied together
            assert isinstance(wts, list)
            assert isinstance(bs, list)

        self.wts_ = [theano.shared(nu.floatX(w), borrow=True) for w in wts]
        self.bs_ = [theano.shared(nu.floatX(b), borrow=True) for b in bs]

    def fit(self, X_tr, y_tr, X_val=None, y_val=None, wts=None, bs=None, plotting=False, **optim_params):
        ''' The primary function which ingests data and fits to the neural network.

        Parameters:
        -----------
        param: X_tr - training data
        type: theano matrix

        param: y_tr - training labels
        type: theano matrix

#.........这里部分代码省略.........
开发者ID:avasbr,项目名称:nnet_theano,代码行数:103,代码来源:NeuralNetworkCore.py

示例6: print

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
# NOTE(review): `f`, `g`, `srng`, `rv_unif` come from earlier in this file
# (a RandomStreams instance plus compiled sampling functions) — this chunk
# only exercises them.
print(f()) # different uniform numbers
print(g()) # different normal numbers
print(g()) # same normal numbers as the prev. call (g was compiled with no_default_updates)

# NOTE: a single RV is sampled only once in one function call, regardless of how many
# times it appears in the formula (which makes sense, in math it is the same)
nearly_zeros = function([], rv_unif + rv_unif - 2*rv_unif)
print(nearly_zeros()) # returns 0

# Using seeds: you can seed each RV separately or all at once (pretty much to the same effect)
rng_val = rv_unif.rng.get_value(borrow=True)
rng_val.seed(81232)
rv_unif.rng.set_value(rng_val, borrow=True)

# or all at once
srng.seed(123321)

# and to explicitly show that RandomStreams have a shared state:
state_after_v0 = rv_unif.rng.get_value().get_state()
nearly_zeros()  # advances rv_unif's rng even though the output is ~0
v1 = f()
# Go one step back: restore the saved numpy RandomState into the shared rng
rng = rv_unif.rng.get_value(borrow=True)
rng.set_state(state_after_v0)
rv_unif.rng.set_value(rng, borrow=True)
print(v1 == f()) # False
print(v1 == f()) # True

"""
Copying random states from one function to another
"""
开发者ID:taimir,项目名称:deep_learning,代码行数:33,代码来源:more_examples.py

示例7: function

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
# NOTE(review): this chunk is an interactive-session transcript (source file
# "history.py"): bare expressions such as `rv_u` or `help(rv_u)` only display
# anything in a REPL, and several lines look like exploratory attempts rather
# than working code. `srng`, `rv_u`, `rv_n`, `f` are defined off-screen.
g = function([], rv_n, no_default_updates=True)
# NOTE(review): `ev_n` is presumably a typo for `rv_n` from the original
# session — confirm before reusing this line.
g = function([], ev_n, no_default_updates=True)
nearly_zeros = function([], rv_u + rv_u  - 2 * rv_u)
f_val0 = f()
f_val1 = f()
f_val0
f_val1
g_val0 = g()
g_val1 = g()
g_val0
g_val1
nearly_zeros()
# reseed rv_u's generator in place, then write it back
rng_val = rv_u.rng.get_value(borrow=True)
rng_val.seed(89234)
rv_u.rng.set_value(rng_val, borrow=True)
srng.seed(902340)
rv_u
rv_u.get_value()
rv_u[0]
rv_u[0,0]
help(rv_u)
rv_u.all()
help(rv_u)
rv_u.argmax()
# save the rng state, perturb it, then roll back and replay
state_after_v0 = rv_u.rng.get_value().get_state()
nearly_zeros()
v1 = f()
rng = rv_u.rng.get_value(borrow=True)
rng.set_state(state_after_v0)
rv_u.rng.set_value(rng, borrow=True)
v2 = f()
开发者ID:yukoga,项目名称:python-igraph-sample,代码行数:33,代码来源:history.py

示例8: function

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
# NOTE(review): `srng` (a RandomStreams) and `rv_u` are created earlier in
# this file — this chunk demonstrates seeding and rng-state rollback.
# Python 2 print-statement syntax throughout.
rv_n = srng.normal((2,2))
f = function([], rv_u)
g = function([], rv_n, no_default_updates=True)    #Not updating rv_n.rng
nearly_zeros = function([], rv_u + rv_u - 2 * rv_u)

# Call the sampling function - uniformly distributed, new draw each call
print f()
print f()

#same value every time - no_default_updates = True
print g()  # different numbers from f_val0 and f_val1
print g()

#Seeding streams

rng_val = rv_u.rng.get_value(borrow=True)   # Get the rng for rv_u
rng_val.seed(89234)                         # seeds the generator
rv_u.rng.set_value(rng_val, borrow=True)    # Assign back seeded rng

srng.seed(902340)  # seeds rv_u and rv_n with different seeds each

state_after_v0 = rv_u.rng.get_value().get_state()
nearly_zeros()       # this affects rv_u's generator
v1 = f()
rng = rv_u.rng.get_value(borrow=True)
rng.set_state(state_after_v0)
rv_u.rng.set_value(rng, borrow=True)
v2 = f()             # v2 != v1
v3 = f()             # v3 == v1

print v1, v2, v3
开发者ID:maym86,项目名称:theano_experiments,代码行数:33,代码来源:random_numbers.py

示例9: MaskGenerator

# 需要导入模块: from theano.tensor.shared_randomstreams import RandomStreams [as 别名]
# 或者: from theano.tensor.shared_randomstreams.RandomStreams import seed [as 别名]
class MaskGenerator(object):
    """Samples and stores autoregressive connectivity masks (source metadata
    indicates this comes from the MADE project).

    One shared `ordering` vector ranks the input dimensions; each layer gets
    a shared vector of connectivity indices, and the mask between two layers
    is a comparison of their index vectors (see _get_mask). Two random
    streams are kept — `_mrng` (MRG, GPU-capable) for sampling hidden-layer
    connectivities and `_rng` for shuffling the ordering — both seeded from
    `random_seed` so reset() can restore the construction-time state.
    """

    def __init__(self, input_size, hidden_sizes, l, random_seed=1234):
        # `l` scales the softmax over connectivity choices in _get_p
        # (presumably larger l -> sharper choices — see _get_p).
        self._random_seed = random_seed
        self._mrng = MRG_RandomStreams(seed=random_seed)
        self._rng = RandomStreams(seed=random_seed)

        self._hidden_sizes = hidden_sizes
        self._input_size = input_size
        self._l = l

        # ordering[i] is the rank of input dimension i; floatX dtype so it
        # can live in a theano shared variable on any device
        self.ordering = theano.shared(value=np.arange(input_size, dtype=theano.config.floatX), name='ordering', borrow=False)

        # Initial layer connectivity:
        # index 0 = input layer (ordering + 1); middle entries = hidden
        # layers (zeros until sampled); last entry aliases `ordering` itself.
        self.layers_connectivity = [theano.shared(value=(self.ordering + 1).eval(), name='layer_connectivity_input', borrow=False)]
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity += [theano.shared(value=np.zeros((self._hidden_sizes[i]), dtype=theano.config.floatX), name='layer_connectivity_hidden{0}'.format(i), borrow=False)]
        self.layers_connectivity += [self.ordering]

        ## Theano functions
        # shuffle_ordering permutes the input ordering and keeps the input
        # layer's connectivity (ordering + 1) in sync in the same update
        new_ordering = self._rng.shuffle_row_elements(self.ordering)
        self.shuffle_ordering = theano.function(name='shuffle_ordering',
                                                inputs=[],
                                                updates=[(self.ordering, new_ordering), (self.layers_connectivity[0], new_ordering + 1)])

        # sample_connectivity resamples every hidden layer's connectivity
        # vector in one call
        self.layers_connectivity_updates = []
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity_updates += [self._get_hidden_layer_connectivity(i)]
        # self.layers_connectivity_updates = [self._get_hidden_layer_connectivity(i) for i in range(len(self._hidden_sizes))]  # NOTE(review): original author reported the comprehension form broken; kept as an explicit loop
        self.sample_connectivity = theano.function(name='sample_connectivity',
                                                   inputs=[],
                                                   updates=[(self.layers_connectivity[i+1], self.layers_connectivity_updates[i]) for i in range(len(self._hidden_sizes))])

        # Save random initial state so reset() can restore the MRG stream
        self._initial_mrng_rstate = copy.deepcopy(self._mrng.rstate)
        self._initial_mrng_state_updates = [state_update[0].get_value() for state_update in self._mrng.state_updates]

        # Ensuring valid initial connectivity
        self.sample_connectivity()

    def reset(self):
        """Restore ordering, layer connectivities, and both random streams to
        their construction-time state, then resample a valid connectivity."""
        # Set Original ordering
        self.ordering.set_value(np.arange(self._input_size, dtype=theano.config.floatX))

        # Reset RandomStreams
        self._rng.seed(self._random_seed)

        # Initial layer connectivity (same scheme as in __init__)
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity)-1):
            self.layers_connectivity[i].set_value(np.zeros((self._hidden_sizes[i-1]), dtype=theano.config.floatX))
        self.layers_connectivity[-1].set_value(self.ordering.get_value())

        # Reset MRG_RandomStreams (GPU) to the snapshot taken in __init__
        self._mrng.rstate = self._initial_mrng_rstate
        for state, value in zip(self._mrng.state_updates, self._initial_mrng_state_updates):
            state[0].set_value(value)

        self.sample_connectivity()

    def _get_p(self, start_choice):
        """Symbolic probability vector over connectivity values: zeros below
        `start_choice`, then softmax(self._l * arange(start_choice, input_size))
        over the admissible range."""
        start_choice_idx = (start_choice-1).astype('int32')
        p_vals = T.concatenate([T.zeros((start_choice_idx,)), T.nnet.nnet.softmax(self._l * T.arange(start_choice, self._input_size, dtype=theano.config.floatX))[0]])
        p_vals = T.inc_subtensor(p_vals[start_choice_idx], 1.)  # bump the first admissible entry: guards against multinomial's lack of a safety margin for numerical imprecision
        return p_vals

    def _get_hidden_layer_connectivity(self, layerIdx):
        """Symbolically sample a new connectivity vector for hidden layer
        `layerIdx`, with values bounded below by the minimum connectivity of
        the previous layer."""
        layer_size = self._hidden_sizes[layerIdx]
        if layerIdx == 0:
            p_vals = self._get_p(T.min(self.layers_connectivity[layerIdx]))
        else:
            # chain off the *pending* update expression so successive layers
            # are sampled consistently within one sample_connectivity call
            p_vals = self._get_p(T.min(self.layers_connectivity_updates[layerIdx-1]))

        # #Implementations of np.choose in theano GPU
        # return T.nonzero(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX))[1].astype(dtype=theano.config.floatX)
        # return T.argmax(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX), axis=1)
        # sum-of-cumsum over the one-hot multinomial draw (on reversed pvals)
        # recovers each row's sampled index using only GPU-friendly ops
        return T.sum(T.cumsum(self._mrng.multinomial(pvals=T.tile(p_vals[::-1][None, :], (layer_size, 1)), dtype=theano.config.floatX), axis=1), axis=1)

    def _get_mask(self, layerIdxIn, layerIdxOut):
        # unit j of the "out" layer may see unit i of the "in" layer iff
        # connectivity_in[i] <= connectivity_out[j]
        return (self.layers_connectivity[layerIdxIn][:, None] <= self.layers_connectivity[layerIdxOut][None, :]).astype(theano.config.floatX)

    def get_mask_layer_UPDATE(self, layerIdx):
        """Mask between consecutive layers layerIdx -> layerIdx + 1."""
        return self._get_mask(layerIdx, layerIdx + 1)

    def get_direct_input_mask_layer_UPDATE(self, layerIdx):
        """Mask from the input layer (index 0) directly to layerIdx."""
        return self._get_mask(0, layerIdx)

    def get_direct_output_mask_layer_UPDATE(self, layerIdx):
        """Mask from layerIdx directly to the output layer (index -1)."""
        return self._get_mask(layerIdx, -1)
开发者ID:amoliu,项目名称:MADE,代码行数:91,代码来源:mask_generator.py


注:本文中的theano.tensor.shared_randomstreams.RandomStreams.seed方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。