This article collects typical usage examples of the Python method theano.tensor.shared_randomstreams.RandomStreams.shuffle_row_elements. If you have been wondering what RandomStreams.shuffle_row_elements does and how to call it, the curated code examples below should help. You can also read more about the containing class, theano.tensor.shared_randomstreams.RandomStreams.
Three code examples of RandomStreams.shuffle_row_elements are shown below, sorted by popularity by default.
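Before the numbered examples, here is a minimal, self-contained sketch of the typical call pattern. The seed, variable names and toy data are illustrative rather than taken from the examples below: shuffle_row_elements builds a symbolic expression whose result shuffles each row of a matrix independently, and the stream's state updates can be passed to theano.function just as Example 1 does.

import numpy
import theano
import theano.tensor as tensor
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=1234)  # illustrative seed
x = tensor.dmatrix('x')
# Each row of the output is an independent permutation of the corresponding input row.
shuffle = theano.function([x], srng.shuffle_row_elements(x), updates=srng.updates())

data = numpy.arange(20.0).reshape(4, 5)
print(shuffle(data))  # every row is a permutation of the matching row of `data`
print(shuffle(data))  # the random state advances, so the shuffles differ between calls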
Example 1: test_shuffle_row_elements
# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import shuffle_row_elements [as alias]
def test_shuffle_row_elements(self):
    """Test that RandomStreams.shuffle_row_elements generates the right results"""
    # Check over two calls to see if the random state is correctly updated.

    # On matrices, for each row, the elements of that row should be shuffled.
    # Note that this differs from numpy.random.shuffle, where all the elements
    # of the matrix are shuffled.
    random = RandomStreams(utt.fetch_seed())
    m_input = tensor.dmatrix()
    f = function([m_input], random.shuffle_row_elements(m_input), updates=random.updates())

    # Generate the elements to be shuffled
    val_rng = numpy.random.RandomState(utt.fetch_seed() + 42)
    in_mval = val_rng.uniform(-2, 2, size=(20, 5))
    fn_mval0 = f(in_mval)
    fn_mval1 = f(in_mval)
    print(in_mval[0])
    print(fn_mval0[0])
    print(fn_mval1[0])
    assert not numpy.all(in_mval == fn_mval0)
    assert not numpy.all(in_mval == fn_mval1)
    assert not numpy.all(fn_mval0 == fn_mval1)

    rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)
    rng = numpy.random.RandomState(int(rng_seed))
    numpy_mval0 = in_mval.copy()
    numpy_mval1 = in_mval.copy()
    for row in numpy_mval0:
        rng.shuffle(row)
    for row in numpy_mval1:
        rng.shuffle(row)

    assert numpy.all(numpy_mval0 == fn_mval0)
    assert numpy.all(numpy_mval1 == fn_mval1)

    # On vectors, the behaviour is the same as numpy.random.shuffle,
    # except that it does not work in place, but returns a shuffled vector.
    random1 = RandomStreams(utt.fetch_seed())
    v_input = tensor.dvector()
    f1 = function([v_input], random1.shuffle_row_elements(v_input))

    in_vval = val_rng.uniform(-3, 3, size=(12,))
    fn_vval = f1(in_vval)
    numpy_vval = in_vval.copy()
    vrng = numpy.random.RandomState(int(rng_seed))
    vrng.shuffle(numpy_vval)
    print(in_vval)
    print(fn_vval)
    print(numpy_vval)
    assert numpy.all(numpy_vval == fn_vval)

    # Trying to shuffle a vector with function that should shuffle
    # matrices, or vice versa, raises a TypeError
    self.assertRaises(TypeError, f1, in_mval)
    self.assertRaises(TypeError, f, in_vval)
Example 2: run
# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import shuffle_row_elements [as alias]
#......... part of the code is omitted here .........
# Initialize optimizer
optimizer = Optimizer()
updates = optimizer.init_optimizer(self.optimizer, cost, self.params, self.optimizerData)

srng = RandomStreams(seed=234)
perm = theano.shared(np.arange(train_set_x.eval().shape[0]))

# Train functions
if self.predict_only == False:
    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[perm[index * self.batch_size: (index + 1) * self.batch_size]],
            y: train_set_y[perm[index * self.batch_size: (index + 1) * self.batch_size]]
        }
    )

# Initialize result arrays
cost_results = []
val_results_pixel = []
time_results = []

predict_val = f.init_predict(valid_set_x, model_val, self.batch_size, x, index)

# Solver
try:
    print '... Solving'
    start_time = time.time()

    for epoch in range(self.epochs):
        t1 = time.time()

        # Draw a new permutation of the example indices for this epoch
        perm = srng.shuffle_row_elements(perm)
        train_set_x, train_set_y = f.flip_rotate(train_set_x,
                                                 train_set_y,
                                                 self.in_window_shape,
                                                 self.out_window_shape,
                                                 perm,
                                                 index,
                                                 cost,
                                                 updates,
                                                 self.batch_size,
                                                 x,
                                                 y,
                                                 self.classifier,
                                                 self.layers_3D)

        costs = [train_model(i) for i in xrange(n_train_batches)]
        epoch_cost = np.mean(costs)

        output_val = f.predict_set(predict_val, n_valid_batches, self.classifier, self.pred_window_size)
        error_pixel, error_window = f.evaluate(output_val, valid_set_y.get_value(borrow=True), self.eval_window_size, self.classifier)
        #error_pixel = 0.
        #error_window = 0.

        t2 = time.time()
        epoch_time = (t2 - t1) / 60.

        cost_results.append(epoch_cost)
        val_results_pixel.append(error_pixel)
        time_results.append(epoch_time)

        # store parameters
        self.save_params(self.get_params(), self.path)

        if self.classifier in ["membrane", "synapse"]:
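Example 2 uses shuffle_row_elements inside its epoch loop to draw a new permutation of the shared index vector perm, with the intent that each epoch's minibatches (selected through the givens slices of train_model) come from a re-shuffled ordering. Below is a minimal sketch of that intent; the names, sizes and data are illustrative and not part of the original run() method, and here the shuffle is compiled as an explicit update to the shared index vector:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

n_examples, batch_size = 100, 10
data = theano.shared(np.random.randn(n_examples, 3).astype(theano.config.floatX))
perm = theano.shared(np.arange(n_examples))  # index vector shared across epochs
srng = RandomStreams(seed=234)

index = T.lscalar('index')
# Re-shuffle the index vector once per epoch so each epoch visits the data in a new order.
shuffle_perm = theano.function([], updates=[(perm, srng.shuffle_row_elements(perm))])
get_batch = theano.function([index],
                            data[perm[index * batch_size:(index + 1) * batch_size]])

for epoch in range(3):
    shuffle_perm()
    batches = [get_batch(i) for i in range(n_examples // batch_size)]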
Example 3: MaskGenerator
# Required import: from theano.tensor.shared_randomstreams import RandomStreams [as alias]
# Or: from theano.tensor.shared_randomstreams.RandomStreams import shuffle_row_elements [as alias]
class MaskGenerator(object):

    def __init__(self, input_size, hidden_sizes, l, random_seed=1234):
        self._random_seed = random_seed
        self._mrng = MRG_RandomStreams(seed=random_seed)
        self._rng = RandomStreams(seed=random_seed)

        self._hidden_sizes = hidden_sizes
        self._input_size = input_size
        self._l = l

        self.ordering = theano.shared(value=np.arange(input_size, dtype=theano.config.floatX), name='ordering', borrow=False)

        # Initial layer connectivity
        self.layers_connectivity = [theano.shared(value=(self.ordering + 1).eval(), name='layer_connectivity_input', borrow=False)]
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity += [theano.shared(value=np.zeros((self._hidden_sizes[i]), dtype=theano.config.floatX), name='layer_connectivity_hidden{0}'.format(i), borrow=False)]
        self.layers_connectivity += [self.ordering]

        ## Theano functions
        new_ordering = self._rng.shuffle_row_elements(self.ordering)
        self.shuffle_ordering = theano.function(name='shuffle_ordering',
                                                inputs=[],
                                                updates=[(self.ordering, new_ordering), (self.layers_connectivity[0], new_ordering + 1)])

        self.layers_connectivity_updates = []
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity_updates += [self._get_hidden_layer_connectivity(i)]
        # self.layers_connectivity_updates = [self._get_hidden_layer_connectivity(i) for i in range(len(self._hidden_sizes))]  # WTF THIS DO NOT WORK
        self.sample_connectivity = theano.function(name='sample_connectivity',
                                                   inputs=[],
                                                   updates=[(self.layers_connectivity[i + 1], self.layers_connectivity_updates[i]) for i in range(len(self._hidden_sizes))])

        # Save random initial state
        self._initial_mrng_rstate = copy.deepcopy(self._mrng.rstate)
        self._initial_mrng_state_updates = [state_update[0].get_value() for state_update in self._mrng.state_updates]

        # Ensuring valid initial connectivity
        self.sample_connectivity()

    def reset(self):
        # Set original ordering
        self.ordering.set_value(np.arange(self._input_size, dtype=theano.config.floatX))

        # Reset RandomStreams
        self._rng.seed(self._random_seed)

        # Initial layer connectivity
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity) - 1):
            self.layers_connectivity[i].set_value(np.zeros((self._hidden_sizes[i - 1]), dtype=theano.config.floatX))
        self.layers_connectivity[-1].set_value(self.ordering.get_value())

        # Reset MRG_RandomStreams (GPU)
        self._mrng.rstate = self._initial_mrng_rstate
        for state, value in zip(self._mrng.state_updates, self._initial_mrng_state_updates):
            state[0].set_value(value)

        self.sample_connectivity()

    def _get_p(self, start_choice):
        start_choice_idx = (start_choice - 1).astype('int32')
        p_vals = T.concatenate([T.zeros((start_choice_idx,)), T.nnet.nnet.softmax(self._l * T.arange(start_choice, self._input_size, dtype=theano.config.floatX))[0]])
        p_vals = T.inc_subtensor(p_vals[start_choice_idx], 1.)  # Stupid hack because the multinomial does not contain a safety for numerical imprecision.
        return p_vals

    def _get_hidden_layer_connectivity(self, layerIdx):
        layer_size = self._hidden_sizes[layerIdx]
        if layerIdx == 0:
            p_vals = self._get_p(T.min(self.layers_connectivity[layerIdx]))
        else:
            p_vals = self._get_p(T.min(self.layers_connectivity_updates[layerIdx - 1]))

        # #Implementations of np.choose in theano GPU
        # return T.nonzero(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX))[1].astype(dtype=theano.config.floatX)
        # return T.argmax(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX), axis=1)
        return T.sum(T.cumsum(self._mrng.multinomial(pvals=T.tile(p_vals[::-1][None, :], (layer_size, 1)), dtype=theano.config.floatX), axis=1), axis=1)

    def _get_mask(self, layerIdxIn, layerIdxOut):
        return (self.layers_connectivity[layerIdxIn][:, None] <= self.layers_connectivity[layerIdxOut][None, :]).astype(theano.config.floatX)

    def get_mask_layer_UPDATE(self, layerIdx):
        return self._get_mask(layerIdx, layerIdx + 1)

    def get_direct_input_mask_layer_UPDATE(self, layerIdx):
        return self._get_mask(0, layerIdx)

    def get_direct_output_mask_layer_UPDATE(self, layerIdx):
        return self._get_mask(layerIdx, -1)
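Example 3 uses shuffle_row_elements to resample the ordering variable that drives the layer-connectivity masks. The class body above relies on imports that are not shown in the excerpt; the list below is inferred from the names it uses, and the driver lines are an illustrative sketch whose argument values (input_size, hidden_sizes, l) are made up rather than taken from the original code:

# Assumed import paths, inferred from the class body above.
import copy
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.shared_randomstreams import RandomStreams

mask_gen = MaskGenerator(input_size=8, hidden_sizes=[16, 16], l=1.0)  # illustrative arguments
mask_gen.shuffle_ordering()     # draws a new input ordering via shuffle_row_elements
mask_gen.sample_connectivity()  # resamples hidden-layer connectivity for that ordering
mask_gen.reset()                # restores the original ordering and the saved random state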