This page collects typical usage examples of the Python class blocks.bricks.recurrent.SimpleRecurrent. If you are unsure what SimpleRecurrent does or how to use it, the curated class examples below should help.
Fifteen code examples of SimpleRecurrent are shown below, sorted by popularity by default.
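Before diving in, here is a minimal sketch of the pattern every example below builds on (construct the brick, initialize its parameters, then apply it to a (time, batch, features) tensor); it is distilled from Example 13 below:

import numpy
import theano
from theano import tensor
from blocks import initialization
from blocks.bricks import Identity
from blocks.bricks.recurrent import SimpleRecurrent

# Build a 3-unit vanilla RNN whose recurrent weights are the identity matrix,
# so with all-ones inputs the hidden state is just a running sum.
x = tensor.tensor3('x')  # (time, batch, features)
rnn = SimpleRecurrent(dim=3, activation=Identity(),
                      weights_init=initialization.Identity())
rnn.initialize()
h = rnn.apply(x)  # (time, batch, dim)

f = theano.function([x], h)
print(f(numpy.ones((3, 1, 3), dtype=theano.config.floatX)))
# [[[1. 1. 1.]], [[2. 2. 2.]], [[3. 3. 3.]]]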
Example 1: TestBidirectional
class TestBidirectional(unittest.TestCase):
    def setUp(self):
        self.bidir = Bidirectional(weights_init=Orthogonal(),
                                   prototype=SimpleRecurrent(
                                       dim=3, activation=Tanh()))
        self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
                                      activation=Tanh(), seed=1)
        self.bidir.allocate()
        self.simple.initialize()
        self.bidir.children[0].params[0].set_value(
            self.simple.params[0].get_value())
        self.bidir.children[1].params[0].set_value(
            self.simple.params[0].get_value())

        self.x_val = 0.1 * numpy.asarray(
            list(itertools.permutations(range(4))),
            dtype=floatX)
        self.x_val = (numpy.ones((24, 4, 3), dtype=floatX) *
                      self.x_val[..., None])
        self.mask_val = numpy.ones((24, 4), dtype=floatX)
        self.mask_val[12:24, 3] = 0

    def test(self):
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        calc_bidir = theano.function([x, mask],
                                     [self.bidir.apply(x, mask=mask)])
        calc_simple = theano.function([x, mask],
                                      [self.simple.apply(x, mask=mask)])
        h_bidir = calc_bidir(self.x_val, self.mask_val)[0]
        h_simple = calc_simple(self.x_val, self.mask_val)[0]
        h_simple_rev = calc_simple(self.x_val[::-1], self.mask_val[::-1])[0]

        assert_allclose(h_simple, h_bidir[..., :3], rtol=1e-04)
        assert_allclose(h_simple_rev, h_bidir[::-1, ..., 3:], rtol=1e-04)
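Note that Bidirectional concatenates the forward and backward states along the feature axis: the first three features of h_bidir should match the plain SimpleRecurrent run forward, and the last three should match it run on the reversed sequence (reversed back for comparison), which is exactly what the two assertions check.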
Example 2: FeedbackRNN
class FeedbackRNN(BaseRecurrent):
    def __init__(self, dim, **kwargs):
        super(FeedbackRNN, self).__init__(**kwargs)
        self.dim = dim
        self.first_recurrent_layer = SimpleRecurrent(
            dim=self.dim, activation=Identity(), name='first_recurrent_layer',
            weights_init=initialization.Identity())
        self.second_recurrent_layer = SimpleRecurrent(
            dim=self.dim, activation=Identity(), name='second_recurrent_layer',
            weights_init=initialization.Identity())
        self.children = [self.first_recurrent_layer,
                         self.second_recurrent_layer]

    @recurrent(sequences=['inputs'], contexts=[],
               states=['first_states', 'second_states'],
               outputs=['first_states', 'second_states'])
    def apply(self, inputs, first_states=None, second_states=None):
        first_h = self.first_recurrent_layer.apply(
            inputs=inputs, states=first_states + second_states, iterate=False)
        second_h = self.second_recurrent_layer.apply(
            inputs=first_h, states=second_states, iterate=False)
        return first_h, second_h

    def get_dim(self, name):
        return (self.dim if name in ('inputs', 'first_states', 'second_states')
                else super(FeedbackRNN, self).get_dim(name))
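FeedbackRNN feeds the second layer's previous state back into the first layer's input, and the @recurrent decorator turns the one-step apply into an iterating one. A short driver along the following lines exercises it (a sketch; the initial states are created automatically from get_dim):

import numpy
import theano
from theano import tensor

x = tensor.tensor3('x')
feedback = FeedbackRNN(dim=3)
feedback.initialize()
first_h, second_h = feedback.apply(inputs=x)
f = theano.function([x], [first_h, second_h])
for states in f(numpy.ones((3, 1, 3), dtype=theano.config.floatX)):
    print(states)  # each has shape (time, batch, dim)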
Example 3: __init__
def __init__(self, batch_size, num_subwords, num_words, subword_embedding_size, input_vocab_size,
             subword_RNN_hidden_state_size, add_one=True, **kwargs):
    super(CompositionalLayerToyBidirectional, self).__init__(**kwargs)
    self.batch_size = batch_size
    self.num_subwords = num_subwords  # number of subwords which make up a word
    self.num_words = num_words  # number of words in the sentence
    self.subword_embedding_size = subword_embedding_size
    self.input_vocab_size = input_vocab_size
    self.subword_RNN_hidden_state_size = subword_RNN_hidden_state_size
    self.add_one = add_one  # adds 1 to the backwards embeddings

    # create the lookup table
    self.lookup = LookupTable(length=self.input_vocab_size, dim=self.subword_embedding_size,
                              name='input_lookup')
    self.lookup.weights_init = Uniform(width=0.08)
    self.lookup.biases_init = Constant(0)

    # two RNNs, one per direction, read the subwords into a word embedding
    self.compositional_subword_to_word_RNN_forward = SimpleRecurrent(
        dim=self.subword_RNN_hidden_state_size, activation=Identity(), name='subword_RNN_forward',
        weights_init=Identity_init())
    self.compositional_subword_to_word_RNN_backward = SimpleRecurrent(
        dim=self.subword_RNN_hidden_state_size, activation=Identity(), name='subword_RNN_backward',
        weights_init=Identity_init())

    self.children = [self.lookup, self.compositional_subword_to_word_RNN_forward,
                     self.compositional_subword_to_word_RNN_backward]
Example 4: __init__
def __init__(self, dim, **kwargs):
    super(FeedbackRNN, self).__init__(**kwargs)
    self.dim = dim
    self.first_recurrent_layer = SimpleRecurrent(
        dim=self.dim, activation=Identity(), name='first_recurrent_layer',
        weights_init=initialization.Identity())
    self.second_recurrent_layer = SimpleRecurrent(
        dim=self.dim, activation=Identity(), name='second_recurrent_layer',
        weights_init=initialization.Identity())
    self.children = [self.first_recurrent_layer,
                     self.second_recurrent_layer]
Example 5: example5
def example5():
    """Bidir + SimpleRecurrent. Adapted from a unit test in Blocks."""
    bidir = Bidirectional(weights_init=Orthogonal(),
                          prototype=SimpleRecurrent(
                              dim=3, activation=Tanh()))
    simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
                             activation=Tanh(), seed=1)
    bidir.allocate()
    simple.initialize()
    bidir.children[0].parameters[0].set_value(
        simple.parameters[0].get_value())
    bidir.children[1].parameters[0].set_value(
        simple.parameters[0].get_value())

    # Initialize theano variables and functions
    x = tensor.tensor3('x')
    mask = tensor.matrix('mask')
    calc_bidir = theano.function([x, mask],
                                 [bidir.apply(x, mask=mask)])
    calc_simple = theano.function([x, mask],
                                  [simple.apply(x, mask=mask)])

    # Testing time
    x_val = 0.1 * np.asarray(
        list(itertools.permutations(range(4))),
        dtype=theano.config.floatX)
    x_val = (np.ones((24, 4, 3), dtype=theano.config.floatX) *
             x_val[..., None])
    mask_val = np.ones((24, 4), dtype=theano.config.floatX)
    mask_val[12:24, 3] = 0
    h_bidir = calc_bidir(x_val, mask_val)[0]
    h_simple = calc_simple(x_val, mask_val)[0]
    h_simple_rev = calc_simple(x_val[::-1], mask_val[::-1])[0]

    print(h_bidir)
    print(h_simple)
    print(h_simple_rev)
Example 6: MyRnn
class MyRnn(BaseRecurrent):  # Extend the base recurrent class to create one of your own
    def __init__(self, dim, **kwargs):
        super(MyRnn, self).__init__(**kwargs)
        self.dim = dim
        self.layer1 = SimpleRecurrent(dim=self.dim, activation=Identity(),
                                      name='recurrent layer 1',
                                      weights_init=initialization.Identity())
        self.layer2 = SimpleRecurrent(dim=self.dim, activation=Identity(),
                                      name='recurrent layer 2',
                                      weights_init=initialization.Identity())
        self.children = [self.layer1, self.layer2]

    # The @recurrent decorator is needed so that the one-step apply below can
    # be iterated over a sequence; without it, the inner iterate=False calls
    # would receive states=None and fail.
    @recurrent(sequences=['inputs'], contexts=[],
               states=['first_states', 'second_states'],
               outputs=['first_states', 'second_states'])
    def apply(self, inputs, first_states=None, second_states=None):
        first_h = self.layer1.apply(inputs=inputs, states=first_states, iterate=False)
        second_h = self.layer2.apply(inputs=first_h, states=second_states, iterate=False)
        return first_h, second_h

    def get_dim(self, name):
        # The decorator queries get_dim to build the initial states.
        return (self.dim if name in ('inputs', 'first_states', 'second_states')
                else super(MyRnn, self).get_dim(name))
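A short driver for MyRnn, mirroring the FeedbackRNN driver above (a sketch; it assumes the same numpy/theano imports):

x = tensor.tensor3('x')
rnn = MyRnn(dim=3)
rnn.initialize()
first_h, second_h = rnn.apply(inputs=x)
f = theano.function([x], [first_h, second_h])
for states in f(numpy.ones((3, 1, 3), dtype=theano.config.floatX)):
    print(states)  # each has shape (time, batch, dim)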
Example 7: TestSimpleRecurrent
class TestSimpleRecurrent(unittest.TestCase):
    def setUp(self):
        self.simple = SimpleRecurrent(dim=3, weights_init=Constant(2),
                                      activation=Tanh())
        self.simple.initialize()

    def test_one_step(self):
        h0 = tensor.matrix('h0')
        x = tensor.matrix('x')
        mask = tensor.vector('mask')
        h1 = self.simple.apply(x, h0, mask=mask, iterate=False)
        next_h = theano.function(inputs=[h0, x, mask], outputs=[h1])

        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]],
                                  dtype=theano.config.floatX)
        mask_val = numpy.array([1, 0]).astype(theano.config.floatX)
        h1_val = numpy.tanh(h0_val.dot(2 * numpy.ones((3, 3))) + x_val)
        h1_val = mask_val[:, None] * h1_val + (1 - mask_val[:, None]) * h0_val
        assert_allclose(h1_val, next_h(h0_val, x_val, mask_val)[0])

    def test_many_steps(self):
        x = tensor.tensor3('x')
        mask = tensor.matrix('mask')
        h = self.simple.apply(x, mask=mask, iterate=True)
        calc_h = theano.function(inputs=[x, mask], outputs=[h])

        x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
                                    dtype=theano.config.floatX)
        x_val = numpy.ones((24, 4, 3),
                           dtype=theano.config.floatX) * x_val[..., None]
        mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        mask_val[12:24, 3] = 0
        h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
        for i in range(1, 25):
            h_val[i] = numpy.tanh(h_val[i - 1].dot(
                2 * numpy.ones((3, 3))) + x_val[i - 1])
            h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                        (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
        h_val = h_val[1:]
        assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)

        # Also test that the initial state is a parameter
        initial_state, = VariableFilter(roles=[INITIAL_STATE])(
            ComputationGraph(h))
        assert is_shared_variable(initial_state)
        assert initial_state.name == 'initial_state'
Example 8: __init__
def __init__(self, dim_in, dim_hidden, dim_out, **kwargs):
    self.dim_in = dim_in
    self.dim_hidden = dim_hidden
    self.dim_out = dim_out

    self.input_layer = Linear(input_dim=self.dim_in, output_dim=self.dim_hidden,
                              weights_init=initialization.IsotropicGaussian(),
                              biases_init=initialization.Constant(0))
    self.input_layer.initialize()

    sparse_init = initialization.Sparse(num_init=15,
                                        weights_init=initialization.IsotropicGaussian())
    self.recurrent_layer = SimpleRecurrent(
        dim=self.dim_hidden, activation=Tanh(), name="first_recurrent_layer",
        weights_init=sparse_init,
        biases_init=initialization.Constant(0.01))
    '''
    self.recurrent_layer = LSTM(dim=self.dim_hidden, activation=Tanh(),
                                weights_init=initialization.IsotropicGaussian(std=0.001),
                                biases_init=initialization.Constant(0.01))
    '''
    self.recurrent_layer.initialize()

    self.output_layer = Linear(input_dim=self.dim_hidden, output_dim=self.dim_out,
                               weights_init=initialization.Uniform(width=0.01),
                               biases_init=initialization.Constant(0.01))
    self.output_layer.initialize()

    self.children = [self.input_layer, self.recurrent_layer, self.output_layer]
Example 9: CompositionalLayerToyWithTables
class CompositionalLayerToyWithTables(Initializable):
    def __init__(self, batch_size, num_subwords, num_words, subword_embedding_size, input_vocab_size,
                 subword_RNN_hidden_state_size, **kwargs):
        super(CompositionalLayerToyWithTables, self).__init__(**kwargs)
        self.batch_size = batch_size
        self.num_subwords = num_subwords  # number of subwords which make up a word
        self.num_words = num_words  # number of words in the sentence
        self.subword_embedding_size = subword_embedding_size
        self.input_vocab_size = input_vocab_size
        self.subword_RNN_hidden_state_size = subword_RNN_hidden_state_size

        # create the lookup table
        self.lookup = LookupTable(length=self.input_vocab_size, dim=self.subword_embedding_size,
                                  name='input_lookup')
        self.lookup.weights_init = Uniform(width=0.08)
        self.lookup.biases_init = Constant(0)

        # has one RNN which reads the subwords into a word embedding
        self.compositional_subword_to_word_RNN = SimpleRecurrent(
            dim=self.subword_RNN_hidden_state_size, activation=Identity(), name='subword_RNN',
            weights_init=Identity_init())

        self.children = [self.lookup, self.compositional_subword_to_word_RNN]

    '''
    subword_id_input_ is a 3d tensor of shape = (num_words, num_subwords, batch_size).
    It is expected as a dtype=uint16 or equivalent.

    subword_id_input_mask_ is a 3d tensor of shape = (num_words, num_subwords, batch_size).
    It is expected as a dtype=uint8 or equivalent and has binary values of 1 where there is
    data and 0 otherwise.

    The lookup table returns a 4d tensor of shape = (num_words, num_subwords, batch_size, embedding_size).
    The RNN eats up the subword dimension, resulting in a 3d tensor of
    shape = (num_words, batch_size, RNN_hidden_value_size), which is returned as 'word_embeddings'.

    Also returned is a 2d tensor of shape = (num_words, batch_size): the remaining mask, indicating
    the length of each sentence in the batch, i.e. 1 where there is a word and 0 otherwise.
    '''
    @application(inputs=['subword_id_input_', 'subword_id_input_mask_'],
                 outputs=['word_embeddings', 'word_embeddings_mask'])
    def apply(self, subword_id_input_, subword_id_input_mask_):
        # shape = (num_words, num_subwords, batch_size, embedding_size)
        subword_embeddings = self.lookup.apply(subword_id_input_)

        # loop over each word and have the RNN eat up its subwords
        result, updates = theano.scan(
            fn=lambda subword_embeddings, subword_id_input_mask_:
                self.compositional_subword_to_word_RNN.apply(
                    subword_embeddings, mask=subword_id_input_mask_),
            sequences=[subword_embeddings, subword_id_input_mask_])

        # move the subword (RNN state) axis to the front so the final state can be selected
        word_embeddings = result.dimshuffle(1, 0, 2, 3)
        # take only the last state, since we don't need the others
        # (remove this line to see all the RNN states)
        word_embeddings = word_embeddings[-1]

        # remove the subword dim from the mask: if every subword slot is empty
        # the word is empty, otherwise the word is used
        word_embeddings_mask = subword_id_input_mask_.max(axis=1)

        return word_embeddings, word_embeddings_mask
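A hypothetical driver for this layer. Identity_init is not defined in the snippet; here it is assumed to be a small helper returning blocks.initialization.Identity(), and all sizes are made up for illustration:

import numpy
import theano
from theano import tensor
from blocks import initialization

def Identity_init():  # assumed helper, not shown in the original code
    return initialization.Identity()

num_words, num_subwords, batch_size, dim = 5, 4, 2, 3
layer = CompositionalLayerToyWithTables(
    batch_size=batch_size, num_subwords=num_subwords, num_words=num_words,
    subword_embedding_size=dim, input_vocab_size=100,
    subword_RNN_hidden_state_size=dim)
layer.initialize()

ids = tensor.tensor3('subword_id_input_', dtype='uint16')
mask = tensor.tensor3('subword_id_input_mask_', dtype='uint8')
emb, emb_mask = layer.apply(ids, mask)
f = theano.function([ids, mask], [emb, emb_mask])

ids_val = numpy.zeros((num_words, num_subwords, batch_size), dtype='uint16')
mask_val = numpy.ones((num_words, num_subwords, batch_size), dtype='uint8')
word_emb, word_mask = f(ids_val, mask_val)
print(word_emb.shape)   # (num_words, batch_size, hidden) == (5, 2, 3)
print(word_mask.shape)  # (num_words, batch_size) == (5, 2)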
Example 10: test_saved_inner_graph
def test_saved_inner_graph():
    """Make sure that the original inner graph is saved."""
    x = tensor.tensor3()
    recurrent = SimpleRecurrent(dim=3, activation=Tanh())
    y = recurrent.apply(x)

    application_call = get_application_call(y)
    assert application_call.inner_inputs
    assert application_call.inner_outputs

    cg = ComputationGraph(application_call.inner_outputs)
    # Check that the inner scan graph is annotated with `recurrent.apply`
    assert len(VariableFilter(application=recurrent.apply)(cg)) == 3
    # Check that the inner graph is equivalent to the one produced
    # by a stand-alone call to `recurrent.apply`
    assert is_same_graph(application_call.inner_outputs[0],
                         recurrent.apply(*application_call.inner_inputs,
                                         iterate=False))
Example 11: TextRNN
class TextRNN(object):
    def __init__(self, dim_in, dim_hidden, dim_out, **kwargs):
        self.dim_in = dim_in
        self.dim_hidden = dim_hidden
        self.dim_out = dim_out

        self.input_layer = Linear(input_dim=self.dim_in, output_dim=self.dim_hidden,
                                  weights_init=initialization.IsotropicGaussian(),
                                  biases_init=initialization.Constant(0))
        self.input_layer.initialize()

        sparse_init = initialization.Sparse(num_init=15,
                                            weights_init=initialization.IsotropicGaussian())
        self.recurrent_layer = SimpleRecurrent(
            dim=self.dim_hidden, activation=Tanh(), name="first_recurrent_layer",
            weights_init=sparse_init,
            biases_init=initialization.Constant(0.01))
        '''
        self.recurrent_layer = LSTM(dim=self.dim_hidden, activation=Tanh(),
                                    weights_init=initialization.IsotropicGaussian(std=0.001),
                                    biases_init=initialization.Constant(0.01))
        '''
        self.recurrent_layer.initialize()

        self.output_layer = Linear(input_dim=self.dim_hidden, output_dim=self.dim_out,
                                   weights_init=initialization.Uniform(width=0.01),
                                   biases_init=initialization.Constant(0.01))
        self.output_layer.initialize()

        self.children = [self.input_layer, self.recurrent_layer, self.output_layer]

    '''
    @recurrent(sequences=['inputs'],
               states=['states'],
               contexts=[],
               outputs=['states', 'output'])
    '''
    def run(self, inputs):
        output = self.output_layer.apply(
            self.recurrent_layer.apply(self.input_layer.apply(inputs)))
        return output
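A hypothetical driver for TextRNN (a sketch; the sizes are made up, and it assumes the numpy/theano imports used above). Note that dim_hidden must be at least 15 here, because Sparse(num_init=15) places 15 nonzero entries per unit of the recurrent weight matrix:

rnn = TextRNN(dim_in=5, dim_hidden=16, dim_out=2)
x = tensor.tensor3('x')  # (time, batch, dim_in)
y = rnn.run(x)
f = theano.function([x], y)
out = f(numpy.ones((10, 1, 5), dtype=theano.config.floatX))
print(out.shape)  # (time, batch, dim_out) == (10, 1, 2)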
Example 12: LanguageModelToy
class LanguageModelToy(Initializable):
    """
    This takes the word embeddings from CompositionalLayerToyWithTables and creates
    sentence embeddings.

    Input is a 3d tensor with dimensions (num_words, num_subwords, batch_size) and
    a 3d mask tensor of size (num_words, num_subwords, batch_size).

    All hidden state sizes are the same as the subword embedding size.

    This returns a 3d tensor with dimensions (num_words = num RNN states, batch_size,
    sentence_embedding_size).
    """
    def __init__(self, batch_size, num_subwords, num_words, subword_embedding_size, input_vocab_size,
                 subword_RNN_hidden_state_size, LM_RNN_hidden_state_size, **kwargs):
        super(LanguageModelToy, self).__init__(**kwargs)
        self.batch_size = batch_size
        self.num_subwords = num_subwords  # number of subwords which make up a word
        self.num_words = num_words  # number of words in the sentence
        self.subword_embedding_size = subword_embedding_size
        self.input_vocab_size = input_vocab_size
        self.subword_RNN_hidden_state_size = subword_RNN_hidden_state_size
        self.LM_RNN_hidden_state_size = LM_RNN_hidden_state_size

        self.compositional_layer = CompositionalLayerToyWithTables(
            self.batch_size, self.num_subwords, self.num_words,
            self.subword_embedding_size, self.input_vocab_size,
            self.subword_RNN_hidden_state_size, name='compositional_layer')

        # has one RNN which reads the word embeddings into a sentence embedding
        self.language_model_RNN = SimpleRecurrent(
            dim=self.LM_RNN_hidden_state_size, activation=Identity(), name='language_model_RNN',
            weights_init=Identity_init())

        self.children = [self.compositional_layer, self.language_model_RNN]

    @application(inputs=['subword_id_input_', 'subword_id_input_mask_'],
                 outputs=['sentence_embeddings', 'sentence_embeddings_mask'])
    def apply(self, subword_id_input_, subword_id_input_mask_):
        """
        subword_id_input_ is a 3d tensor of shape = (num_words, num_subwords, batch_size).
        It is expected as a dtype=uint16 or equivalent.

        subword_id_input_mask_ is a 3d tensor of shape = (num_words, num_subwords, batch_size).
        It is expected as a dtype=uint8 or equivalent and has binary values of 1 where there
        is data and 0 otherwise.

        Returned is a 3d tensor of size (num_words = num RNN states, batch_size,
        sentence_embedding_size).

        Also returned is a 1d tensor of size (batch_size) describing whether each sentence
        in the batch is valid or empty.
        """
        word_embeddings, word_embeddings_mask = self.compositional_layer.apply(
            subword_id_input_, subword_id_input_mask_)
        sentence_embeddings = self.language_model_RNN.apply(
            word_embeddings, mask=word_embeddings_mask)
        sentence_embeddings_mask = word_embeddings_mask.max(axis=0).T

        return sentence_embeddings, sentence_embeddings_mask
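A hypothetical driver for LanguageModelToy, under the same assumptions as the sketch after Example 9 (an assumed Identity_init helper and made-up sizes; per the docstring above, all hidden sizes are set equal to the subword embedding size):

num_words, num_subwords, batch_size, dim = 5, 4, 2, 3
lm = LanguageModelToy(batch_size, num_subwords, num_words,
                      subword_embedding_size=dim, input_vocab_size=100,
                      subword_RNN_hidden_state_size=dim,
                      LM_RNN_hidden_state_size=dim)
lm.initialize()

ids = tensor.tensor3('subword_id_input_', dtype='uint16')
mask = tensor.tensor3('subword_id_input_mask_', dtype='uint8')
sent, sent_mask = lm.apply(ids, mask)
f = theano.function([ids, mask], [sent, sent_mask])

ids_val = numpy.zeros((num_words, num_subwords, batch_size), dtype='uint16')
mask_val = numpy.ones((num_words, num_subwords, batch_size), dtype='uint8')
sent_val, sent_mask_val = f(ids_val, mask_val)
print(sent_val.shape)       # (num_words, batch_size, LM_RNN_hidden_state_size)
print(sent_mask_val.shape)  # (batch_size,)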
Example 13: example
def example():
    """Simple recurrent example. Taken from:
    https://github.com/mdda/pycon.sg-2015_deep-learning/blob/master/ipynb/blocks-recurrent-docs.ipynb
    """
    x = tensor.tensor3('x')
    rnn = SimpleRecurrent(dim=3, activation=Identity(),
                          weights_init=initialization.Identity())
    rnn.initialize()
    h = rnn.apply(x)
    f = theano.function([x], h)
    print(f(np.ones((3, 1, 3), dtype=theano.config.floatX)))

    doubler = Linear(
        input_dim=3, output_dim=3, weights_init=initialization.Identity(2),
        biases_init=initialization.Constant(0))
    doubler.initialize()
    h_doubler = rnn.apply(doubler.apply(x))
    f = theano.function([x], h_doubler)
    print(f(np.ones((3, 1, 3), dtype=theano.config.floatX)))

    # Initial state
    h0 = tensor.matrix('h0')
    h = rnn.apply(inputs=x, states=h0)
    f = theano.function([x, h0], h)
    print(f(np.ones((3, 1, 3), dtype=theano.config.floatX),
            np.ones((1, 3), dtype=theano.config.floatX)))
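Since the recurrent weights are the identity matrix and the activation is Identity, the recurrence reduces to h_t = h_{t-1} + x_t, so the three print calls should show running sums: (1, 2, 3) per feature for the plain RNN, (2, 4, 6) once the doubler scales the inputs by two, and (2, 3, 4) when the initial state is all ones.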
Example 14: setUp
def setUp(self):
    self.bidir = Bidirectional(weights_init=Orthogonal(),
                               prototype=SimpleRecurrent(
                                   dim=3, activation=Tanh()))
    self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
                                  activation=Tanh(), seed=1)
    self.bidir.allocate()
    self.simple.initialize()
    self.bidir.children[0].parameters[0].set_value(
        self.simple.parameters[0].get_value())
    self.bidir.children[1].parameters[0].set_value(
        self.simple.parameters[0].get_value())

    self.x_val = 0.1 * numpy.asarray(
        list(itertools.permutations(range(4))),
        dtype=theano.config.floatX)
    self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
                  self.x_val[..., None])
    self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
    self.mask_val[12:24, 3] = 0
Example 15: __init__
def __init__(self, batch_size, num_subwords, num_words, subword_embedding_size, input_vocab_size,
             subword_RNN_hidden_state_size, LM_RNN_hidden_state_size, **kwargs):
    super(LanguageModelToy, self).__init__(**kwargs)
    self.batch_size = batch_size
    self.num_subwords = num_subwords  # number of subwords which make up a word
    self.num_words = num_words  # number of words in the sentence
    self.subword_embedding_size = subword_embedding_size
    self.input_vocab_size = input_vocab_size
    self.subword_RNN_hidden_state_size = subword_RNN_hidden_state_size
    self.LM_RNN_hidden_state_size = LM_RNN_hidden_state_size

    self.compositional_layer = CompositionalLayerToyWithTables(
        self.batch_size, self.num_subwords, self.num_words,
        self.subword_embedding_size, self.input_vocab_size,
        self.subword_RNN_hidden_state_size, name='compositional_layer')

    # has one RNN which reads the word embeddings into a sentence embedding
    self.language_model_RNN = SimpleRecurrent(
        dim=self.LM_RNN_hidden_state_size, activation=Identity(), name='language_model_RNN',
        weights_init=Identity_init())

    self.children = [self.compositional_layer, self.language_model_RNN]