This article collects typical usage examples of the Python class blocks.bricks.recurrent.GatedRecurrent. If you have been wondering what GatedRecurrent does and how to use it, the curated examples below should help.
The following 15 code examples of the GatedRecurrent class are ordered by popularity.
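The snippets are excerpts from larger projects, so they assume imports roughly like the following (a sketch only: exact module paths can vary across Blocks versions, and names such as GatedRecurrentFast, ChainIterator or GroundhogModel are project-specific rather than part of Blocks):

import numpy as np
import theano
from theano import tensor

from blocks import initialization
from blocks.bricks import Initializable, Linear, Tanh
from blocks.bricks.base import application
from blocks.bricks.lookup import LookupTable
from blocks.bricks.parallel import Fork
from blocks.bricks.recurrent import BaseRecurrent, GatedRecurrent, recurrent
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal
from blocks.utils import dict_union
from toolz import merge  # used as `merge` in the encoder examples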
Example 1: gru_layer

def gru_layer(dim, h, n):
    fork = Fork(output_names=['linear' + str(n), 'gates' + str(n)],
                name='fork' + str(n), input_dim=dim, output_dims=[dim, dim * 2])
    gru = GatedRecurrent(dim=dim, name='gru' + str(n))
    initialize([fork, gru])  # project-level helper, see the sketch below
    linear, gates = fork.apply(h)
    return gru.apply(linear, gates)
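The initialize() call above is not part of Blocks; it is a helper defined elsewhere in the originating project. A minimal stand-in might look like this (the initialization schemes are an assumption):

def initialize(bricks, weights_init=IsotropicGaussian(0.01),
               biases_init=Constant(0)):
    # Hypothetical helper: assign schemes to each brick, then initialize it.
    for brick in bricks:
        brick.weights_init = weights_init
        brick.biases_init = biases_init
        brick.initialize()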
Example 2: example2

def example2():
    """GRU"""
    x = tensor.tensor3('x')
    dim = 3

    fork = Fork(input_dim=dim, output_dims=[dim, dim * 2], name='fork',
                output_names=['linear', 'gates'],
                weights_init=initialization.Identity(), biases_init=Constant(0))
    gru = GatedRecurrent(dim=dim, weights_init=initialization.Identity(),
                         biases_init=Constant(0))
    fork.initialize()
    gru.initialize()

    linear, gate_inputs = fork.apply(x)
    h = gru.apply(linear, gate_inputs)
    f = theano.function([x], h)
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX)))

    # Same GRU, but the input is first passed through a Linear brick whose
    # identity weights are scaled by 2, doubling the input.
    doubler = Linear(
        input_dim=dim, output_dim=dim, weights_init=initialization.Identity(2),
        biases_init=initialization.Constant(0))
    doubler.initialize()

    lin, gate = fork.apply(doubler.apply(x))
    h_doubler = gru.apply(lin, gate)
    f = theano.function([x], h_doubler)
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX)))
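Both theano.function calls are fed a ones tensor of shape (3, 1, 3), i.e. three time steps of a single batch element: GatedRecurrent.apply iterates over the leading (time) axis and returns the hidden state at every step.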
Example 3: setUp

def setUp(self):
    self.gated = GatedRecurrent(
        dim=3, activation=Tanh(),
        gate_activation=Tanh(), weights_init=Constant(2))
    self.gated.initialize()
    self.reset_only = GatedRecurrent(
        dim=3, activation=Tanh(),
        gate_activation=Tanh(),
        weights_init=IsotropicGaussian(), seed=1)
    self.reset_only.initialize()
Example 4: gru_layer

def gru_layer(dim, h, n):
    fork = Fork(
        output_names=["linear" + str(n), "gates" + str(n)],
        name="fork" + str(n),
        input_dim=dim,
        output_dims=[dim, dim * 2],
    )
    gru = GatedRecurrent(dim=dim, name="gru" + str(n))
    initialize([fork, gru])
    linear, gates = fork.apply(h)
    return gru.apply(linear, gates)
Example 5: __init__

def __init__(self, vocab_size, embedding_dim, state_dim, **kwargs):
    super(Encoder, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.state_dim = state_dim

    self.lookup = LookupTable(name='embeddings')
    self.GRU = GatedRecurrent(activation=Tanh(), dim=state_dim)

    self.children = [self.lookup, self.GRU]
Example 6: InnerRecurrent

class InnerRecurrent(BaseRecurrent, Initializable):
    def __init__(self, inner_input_dim, outer_input_dim, inner_dim, **kwargs):
        self.inner_gru = GatedRecurrent(dim=inner_dim, name='inner_gru')
        self.inner_input_fork = Fork(
            output_names=[name for name in self.inner_gru.apply.sequences
                          if 'mask' not in name],
            input_dim=inner_input_dim, name='inner_input_fork')
        self.outer_input_fork = Fork(
            output_names=[name for name in self.inner_gru.apply.sequences
                          if 'mask' not in name],
            input_dim=outer_input_dim, name='inner_outer_fork')
        super(InnerRecurrent, self).__init__(**kwargs)

        self.children = [
            self.inner_gru, self.inner_input_fork, self.outer_input_fork]

    def _push_allocation_config(self):
        self.inner_input_fork.output_dims = self.inner_gru.get_dims(
            self.inner_input_fork.output_names)
        self.outer_input_fork.output_dims = self.inner_gru.get_dims(
            self.outer_input_fork.output_names)

    @recurrent(sequences=['inner_inputs'], states=['states'],
               contexts=['outer_inputs'], outputs=['states'])
    def apply(self, inner_inputs, states, outer_inputs):
        # Fork both the per-step input and the (fixed) context, then sum
        # the forked contributions for each GRU input.
        forked_inputs = self.inner_input_fork.apply(inner_inputs, as_dict=True)
        forked_states = self.outer_input_fork.apply(outer_inputs, as_dict=True)

        gru_inputs = {key: forked_inputs[key] + forked_states[key]
                      for key in forked_inputs.keys()}

        new_states = self.inner_gru.apply(
            iterate=False,
            **dict_union(gru_inputs, {'states': states}))
        return new_states

    def get_dim(self, name):
        if name == 'states':
            return self.inner_gru.get_dim(name)
        else:
            raise AttributeError(name)
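Because outer_inputs is declared as a context, it is passed unchanged to every time step while inner_inputs is iterated over. A minimal usage sketch (the dimensions and variable names are illustrative, not from the original project):

inner = tensor.tensor3('inner')  # (time, batch, inner_input_dim)
outer = tensor.matrix('outer')   # (batch, outer_input_dim), fixed over time
rnn = InnerRecurrent(inner_input_dim=4, outer_input_dim=5, inner_dim=3,
                     weights_init=IsotropicGaussian(0.01),
                     biases_init=Constant(0))
rnn.initialize()
states = rnn.apply(inner_inputs=inner, outer_inputs=outer)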
Example 7: __init__

def __init__(self, vocab_size, embedding_dim, state_dim, reverse=True,
             **kwargs):
    super(Encoder, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.state_dim = state_dim
    self.reverse = reverse

    self.lookup = LookupTable(name='embeddings')
    self.transition = GatedRecurrent(Tanh(), name='encoder_transition')
    self.fork = Fork([name for name in self.transition.apply.sequences
                      if name != 'mask'], prototype=Linear())

    self.children = [self.lookup, self.transition, self.fork]
Example 8: __init__

def __init__(self, blockid, vocab_size, embedding_dim, state_dim, **kwargs):
    super(Encoder, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.state_dim = state_dim
    self.blockid = blockid

    self.lookup = LookupTable(name='embeddings_' + self.blockid)
    self.gru = GatedRecurrent(activation=Tanh(), dim=state_dim,
                              name='GatedRNN' + self.blockid)
    self.fwd_fork = Fork(
        [name for name in self.gru.apply.sequences if name != 'mask'],
        prototype=Linear(), name='fwd_fork_' + self.blockid)

    self.children = [self.lookup, self.gru, self.fwd_fork]
Example 9: Encoder

class Encoder(Initializable):
    """Encoder of the RNNsearch model."""

    def __init__(self, blockid, vocab_size, embedding_dim, state_dim, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.blockid = blockid

        self.lookup = LookupTable(name='embeddings_' + self.blockid)
        self.gru = GatedRecurrent(activation=Tanh(), dim=state_dim,
                                  name='GatedRNN' + self.blockid)
        self.fwd_fork = Fork(
            [name for name in self.gru.apply.sequences if name != 'mask'],
            prototype=Linear(), name='fwd_fork_' + self.blockid)

        self.children = [self.lookup, self.gru, self.fwd_fork]

    def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim
        self.fwd_fork.input_dim = self.embedding_dim
        self.fwd_fork.output_dims = [self.gru.get_dim(name)
                                     for name in self.fwd_fork.output_names]

    @application(inputs=['source_sentence', 'source_sentence_mask'],
                 outputs=['representation'])
    def apply(self, source_sentence, source_sentence_mask):
        # Put time as the first dimension.
        source_sentence = source_sentence.T
        source_sentence_mask = source_sentence_mask.T

        embeddings = self.lookup.apply(source_sentence)
        grupara = merge(self.fwd_fork.apply(embeddings, as_dict=True),
                        {'mask': source_sentence_mask})
        representation = self.gru.apply(**grupara)
        return representation
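The merge call here is presumably toolz.merge (as in the blocks-examples machine-translation code): it combines the forked GRU inputs and the mask into one keyword-argument dictionary for gru.apply.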
Example 10: __init__

def __init__(self, hidden_dim, activation=None, gate_activation=None,
             state_to_state_init=None, state_to_update_init=None,
             state_to_reset_init=None,
             input_to_state_transform=None, input_to_update_transform=None,
             input_to_reset_transform=None,
             **kwargs):
    super(GatedRecurrentFull, self).__init__(**kwargs)
    self.hidden_dim = hidden_dim

    self.state_to_state_init = state_to_state_init
    self.state_to_update_init = state_to_update_init
    self.state_to_reset_init = state_to_reset_init

    self.input_to_state_transform = input_to_state_transform
    self.input_to_update_transform = input_to_update_transform
    self.input_to_reset_transform = input_to_reset_transform
    self.input_to_state_transform.name += "_input_to_state_transform"
    self.input_to_update_transform.name += "_input_to_update_transform"
    self.input_to_reset_transform.name += "_input_to_reset_transform"

    self.use_mine = True
    if self.use_mine:
        self.rnn = GatedRecurrentFast(
            weights_init=Constant(np.nan),
            dim=self.hidden_dim,
            activation=activation,
            gate_activation=gate_activation)
    else:
        self.rnn = GatedRecurrent(
            weights_init=Constant(np.nan),
            dim=self.hidden_dim,
            activation=activation,
            gate_activation=gate_activation)

    self.children = [self.rnn, self.input_to_state_transform,
                     self.input_to_update_transform,
                     self.input_to_reset_transform]
    self.children.extend(self.rnn.children)
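GatedRecurrentFast is not part of Blocks; it appears to be a project-local variant of GatedRecurrent. Initializing the weights to NaN also looks deliberate: the surrounding brick presumably overwrites them with state_to_state_init and friends, so any parameter that escapes re-initialization poisons the computation and fails loudly instead of training silently with wrong weights.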
Example 11: GatedRecurrentWithContext

class GatedRecurrentWithContext(Initializable):
    def __init__(self, *args, **kwargs):
        self.gated_recurrent = GatedRecurrent(*args, **kwargs)
        self.children = [self.gated_recurrent]

    @application(states=['states'], outputs=['states'],
                 contexts=['readout_context', 'transition_context',
                           'update_context', 'reset_context'])
    def apply(self, transition_context, update_context, reset_context,
              *args, **kwargs):
        kwargs['inputs'] += transition_context
        kwargs['update_inputs'] += update_context
        kwargs['reset_inputs'] += reset_context
        # readout_context was only added for the Readout brick; discard it.
        kwargs.pop('readout_context')
        return self.gated_recurrent.apply(*args, **kwargs)

    def get_dim(self, name):
        if name in ['readout_context', 'transition_context',
                    'update_context', 'reset_context']:
            return self.dim
        return self.gated_recurrent.get_dim(name)

    def __getattr__(self, name):
        if name == 'gated_recurrent':
            raise AttributeError
        return getattr(self.gated_recurrent, name)

    @apply.property('sequences')
    def apply_inputs(self):
        sequences = ['mask', 'inputs']
        if self.use_update_gate:
            sequences.append('update_inputs')
        if self.use_reset_gate:
            sequences.append('reset_inputs')
        return sequences
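This wrapper targets an older Blocks API in which GatedRecurrent exposed separate update_inputs/reset_inputs sequences and use_update_gate/use_reset_gate flags; in later releases the two gates are fed as a single gate_inputs sequence of width 2 * dim, so the snippet will not run unmodified against them.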
Example 12: __init__

def __init__(self, inner_input_dim, outer_input_dim, inner_dim, **kwargs):
    self.inner_gru = GatedRecurrent(dim=inner_dim, name='inner_gru')
    self.inner_input_fork = Fork(
        output_names=[name for name in self.inner_gru.apply.sequences
                      if 'mask' not in name],
        input_dim=inner_input_dim, name='inner_input_fork')
    self.outer_input_fork = Fork(
        output_names=[name for name in self.inner_gru.apply.sequences
                      if 'mask' not in name],
        input_dim=outer_input_dim, name='inner_outer_fork')
    super(InnerRecurrent, self).__init__(**kwargs)

    self.children = [
        self.inner_gru, self.inner_input_fork, self.outer_input_fork]
Example 13: Encoder

class Encoder(Initializable):
    def __init__(self, vocab_size, embedding_dim, state_dim, reverse=True,
                 **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.reverse = reverse

        self.lookup = LookupTable(name='embeddings')
        self.transition = GatedRecurrent(Tanh(), name='encoder_transition')
        self.fork = Fork([name for name in self.transition.apply.sequences
                          if name != 'mask'], prototype=Linear())

        self.children = [self.lookup, self.transition, self.fork]

    def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim
        self.transition.dim = self.state_dim
        self.fork.input_dim = self.embedding_dim
        self.fork.output_dims = [self.state_dim
                                 for _ in self.fork.output_names]

    @application(inputs=['source_sentence', 'source_sentence_mask'],
                 outputs=['representation'])
    def apply(self, source_sentence, source_sentence_mask):
        # Put time as the first dimension; optionally read right to left.
        source_sentence = source_sentence.dimshuffle(1, 0)
        source_sentence_mask = source_sentence_mask.T
        if self.reverse:
            source_sentence = source_sentence[::-1]
            source_sentence_mask = source_sentence_mask[::-1]

        embeddings = self.lookup.apply(source_sentence)
        representation = self.transition.apply(**merge(
            self.fork.apply(embeddings, as_dict=True),
            {'mask': source_sentence_mask}))
        return representation[-1]
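Unlike Example 9, which returns the whole state sequence, this encoder returns only representation[-1], the hidden state after the final step; with reverse=True the sentence is processed right to left, so that final state corresponds to the first word.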
Example 14: Encoder

class Encoder(Initializable):
    def __init__(self, vocab_size, embedding_dim, state_dim, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim

        self.lookup = LookupTable(name='embeddings')
        self.GRU = GatedRecurrent(activation=Tanh(), dim=state_dim)

        self.children = [self.lookup, self.GRU]

    def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim

    @application(inputs=['source_sentence', 'source_sentence_mask'],
                 outputs=['representation'])
    def apply(self, source_sentence, source_sentence_mask):
        # Put time as the first dimension.
        source_sentence = source_sentence.T
        source_sentence_mask = source_sentence_mask.T

        embeddings = self.lookup.apply(source_sentence)
        representation = self.GRU.apply(embeddings, embeddings)
        return representation
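Note that embeddings is passed both as the inputs and as the gate inputs, and the mask is never used. In current Blocks the gate inputs must have dimension 2 * state_dim, so this snippet presumably relies on an older API or on dimensions that happen to line up in the original project.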
Example 15: main

def main():
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

    parser = argparse.ArgumentParser(
        "Case study of generating a Markov chain with RNN.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "mode", choices=["train", "sample"],
        help="The mode to run. Use `train` to train a new model"
             " and `sample` to sample a sequence generated by an"
             " existing one.")
    parser.add_argument(
        "prefix", default="sine",
        help="The prefix for model, timing and state files")
    parser.add_argument(
        "--steps", type=int, default=100,
        help="Number of steps to plot")
    args = parser.parse_args()

    dim = 10
    num_states = ChainIterator.num_states
    feedback_dim = 8

    transition = GatedRecurrent(name="transition", activation=Tanh(), dim=dim)
    generator = SequenceGenerator(
        LinearReadout(readout_dim=num_states, source_names=["states"],
                      emitter=SoftmaxEmitter(name="emitter"),
                      feedbacker=LookupFeedback(
                          num_states, feedback_dim, name='feedback'),
                      name="readout"),
        transition,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
        name="generator")
    generator.allocate()
    logger.debug("Parameters:\n" +
                 pprint.pformat(
                     [(key, value.get_value().shape) for key, value
                      in Selector(generator).get_params().items()],
                     width=120))

    if args.mode == "train":
        rng = numpy.random.RandomState(1)
        batch_size = 50

        generator.push_initialization_config()
        transition.weights_init = Orthogonal()
        generator.initialize()

        logger.debug("transition.weights_init={}".format(
            transition.weights_init))

        cost = generator.cost(tensor.lmatrix('x')).sum()
        gh_model = GroundhogModel(generator, cost)
        state = GroundhogState(args.prefix, batch_size,
                               learning_rate=0.0001).as_dict()
        data = ChainIterator(rng, 100, batch_size)
        trainer = SGD(gh_model, state, data)
        main_loop = MainLoop(data, None, None, gh_model, trainer, state, None)
        main_loop.main()
    elif args.mode == "sample":
        load_params(generator, args.prefix + "model.npz")

        sample = ComputationGraph(generator.generate(
            n_steps=args.steps, batch_size=1, iterate=True)).function()
        states, outputs, costs = [data[:, 0] for data in sample()]

        numpy.set_printoptions(precision=3, suppress=True)
        print("Generation cost:\n{}".format(costs.sum()))

        # Compare empirical state frequencies with the chain's equilibrium.
        freqs = numpy.bincount(outputs).astype(floatX)
        freqs /= freqs.sum()
        print("Frequencies:\n {} vs {}".format(freqs,
                                               ChainIterator.equilibrium))

        # Compare empirical transition frequencies with the true matrix.
        trans_freqs = numpy.zeros((num_states, num_states), dtype=floatX)
        for a, b in zip(outputs, outputs[1:]):
            trans_freqs[a, b] += 1
        trans_freqs /= trans_freqs.sum(axis=1)[:, None]
        print("Transition frequencies:\n{}\nvs\n{}".format(
            trans_freqs, ChainIterator.trans_prob))
    else:
        assert False
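GroundhogModel, GroundhogState, SGD and MainLoop in this last example appear to come from the early Blocks-Groundhog compatibility layer, and ChainIterator and load_params from the accompanying example code, so the snippet reflects an old snapshot of the library rather than the current API.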