This article collects typical usage examples of the Python class blocks.bricks.parallel.Fork. If you are wondering what the Fork class does, how to use it, or are looking for concrete examples, the curated class examples below may help.
The following 15 code examples of the Fork class are shown, sorted by popularity by default.
Example 1: gru_layer
def gru_layer(dim, h, n):
    fork = Fork(output_names=['linear' + str(n), 'gates' + str(n)],
                name='fork' + str(n), input_dim=dim, output_dims=[dim, dim * 2])
    gru = GatedRecurrent(dim=dim, name='gru' + str(n))
    initialize([fork, gru])
    linear, gates = fork.apply(h)
    return gru.apply(linear, gates)

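The initialize helper used above is not shown in this example. Below is a minimal usage sketch with a plausible stand-in for it (an assumption, not the original project's helper), stacking two GRU layers on a common input.

# Illustrative usage of gru_layer (defined above); dimensions are arbitrary.
import theano
from theano import tensor
from blocks.bricks.parallel import Fork                 # used inside gru_layer
from blocks.bricks.recurrent import GatedRecurrent      # used inside gru_layer
from blocks.initialization import Constant, IsotropicGaussian

def initialize(bricks):
    # Assumed stand-in for the project's helper: simple scheme for every brick.
    for brick in bricks:
        brick.weights_init = IsotropicGaussian(0.01)
        brick.biases_init = Constant(0)
        brick.initialize()

dim = 4
x = tensor.tensor3('x')          # (time, batch, dim)
h = gru_layer(dim, x, 0)         # first GRU layer
h = gru_layer(dim, h, 1)         # second GRU layer stacked on the first
f = theano.function([x], h)
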
Example 2: __init__
def __init__(self, dimension, alphabet_size, **kwargs):
    super(WordReverser, self).__init__(**kwargs)
    encoder = Bidirectional(
        SimpleRecurrent(dim=dimension, activation=Tanh()))
    fork = Fork([name for name in encoder.prototype.apply.sequences
                 if name != 'mask'])
    fork.input_dim = dimension
    fork.output_dims = [encoder.prototype.get_dim(name)
                        for name in fork.input_names]
    lookup = LookupTable(alphabet_size, dimension)
    transition = SimpleRecurrent(
        activation=Tanh(),
        dim=dimension, name="transition")
    attention = SequenceContentAttention(
        state_names=transition.apply.states,
        attended_dim=2 * dimension, match_dim=dimension, name="attention")
    readout = Readout(
        readout_dim=alphabet_size,
        source_names=[transition.apply.states[0],
                      attention.take_glimpses.outputs[0]],
        emitter=SoftmaxEmitter(name="emitter"),
        feedback_brick=LookupFeedback(alphabet_size, dimension),
        name="readout")
    generator = SequenceGenerator(
        readout=readout, transition=transition, attention=attention,
        name="generator")
    self.lookup = lookup
    self.fork = fork
    self.encoder = encoder
    self.generator = generator
    self.children = [lookup, fork, encoder, generator]

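Since the encoder here is Bidirectional(SimpleRecurrent(...)), the only non-mask sequence is 'inputs', so this Fork reduces to a single Linear projection. A quick illustrative check (the imports and dimension are assumptions, not part of the original example):

from blocks.bricks import Tanh
from blocks.bricks.recurrent import Bidirectional, SimpleRecurrent

encoder = Bidirectional(SimpleRecurrent(dim=100, activation=Tanh()))
# The fork's output names and dimensions as computed in the constructor above:
print([name for name in encoder.prototype.apply.sequences if name != 'mask'])
# ['inputs']
print([encoder.prototype.get_dim(name) for name in ['inputs']])
# [100]
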
Example 3: example2
def example2():
    """GRU"""
    x = tensor.tensor3('x')
    dim = 3
    fork = Fork(input_dim=dim, output_dims=[dim, dim * 2], name='fork',
                output_names=["linear", "gates"],
                weights_init=initialization.Identity(),
                biases_init=Constant(0))
    gru = GatedRecurrent(dim=dim, weights_init=initialization.Identity(),
                         biases_init=Constant(0))
    fork.initialize()
    gru.initialize()
    linear, gate_inputs = fork.apply(x)
    h = gru.apply(linear, gate_inputs)
    f = theano.function([x], h)
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX)))

    doubler = Linear(
        input_dim=dim, output_dim=dim,
        weights_init=initialization.Identity(2),
        biases_init=initialization.Constant(0))
    doubler.initialize()
    lin, gate = fork.apply(doubler.apply(x))
    h_doubler = gru.apply(lin, gate)
    f = theano.function([x], h_doubler)
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX)))

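For reference, a plausible set of imports under which example2 runs as written (an assumption; the original listing does not show its imports):

import numpy as np
import theano
from theano import tensor
from blocks import initialization
from blocks.bricks import Linear
from blocks.bricks.parallel import Fork
from blocks.bricks.recurrent import GatedRecurrent
from blocks.initialization import Constant
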
Example 4: __init__
def __init__(self,
             vocab_size,
             embedding_dim,
             n_layers,
             skip_connections,
             state_dim,
             **kwargs):
    """Sole constructor.

    Args:
        vocab_size (int): Source vocabulary size
        embedding_dim (int): Dimension of the embedding layer
        n_layers (int): Number of layers. Layers share the same
                        weight matrices.
        skip_connections (bool): Skip connections connect the
                                 source word embeddings directly
                                 with deeper layers to propagate
                                 the gradient more efficiently
        state_dim (int): Number of hidden units in the recurrent
                         layers.
    """
    super(BidirectionalEncoder, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.n_layers = n_layers
    self.state_dim = state_dim
    self.skip_connections = skip_connections
    self.lookup = LookupTable(name='embeddings')
    if self.n_layers >= 1:
        self.bidir = BidirectionalWMT15(
            GatedRecurrent(activation=Tanh(), dim=state_dim))
        self.fwd_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='fwd_fork')
        self.back_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='back_fork')
        self.children = [self.lookup, self.bidir,
                         self.fwd_fork, self.back_fork]
        if self.n_layers > 1:  # Deep encoder
            self.mid_fwd_fork = Fork(
                [name for name in self.bidir.prototype.apply.sequences
                 if name != 'mask'], prototype=Linear(), name='mid_fwd_fork')
            self.mid_back_fork = Fork(
                [name for name in self.bidir.prototype.apply.sequences
                 if name != 'mask'], prototype=Linear(), name='mid_back_fork')
            self.children.append(self.mid_fwd_fork)
            self.children.append(self.mid_back_fork)
    elif self.n_layers == 0:
        self.embedding_dim = state_dim * 2
        self.children = [self.lookup]
    else:
        logging.fatal("Number of encoder layers must be non-negative")

Example 5: gru_layer
def gru_layer(dim, h, n):
    fork = Fork(
        output_names=["linear" + str(n), "gates" + str(n)],
        name="fork" + str(n),
        input_dim=dim,
        output_dims=[dim, dim * 2],
    )
    gru = GatedRecurrent(dim=dim, name="gru" + str(n))
    initialize([fork, gru])
    linear, gates = fork.apply(h)
    return gru.apply(linear, gates)

Example 6: __init__
def __init__(self, vocab_size, embedding_dim, state_dim, **kwargs):
    super(BidirectionalEncoder, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.state_dim = state_dim
    self.lookup = LookupTable(name='embeddings')
    self.bidir = BidirectionalWMT15(
        GatedRecurrent(activation=Tanh(), dim=state_dim))
    self.fwd_fork = Fork(
        [name for name in self.bidir.prototype.apply.sequences
         if name != 'mask'], prototype=Linear(), name='fwd_fork')
    self.back_fork = Fork(
        [name for name in self.bidir.prototype.apply.sequences
         if name != 'mask'], prototype=Linear(), name='back_fork')
    self.children = [self.lookup, self.bidir, self.fwd_fork, self.back_fork]

Example 7: BidirectionalEncoder
class BidirectionalEncoder(Initializable):
    """Encoder of RNNsearch model."""

    def __init__(self, vocab_size, embedding_dim, state_dim, **kwargs):
        super(BidirectionalEncoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.lookup = LookupTable(name='embeddings')
        self.bidir = BidirectionalWMT15(
            GatedRecurrent(activation=Tanh(), dim=state_dim))
        self.fwd_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='fwd_fork')
        self.back_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='back_fork')
        self.children = [self.lookup, self.bidir,
                         self.fwd_fork, self.back_fork]

    def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim
        self.fwd_fork.input_dim = self.embedding_dim
        self.fwd_fork.output_dims = [self.bidir.children[0].get_dim(name)
                                     for name in self.fwd_fork.output_names]
        self.back_fork.input_dim = self.embedding_dim
        self.back_fork.output_dims = [self.bidir.children[1].get_dim(name)
                                      for name in self.back_fork.output_names]

    @application(inputs=['source_sentence', 'source_sentence_mask'],
                 outputs=['representation'])
    def apply(self, source_sentence, source_sentence_mask):
        # Time as first dimension
        source_sentence = source_sentence.T
        source_sentence_mask = source_sentence_mask.T
        embeddings = self.lookup.apply(source_sentence)
        representation = self.bidir.apply(
            merge(self.fwd_fork.apply(embeddings, as_dict=True),
                  {'mask': source_sentence_mask}),
            merge(self.back_fork.apply(embeddings, as_dict=True),
                  {'mask': source_sentence_mask})
        )
        return representation

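A minimal usage sketch for this encoder, assuming the standard Blocks/Theano stack; the dimensions and initialization schemes below are illustrative assumptions, not taken from the original model configuration.

# Hypothetical usage sketch: build and initialize the encoder, then apply it
# to symbolic batches of token indices and their mask.
from theano import tensor
from blocks.initialization import Constant, IsotropicGaussian

encoder = BidirectionalEncoder(vocab_size=30000, embedding_dim=620,
                               state_dim=1000)
encoder.weights_init = IsotropicGaussian(0.01)
encoder.biases_init = Constant(0)
encoder.initialize()

source_sentence = tensor.lmatrix('source_sentence')        # (batch, time)
source_sentence_mask = tensor.matrix('source_sentence_mask')
representation = encoder.apply(source_sentence, source_sentence_mask)
# representation has shape (time, batch, 2 * state_dim)
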
Example 8: __init__
def __init__(self, nvis, nhid, encoding_mlp, encoding_lstm, decoding_mlp,
             decoding_lstm, T=1, **kwargs):
    super(DRAW, self).__init__(**kwargs)
    self.nvis = nvis
    self.nhid = nhid
    self.T = T
    self.encoding_mlp = encoding_mlp
    self.encoding_mlp.name = 'encoder_mlp'
    for i, child in enumerate(self.encoding_mlp.children):
        child.name = '{}_{}'.format(self.encoding_mlp.name, i)
    self.encoding_lstm = encoding_lstm
    self.encoding_lstm.name = 'encoder_lstm'
    self.encoding_parameter_mapping = Fork(
        output_names=['mu_phi', 'log_sigma_phi'], prototype=Linear())
    self.decoding_mlp = decoding_mlp
    self.decoding_mlp.name = 'decoder_mlp'
    for i, child in enumerate(self.decoding_mlp.children):
        child.name = '{}_{}'.format(self.decoding_mlp.name, i)
    self.decoding_lstm = decoding_lstm
    self.decoding_lstm.name = 'decoder_lstm'
    self.decoding_parameter_mapping = Linear(name='mu_theta')
    self.prior_mu = tensor.zeros((self.nhid,))
    self.prior_mu.name = 'prior_mu'
    self.prior_log_sigma = tensor.zeros((self.nhid,))
    self.prior_log_sigma.name = 'prior_log_sigma'
    self.children = [self.encoding_mlp, self.encoding_lstm,
                     self.encoding_parameter_mapping,
                     self.decoding_mlp, self.decoding_lstm,
                     self.decoding_parameter_mapping]

Example 9: RecurrentWithFork
class RecurrentWithFork(Initializable):

    @lazy(allocation=['input_dim'])
    def __init__(self, recurrent, input_dim, **kwargs):
        super(RecurrentWithFork, self).__init__(**kwargs)
        self.recurrent = recurrent
        self.input_dim = input_dim
        self.fork = Fork(
            [name for name in self.recurrent.sequences
             if name != 'mask'],
            prototype=Linear())
        self.children = [recurrent.brick, self.fork]

    def _push_allocation_config(self):
        self.fork.input_dim = self.input_dim
        self.fork.output_dims = [self.recurrent.brick.get_dim(name)
                                 for name in self.fork.output_names]

    @application(inputs=['input_', 'mask'])
    def apply(self, input_, mask=None, **kwargs):
        return self.recurrent(
            mask=mask, **dict_union(self.fork.apply(input_, as_dict=True),
                                    kwargs))

    @apply.property('outputs')
    def apply_outputs(self):
        return self.recurrent.states

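Note that recurrent here is expected to be an application such as GatedRecurrent(...).apply rather than the brick itself, which is why recurrent.brick and recurrent.sequences are accessed. A usage sketch under that assumption (dimensions and initializations are illustrative):

# Hypothetical usage sketch: wrap a GRU transition so that a raw feature
# sequence is first projected by the Fork into 'inputs' and 'gate_inputs'.
from theano import tensor
from blocks.bricks import Tanh
from blocks.bricks.recurrent import GatedRecurrent
from blocks.initialization import Constant, IsotropicGaussian

rnn = RecurrentWithFork(GatedRecurrent(dim=100, activation=Tanh()).apply,
                        input_dim=40, name='rnn_with_fork')
rnn.weights_init = IsotropicGaussian(0.01)
rnn.biases_init = Constant(0)
rnn.initialize()

features = tensor.tensor3('features')   # (time, batch, 40)
states = rnn.apply(features)             # (time, batch, 100)
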
Example 10: __init__
def __init__(self, transition, input_dim, hidden_dim,
             rec_weights_init, ff_weights_init, biases_init, **kwargs):
    super(RecurrentWithFork, self).__init__(**kwargs)
    self.rec_weights_init = rec_weights_init
    self.ff_weights_init = ff_weights_init
    self.biases_init = biases_init
    self.input_dim = input_dim
    self.hidden_dim = hidden_dim
    self.transition = transition
    self.transition.dim = self.hidden_dim
    self.transition.weights_init = self.rec_weights_init
    self.transition.biases_init = self.biases_init
    self.fork = Fork(
        [name for name in self.transition.apply.sequences if name != 'mask'],
        prototype=Linear())
    self.fork.input_dim = self.input_dim
    self.fork.output_dims = [self.transition.apply.brick.get_dim(name)
                             for name in self.fork.output_names]
    self.fork.weights_init = self.ff_weights_init
    self.fork.biases_init = self.biases_init
    self.children = [transition, self.fork]

Example 11: __init__
def __init__(self, inner_input_dim, outer_input_dim, inner_dim, **kwargs):
    self.inner_gru = GatedRecurrent(dim=inner_dim, name='inner_gru')
    self.inner_input_fork = Fork(
        output_names=[name for name in self.inner_gru.apply.sequences
                      if 'mask' not in name],
        input_dim=inner_input_dim, name='inner_input_fork')
    self.outer_input_fork = Fork(
        output_names=[name for name in self.inner_gru.apply.sequences
                      if 'mask' not in name],
        input_dim=outer_input_dim, name='inner_outer_fork')
    super(InnerRecurrent, self).__init__(**kwargs)
    self.children = [
        self.inner_gru, self.inner_input_fork, self.outer_input_fork]

Example 12: __init__
def __init__(self, recurrent, input_dim, **kwargs):
    super(RecurrentWithFork, self).__init__(**kwargs)
    self.recurrent = recurrent
    self.input_dim = input_dim
    self.fork = Fork(
        [name for name in self.recurrent.sequences
         if name != 'mask'], prototype=Linear())
    self.children = [recurrent.brick, self.fork]

Example 13: BidirectionalEncoder
class BidirectionalEncoder(Initializable):
    """Bidirectional GRU encoder."""

    def __init__(self, embedding_dim, state_dim, **kwargs):
        super(BidirectionalEncoder, self).__init__(**kwargs)
        # Dimension of the word embeddings taken as input
        self.embedding_dim = embedding_dim
        # Hidden state dimension
        self.state_dim = state_dim
        # The bidir GRU
        self.bidir = BidirectionalFromDict(
            GatedRecurrent(activation=Tanh(), dim=state_dim))
        # Forks to administer the inputs of GRU gates
        self.fwd_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='fwd_fork')
        self.back_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='back_fork')
        self.children = [self.bidir,
                         self.fwd_fork, self.back_fork]

    def _push_allocation_config(self):
        self.fwd_fork.input_dim = self.embedding_dim
        self.fwd_fork.output_dims = [self.bidir.children[0].get_dim(name)
                                     for name in self.fwd_fork.output_names]
        self.back_fork.input_dim = self.embedding_dim
        self.back_fork.output_dims = [self.bidir.children[1].get_dim(name)
                                      for name in self.back_fork.output_names]

    @application(inputs=['source_sentence_tbf', 'source_sentence_mask_tb'],
                 outputs=['representation'])
    def apply(self, source_sentence_tbf, source_sentence_mask_tb=None):
        representation_tbf = self.bidir.apply(
            merge(self.fwd_fork.apply(source_sentence_tbf, as_dict=True),
                  {'mask': source_sentence_mask_tb}),
            merge(self.back_fork.apply(source_sentence_tbf, as_dict=True),
                  {'mask': source_sentence_mask_tb})
        )
        return representation_tbf

Example 14: InnerRecurrent
class InnerRecurrent(BaseRecurrent, Initializable):
    def __init__(self, inner_input_dim, outer_input_dim, inner_dim, **kwargs):
        self.inner_gru = GatedRecurrent(dim=inner_dim, name='inner_gru')
        self.inner_input_fork = Fork(
            output_names=[name for name in self.inner_gru.apply.sequences
                          if 'mask' not in name],
            input_dim=inner_input_dim, name='inner_input_fork')
        self.outer_input_fork = Fork(
            output_names=[name for name in self.inner_gru.apply.sequences
                          if 'mask' not in name],
            input_dim=outer_input_dim, name='inner_outer_fork')
        super(InnerRecurrent, self).__init__(**kwargs)
        self.children = [
            self.inner_gru, self.inner_input_fork, self.outer_input_fork]

    def _push_allocation_config(self):
        self.inner_input_fork.output_dims = self.inner_gru.get_dims(
            self.inner_input_fork.output_names)
        self.outer_input_fork.output_dims = self.inner_gru.get_dims(
            self.outer_input_fork.output_names)

    @recurrent(sequences=['inner_inputs'], states=['states'],
               contexts=['outer_inputs'], outputs=['states'])
    def apply(self, inner_inputs, states, outer_inputs):
        forked_inputs = self.inner_input_fork.apply(inner_inputs, as_dict=True)
        forked_states = self.outer_input_fork.apply(outer_inputs, as_dict=True)
        gru_inputs = {key: forked_inputs[key] + forked_states[key]
                      for key in forked_inputs.keys()}
        new_states = self.inner_gru.apply(
            iterate=False,
            **dict_union(gru_inputs, {'states': states}))
        return new_states  # new state for a single step (iterate=False)

    def get_dim(self, name):
        if name == 'states':
            return self.inner_gru.get_dim(name)
        raise AttributeError(name)

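A usage sketch for InnerRecurrent, assuming it is initialized like any other Initializable brick; the dimensions and variable names are illustrative assumptions:

# Hypothetical usage sketch: scan an inner sequence while conditioning every
# step on a fixed outer input (passed as a context).
from theano import tensor
from blocks.initialization import Constant, IsotropicGaussian

inner_rec = InnerRecurrent(inner_input_dim=6, outer_input_dim=8, inner_dim=10)
inner_rec.weights_init = IsotropicGaussian(0.01)
inner_rec.biases_init = Constant(0)
inner_rec.initialize()

inner_inputs = tensor.tensor3('inner_inputs')   # (time, batch, 6)
outer_inputs = tensor.matrix('outer_inputs')    # (batch, 8)
states = inner_rec.apply(inner_inputs=inner_inputs, outer_inputs=outer_inputs)
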
Example 15: RecurrentWithFork
class RecurrentWithFork(Initializable):

    @lazy(allocation=['input_dim'])
    def __init__(self, transition, input_dim, hidden_dim,
                 rec_weights_init, ff_weights_init, biases_init, **kwargs):
        super(RecurrentWithFork, self).__init__(**kwargs)
        self.rec_weights_init = rec_weights_init
        self.ff_weights_init = ff_weights_init
        self.biases_init = biases_init
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.transition = transition
        self.transition.dim = self.hidden_dim
        self.transition.weights_init = self.rec_weights_init
        self.transition.biases_init = self.biases_init
        self.fork = Fork(
            [name for name in self.transition.apply.sequences
             if name != 'mask'],
            prototype=Linear())
        self.fork.input_dim = self.input_dim
        self.fork.output_dims = [self.transition.apply.brick.get_dim(name)
                                 for name in self.fork.output_names]
        self.fork.weights_init = self.ff_weights_init
        self.fork.biases_init = self.biases_init
        self.children = [transition, self.fork]

    # def _push_allocation_config(self):
    #     # super(RecurrentWithFork, self)._push_allocation_config()
    #     self.transition.dim = self.hidden_dim
    #     self.fork.input_dim = self.input_dim
    #     self.fork.output_dims = [self.transition.apply.brick.get_dim(name)
    #                              for name in self.fork.output_names]

    # def _push_initialization_config(self):
    #     # super(RecurrentWithFork, self)._push_initialization_config()
    #     self.fork.weights_init = self.ff_weights_init
    #     self.fork.biases_init = self.biases_init
    #     self.transition.weights_init = self.rec_weights_init
    #     self.transition.biases_init = self.biases_init

    @application(inputs=['input_', 'mask'])
    def apply(self, input_, mask=None, **kwargs):
        states = self.transition.apply(
            mask=mask, **dict_union(self.fork.apply(input_, as_dict=True),
                                    kwargs))
        # Blocks returns [states, cells] for an LSTM transition but a single
        # variable for GRU or vanilla RNNs; only the hidden states are exposed
        # here, the LSTM cells stay internal.
        return states[0] if isinstance(states, list) else states

    @apply.property('outputs')
    def apply_outputs(self):
        return self.transition.apply.states