

Python Fork.apply Method Code Examples

This article collects typical usage examples of the Python method blocks.bricks.parallel.Fork.apply. If you are wondering what Fork.apply does, how it is used, or what real code that calls it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, blocks.bricks.parallel.Fork.


The following presents 15 code examples of the Fork.apply method, sorted by popularity by default.
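
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: one input is projected into several named outputs, each with its own linear transformation. The dimensions and variable names are illustrative only, not taken from any example below.

# A minimal sketch of the Fork.apply pattern (illustrative; assumes
# blocks, theano and numpy are installed).
import numpy as np
import theano
from theano import tensor

from blocks.bricks import Linear
from blocks.bricks.parallel import Fork
from blocks.initialization import Constant, IsotropicGaussian

dim = 3
x = tensor.matrix('x')  # (batch, input_dim)

fork = Fork(output_names=['linear', 'gates'],
            input_dim=dim, output_dims=[dim, dim * 2],
            prototype=Linear(),
            weights_init=IsotropicGaussian(0.1),
            biases_init=Constant(0))
fork.initialize()

linear, gates = fork.apply(x)  # one Theano variable per output name
f = theano.function([x], [linear, gates])
outs = f(np.ones((2, dim), dtype=theano.config.floatX))
print([o.shape for o in outs])  # [(2, 3), (2, 6)]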

Example 1: example2

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
def example2():
    """GRU"""
    x = tensor.tensor3('x')
    dim = 3

    fork = Fork(input_dim=dim, output_dims=[dim, dim * 2], name='fork',
                output_names=["linear", "gates"],
                weights_init=initialization.Identity(),
                biases_init=Constant(0))
    gru = GatedRecurrent(dim=dim, weights_init=initialization.Identity(),
                         biases_init=Constant(0))

    fork.initialize()
    gru.initialize()

    linear, gate_inputs = fork.apply(x)
    h = gru.apply(linear, gate_inputs)

    f = theano.function([x], h)
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX))) 

    doubler = Linear(input_dim=dim, output_dim=dim,
                     weights_init=initialization.Identity(2),
                     biases_init=initialization.Constant(0))
    doubler.initialize()

    lin, gate = fork.apply(doubler.apply(x))
    h_doubler = gru.apply(lin, gate)

    f = theano.function([x], h_doubler)
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX))) 
Author: DjAntaki, Project: IFT6266H16, Lines of code: 29, Source file: rnn_examples.py

Example 2: BidirectionalEncoder

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class BidirectionalEncoder(Initializable):
    """Encoder of RNNsearch model."""

    def __init__(self, vocab_size, embedding_dim, state_dim, **kwargs):
        super(BidirectionalEncoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim

        self.lookup = LookupTable(name='embeddings')
        self.bidir = BidirectionalWMT15(
            GatedRecurrent(activation=Tanh(), dim=state_dim))
        self.fwd_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='fwd_fork')
        self.back_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='back_fork')

        self.children = [self.lookup, self.bidir,
                         self.fwd_fork, self.back_fork]

    def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim

        self.fwd_fork.input_dim = self.embedding_dim
        self.fwd_fork.output_dims = [self.bidir.children[0].get_dim(name)
                                     for name in self.fwd_fork.output_names]
        self.back_fork.input_dim = self.embedding_dim
        self.back_fork.output_dims = [self.bidir.children[1].get_dim(name)
                                      for name in self.back_fork.output_names]

    @application(inputs=['source_sentence', 'source_sentence_mask'],
                 outputs=['representation'])
    def apply(self, source_sentence, source_sentence_mask):
        # Time as first dimension
        source_sentence = source_sentence.T
        source_sentence_mask = source_sentence_mask.T

        embeddings = self.lookup.apply(source_sentence)

        representation = self.bidir.apply(
            merge(self.fwd_fork.apply(embeddings, as_dict=True),
                  {'mask': source_sentence_mask}),
            merge(self.back_fork.apply(embeddings, as_dict=True),
                  {'mask': source_sentence_mask})
        )
        return representation
Author: LynetteXing1991, Project: TAJA-Seq2Seq, Lines of code: 51, Source file: model.py
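
A hypothetical way to instantiate this encoder follows; the vocabulary, embedding and state sizes are assumptions (chosen to echo common RNNsearch configurations), not values from the source project.

# Hypothetical wiring of BidirectionalEncoder; all sizes are assumptions.
from theano import tensor
from blocks.initialization import Constant, IsotropicGaussian

encoder = BidirectionalEncoder(vocab_size=30000, embedding_dim=620,
                               state_dim=1000, name='encoder')
encoder.weights_init = IsotropicGaussian(0.01)
encoder.biases_init = Constant(0)
encoder.initialize()

source = tensor.lmatrix('source_sentence')           # (batch, time) indices
source_mask = tensor.matrix('source_sentence_mask')  # (batch, time) 0/1
representation = encoder.apply(source, source_mask)  # (time, batch, 2*state_dim)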

Example 3: RecurrentWithFork

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class RecurrentWithFork(Initializable):

    @lazy(allocation=['input_dim'])
    def __init__(self, recurrent, input_dim, **kwargs):
        super(RecurrentWithFork, self).__init__(**kwargs)
        self.recurrent = recurrent
        self.input_dim = input_dim
        self.fork = Fork(
            [name for name in self.recurrent.sequences
             if name != 'mask'],
            prototype=Linear())
        self.children = [recurrent.brick, self.fork]

    def _push_allocation_config(self):
        self.fork.input_dim = self.input_dim
        self.fork.output_dims = [self.recurrent.brick.get_dim(name)
                                 for name in self.fork.output_names]

    @application(inputs=['input_', 'mask'])
    def apply(self, input_, mask=None, **kwargs):
        return self.recurrent(
            mask=mask, **dict_union(self.fork.apply(input_, as_dict=True),
                                    kwargs))

    @apply.property('outputs')
    def apply_outputs(self):
        return self.recurrent.states
Author: DingKe, Project: attention-lvcsr, Lines of code: 29, Source file: __init__.py
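
Note that `recurrent` here is an application, not a brick: the constructor reads `recurrent.sequences` and `recurrent.brick`, and `apply` calls the application directly. A hypothetical usage sketch, where the wrapped RNN and all dimensions are assumptions:

# Hypothetical usage of RecurrentWithFork (names and sizes are assumptions).
from theano import tensor
from blocks.bricks import Tanh
from blocks.bricks.recurrent import SimpleRecurrent
from blocks.initialization import Constant, IsotropicGaussian

rnn = SimpleRecurrent(dim=20, activation=Tanh())
wrapper = RecurrentWithFork(rnn.apply, input_dim=10, name='rnn_with_fork')
wrapper.weights_init = IsotropicGaussian(0.1)
wrapper.biases_init = Constant(0)
wrapper.initialize()

x = tensor.tensor3('x')    # (time, batch, input_dim)
states = wrapper.apply(x)  # the fork projects x into the RNN's inputs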

Example 4: gru_layer

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
def gru_layer(dim, h, n):
    fork = Fork(output_names=['linear' + str(n), 'gates' + str(n)],
                name='fork' + str(n), input_dim=dim, output_dims=[dim, dim * 2])
    gru = GatedRecurrent(dim=dim, name='gru' + str(n))
    initialize([fork, gru])
    linear, gates = fork.apply(h)
    return gru.apply(linear, gates)
Author: ixtel, Project: blocks-char-rnn, Lines of code: 9, Source file: model.py
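
The `initialize` helper above comes from the surrounding repository, not from blocks. A minimal stand-in, assuming it merely assigns simple initialization schemes and allocates parameters, might look like:

# Hypothetical stand-in for the repo's `initialize` helper.
from blocks import initialization

def initialize(bricks):
    for brick in bricks:
        brick.weights_init = initialization.IsotropicGaussian(0.1)
        brick.biases_init = initialization.Constant(0)
        brick.initialize()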

Example 5: BidirectionalEncoder

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class BidirectionalEncoder(Initializable):
    """ Bidirectional GRU encoder. """

    def __init__(self, embedding_dim, state_dim, **kwargs):
        super(BidirectionalEncoder, self).__init__(**kwargs)
        # Dimension of the word embeddings taken as input
        self.embedding_dim = embedding_dim
        # Hidden state dimension
        self.state_dim = state_dim

        # The bidir GRU
        self.bidir = BidirectionalFromDict(
            GatedRecurrent(activation=Tanh(), dim=state_dim))
        # Forks to administer the inputs of GRU gates
        self.fwd_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='fwd_fork')
        self.back_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='back_fork')

        self.children = [self.bidir,
                         self.fwd_fork, self.back_fork]

    def _push_allocation_config(self):
        self.fwd_fork.input_dim = self.embedding_dim
        self.fwd_fork.output_dims = [self.bidir.children[0].get_dim(name)
                                     for name in self.fwd_fork.output_names]
        self.back_fork.input_dim = self.embedding_dim
        self.back_fork.output_dims = [self.bidir.children[1].get_dim(name)
                                      for name in self.back_fork.output_names]


    @application(inputs=['source_sentence_tbf', 'source_sentence_mask_tb'],
                 outputs=['representation'])
    def apply(self, source_sentence_tbf, source_sentence_mask_tb=None):

        representation_tbf = self.bidir.apply(
            merge(self.fwd_fork.apply(source_sentence_tbf, as_dict=True),
                  {'mask': source_sentence_mask_tb}),
            merge(self.back_fork.apply(source_sentence_tbf, as_dict=True),
                  {'mask': source_sentence_mask_tb})
        )
        return representation_tbf
Author: BinbinBian, Project: asreader, Lines of code: 46, Source file: text_comprehension_base.py

Example 6: InnerRecurrent

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class InnerRecurrent(BaseRecurrent, Initializable):
    def __init__(self, inner_input_dim, outer_input_dim, inner_dim, **kwargs):
        self.inner_gru = GatedRecurrent(dim=inner_dim, name='inner_gru')

        self.inner_input_fork = Fork(
            output_names=[name for name in self.inner_gru.apply.sequences
                          if 'mask' not in name],
            input_dim=inner_input_dim, name='inner_input_fork')
        self.outer_input_fork = Fork(
            output_names=[name for name in self.inner_gru.apply.sequences
                          if 'mask' not in name],
            input_dim=outer_input_dim, name='inner_outer_fork')

        super(InnerRecurrent, self).__init__(**kwargs)

        self.children = [
            self.inner_gru, self.inner_input_fork, self.outer_input_fork]

    def _push_allocation_config(self):
        self.inner_input_fork.output_dims = self.inner_gru.get_dims(
            self.inner_input_fork.output_names)
        self.outer_input_fork.output_dims = self.inner_gru.get_dims(
            self.outer_input_fork.output_names)

    @recurrent(sequences=['inner_inputs'], states=['states'],
               contexts=['outer_inputs'], outputs=['states'])
    def apply(self, inner_inputs, states, outer_inputs):
        forked_inputs = self.inner_input_fork.apply(inner_inputs, as_dict=True)
        forked_states = self.outer_input_fork.apply(outer_inputs, as_dict=True)

        gru_inputs = {key: forked_inputs[key] + forked_states[key]
                      for key in forked_inputs.keys()}

        new_states = self.inner_gru.apply(
            iterate=False,
            **dict_union(gru_inputs, {'states': states}))
        return new_states

    def get_dim(self, name):
        if name == 'states':
            return self.inner_gru.get_dim(name)
        else:
            raise AttributeError(name)
Author: Beronx86, Project: blocks, Lines of code: 45, Source file: test_model.py

Example 7: RecurrentWithFork

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class RecurrentWithFork(Initializable):

    @lazy(allocation=['input_dim'])
    def __init__(self, transition, input_dim, hidden_dim,
                 rec_weights_init, ff_weights_init, biases_init, **kwargs):
        super(RecurrentWithFork, self).__init__(**kwargs)
        self.rec_weights_init = rec_weights_init
        self.ff_weights_init = ff_weights_init
        self.biases_init = biases_init
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        self.transition = transition
        self.transition.dim = self.hidden_dim
        self.transition.weights_init = self.rec_weights_init
        self.transition.biases_init = self.biases_init

        self.fork = Fork(
            [name for name in self.transition.apply.sequences
             if name != 'mask'],
            prototype=Linear())
        self.fork.input_dim = self.input_dim
        self.fork.output_dims = [self.transition.apply.brick.get_dim(name)
                                 for name in self.fork.output_names]
        self.fork.weights_init = self.ff_weights_init
        self.fork.biases_init = self.biases_init

        self.children = [transition, self.fork]

#    def _push_allocation_config(self):#
#        #super(RecurrentWithFork, self)._push_allocation_config()
#        self.transition.dim=self.hidden_dim
#        self.fork.input_dim = self.input_dim
#        self.fork.output_dims = [self.transition.apply.brick.get_dim(name)
#                                 for name in self.fork.output_names]

#    def _push_initialization_config(self):
#        #super(RecurrentWithFork, self)._push_initialization_config()
#        self.fork.weights_init=self.ff_weights_init
#        self.fork.biases_init=self.biases_init
#        self.transition.weights_init=self.rec_weights_init
#        self.transition.bias_init=self.biases_init

    @application(inputs=['input_', 'mask'])
    def apply(self, input_, mask=None, **kwargs):
        states = self.transition.apply(
            mask=mask, **dict_union(self.fork.apply(input_, as_dict=True),
                                    kwargs))
        # Blocks returns a list [states, cells] for an LSTM but a single
        # variable for a GRU or vanilla RNN. We only want the LSTM's states;
        # the cells should not be visible from outside.
        return states[0] if isinstance(states, list) else states

    @apply.property('outputs')
    def apply_outputs(self):
        return self.transition.apply.states
Author: Richi91, Project: SpeechRecognition, Lines of code: 57, Source file: model.py

Example 8: gru_layer

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
def gru_layer(dim, h, n):
    fork = Fork(
        output_names=["linear" + str(n), "gates" + str(n)],
        name="fork" + str(n),
        input_dim=dim,
        output_dims=[dim, dim * 2],
    )
    gru = GatedRecurrent(dim=dim, name="gru" + str(n))
    initialize([fork, gru])
    linear, gates = fork.apply(h)
    return gru.apply(linear, gates)
Author: teganmaharaj, Project: deeplearningclass, Lines of code: 13, Source file: model.py

Example 9: Feedback

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class Feedback(Initializable):
    """Feedback.

    Attributes
    ----------
    output_names : list
    output_dims : dict

    """
    @lazy(allocation=['output_names', 'output_dims'])
    def __init__(self, output_names, output_dims,
                 embedding=None, input_dim=0,
                 **kwargs):
        super(Feedback, self).__init__(**kwargs)

        self.output_names = output_names
        self.output_dims = output_dims
        self.input_dim = input_dim

        self.embedding = embedding
        self.fork = Fork(self.output_names)

        self.apply.inputs = ['input']
        self.apply.outputs = output_names

        self.children = [self.embedding, self.fork]
        self.children = [child for child in self.children if child]

    def _push_allocation_config(self):
        if self.fork:
            self.fork.output_dims = self.output_dims
        else:
            self.embedding.output_dim, = self.output_dims
        if self.embedding:
            self.embedding.input_dim = self.input_dim
            self.fork.input_dim = self.embedding.output_dim
        else:
            self.fork.input_dim = self.input_dim

    @application
    def apply(self, symbols):
        embedded_symbols = symbols
        if self.embedding:
            embedded_symbols = self.embedding.apply(symbols)
        if self.fork:
            return self.fork.apply(embedded_symbols)
        return embedded_symbols
Author: MLDL, Project: blocks-extras, Lines of code: 49, Source file: sequence_generator2.py

Example 10: Encoder

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class Encoder(Initializable):
    def __init__(self, vocab_size, embedding_dim, state_dim, reverse=True,
                 **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.reverse = reverse

        self.lookup = LookupTable(name='embeddings')
        self.transition = GatedRecurrent(Tanh(), name='encoder_transition')
        self.fork = Fork([name for name in self.transition.apply.sequences
                          if name != 'mask'], prototype=Linear())

        self.children = [self.lookup, self.transition, self.fork]

    def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim
        self.transition.dim = self.state_dim
        self.fork.input_dim = self.embedding_dim
        self.fork.output_dims = [self.state_dim
                                 for _ in self.fork.output_names]

    @application(inputs=['source_sentence', 'source_sentence_mask'],
                 outputs=['representation'])
    def apply(self, source_sentence, source_sentence_mask):
        # Time as first dimension
        source_sentence = source_sentence.dimshuffle(1, 0)
        source_sentence_mask = source_sentence_mask.T
        if self.reverse:
            source_sentence = source_sentence[::-1]
            source_sentence_mask = source_sentence_mask[::-1]

        embeddings = self.lookup.apply(source_sentence)
        representation = self.transition.apply(**merge(
            self.fork.apply(embeddings, as_dict=True),
            {'mask': source_sentence_mask}
        ))
        return representation[-1]
Author: rizar, Project: NMT, Lines of code: 42, Source file: model_encdec.py

Example 11: build_fork_lookup

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
def build_fork_lookup(vocab_size, args):
    x = tensor.lmatrix('features')
    virtual_dim = 6
    time_length = 5
    mini_batch_size = 2
    skip_connections = True
    layers = 3

    # Build the model
    output_names = []
    output_dims = []
    for d in range(layers):
        if d > 0:
            suffix = '_' + str(d)
        else:
            suffix = ''
        if d == 0 or skip_connections:
            output_names.append("inputs" + suffix)
            output_dims.append(virtual_dim)

    print(output_names)
    print(output_dims)
    lookup = LookupTable(length=vocab_size, dim=virtual_dim)
    lookup.weights_init = initialization.IsotropicGaussian(0.1)
    lookup.biases_init = initialization.Constant(0)

    fork = Fork(output_names=output_names, input_dim=time_length,
                output_dims=output_dims,
                prototype=FeedforwardSequence(
                    [lookup.apply]))

    # Return a list of 3D tensors, one for each layer:
    # (Batch X Time X embedding_dim)
    pre_rnn = fork.apply(x)
    fork.initialize()

    f = theano.function([x], pre_rnn)
    return f
Author: ClemDoum, Project: RNN_Experiments, Lines of code: 40, Source file: test_fork_lookup.py

Example 12: Encoder

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class Encoder(Initializable):
    """Encoder of RNNsearch model."""

    def __init__(self, blockid, vocab_size, embedding_dim, state_dim, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.state_dim = state_dim
        self.blockid = blockid

        self.lookup = LookupTable(name='embeddings' + '_' + self.blockid)
        self.gru = GatedRecurrent(activation=Tanh(), dim=state_dim,
                                  name="GatedRNN" + self.blockid)
        self.fwd_fork = Fork(
            [name for name in self.gru.apply.sequences
             if name != 'mask'], prototype=Linear(), name='fwd_fork' + '_' + self.blockid)

        self.children = [self.lookup, self.gru, self.fwd_fork]

    def _push_allocation_config(self):
        self.lookup.length = self.vocab_size
        self.lookup.dim = self.embedding_dim

        self.fwd_fork.input_dim = self.embedding_dim
        self.fwd_fork.output_dims = [self.gru.get_dim(name)
                                     for name in self.fwd_fork.output_names]

    @application(inputs=['source_sentence', 'source_sentence_mask'],
                 outputs=['representation'])
    def apply(self, source_sentence, source_sentence_mask):
        # Time as first dimension
        source_sentence = source_sentence.T
        source_sentence_mask = source_sentence_mask.T

        embeddings = self.lookup.apply(source_sentence)
        grupara = merge(self.fwd_fork.apply(embeddings, as_dict=True),
                        {'mask': source_sentence_mask})
        representation = self.gru.apply(**grupara)
        return representation
Author: MtMoon, Project: PoemProject, Lines of code: 39, Source file: model.py

Example 13: get_prernn

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
def get_prernn(args):

    # time x batch
    x_mask = tensor.fmatrix('mask')

    # Compute the state dim
    if args.rnn_type == 'lstm':
        state_dim = 4 * args.state_dim
    else:
        state_dim = args.state_dim

    # Prepare the arguments for the fork
    output_names = []
    output_dims = []
    for d in range(args.layers):
        if d > 0:
            suffix = RECURRENTSTACK_SEPARATOR + str(d)
        else:
            suffix = ''
        if d == 0 or args.skip_connections:
            output_names.append("inputs" + suffix)
            output_dims.append(state_dim)

    # Prepare the brick to be forked (LookupTable or Linear).
    # Check whether the dataset provides indices (in the case of a
    # fixed vocabulary, x is a 2D tensor) or raw values
    # (x is a 3D tensor).
    if has_indices(args.dataset):
        features = args.mini_batch_size
        x = tensor.lmatrix('features')
        vocab_size = get_output_size(args.dataset)
        lookup = LookupTable(length=vocab_size, dim=state_dim)
        lookup.weights_init = initialization.IsotropicGaussian(0.1)
        lookup.biases_init = initialization.Constant(0)
        forked = FeedforwardSequence([lookup.apply])
        if not has_mask(args.dataset):
            x_mask = tensor.ones_like(x, dtype=floatX)

    else:
        x = tensor.tensor3('features', dtype=floatX)
        if args.used_inputs is not None:
            x = tensor.set_subtensor(x[args.used_inputs:, :, :],
                                     tensor.zeros_like(x[args.used_inputs:,
                                                         :, :],
                                                       dtype=floatX))
        features = get_output_size(args.dataset)
        forked = Linear(input_dim=features, output_dim=state_dim)
        forked.weights_init = initialization.IsotropicGaussian(0.1)
        forked.biases_init = initialization.Constant(0)

        if not has_mask(args.dataset):
            x_mask = tensor.ones_like(x[:, :, 0], dtype=floatX)

    # Define the fork
    fork = Fork(output_names=output_names, input_dim=features,
                output_dims=output_dims,
                prototype=forked)
    fork.initialize()

    # Apply the fork
    prernn = fork.apply(x)

    # Give a name to the input of each layer
    if args.skip_connections:
        for t in range(len(prernn)):
            prernn[t].name = "pre_rnn_" + str(t)
    else:
        prernn.name = "pre_rnn"

    return prernn, x_mask
Author: anirudh9119, Project: RNN_Experiments, Lines of code: 72, Source file: build_model_utils.py
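
A hypothetical invocation follows. The `args` namespace mirrors the attributes read above; the dataset name is made up, and has_indices, get_output_size and has_mask are helpers from the surrounding repository.

# Hypothetical call to get_prernn; every value here is an assumption.
from argparse import Namespace

args = Namespace(rnn_type='gru', state_dim=100, layers=2,
                 skip_connections=True, dataset='wikipedia',
                 mini_batch_size=32, used_inputs=None)
prernn, x_mask = get_prernn(args)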

Example 14: AddParameters

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
class AddParameters(Brick):
    """Adds dependency on parameters to a transition function.

    In fact an improved version of this brick should be moved
    to the main body of the library, because it is clearly reusable
    (e.g. it can be a part of Encoder-Decoder translation model.

    """
    @lazy
    def __init__(self, transition, num_params, params_name,
                 weights_init, biases_init, **kwargs):
        super(AddParameters, self).__init__(**kwargs)
        update_instance(self, locals())

        self.input_names = [name for name in transition.apply.sequences
                            if name != 'mask']
        self.state_name = transition.apply.states[0]
        assert len(transition.apply.states) == 1

        self.fork = Fork(self.input_names)
        # Could be also several init bricks, one for each of the states
        self.init = MLP([Identity()], name="init")
        self.children = [self.transition, self.fork, self.init]

    def _push_allocation_config(self):
        self.fork.input_dim = self.num_params
        self.fork.fork_dims = {name: self.transition.get_dim(name)
                               for name in self.input_names}
        self.init.dims[0] = self.num_params
        self.init.dims[-1] = self.transition.get_dim(self.state_name)

    def _push_initialization_config(self):
        for child in self.children:
            if self.weights_init:
                child.weights_init = self.weights_init
            if self.biases_init:
                child.biases_init = self.biases_init

    @application
    def apply(self, **kwargs):
        inputs = {name: kwargs.pop(name) for name in self.input_names}
        params = kwargs.pop("params")
        forks = self.fork.apply(params, return_dict=True)
        for name in self.input_names:
            inputs[name] = inputs[name] + forks[name]
        kwargs.update(inputs)
        if kwargs.get('iterate', True):
            kwargs[self.state_name] = self.initial_state(None, params=params)
        return self.transition.apply(**kwargs)

    @apply.delegate
    def apply_delegate(self):
        return self.transition.apply

    @apply.property('contexts')
    def apply_contexts(self):
        return [self.params_name] + self.transition.apply.contexts

    @application
    def initial_state(self, batch_size, *args, **kwargs):
        return self.init.apply(kwargs['params'])

    def get_dim(self, name):
        if name == 'params':
            return self.num_params
        return self.transition.get_dim(name)
Author: sherjilozair, Project: blocks, Lines of code: 68, Source file: sine.py

Example 15: build_fork_lookup

# Required import: from blocks.bricks.parallel import Fork [as alias]
# Or: from blocks.bricks.parallel.Fork import apply [as alias]
def build_fork_lookup(vocab_size, time_length, args):
    x = tensor.lmatrix('features')
    virtual_dim = 6
    state_dim = 6
    skip_connections = False
    layers = 1

    # Build the model
    output_names = []
    output_dims = []
    for d in range(layers):
        if d > 0:
            suffix = '_' + str(d)
        else:
            suffix = ''
        if d == 0 or skip_connections:
            output_names.append("inputs" + suffix)
            output_dims.append(virtual_dim)

    lookup = LookupTable(length=vocab_size, dim=virtual_dim)
    lookup.weights_init = initialization.IsotropicGaussian(0.1)
    lookup.biases_init = initialization.Constant(0)

    fork = Fork(output_names=output_names, input_dim=time_length,
                output_dims=output_dims,
                prototype=FeedforwardSequence(
                    [lookup.apply]))

    # Note that this order of the periods makes faster modules flow into
    # slower ones, which is the opposite of the original paper
    transitions = [ClockworkBase(dim=state_dim, activation=Tanh(),
                                 period=2 ** i) for i in range(layers)]

    rnn = RecurrentStack(transitions, skip_connections=skip_connections)

    # Return a list of 3D tensors, one for each layer:
    # (Batch X Time X embedding_dim)
    pre_rnn = fork.apply(x)

    # Give time as the first index for each element in the list:
    # (Time X Batch X embedding_dim)
    if layers > 1 and skip_connections:
        for t in range(len(pre_rnn)):
            pre_rnn[t] = pre_rnn[t].dimshuffle(1, 0, 2)
    else:
        pre_rnn = pre_rnn.dimshuffle(1, 0, 2)

    f_pre_rnn = theano.function([x], pre_rnn)

    # Prepare inputs for the RNN
    kwargs = OrderedDict()
    for d in range(layers):
        if d > 0:
            suffix = '_' + str(d)
        else:
            suffix = ''
        if d == 0 or skip_connections:
            if skip_connections:
                kwargs['inputs' + suffix] = pre_rnn[d]
            else:
                kwargs['inputs' + suffix] = pre_rnn

    print(kwargs)
    # Apply the RNN to the inputs
    h = rnn.apply(low_memory=True, **kwargs)

    fork.initialize()

    rnn.weights_init = initialization.Orthogonal()
    rnn.biases_init = initialization.Constant(0)
    rnn.initialize()

    f_h = theano.function([x], h)
    return f_pre_rnn, f_h
Author: pjadzinsky, Project: RNN_Experiments, Lines of code: 76, Source file: test_clockwork.py


Note: The blocks.bricks.parallel.Fork.apply examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are selected from community open-source projects; copyright remains with the original authors, and use or distribution should follow each project's license. Please do not reproduce without permission.