

Python bricks.Linear Usage Examples

This page collects typical usage examples of blocks.bricks.Linear in Python. If you are unsure what bricks.Linear is, what it does, or how to use it, the curated examples below should help; you can also explore further usage examples from the blocks.bricks module.


The following presents 10 code examples of bricks.Linear, sorted by popularity by default.
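All of the examples below follow the same brick life cycle: construct the Linear brick, apply it to a symbolic Theano variable, then initialize its parameters. Here is a minimal, self-contained sketch of that cycle (the dimensions and names are illustrative only, not taken from any project below):

import numpy
import theano
from theano import tensor
from blocks.bricks import Linear
from blocks.initialization import Constant, IsotropicGaussian

# construct the brick; parameters are allocated lazily
linear = Linear(input_dim=3, output_dim=2, name='projection',
                weights_init=IsotropicGaussian(0.01),
                biases_init=Constant(0))

x = tensor.matrix('x')
y = linear.apply(x)   # symbolic y = x.dot(W) + b
linear.initialize()   # allocate (if needed) and fill W and b

f = theano.function([x], y)
print(f(numpy.ones((4, 3), dtype=theano.config.floatX)).shape)  # (4, 2)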

Example 1: __init__

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def __init__(self, image_feature_dim, embedding_dim, **kwargs):
    super(Encoder, self).__init__(**kwargs)

    self.image_embedding = Linear(
        input_dim=image_feature_dim,
        output_dim=embedding_dim,
        name="image_embedding")

    self.to_inputs = Linear(
        input_dim=embedding_dim,
        # 4x wide: the LSTM input concatenates the input, forget,
        # cell and output gate pre-activations
        output_dim=embedding_dim * 4,
        name="to_inputs")

    self.transition = LSTM(dim=embedding_dim, name="transition")

    self.children = [self.image_embedding, self.to_inputs, self.transition]
Author: youralien, Project: image-captioning-for-mortals, Lines: 24, Source: bricks.py

Example 2: softmax_layer

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def softmax_layer(h, y, vocab_size, hidden_size):
    hidden_to_output = Linear(name='hidden_to_output', input_dim=hidden_size,
                              output_dim=vocab_size)
    initialize([hidden_to_output])
    linear_output = hidden_to_output.apply(h)
    linear_output.name = 'linear_output'
    softmax = NDimensionalSoftmax()
    y_hat = softmax.apply(linear_output, extra_ndim=1)
    y_hat.name = 'y_hat'
    cost = softmax.categorical_cross_entropy(
        y, linear_output, extra_ndim=1).mean()
    cost.name = 'cost'
    return y_hat, cost 
Author: johnarevalo, Project: blocks-char-rnn, Lines: 15, Source: model.py

Example 3: rnn_layer

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def rnn_layer(dim, h, n):
    linear = Linear(input_dim=dim, output_dim=dim, name='linear' + str(n))
    rnn = SimpleRecurrent(dim=dim, activation=Tanh(), name='rnn' + str(n))
    initialize([linear, rnn])
    return rnn.apply(linear.apply(h)) 
Author: johnarevalo, Project: blocks-char-rnn, Lines: 7, Source: model.py

Example 4: lstm_layer

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def lstm_layer(dim, h, n):
    linear = Linear(input_dim=dim, output_dim=dim * 4,  # 4x: LSTM gate pre-activations
                    name='linear' + str(n))
    lstm = LSTM(dim=dim, name='lstm' + str(n))
    initialize([linear, lstm])
    return lstm.apply(linear.apply(h)) 
Author: johnarevalo, Project: blocks-char-rnn, Lines: 7, Source: model.py
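Examples 2-4 come from the same project (blocks-char-rnn) and are designed to be chained. Below is a hedged sketch of one way to compose them; the sizes, the LookupTable embedding step, and the initialize helper (defined elsewhere in that project's model.py and already used by the examples above) are assumptions, not the project's verbatim code:

from theano import tensor
from blocks.bricks.lookup import LookupTable

vocab_size, hidden_size, num_layers = 100, 256, 2   # hypothetical sizes

x = tensor.imatrix('features')   # (time, batch) character ids
y = tensor.imatrix('targets')    # (time, batch) next-character ids

lookup = LookupTable(length=vocab_size, dim=hidden_size, name='lookup')
initialize([lookup])             # the project-level helper used above
h = lookup.apply(x)              # (time, batch, hidden_size)

for i in range(num_layers):
    # LSTM.apply returns [states, cells]; keep only the hidden states
    h = lstm_layer(hidden_size, h, i)[0]

y_hat, cost = softmax_layer(h, y, vocab_size, hidden_size)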

Example 5: __init__

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def __init__(self, output_names, input_dim, prototype=None, **kwargs):
    if not prototype:
        prototype = Linear()

    self.output_names = output_names
    self.input_dim = input_dim

    kwargs.setdefault('child_prefix', 'fork')
    super(Fork, self).__init__(output_names, prototype=prototype,
                               **kwargs)
    self.input_dims = None
Author: rizar, Project: attention-lvcsr, Lines: 13, Source: parallel.py
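For context, a hedged usage sketch of the Fork brick defined above (all dimensions hypothetical): a single input is sent through one Linear child per output name, so each named output can have its own width via output_dims:

from theano import tensor
from blocks.bricks.parallel import Fork
from blocks.initialization import Constant, IsotropicGaussian

fork = Fork(output_names=['inputs', 'gate_inputs'], input_dim=100,
            weights_init=IsotropicGaussian(0.01),
            biases_init=Constant(0))
fork.output_dims = [50, 200]   # one Linear child per output name
fork.initialize()

x = tensor.matrix('x')                # (batch, 100)
inputs, gate_inputs = fork.apply(x)   # (batch, 50) and (batch, 200)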

Example 6: test_variable_filter_roles_error

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def test_variable_filter_roles_error():
    # Creating computation graph
    brick1 = Linear(input_dim=2, output_dim=2, name='linear1')

    x = tensor.vector()
    h1 = brick1.apply(x)
    cg = ComputationGraph(h1)
    # roles must be given as a sequence; passing the bare PARAMETER role
    # raises an error, which is what this test checks for
    VariableFilter(roles=PARAMETER)(cg.variables)
Author: rizar, Project: attention-lvcsr, Lines: 11, Source: test_variable_filter.py
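For contrast, a hedged sketch of the intended usage that the test above deliberately violates, with roles passed as a list of role constants from blocks.roles:

from theano import tensor
from blocks.bricks import Linear
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import PARAMETER, WEIGHT

x = tensor.vector()
brick = Linear(input_dim=2, output_dim=2, name='linear1')
cg = ComputationGraph(brick.apply(x))

parameters = VariableFilter(roles=[PARAMETER])(cg.variables)  # [W, b]
weights = VariableFilter(roles=[WEIGHT])(cg.variables)        # [W]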

Example 7: test_renamer

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def test_renamer():
    x = tensor.matrix('features')
    layer = Linear(10, 10)
    y = layer.apply(x)
    named = shared(name='named', value=numpy.zeros(2))
    tag_named = shared(value=numpy.zeros(2))
    tag_named.tag.name = 'tag_named'
    unnamed = shared(value=numpy.zeros(2))
    variables = [layer.W, named, tag_named, unnamed, unnamed, unnamed]
    renamer = _Renamer()
    names = [renamer(n) for n in variables]
    true_names = ['|linear.W', 'named', 'tag_named', 'parameter',
                  'parameter_2', 'parameter_3']
    assert set(names) == set(true_names) 
Author: rizar, Project: attention-lvcsr, Lines: 16, Source: test_serialization.py

Example 8: test_linearlike_subclass_initialize_works_overridden_w

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def test_linearlike_subclass_initialize_works_overridden_w():
    class NotQuiteLinear(Linear):
        @property
        def W(self):
            W = super(NotQuiteLinear, self).W
            return W / tensor.sqrt((W ** 2).sum(axis=0))

    brick = NotQuiteLinear(5, 10, weights_init=IsotropicGaussian(0.02),
                           biases_init=Constant(1))
    brick.initialize()
    assert not numpy.isnan(brick.parameters[0].get_value()).any()
    numpy.testing.assert_allclose((brick.W ** 2).sum(axis=0).eval(), 1,
                                  rtol=1e-6) 
Author: rizar, Project: attention-lvcsr, Lines: 15, Source: test_interfaces.py

Example 9: make_bidir_lstm_stack

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def make_bidir_lstm_stack(seq, seq_dim, mask, sizes, skip=True, name=''):
    bricks = []

    curr_dim = [seq_dim]
    curr_hidden = [seq]

    hidden_list = []
    for k, dim in enumerate(sizes):
        # one Linear per current input, each producing the 4*dim-wide
        # concatenated LSTM gate pre-activations
        fwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_fwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        fwd_lstm = LSTM(dim=dim, activation=Tanh(),
                        name='%s_fwd_lstm_%d' % (name, k))

        bwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_bwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        bwd_lstm = LSTM(dim=dim, activation=Tanh(),
                        name='%s_bwd_lstm_%d' % (name, k))

        bricks += [fwd_lstm, bwd_lstm] + fwd_lstm_ins + bwd_lstm_ins

        # sum the per-input projections; run the forward LSTM as-is and
        # the backward LSTM on the time-reversed inputs and mask
        fwd_tmp = sum(x.apply(v) for x, v in zip(fwd_lstm_ins, curr_hidden))
        bwd_tmp = sum(x.apply(v) for x, v in zip(bwd_lstm_ins, curr_hidden))
        fwd_hidden, _ = fwd_lstm.apply(fwd_tmp, mask=mask)
        bwd_hidden, _ = bwd_lstm.apply(bwd_tmp[::-1], mask=mask[::-1])
        hidden_list += [fwd_hidden, bwd_hidden]
        if skip:
            # skip connections: feed the raw sequence into every layer
            curr_hidden = [seq, fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [seq_dim, dim, dim]
        else:
            curr_hidden = [fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [dim, dim]

    return bricks, hidden_list
Author: thomasmesnard, Project: DeepMind-Teaching-Machines-to-Read-and-Comprehend, Lines: 31, Source: attentive_reader.py
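A hedged usage sketch for the stack above (shapes and initialization schemes are assumptions, not the project's settings): build a two-layer bidirectional stack over an embedded sequence and initialize the returned bricks before compiling:

from theano import tensor
from blocks.initialization import Constant, IsotropicGaussian

seq = tensor.tensor3('seq')    # (time, batch, 100)
mask = tensor.matrix('mask')   # (time, batch); 1 for real steps, 0 for padding

bricks, hiddens = make_bidir_lstm_stack(seq, 100, mask, sizes=[128, 128],
                                        skip=True, name='ctx')
for brick in bricks:
    brick.weights_init = IsotropicGaussian(0.01)
    brick.biases_init = Constant(0)
    brick.initialize()

# hiddens is [fwd_0, bwd_0, fwd_1, bwd_1]; the backward states are still in
# reversed time order and need [::-1] before concatenating with forward ones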

Example 10: __init__

# Required module import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Linear [as alias]
def __init__(self, match_dim, conv_n, conv_num_filters=1,
             state_transformer=None,
             attended_transformer=None, energy_computer=None,
             prior=None, energy_normalizer=None, **kwargs):
    super(SequenceContentAndConvAttention, self).__init__(**kwargs)
    if not state_transformer:
        state_transformer = Linear(use_bias=False)

    self.match_dim = match_dim
    self.state_transformer = state_transformer

    self.state_transformers = Parallel(input_names=self.state_names,
                                       prototype=state_transformer,
                                       name="state_trans")
    if not attended_transformer:
        # Only this contributor to the match vector
        # is allowed to have biases
        attended_transformer = Linear(name="preprocess")

    if not energy_normalizer:
        energy_normalizer = 'softmax'
    self.energy_normalizer = energy_normalizer

    if not energy_computer:
        energy_computer = ShallowEnergyComputer(
            name="energy_comp",
            use_bias=self.energy_normalizer != 'softmax')
    self.filter_handler = Linear(name="handler", use_bias=False)
    self.attended_transformer = attended_transformer
    self.energy_computer = energy_computer

    if not prior:
        prior = dict(type='expanding', initial_begin=0, initial_end=10000,
                     min_speed=0, max_speed=0)
    self.prior = prior

    self.conv_n = conv_n
    self.conv_num_filters = conv_num_filters
    self.conv = Conv1D(conv_num_filters, 2 * conv_n + 1)

    self.children = [self.state_transformers, self.attended_transformer,
                     self.energy_computer, self.filter_handler, self.conv]
Author: rizar, Project: attention-lvcsr, Lines: 44, Source: attention.py


Note: the blocks.bricks.Linear examples in this article were collected by 纯净天空 from open-source projects and documentation platforms such as GitHub and MSDocs. Copyright of each snippet remains with its original authors; consult the corresponding project's License before using or distributing the code. Do not reproduce without permission.