

Python bricks.Tanh Code Examples

This article collects typical usage examples of Tanh from the Python module blocks.bricks. If you are unsure what blocks.bricks.Tanh does or how to use it, the curated code examples below should help. You can also explore further usage examples from blocks.bricks.


The following 15 code examples of bricks.Tanh are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: __init__

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dim, activation=None, gate_activation=None,
                 **kwargs):
        super(GatedRecurrent, self).__init__(**kwargs)
        self.dim = dim

        self.recurrent_weights_init = None
        self.initial_states_init = None

        if not activation:
            activation = Tanh()
        if not gate_activation:
            gate_activation = Logistic()
        self.activation = activation
        self.gate_activation = gate_activation

        self.children = [activation, gate_activation] 
Author: rizar, Project: attention-lvcsr, Lines: 18, Source: recurrent.py
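
A minimal usage sketch (an addition for this article, not taken from the project above), assuming the standard blocks package: when no activations are passed, the constructor falls back to Tanh for the state and Logistic for the gates.

from blocks.bricks import Tanh, Logistic
from blocks.bricks.recurrent import GatedRecurrent
from blocks.initialization import IsotropicGaussian

# No activation/gate_activation given, so __init__ falls back to
# Tanh() and Logistic().
gru = GatedRecurrent(dim=100, weights_init=IsotropicGaussian(0.01))
gru.initialize()
assert isinstance(gru.activation, Tanh)
assert isinstance(gru.gate_activation, Logistic)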

Example 2: setUp

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def setUp(self):
        self.bidir = Bidirectional(weights_init=Orthogonal(),
                                   prototype=SimpleRecurrent(
                                       dim=3, activation=Tanh()))
        self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
                                      activation=Tanh(), seed=1)
        self.bidir.allocate()
        self.simple.initialize()
        self.bidir.children[0].parameters[0].set_value(
            self.simple.parameters[0].get_value())
        self.bidir.children[1].parameters[0].set_value(
            self.simple.parameters[0].get_value())
        self.x_val = 0.1 * numpy.asarray(
            list(itertools.permutations(range(4))),
            dtype=theano.config.floatX)
        self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
                      self.x_val[..., None])
        self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
        self.mask_val[12:24, 3] = 0 
Author: rizar, Project: attention-lvcsr, Lines: 21, Source: test_recurrent.py

Example 3: test_saved_inner_graph

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_saved_inner_graph():
    """Make sure that the original inner graph is saved."""
    x = tensor.tensor3()
    recurrent = SimpleRecurrent(dim=3, activation=Tanh())
    y = recurrent.apply(x)

    application_call = get_application_call(y)
    assert application_call.inner_inputs
    assert application_call.inner_outputs

    cg = ComputationGraph(application_call.inner_outputs)
    # Check that the inner scan graph is annotated
    # with `recurrent.apply`
    assert len(VariableFilter(applications=[recurrent.apply])(cg)) == 3
    # Check that the inner graph is equivalent to the one
    # produced by a stand-alone of `recurrent.apply`
    assert is_same_graph(application_call.inner_outputs[0],
                         recurrent.apply(*application_call.inner_inputs,
                                         iterate=False)) 
Author: rizar, Project: attention-lvcsr, Lines: 21, Source: test_recurrent.py

Example 4: test_super_in_recurrent_overrider

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_super_in_recurrent_overrider():
    # A regression test for the issue #475
    class SimpleRecurrentWithContext(SimpleRecurrent):
        @application(contexts=['context'])
        def apply(self, context, *args, **kwargs):
            kwargs['inputs'] += context
            return super(SimpleRecurrentWithContext, self).apply(*args,
                                                                 **kwargs)

        @apply.delegate
        def apply_delegate(self):
            return super(SimpleRecurrentWithContext, self).apply

    brick = SimpleRecurrentWithContext(100, Tanh())
    inputs = tensor.tensor3('inputs')
    context = tensor.matrix('context').dimshuffle('x', 0, 1)
    brick.apply(context, inputs=inputs) 
Author: rizar, Project: attention-lvcsr, Lines: 19, Source: test_recurrent.py

Example 5: __init__

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dimension, alphabet_size, **kwargs):
        super(SimpleGenerator, self).__init__(**kwargs)
        lookup = LookupTable(alphabet_size, dimension)
        transition = SimpleRecurrent(
            activation=Tanh(),
            dim=dimension, name="transition")
        attention = SequenceContentAttention(
            state_names=transition.apply.states,
            attended_dim=dimension, match_dim=dimension, name="attention")
        readout = Readout(
            readout_dim=alphabet_size,
            source_names=[transition.apply.states[0],
                          attention.take_glimpses.outputs[0]],
            emitter=SoftmaxEmitter(name="emitter"),
            feedback_brick=LookupFeedback(alphabet_size, dimension),
            name="readout")
        generator = SequenceGenerator(
            readout=readout, transition=transition, attention=attention,
            name="generator")

        self.lookup = lookup
        self.generator = generator
        self.children = [lookup, generator] 
Author: rizar, Project: attention-lvcsr, Lines: 25, Source: test_search.py

Example 6: rnn_layer

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def rnn_layer(dim, h, n):
    linear = Linear(input_dim=dim, output_dim=dim, name='linear' + str(n))
    rnn = SimpleRecurrent(dim=dim, activation=Tanh(), name='rnn' + str(n))
    initialize([linear, rnn])
    return rnn.apply(linear.apply(h)) 
Author: johnarevalo, Project: blocks-char-rnn, Lines: 7, Source: model.py
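
A usage sketch for stacking these layers (an addition; the initialize helper below is a hypothetical stand-in for the project's own helper, which assigns initialization schemes and calls initialize() on each brick):

from theano import tensor
from blocks.initialization import IsotropicGaussian, Constant

def initialize(bricks):
    # Hypothetical stand-in for the blocks-char-rnn helper.
    for brick in bricks:
        brick.weights_init = IsotropicGaussian(0.01)
        brick.biases_init = Constant(0)
        brick.initialize()

x = tensor.tensor3('x')     # (time, batch, dim)
h1 = rnn_layer(100, x, 1)   # first recurrent layer
h2 = rnn_layer(100, h1, 2)  # second layer stacked on the first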

Example 7: __init__

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dim, num_copies, use_W_xu, activation=None,
                 gate_activation=None, **kwargs):
        self.dim = dim
        self.num_copies = num_copies
        self.use_W_xu = use_W_xu

        # shape: C x F/2
        permutations = []
        indices = numpy.arange(self.dim // 2)
        for i in range(self.num_copies):
            numpy.random.shuffle(indices)
            permutations.append(numpy.concatenate(
                [indices,
                 [ind + self.dim // 2 for ind in indices]]))
        # C x F (numpy)
        self.permutations = numpy.vstack(permutations)

        if not activation:
            activation = Tanh()
        if not gate_activation:
            gate_activation = Logistic()
        self.activation = activation
        self.gate_activation = gate_activation

        children = ([self.activation, self.gate_activation] +
                    kwargs.get('children', []))
        super(AssociativeLSTM, self).__init__(children=children, **kwargs) 
Author: mohammadpz, Project: Associative_LSTM, Lines: 29, Source: bricks.py
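
A small worked illustration of the permutation construction above (an addition, illustrative only): each of the C rows shuffles the first dim/2 positions and applies the same shuffle, offset by dim/2, to the second half, yielding a C x F index matrix.

import numpy

numpy.random.seed(1)  # arbitrary seed, for reproducibility
dim, num_copies = 4, 2
permutations = []
indices = numpy.arange(dim // 2)  # [0, 1]
for _ in range(num_copies):
    numpy.random.shuffle(indices)
    permutations.append(numpy.concatenate(
        [indices, [ind + dim // 2 for ind in indices]]))
perms = numpy.vstack(permutations)
print(perms.shape)  # (2, 4); each row is a permutation of [0, 1, 2, 3]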

Example 8: test_batch_normalized_mlp_construction

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_construction():
    """Test that BatchNormalizedMLP performs construction correctly."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9])
    assert all(isinstance(a, Sequence) for a in mlp.activations)
    assert all(isinstance(a.children[0], BatchNormalization)
               for a in mlp.activations)
    assert all(isinstance(a.children[1], Tanh)
               for a in mlp.activations) 
Author: rizar, Project: attention-lvcsr, Lines: 10, Source: test_bn.py

Example 9: test_batch_normalized_mlp_allocation

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_allocation():
    """Test that BatchNormalizedMLP performs allocation correctly."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9])
    mlp.allocate()
    assert mlp.activations[0].children[0].input_dim == 7
    assert mlp.activations[1].children[0].input_dim == 9
    assert not any(l.use_bias for l in mlp.linear_transformations) 
Author: rizar, Project: attention-lvcsr, Lines: 9, Source: test_bn.py

Example 10: test_batch_normalized_mlp_conserve_memory_propagated

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_conserve_memory_propagated():
    """Test that setting conserve_memory on a BatchNormalizedMLP works."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9],
                             conserve_memory=False)
    assert not mlp.conserve_memory
    assert not any(act.children[0].conserve_memory for act in mlp.activations)
    mlp.conserve_memory = True
    assert mlp.conserve_memory
    assert all(act.children[0].conserve_memory for act in mlp.activations) 
Author: rizar, Project: attention-lvcsr, Lines: 11, Source: test_bn.py

Example 11: test_batch_normalized_mlp_mean_only_propagated

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_mean_only_propagated():
    """Test that setting mean_only on a BatchNormalizedMLP works."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9],
                             mean_only=False)
    assert not mlp.mean_only
    assert not any(act.children[0].mean_only for act in mlp.activations)
    mlp.mean_only = True
    assert mlp.mean_only
    assert all(act.children[0].mean_only for act in mlp.activations) 
Author: rizar, Project: attention-lvcsr, Lines: 11, Source: test_bn.py

Example 12: test_convolutional_sequence_with_convolutions_raw_activation

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_convolutional_sequence_with_convolutions_raw_activation():
    seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         Rectifier(),
         Convolutional(filter_size=(5, 5), num_filters=3, step=(2, 2)),
         Tanh()],
        num_channels=2,
        image_size=(21, 39))
    seq.allocate()
    x = theano.tensor.tensor4()
    out = seq.apply(x).eval({x: numpy.ones((10, 2, 21, 39),
                                           dtype=theano.config.floatX)})
    assert out.shape == (10, 3, 8, 17) 
Author: rizar, Project: attention-lvcsr, Lines: 15, Source: test_conv.py
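
The expected shape follows from standard convolution arithmetic; as a quick check (an addition, not from the test itself):

# 'valid' 3x3 convolution on a 21 x 39 image:
h1, w1 = 21 - 3 + 1, 39 - 3 + 1                # (19, 37), now 4 channels
# 5x5 convolution with step (stride) (2, 2):
h2, w2 = (h1 - 5) // 2 + 1, (w1 - 5) // 2 + 1  # (8, 17), now 3 channels
assert (h2, w2) == (8, 17)  # matches out.shape == (10, 3, 8, 17)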

Example 13: test_convolutional_sequence_activation_get_dim

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_convolutional_sequence_activation_get_dim():
    seq = ConvolutionalSequence([Tanh()], num_channels=9, image_size=(4, 6))
    seq.allocate()
    assert seq.get_dim('output') == (9, 4, 6)

    seq = ConvolutionalSequence([Convolutional(filter_size=(7, 7),
                                               num_filters=5,
                                               border_mode=(1, 1)),
                                 Tanh()], num_channels=8, image_size=(8, 11))
    seq.allocate()
    assert seq.get_dim('output') == (5, 4, 7) 
Author: rizar, Project: attention-lvcsr, Lines: 13, Source: test_conv.py
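
Again as a quick check of the second assertion (an addition): border_mode=(1, 1) pads the 8 x 11 image by one pixel on each side before a valid 7x7 convolution.

h, w = (8 + 2 * 1) - 7 + 1, (11 + 2 * 1) - 7 + 1
assert (h, w) == (4, 7)  # matches seq.get_dim('output') == (5, 4, 7)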

Example 14: make_bidir_lstm_stack

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def make_bidir_lstm_stack(seq, seq_dim, mask, sizes, skip=True, name=''):
    bricks = []

    curr_dim = [seq_dim]
    curr_hidden = [seq]

    hidden_list = []
    for k, dim in enumerate(sizes):
        fwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_fwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        fwd_lstm = LSTM(dim=dim, activation=Tanh(),
                        name='%s_fwd_lstm_%d' % (name, k))

        bwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_bwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        bwd_lstm = LSTM(dim=dim, activation=Tanh(),
                        name='%s_bwd_lstm_%d' % (name, k))

        bricks = bricks + [fwd_lstm, bwd_lstm] + fwd_lstm_ins + bwd_lstm_ins

        fwd_tmp = sum(x.apply(v) for x, v in zip(fwd_lstm_ins, curr_hidden))
        bwd_tmp = sum(x.apply(v) for x, v in zip(bwd_lstm_ins, curr_hidden))
        fwd_hidden, _ = fwd_lstm.apply(fwd_tmp, mask=mask)
        bwd_hidden, _ = bwd_lstm.apply(bwd_tmp[::-1], mask=mask[::-1])
        hidden_list = hidden_list + [fwd_hidden, bwd_hidden]
        if skip:
            curr_hidden = [seq, fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [seq_dim, dim, dim]
        else:
            curr_hidden = [fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [dim, dim]

    return bricks, hidden_list 
Author: thomasmesnard, Project: DeepMind-Teaching-Machines-to-Read-and-Comprehend, Lines: 31, Source: attentive_reader.py
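
A usage sketch (an addition; the variable names are illustrative) showing how the stack might be driven with Theano inputs. The returned bricks still need their initialization schemes set, as in the helper sketched under Example 6, before the graph is compiled.

from theano import tensor

seq = tensor.tensor3('seq')   # (time, batch, features)
mask = tensor.matrix('mask')  # (time, batch)
bricks, hiddens = make_bidir_lstm_stack(seq, seq_dim=50, mask=mask,
                                        sizes=[100, 100], name='enc')
# hiddens == [fwd_1, bwd_1, fwd_2, bwd_2]; the backward states are in
# reversed time order, as the skip connections above account for.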

Example 15: __init__

# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dimension, alphabet_size, **kwargs):
        super(WordReverser, self).__init__(**kwargs)
        encoder = Bidirectional(
            SimpleRecurrent(dim=dimension, activation=Tanh()))
        fork = Fork([name for name in encoder.prototype.apply.sequences
                    if name != 'mask'])
        fork.input_dim = dimension
        fork.output_dims = [encoder.prototype.get_dim(name)
                            for name in fork.input_names]
        lookup = LookupTable(alphabet_size, dimension)
        transition = SimpleRecurrent(
            activation=Tanh(),
            dim=dimension, name="transition")
        attention = SequenceContentAttention(
            state_names=transition.apply.states,
            attended_dim=2 * dimension, match_dim=dimension, name="attention")
        readout = Readout(
            readout_dim=alphabet_size,
            source_names=[transition.apply.states[0],
                          attention.take_glimpses.outputs[0]],
            emitter=SoftmaxEmitter(name="emitter"),
            feedback_brick=LookupFeedback(alphabet_size, dimension),
            name="readout")
        generator = SequenceGenerator(
            readout=readout, transition=transition, attention=attention,
            name="generator")

        self.lookup = lookup
        self.fork = fork
        self.encoder = encoder
        self.generator = generator
        self.children = [lookup, fork, encoder, generator] 
Author: mila-iqia, Project: blocks-examples, Lines: 33, Source: __init__.py


Note: the blocks.bricks.Tanh examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.