This page collects typical usage examples of blocks.bricks.Tanh in Python. If you are wondering how bricks.Tanh is used in practice, the curated code samples below should help. You can also browse further usage examples from the blocks.bricks module, where Tanh is defined.
The following 15 code examples of bricks.Tanh are listed, ordered by popularity by default.
Example 1: __init__
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dim, activation=None, gate_activation=None,
             **kwargs):
    super(GatedRecurrent, self).__init__(**kwargs)
    self.dim = dim
    self.recurrent_weights_init = None
    self.initial_states_init = None
    if not activation:
        activation = Tanh()
    if not gate_activation:
        gate_activation = Logistic()
    self.activation = activation
    self.gate_activation = gate_activation
    self.children = [activation, gate_activation]
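A minimal instantiation sketch (assuming the standard blocks.bricks.recurrent.GatedRecurrent brick): given the defaults in the constructor above, the two constructions below are equivalent.
from blocks.bricks import Tanh, Logistic
from blocks.bricks.recurrent import GatedRecurrent

# Both rely on the constructor defaults shown above:
rnn_a = GatedRecurrent(dim=100)
rnn_b = GatedRecurrent(dim=100, activation=Tanh(),
                       gate_activation=Logistic())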
Example 2: setUp
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def setUp(self):
    self.bidir = Bidirectional(weights_init=Orthogonal(),
                               prototype=SimpleRecurrent(
                                   dim=3, activation=Tanh()))
    self.simple = SimpleRecurrent(dim=3, weights_init=Orthogonal(),
                                  activation=Tanh(), seed=1)
    self.bidir.allocate()
    self.simple.initialize()
    # Tie both directions' weights to the stand-alone RNN so the
    # outputs can be compared against it
    self.bidir.children[0].parameters[0].set_value(
        self.simple.parameters[0].get_value())
    self.bidir.children[1].parameters[0].set_value(
        self.simple.parameters[0].get_value())
    self.x_val = 0.1 * numpy.asarray(
        list(itertools.permutations(range(4))),
        dtype=theano.config.floatX)
    self.x_val = (numpy.ones((24, 4, 3), dtype=theano.config.floatX) *
                  self.x_val[..., None])
    self.mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
    self.mask_val[12:24, 3] = 0
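To actually exercise the bricks prepared above, the surrounding test would compile the bidirectional apply with Theano. A hedged sketch, assuming bidir, x_val and mask_val are set up exactly as in setUp:
import theano
from theano import tensor

x = tensor.tensor3('x')
mask = tensor.matrix('mask')
calc_bidir = theano.function([x, mask], bidir.apply(x, mask=mask))
out, = calc_bidir(x_val, mask_val)
# Forward and backward states are concatenated on the last axis,
# so out.shape should be (24, 4, 6) for dim=3.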
Example 3: test_saved_inner_graph
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_saved_inner_graph():
    """Make sure that the original inner graph is saved."""
    x = tensor.tensor3()
    recurrent = SimpleRecurrent(dim=3, activation=Tanh())
    y = recurrent.apply(x)

    application_call = get_application_call(y)
    assert application_call.inner_inputs
    assert application_call.inner_outputs

    cg = ComputationGraph(application_call.inner_outputs)
    # Check that the inner scan graph is annotated with `recurrent.apply`
    assert len(VariableFilter(applications=[recurrent.apply])(cg)) == 3
    # Check that the inner graph is equivalent to the one produced by a
    # stand-alone application of `recurrent.apply`
    assert is_same_graph(application_call.inner_outputs[0],
                         recurrent.apply(*application_call.inner_inputs,
                                         iterate=False))
Example 4: test_super_in_recurrent_overrider
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_super_in_recurrent_overrider():
    # A regression test for issue #475
    class SimpleRecurrentWithContext(SimpleRecurrent):
        @application(contexts=['context'])
        def apply(self, context, *args, **kwargs):
            kwargs['inputs'] += context
            return super(SimpleRecurrentWithContext, self).apply(*args,
                                                                 **kwargs)

        @apply.delegate
        def apply_delegate(self):
            return super(SimpleRecurrentWithContext, self).apply

    brick = SimpleRecurrentWithContext(100, Tanh())
    inputs = tensor.tensor3('inputs')
    context = tensor.matrix('context').dimshuffle('x', 0, 1)
    brick.apply(context, inputs=inputs)
Example 5: __init__
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dimension, alphabet_size, **kwargs):
    super(SimpleGenerator, self).__init__(**kwargs)
    lookup = LookupTable(alphabet_size, dimension)
    transition = SimpleRecurrent(
        activation=Tanh(),
        dim=dimension, name="transition")
    attention = SequenceContentAttention(
        state_names=transition.apply.states,
        attended_dim=dimension, match_dim=dimension, name="attention")
    readout = Readout(
        readout_dim=alphabet_size,
        source_names=[transition.apply.states[0],
                      attention.take_glimpses.outputs[0]],
        emitter=SoftmaxEmitter(name="emitter"),
        feedback_brick=LookupFeedback(alphabet_size, dimension),
        name="readout")
    generator = SequenceGenerator(
        readout=readout, transition=transition, attention=attention,
        name="generator")
    self.lookup = lookup
    self.generator = generator
    self.children = [lookup, generator]
Example 6: rnn_layer
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def rnn_layer(dim, h, n):
    linear = Linear(input_dim=dim, output_dim=dim, name='linear' + str(n))
    rnn = SimpleRecurrent(dim=dim, activation=Tanh(), name='rnn' + str(n))
    initialize([linear, rnn])  # project-local helper, not part of Blocks
    return rnn.apply(linear.apply(h))
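Assuming the project-local initialize helper assigns weights_init/biases_init and calls initialize() on the given bricks, layers built this way can be stacked by feeding one layer's hidden states into the next (a sketch with hypothetical dimensions):
from theano import tensor

x = tensor.tensor3('x')     # (time, batch, 128)
h1 = rnn_layer(128, x, 1)   # first recurrent layer
h2 = rnn_layer(128, h1, 2)  # second layer stacked on the first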
Example 7: __init__
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dim, num_copies, use_W_xu, activation=None,
             gate_activation=None, **kwargs):
    self.dim = dim
    self.num_copies = num_copies
    self.use_W_xu = use_W_xu

    # shape: C x F/2
    permutations = []
    indices = numpy.arange(self.dim // 2)  # integer division: valid indices
    for i in range(self.num_copies):
        numpy.random.shuffle(indices)
        permutations.append(numpy.concatenate(
            [indices,
             [ind + self.dim // 2 for ind in indices]]))
    # C x F (numpy)
    self.permutations = numpy.vstack(permutations)

    if not activation:
        activation = Tanh()
    if not gate_activation:
        gate_activation = Logistic()
    self.activation = activation
    self.gate_activation = gate_activation

    # Pop 'children' so it is not passed to the superclass twice
    children = ([self.activation, self.gate_activation] +
                kwargs.pop('children', []))
    super(AssociativeLSTM, self).__init__(children=children, **kwargs)
Example 8: test_batch_normalized_mlp_construction
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_construction():
    """Test that BatchNormalizedMLP performs construction correctly."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9])
    assert all(isinstance(a, Sequence) for a in mlp.activations)
    assert all(isinstance(a.children[0], BatchNormalization)
               for a in mlp.activations)
    assert all(isinstance(a.children[1], Tanh)
               for a in mlp.activations)
Example 9: test_batch_normalized_mlp_allocation
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_allocation():
    """Test that BatchNormalizedMLP performs allocation correctly."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9])
    mlp.allocate()
    assert mlp.activations[0].children[0].input_dim == 7
    assert mlp.activations[1].children[0].input_dim == 9
    assert not any(l.use_bias for l in mlp.linear_transformations)
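Together, these two tests pin down the layout BatchNormalizedMLP builds: each entry of mlp.activations is a Sequence of [BatchNormalization, activation] whose input_dim equals that layer's output dimension, and the Linear bricks carry no bias because the batch normalization shift parameter makes a separate bias redundant. A hedged end-to-end sketch, assuming BatchNormalizedMLP is importable from blocks.bricks as in these tests:
from theano import tensor
from blocks.bricks import Tanh, BatchNormalizedMLP
from blocks.initialization import IsotropicGaussian, Constant

mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9],
                         weights_init=IsotropicGaussian(0.02),
                         biases_init=Constant(0))
mlp.initialize()
x = tensor.matrix('x')
y = mlp.apply(x)  # maps (batch, 5) to (batch, 9)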
Example 10: test_batch_normalized_mlp_conserve_memory_propagated
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_conserve_memory_propagated():
    """Test that setting conserve_memory on a BatchNormalizedMLP works."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9],
                             conserve_memory=False)
    assert not mlp.conserve_memory
    assert not any(act.children[0].conserve_memory
                   for act in mlp.activations)
    mlp.conserve_memory = True
    assert mlp.conserve_memory
    assert all(act.children[0].conserve_memory for act in mlp.activations)
Example 11: test_batch_normalized_mlp_mean_only_propagated
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_batch_normalized_mlp_mean_only_propagated():
    """Test that setting mean_only on a BatchNormalizedMLP works."""
    mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9],
                             mean_only=False)
    assert not mlp.mean_only
    assert not any(act.children[0].mean_only for act in mlp.activations)
    mlp.mean_only = True
    assert mlp.mean_only
    assert all(act.children[0].mean_only for act in mlp.activations)
Example 12: test_convolutional_sequence_with_convolutions_raw_activation
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_convolutional_sequence_with_convolutions_raw_activation():
    seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         Rectifier(),
         Convolutional(filter_size=(5, 5), num_filters=3, step=(2, 2)),
         Tanh()],
        num_channels=2,
        image_size=(21, 39))
    seq.allocate()
    x = theano.tensor.tensor4()
    out = seq.apply(x).eval({x: numpy.ones((10, 2, 21, 39),
                                           dtype=theano.config.floatX)})
    assert out.shape == (10, 3, 8, 17)
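The asserted shape follows from the usual valid-convolution arithmetic: the 3x3 layer maps (21, 39) to (19, 37), and the strided 5x5 layer then yields (floor((19 - 5) / 2) + 1, floor((37 - 5) / 2) + 1) = (8, 17) with 3 output channels; the raw Rectifier() and Tanh() activations leave the spatial shape untouched.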
Example 13: test_convolutional_sequence_activation_get_dim
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def test_convolutional_sequence_activation_get_dim():
    seq = ConvolutionalSequence([Tanh()], num_channels=9, image_size=(4, 6))
    seq.allocate()
    assert seq.get_dim('output') == (9, 4, 6)

    seq = ConvolutionalSequence([Convolutional(filter_size=(7, 7),
                                               num_filters=5,
                                               border_mode=(1, 1)),
                                 Tanh()], num_channels=8, image_size=(8, 11))
    seq.allocate()
    assert seq.get_dim('output') == (5, 4, 7)
Example 14: make_bidir_lstm_stack
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def make_bidir_lstm_stack(seq, seq_dim, mask, sizes, skip=True, name=''):
    bricks = []
    curr_dim = [seq_dim]
    curr_hidden = [seq]
    hidden_list = []
    for k, dim in enumerate(sizes):
        # Each LSTM expects inputs of size 4 * dim (one slice per gate)
        fwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_fwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        fwd_lstm = LSTM(dim=dim, activation=Tanh(),
                        name='%s_fwd_lstm_%d' % (name, k))

        bwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_bwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        bwd_lstm = LSTM(dim=dim, activation=Tanh(),
                        name='%s_bwd_lstm_%d' % (name, k))

        bricks = bricks + [fwd_lstm, bwd_lstm] + fwd_lstm_ins + bwd_lstm_ins

        fwd_tmp = sum(x.apply(v) for x, v in zip(fwd_lstm_ins, curr_hidden))
        bwd_tmp = sum(x.apply(v) for x, v in zip(bwd_lstm_ins, curr_hidden))
        # The backward LSTM consumes the time-reversed sequence and mask
        fwd_hidden, _ = fwd_lstm.apply(fwd_tmp, mask=mask)
        bwd_hidden, _ = bwd_lstm.apply(bwd_tmp[::-1], mask=mask[::-1])
        hidden_list = hidden_list + [fwd_hidden, bwd_hidden]
        if skip:
            # Skip connections: the next layer also sees the raw input
            curr_hidden = [seq, fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [seq_dim, dim, dim]
        else:
            curr_hidden = [fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [dim, dim]
    return bricks, hidden_list
Developer: thomasmesnard, Project: DeepMind-Teaching-Machines-to-Read-and-Comprehend, Lines of code: 31, Source file: attentive_reader.py
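A hedged call sketch (the tensor variables and sizes are hypothetical, and the returned bricks still need weights_init/biases_init plus initialize() before the graph can be compiled):
from theano import tensor

seq = tensor.tensor3('seq')    # (time, batch, seq_dim)
mask = tensor.matrix('mask')   # (time, batch)
# Two stacked bidirectional LSTM layers of 256 units each:
bricks, hiddens = make_bidir_lstm_stack(seq, 300, mask,
                                        sizes=[256, 256], name='doc')
# hiddens holds the forward and backward states of every layer,
# i.e. four variables for the two layers requested here.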
Example 15: __init__
# Required import: from blocks import bricks [as alias]
# Or: from blocks.bricks import Tanh [as alias]
def __init__(self, dimension, alphabet_size, **kwargs):
    super(WordReverser, self).__init__(**kwargs)
    encoder = Bidirectional(
        SimpleRecurrent(dim=dimension, activation=Tanh()))
    fork = Fork([name for name in encoder.prototype.apply.sequences
                 if name != 'mask'])
    fork.input_dim = dimension
    fork.output_dims = [encoder.prototype.get_dim(name)
                        for name in fork.input_names]
    lookup = LookupTable(alphabet_size, dimension)
    transition = SimpleRecurrent(
        activation=Tanh(),
        dim=dimension, name="transition")
    # The encoder is bidirectional, so the attended representation
    # has twice the recurrent dimension
    attention = SequenceContentAttention(
        state_names=transition.apply.states,
        attended_dim=2 * dimension, match_dim=dimension, name="attention")
    readout = Readout(
        readout_dim=alphabet_size,
        source_names=[transition.apply.states[0],
                      attention.take_glimpses.outputs[0]],
        emitter=SoftmaxEmitter(name="emitter"),
        feedback_brick=LookupFeedback(alphabet_size, dimension),
        name="readout")
    generator = SequenceGenerator(
        readout=readout, transition=transition, attention=attention,
        name="generator")
    self.lookup = lookup
    self.fork = fork
    self.encoder = encoder
    self.generator = generator
    self.children = [lookup, fork, encoder, generator]
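A hedged instantiation sketch in the spirit of Blocks' reverse_words example (the dimension, alphabet size, and initialization schemes are assumptions):
from blocks.initialization import IsotropicGaussian, Constant

reverser = WordReverser(dimension=100, alphabet_size=30, name="reverser")
reverser.weights_init = IsotropicGaussian(0.1)  # assumed scheme
reverser.biases_init = Constant(0.0)
reverser.initialize()  # propagates the configuration to all children bricks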