This article collects and summarizes typical usage examples of the Python method chainer.functions.dropout. If you have been wondering what functions.dropout does and how to use it, the curated code examples below may help; you can also explore the other methods of the chainer.functions module.
The following shows 15 code examples of functions.dropout, sorted by popularity by default.
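Before the examples, here is a minimal, hedged sketch of the basic behavior of chainer.functions.dropout under the Chainer v2+ API (the input shape and ratio below are arbitrary choices for illustration):

import numpy as np
import chainer
import chainer.functions as F

x = np.ones((4, 10), dtype=np.float32)
with chainer.using_config('train', True):
    y_train = F.dropout(x, ratio=0.5)  # zeroes elements at random; survivors scaled by 1/(1 - ratio)
with chainer.using_config('train', False):
    y_test = F.dropout(x, ratio=0.5)   # identity: dropout is a no-op outside training mode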
Example 1: residual
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def residual(self, x):
    h = x
    h = self.c1(h)
    if self.bn:
        h = self.b1(h)
    if self.activation:
        h = self.activation(h)
    if self.mode:
        h = self.mode(h)
    if self.dr:
        with chainer.using_config('train', True):
            h = F.dropout(h, self.dr)
    h = self.c2(h)
    if self.bn:
        h = self.b2(h)
    if self.activation:
        h = self.activation(h)
    return h
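Note that this residual block wraps F.dropout in chainer.using_config('train', True), which keeps dropout active even at inference time. A standalone sketch of that pattern (the array shape is an assumption):

import numpy as np
import chainer
import chainer.functions as F

x = np.ones((2, 3), dtype=np.float32)
with chainer.using_config('train', False):       # e.g. inside an evaluation loop
    with chainer.using_config('train', True):    # inner scope forces dropout back on
        y = F.dropout(x, ratio=0.5)              # still drops elements at test time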
Example 2: forward_one_step
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def forward_one_step(self, x_data, y_data, state, train=True, dropout_ratio=0.5):
    x = Variable(x_data, volatile=not train)
    t = Variable(y_data, volatile=not train)
    h0 = self.embed(x)
    h1_in = self.l1_x(F.dropout(h0, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
    c1, h1 = F.lstm(state['c1'], h1_in)
    h2_in = self.l2_x(F.dropout(h1, ratio=dropout_ratio, train=train)) + self.l2_h(state['h2'])
    c2, h2 = F.lstm(state['c2'], h2_in)
    y = self.l3(F.dropout(h2, ratio=dropout_ratio, train=train))
    state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
    if train:
        return state, F.softmax_cross_entropy(y, t)
    else:
        return state, F.softmax(y)
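This example is written against the pre-v2 Chainer API: Variable(..., volatile=...) and the train= keyword of F.dropout were both removed in Chainer v2. A hedged sketch of the modern equivalent for the first dropout call (not the original author's code):

# Chainer v2+: training and backprop modes come from the global configuration
with chainer.using_config('train', train), \
     chainer.using_config('enable_backprop', train):
    h1_in = self.l1_x(F.dropout(h0, ratio=dropout_ratio)) + self.l1_h(state['h1'])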
Example 3: block_embed
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def block_embed(embed, x, dropout=0.):
    """Embedding function followed by convolution

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
            :class:`cupy.ndarray`): Input variable, which
            is a :math:`(B, L)`-shaped int array. Its first dimension
            :math:`(B)` is assumed to be the *minibatch dimension*.
            The second dimension :math:`(L)` is the length of padded
            sentences.
        dropout (float): Dropout ratio.

    Returns:
        ~chainer.Variable: Output variable. A float array with shape
        of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
        of word embedding.

    """
    e = embed(x)
    e = F.dropout(e, ratio=dropout)
    e = F.transpose(e, (0, 2, 1))
    e = e[:, :, :, None]
    return e
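A hypothetical usage sketch for block_embed (the vocabulary size, embedding width, and batch shape below are assumptions, and Example 3's definitions are presumed to be in scope):

import numpy as np
import chainer.links as L

embed = L.EmbedID(1000, 50)  # 1000-word vocabulary, 50-dimensional embeddings
xs = np.random.randint(0, 1000, size=(8, 20)).astype(np.int32)  # (B, L) = (8, 20)
e = block_embed(embed, xs, dropout=0.1)  # -> float array of shape (8, 50, 20, 1)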
Example 4: __init__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __init__(self, n_layers, n_vocab, embed_size, hidden_size, dropout=0.1):
    # Integer division, so channel counts stay ints under Python 3;
    # each of the three kernel widths gets a third of the hidden size.
    hidden_size //= 3
    super(CNNEncoder, self).__init__(
        embed=L.EmbedID(n_vocab, embed_size, ignore_label=-1,
                        initialW=embed_init),
        cnn_w3=L.Convolution2D(
            embed_size, hidden_size, ksize=(3, 1), stride=1, pad=(2, 0),
            nobias=True),
        cnn_w4=L.Convolution2D(
            embed_size, hidden_size, ksize=(4, 1), stride=1, pad=(3, 0),
            nobias=True),
        cnn_w5=L.Convolution2D(
            embed_size, hidden_size, ksize=(5, 1), stride=1, pad=(4, 0),
            nobias=True),
        mlp=MLP(n_layers, hidden_size * 3, dropout)
    )
    self.output_size = hidden_size * 3
    self.dropout = dropout
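A quick shape check on the three convolutions: with ksize=(k, 1), stride 1, and pad=(k - 1, 0), a length-L sentence yields output length (L + 2(k - 1) - k) + 1 = L + k - 1, so every n-gram window that touches a sentence boundary is kept.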
Example 5: __call__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __call__(self, x, t):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv5_1(h))
    h = F.relu(self.conv5_2(h))
    h = F.relu(self.conv5_3(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.dropout(F.relu(self.fc6(h)), ratio=0.5, train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), ratio=0.5, train=self.train)
    h = self.score_fr(h)
    h = self.upsample(h)
    return h
Example 6: __call__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 2, 1)
    h = F.relu(self.conv2(h))
    h = F.relu(self.conv3(h))
    h = F.dropout(F.relu(self.fc4(h)), train=self.train)
    h = self.fc5(h)
    h = F.reshape(h, (x.data.shape[0], 3, 16, 16))
    h = self.channelwise_inhibited(h)
    if self.train:
        self.loss = F.softmax_cross_entropy(h, t, normalize=False)
        return self.loss
    else:
        self.pred = F.softmax(h)
        return self.pred
Example 7: __call__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __call__(self, prev_states, x_in):
    input_below = x_in
    states_cursor = 0
    res = []
    for i in six.moves.range(len(self)):
        if self.dropout is not None and not (self.no_dropout_on_input and i == 0):
            input_below = F.dropout(input_below, ratio=self.dropout)
        new_states = self[i](prev_states[states_cursor:states_cursor + self.nb_of_states[i]], input_below)
        states_cursor += self.nb_of_states[i]
        if (self.residual_connection and
                not (i == len(self) - 1 and self.no_residual_connection_on_output) and
                not (i == 0 and self.no_residual_connection_on_input)):
            input_below = new_states[-1] + input_below
        else:
            input_below = new_states[-1]
        res += list(new_states)
    return res
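In this stacked-layer __call__, dropout is applied to the input of every layer in the stack, except optionally the bottom layer (no_dropout_on_input); applying dropout between layers rather than only to the raw input is the usual placement for multi-layer recurrent networks.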
Example 8: __init__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __init__(self, d_model=512, n_heads=8, experimental_relu=False, dropout=None):
    if d_model % n_heads != 0:
        raise ValueError("d_model(%i) should be divisible by n_heads(%i)" % (d_model, n_heads))
    super(ConstantSizeMultiBatchMultiHeadAttention, self).__init__(
        w_Q=L.Linear(d_model, d_model, nobias=False),
        w_K=L.Linear(d_model, d_model, nobias=True),
        w_V=L.Linear(d_model, d_model, nobias=False),
    )
    if n_heads >= 2:
        self.add_link("w_O", L.Linear(d_model, d_model))  # if n_heads == 1, it is redundant with w_V
    self.d_model = d_model
    self.n_heads = n_heads
    self.head_size = d_model // n_heads
    scaling_factor = 1.0 / self.xp.sqrt(self.xp.array([[[[self.head_size]]]], dtype=self.xp.float32))
    self.add_persistent("scaling_factor", scaling_factor)  # persistent so that it works with to_gpu/to_cpu
    self.experimental_relu = experimental_relu
    self.dropout = dropout
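The persistent scaling_factor here is the 1/sqrt(d_k) term of scaled dot-product attention, Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V, with d_k = d_model / n_heads the per-head size; registering it via add_persistent (rather than recomputing it inline) keeps it on the right device across to_gpu/to_cpu, as the original comment notes.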
Example 9: __call__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __call__(self, sub_output, inpt):
    if self.dropout is not None:
        sub_output = F.dropout(sub_output, ratio=self.dropout)
    if self.residual_mode == "normal":
        added_output = sub_output + inpt
    else:
        added_output = sub_output
    if self.no_normalize:
        final_layer = added_output
    else:
        final_layer = self.apply_layer_normalization(added_output)
    if self.residual_mode == "after":
        final_layer = final_layer + inpt
    return final_layer
########################################################################
# Feed Forward layer with pass-through and normalization
#
Example 10: __init__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __init__(self, d_model, n_heads, d_ff=2048, experimental_relu=False, dropout=None,
             residual_mode="normal", no_normalize=False):
    super(DecoderLayer, self).__init__(
        ff_layer=FeedForward(d_model, d_ff=d_ff, dropout=dropout,
                             residual_mode=residual_mode, no_normalize=no_normalize),
        self_attention_layer=AddAndNormalizedSelfAttentionLayer(
            d_model=d_model, n_heads=n_heads,
            experimental_relu=experimental_relu,
            dropout=dropout, residual_mode=residual_mode,
            no_normalize=no_normalize),
        cross_attention_layer=AddAndNormalizedCrossAttentionLayer(
            d_model=d_model, n_heads=n_heads,
            experimental_relu=experimental_relu,
            dropout=dropout,
            # force a residual here: preventing the cross attention from
            # being bypassed does not seem like a good idea
            residual_mode=residual_mode if residual_mode != "none" else "normal",
            no_normalize=no_normalize)
    )
    self.n_heads = n_heads
    self.d_model = d_model
Example 11: __call__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __call__(self, x):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
    h = F.tanh(self.fc4(h))
    h = F.dropout(h, ratio=.5, train=self.train)
    h = F.tanh(self.fc5(h))
    h = F.dropout(h, ratio=.5, train=self.train)
    h = self.fc6(h)
    return h
Example 12: __call__
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def __call__(self, x):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv5_1(h))
    h = F.relu(self.conv5_2(h))
    h = F.relu(self.conv5_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train, ratio=0.5)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train, ratio=0.5)
    h = self.fc8(h)
    return h
Example 13: forward
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def forward(self, x, t):
    # def forward(self, x):
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    h = self.fc8(h)
    loss = F.softmax_cross_entropy(h, t)
    # loss = h
    # chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss

# from https://github.com/chainer/chainer/blob/master/examples/imagenet/alex.py
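In examples 13 through 15, F.dropout is called without explicit arguments, so it falls back to its default ratio of 0.5 and, under the Chainer v2+ API, is active only while the global 'train' configuration flag is set.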
Example 14: forward
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def forward(self, x, t):
    # def forward(self, x):
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    h = self.fc8(h)
    loss = F.softmax_cross_entropy(h, t)
    # loss = h
    # chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss
Example 15: forward
# Required import: from chainer import functions [as an alias]
# Or: from chainer.functions import dropout [as an alias]
def forward(self, x, t):
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.local_response_normalization(
        F.relu(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    h = self.fc8(h)
    # EDIT(hamaji): ONNX-chainer cannot output SoftmaxCrossEntropy.
    # loss = F.softmax_cross_entropy(h, t)
    loss = self.softmax_cross_entropy(h, t)
    if self.compute_accuracy:
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    else:
        chainer.report({'loss': loss}, self)
    return loss