This article collects typical usage examples of chainer.links in Python. If you are unsure what chainer.links is for, how to use it, or what real-world chainer.links code looks like, the curated examples below may help. You can also explore further usage examples from the chainer module it belongs to.
A total of 15 code examples of chainer.links are shown below, sorted by popularity by default.
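As a quick orientation before the examples (this sketch is not taken from any of the projects below), chainer.links provides parameterized layers that are registered on a chainer.Chain inside init_scope() and combined with chainer.functions in the forward pass:

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    def __init__(self, n_hidden=10, n_out=2):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_hidden)   # input size is inferred on the first call
            self.l2 = L.Linear(n_hidden, n_out)

    def forward(self, x):
        return self.l2(F.relu(self.l1(x)))

y = MLP()(np.zeros((1, 4), dtype=np.float32))    # Variable with shape (1, 2)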
Example 1: make_q_func
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def make_q_func(self, env):
obs_size = env.observation_space.low.size
hidden_size = 64
return iqn.StatelessRecurrentImplicitQuantileQFunction(
psi=chainerrl.links.StatelessRecurrentSequential(
L.Linear(obs_size, hidden_size),
F.relu,
L.NStepRNNTanh(1, hidden_size, hidden_size, 0),
),
phi=chainerrl.links.Sequence(
chainerrl.agents.iqn.CosineBasisLinear(32, hidden_size),
F.relu,
),
f=L.Linear(hidden_size, env.action_space.n,
initialW=chainer.initializers.LeCunNormal(1e-1)),
)
Example 2: block_embed
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def block_embed(embed, x, dropout=0.):
"""Embedding function followed by convolution
Args:
embed (callable): A :func:`~chainer.functions.embed_id` function
or :class:`~chainer.links.EmbedID` link.
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable, which
is a :math:`(B, L)`-shaped int array. Its first dimension
:math:`(B)` is assumed to be the *minibatch dimension*.
The second dimension :math:`(L)` is the length of padded
sentences.
dropout (float): Dropout ratio.
Returns:
~chainer.Variable: Output variable. A float array with shape
of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
of word embedding.
"""
e = embed(x)
e = F.dropout(e, ratio=dropout)
e = F.transpose(e, (0, 2, 1))
e = e[:, :, :, None]
return e
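A hypothetical call of block_embed, assuming an EmbedID link with a 100-word vocabulary and 50-dimensional embeddings (F above refers to chainer.functions):

import numpy as np
import chainer.links as L

embed = L.EmbedID(100, 50)                # n_vocab=100, n_units=50
x = np.zeros((8, 20), dtype=np.int32)     # batch of 8 padded sentences of length 20
e = block_embed(embed, x, dropout=0.1)
print(e.shape)                            # (8, 50, 20, 1), i.e. (B, N, L, 1)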
Example 3: convert_onnx_chainer_convolution2d
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def convert_onnx_chainer_convolution2d(onnx_graph: 'ONNXGraph', node: 'nodes.NodeCall'):
chainer_inst = node.func.owner.inst # type: chainer.links.Convolution2D
ksize = oc.size2d(chainer_inst.ksize)
stride = oc.size2d(chainer_inst.stride)
ps = oc.size2d(chainer_inst.pad)
pads = ps + ps
x = oc.ONNXValue(onnx_graph, node.args.get_value('x'))
o = oc.ONNXValue(onnx_graph, node.outputs[0])
w = oc.ONNXValue(onnx_graph, chainer_inst.W)
b = None
if chainer_inst.b is not None:
b = oc.ONNXValue(onnx_graph, chainer_inst.b)
onnx_graph.add_node(
'Conv',
[x, w] + ([] if b is None else [b]),
[o],
str(node.lineprop),
kernel_shape=ksize,
pads=pads,
strides=stride)
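For reference, a standalone sketch of the kind of Conv node this converter emits, built with the plain onnx helper API rather than the converter's ONNXValue machinery, for a hypothetical 3x3 convolution with pad=1 and stride=1:

from onnx import helper

conv_node = helper.make_node(
    'Conv',
    inputs=['x', 'W', 'b'],
    outputs=['y'],
    kernel_shape=[3, 3],
    pads=[1, 1, 1, 1],   # ps + ps: the (pad_h, pad_w) pair repeated for begin and end
    strides=[1, 1])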
Example 4: convert_onnx_chainer_convolutionnd
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def convert_onnx_chainer_convolutionnd(onnx_graph: 'ONNXGraph', node: 'nodes.NodeCall'):
chainer_inst = node.func.owner.inst # type: chainer.links.ConvolutionND
nd = chainer_inst.W.ndim - 2
ksize = oc.size_nd(chainer_inst.ksize, nd)
stride = oc.size_nd(chainer_inst.stride, nd)
ps = oc.size_nd(chainer_inst.pad, nd)
pads = ps + ps
x = oc.ONNXValue(onnx_graph, node.args.get_value('x'))
o = oc.ONNXValue(onnx_graph, node.outputs[0])
w = oc.ONNXValue(onnx_graph, chainer_inst.W)
b = None
if chainer_inst.b is not None:
b = oc.ONNXValue(onnx_graph, chainer_inst.b)
onnx_graph.add_node(
'Conv',
[x, w] + ([] if b is None else [b]),
[o],
str(node.lineprop),
kernel_shape=ksize,
pads=pads,
strides=stride)
Example 5: convert_onnx_chainer_batch_normalization
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def convert_onnx_chainer_batch_normalization(onnx_graph: 'ONNXGraph', node: 'nodes.NodeCall'):
chainer_inst = node.func.owner.inst # type: chainer.links.BatchNormalization
    assert(chainer_inst.axis is None)  # not supported yet
x = oc.ONNXValue(onnx_graph, node.args.get_value('x'))
o = oc.ONNXValue(onnx_graph, node.outputs[0])
gamma = oc.ONNXValue(onnx_graph, chainer_inst.gamma)
beta = oc.ONNXValue(onnx_graph, chainer_inst.beta)
avg_mean = oc.ONNXValue(onnx_graph, chainer_inst.avg_mean, [node, 'mean'])
avg_var = oc.ONNXValue(onnx_graph, chainer_inst.avg_var, [node, 'var'])
eps = chainer_inst.eps
momentum = chainer_inst.decay
onnx_graph.add_node(
'BatchNormalization',
[x, gamma, beta, avg_mean, avg_var],
[o],
str(node.lineprop),
epsilon=eps,
momentum=momentum)
Example 6: convert_onnx_chainer_EmbedID
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def convert_onnx_chainer_EmbedID(onnx_graph: 'ONNXGraph', node: 'nodes.NodeCall'):
chainer_inst = node.func.owner.inst # type: chainer.links.EmbedID
n_vocab = chainer_inst.W.shape[0]
n_out = chainer_inst.W.shape[1]
w = oc.ONNXValue(onnx_graph, chainer_inst.W)
parser = oc.NodeParse()
parser.add_def('x', oc.ParseType.In)
parser.parse(onnx_graph, node)
x = parser.get('x').create_tensor(node.lineprop)
onnx_graph.add_node(
'Gather',
[w, x],
[node.outputs[0]],
str(node.lineprop))
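The mapping to Gather works because an embedding lookup is just integer indexing into W along axis 0; a small numpy check with made-up shapes:

import numpy as np

W = np.random.randn(100, 50).astype(np.float32)   # (n_vocab, n_out), as in EmbedID
x = np.array([[1, 5, 7]], dtype=np.int64)          # token ids
assert np.allclose(W[x], np.take(W, x, axis=0))    # Gather == indexing along axis 0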
Example 7: __init__
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def __init__(self, comm, in_channels, out_channels, ksize, pad=1):
super(Block, self).__init__()
with self.init_scope():
if comm.size <= in_channels:
self.conv = ParallelConvolution2D(comm,
in_channels,
out_channels,
ksize,
pad=pad,
nobias=True)
else:
self.conv = chainer.links.Convolution2D(in_channels,
out_channels,
ksize,
pad=pad,
nobias=True)
self.bn = L.BatchNormalization(out_channels)
Example 8: test_convolution
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def test_convolution(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
f = self.func.l1
self.assertIsInstance(f, links.Convolution2D)
for i in range(3): # 3 == group
in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
w = f.W.data[out_slice, in_slice]
numpy.testing.assert_array_equal(
w.flatten(), range(i * 32, (i + 1) * 32))
numpy.testing.assert_array_equal(
f.b.data, range(6))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
Example 9: test_deconvolution
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def test_deconvolution(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
f = self.func.l1
self.assertIsInstance(f, links.Deconvolution2D)
for i in range(3): # 3 == group
in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
w = f.W.data[out_slice, in_slice]
numpy.testing.assert_array_equal(
w.flatten(), range(i * 32, (i + 1) * 32))
numpy.testing.assert_array_equal(
f.b.data, range(12))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
Example 10: __init__
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def __init__(self, n_layers, n_units, width=3, dropout=0.2):
super(ConvGLUDecoder, self).__init__()
links = [('l{}'.format(i + 1),
ConvGLU(n_units, width=width,
dropout=dropout, nopad=True))
for i in range(n_layers)]
for link in links:
self.add_link(*link)
self.conv_names = [name for name, _ in links]
self.width = width
init_preatt = VarInNormal(1.)
links = [('preatt{}'.format(i + 1),
L.Linear(n_units, n_units, initialW=init_preatt))
for i in range(n_layers)]
for link in links:
self.add_link(*link)
self.preatt_names = [name for name, _ in links]
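The add_link-by-name pattern used above lets the decoder fetch each layer later with getattr(self, name). A minimal standalone sketch of the same registration pattern (TinyStack and its sizes are invented for illustration):

import numpy as np
import chainer
import chainer.links as L

class TinyStack(chainer.Chain):
    def __init__(self, n_layers=3, n_units=8):
        super(TinyStack, self).__init__()
        names = []
        for i in range(n_layers):
            name = 'l{}'.format(i + 1)
            self.add_link(name, L.Linear(n_units, n_units))
            names.append(name)
        self.names = names

    def forward(self, x):
        for name in self.names:            # look the layers up by their registered names
            x = getattr(self, name)(x)
        return x

y = TinyStack()(np.zeros((1, 8), dtype=np.float32))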
Example 11: __init__
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def __init__(self):
super(Darknet53Extractor, self).__init__()
# Darknet53
self.append(Conv2DBNActiv(32, 3, pad=1, activ=_leaky_relu))
for k, n_block in enumerate((1, 2, 8, 8, 4)):
self.append(Conv2DBNActiv(
32 << (k + 1), 3, stride=2, pad=1, activ=_leaky_relu))
for _ in range(n_block):
self.append(ResidualBlock(
Conv2DBNActiv(32 << k, 1, activ=_leaky_relu),
Conv2DBNActiv(32 << (k + 1), 3, pad=1, activ=_leaky_relu)))
# additional links
for i, n in enumerate((512, 256, 128)):
if i > 0:
self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))
self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))
self.append(Conv2DBNActiv(n * 2, 3, pad=1, activ=_leaky_relu))
self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))
self.append(Conv2DBNActiv(n * 2, 3, pad=1, activ=_leaky_relu))
self.append(Conv2DBNActiv(n, 1, activ=_leaky_relu))
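The bit shift 32 << (k + 1) yields the usual Darknet-53 channel progression over the five stages:

print([32 << (k + 1) for k in range(5)])   # [64, 128, 256, 512, 1024]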
Example 12: __init__
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def __init__(self):
super(Darknet19Extractor, self).__init__()
# Darknet19
for k, n_conv in enumerate((1, 1, 3, 3, 5, 5)):
for i in range(n_conv):
if i % 2 == 0:
self.append(
Conv2DBNActiv(32 << k, 3, pad=1, activ=_leaky_relu))
else:
self.append(
Conv2DBNActiv(32 << (k - 1), 1, activ=_leaky_relu))
# additional links
self.append(Conv2DBNActiv(1024, 3, pad=1, activ=_leaky_relu))
self.append(Conv2DBNActiv(1024, 3, pad=1, activ=_leaky_relu))
self.append(Conv2DBNActiv(64, 1, activ=_leaky_relu))
self.append(Conv2DBNActiv(1024, 3, pad=1, activ=_leaky_relu))
Example 13: block_embed
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def block_embed(embed, x, dropout=0.0):
"""Embedding function followed by convolution
Args:
embed (callable): A :func:`~chainer.functions.embed_id` function
or :class:`~chainer.links.EmbedID` link.
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable, which
is a :math:`(B, L)`-shaped int array. Its first dimension
:math:`(B)` is assumed to be the *minibatch dimension*.
The second dimension :math:`(L)` is the length of padded
sentences.
dropout (float): Dropout ratio.
Returns:
~chainer.Variable: Output variable. A float array with shape
of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
of word embedding.
"""
e = embed(x)
e = F.dropout(e, ratio=dropout)
e = F.transpose(e, (0, 2, 1))
e = e[:, :, :, None]
return e
Example 14: __init__
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def __init__(self, out_dims=64, normalize_output=False, n_proxy=None):
super(ModifiedGoogLeNet, self).__init__()
# remove links and functions
for name in [n for n in self._children if n.startswith('loss')]:
self._children.remove(name)
delattr(self, name)
self.functions.pop('loss3_fc')
self.functions.pop('prob')
self.add_link('bn_fc', L.BatchNormalization(1024))
self.add_link('fc', L.Linear(1024, out_dims))
# For Proxy-NCA
if isinstance(n_proxy, int) and n_proxy > 0:
proxy = np.random.randn(n_proxy, out_dims).astype(np.float32)
self.add_param('P', initializer=proxy)
image_mean = np.array([123, 117, 104], dtype=np.float32) # RGB
self._image_mean = image_mean[None, :, None, None]
self.normalize_output = normalize_output
Example 15: __init__
# Required module: import chainer [as alias]
# Or: from chainer import links [as alias]
def __init__(self, ksize, n_out, initializer):
super(ConvBlock, self).__init__()
pad_size = ksize // 2
links = [('conv1', L.Convolution2D(None, n_out, ksize, pad=pad_size, initialW=initializer))]
links += [('bn1', L.BatchNormalization(n_out))]
for link in links:
self.add_link(*link)
self.forward = links
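Setting pad = ksize // 2 keeps the spatial size unchanged for odd kernel sizes; a quick standalone check with chainer.links.Convolution2D (the shapes are illustrative, not taken from the original project):

import numpy as np
import chainer.links as L

conv = L.Convolution2D(None, 16, ksize=3, pad=3 // 2)      # in_channels inferred lazily
y = conv(np.zeros((2, 8, 32, 32), dtype=np.float32))
print(y.shape)                                             # (2, 16, 32, 32): H and W preserved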