This article collects typical usage examples of the Python method chainer.backends.cuda.to_cpu. If you are wondering what cuda.to_cpu does, how to call it, or what real-world uses look like, the curated snippets below should help. You can also explore further usage examples from the containing module, chainer.backends.cuda.
The 15 code examples of cuda.to_cpu shown below are sorted by popularity by default.
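Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what cuda.to_cpu does: it copies a CuPy array from GPU memory into a numpy.ndarray, and passes arrays that are already on the CPU through unchanged.

import numpy as np
from chainer.backends import cuda

x_cpu = np.arange(6, dtype=np.float32).reshape(2, 3)
y = cuda.to_cpu(x_cpu)          # already on the CPU; returned unchanged
assert isinstance(y, np.ndarray)

if cuda.available:              # only exercise the GPU path when CuPy/CUDA is present
    x_gpu = cuda.to_gpu(x_cpu)  # cupy.ndarray on the default device
    z = cuda.to_cpu(x_gpu)      # copied back into a numpy.ndarray
    assert isinstance(z, np.ndarray)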
Example 1: forward
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def forward(self, xs, ilens):
    '''BLSTM forward (the modified version)
    :param xs:
    :param ilens:
    :return:
    '''
    logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
    # need to move ilens to cpu
    ilens = cuda.to_cpu(ilens)
    hy, cy, ys = self.nblstm(None, None, xs)
    ys = self.l_last(F.vstack(ys))  # (sum_utt frame_utt) x dim
    xs = F.split_axis(ys, np.cumsum(ilens[:-1]), axis=0)
    del hy, cy
    # final tanh operation
    xs = F.split_axis(F.tanh(F.vstack(xs)), np.cumsum(ilens[:-1]), axis=0)
    # EDIT(hamaji): Unnecessary, as `force_tuple` is True by default.
    # # 1 utterance case, it becomes an array, so need to make a utt tuple
    # if not isinstance(xs, tuple):
    #     xs = [xs]
    return xs, ilens  # x: utt list of frame x dim
Example 2: original
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def original(self, xs, ilens):
    '''BLSTM forward (the original implementation)
    :param xs:
    :param ilens:
    :return:
    '''
    logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
    # need to move ilens to cpu
    ilens = cuda.to_cpu(ilens)
    hy, cy, ys = self.nblstm(None, None, xs)
    ys = self.l_last(F.vstack(ys))  # (sum_utt frame_utt) x dim
    xs = F.split_axis(ys, np.cumsum(ilens[:-1]), axis=0)
    del hy, cy
    # final tanh operation
    xs = F.split_axis(F.tanh(F.vstack(xs)), np.cumsum(ilens[:-1]), axis=0)
    # 1 utterance case, it becomes an array, so need to make a utt tuple
    if not isinstance(xs, tuple):
        xs = [xs]
    return xs, ilens  # x: utt list of frame x dim
Example 3: _check_deepcopy
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def _check_deepcopy(self, link):
    self.assertIsInstance(link._params, set)
    self.assertIsInstance(link._persistent, set)
    self.assertTrue(hasattr(link, 'x'))
    self.assertTrue(hasattr(link, 'y'))
    self.assertTrue(hasattr(link, 'u'))
    self.assertTrue(hasattr(link, 'p'))
    self.assertIsNot(link.x, self.link.x)
    self.assertIsNot(link.x.data, self.link.x.data)
    numpy.testing.assert_array_equal(cuda.to_cpu(link.x.data),
                                     cuda.to_cpu(self.link.x.data))
    self.assertIsNot(link.y, self.link.y)
    self.assertIsNot(link.y.data, self.link.y.data)
    numpy.testing.assert_array_equal(cuda.to_cpu(link.y.data),
                                     cuda.to_cpu(self.link.y.data))
    self.assertIsNone(link.u.data)
    self.assertIsNot(link.p, self.link.p)
    self.assertEqual(link.name, self.link.name)
Example 4: test_to_cpu_on_cpu
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def test_to_cpu_on_cpu(self):
    x = self.link.x.data
    gx = self.link.x.grad
    y = self.link.y.data
    gy = self.link.y.grad
    p = self.link.p
    with testing.assert_warns(DeprecationWarning):
        self.link.to_cpu()
    self.assertIs(self.link.x.data, x)
    self.assertIs(self.link.x.grad, gx)
    self.assertIs(self.link.y.data, y)
    self.assertIs(self.link.y.grad, gy)
    self.assertIsNone(self.link.u.data)
    u = self.link.u
    with pytest.raises(RuntimeError):
        u.grad
    self.assertIs(self.link.p, p)
Example 5: test_to_cpu
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def test_to_cpu(self):
    self.set_count_parameters()
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_gpu()
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_cpu()
    self.assertIs(self.c2.xp, numpy)
    self.assertIs(self.c1.xp, numpy)
    self.assertIs(self.l1.xp, numpy)
    self.assertIs(self.l2.xp, numpy)
    self.assertIs(self.l3.xp, numpy)
    self.assertIsInstance(self.l1.x.data, numpy.ndarray)
    self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
    self.assertIsInstance(self.l2.x.data, numpy.ndarray)
    self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
    self.assertIsNone(self.l3.x.data)
    self.assertIsNone(self.l3.x.grad)
    self.l3.x.initialize(3)
    self.assertIsInstance(self.l3.x.data, numpy.ndarray)
    self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
Example 6: test_intel64_to_cpu
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def test_intel64_to_cpu(self):
    link = self.link
    with testing.assert_warns(DeprecationWarning):
        link.to_intel64()
    assert isinstance(link.device, backend.Intel64Device)
    with testing.assert_warns(DeprecationWarning):
        link.to_cpu()
    assert isinstance(link.device, backend.CpuDevice)
    # Arrays should be converted to numpy.ndarray
    # Initialized parameter
    assert isinstance(link.y.data, numpy.ndarray)
    _assert_variable_array_equal(link.y, self.y_array)
    # Uninitialized parameter
    assert link.v.data is None
    # Persistent ndarray
    assert isinstance(link.pa, numpy.ndarray)
    _assert_arrays_equal(link.pa, self.pa_array)
    # Persistent scalar
    assert link.ps == self.ps_scalar
Example 7: check_tuple_dataset
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def check_tuple_dataset(self, x0, x1):
    td = datasets.TupleDataset(x0, x1)
    self.assertEqual(len(td), len(x0))
    for i in range(len(x0)):
        example = td[i]
        self.assertEqual(len(example), 2)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[0]), cuda.to_cpu(x0[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[1]), cuda.to_cpu(x1[i]))
    example_range = td[0: len(x0)]
    for i in range(len(x0)):
        example = example_range[i]
        self.assertEqual(len(example), 2)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[0]), cuda.to_cpu(x0[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[1]), cuda.to_cpu(x1[i]))
Example 8: check_dict_dataset
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def check_dict_dataset(self, x, y):
    dd = datasets.DictDataset(x=x, y=y)
    self.assertEqual(len(dd), len(x))
    for i in range(len(x)):
        example = dd[i]
        self.assertIn('x', example)
        self.assertIn('y', example)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['x']), cuda.to_cpu(x[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['y']), cuda.to_cpu(y[i]))
    example_range = dd[0: len(x)]
    for i in range(len(x)):
        example = example_range[i]
        self.assertIn('x', example)
        self.assertIn('y', example)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['x']), cuda.to_cpu(x[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['y']), cuda.to_cpu(y[i]))
Example 9: check_extract
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def check_extract(self):
    x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
    x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
    with numpy.errstate(divide='ignore'):
        result = self.link.extract([x1, x2], layers=['res3', 'pool5'])
        assert len(result) == 2
        y1 = cuda.to_cpu(result['res3'].data)
        assert y1.shape == (2, 512, 28, 28)
        assert y1.dtype == self.dtype
        y2 = cuda.to_cpu(result['pool5'].data)
        assert y2.shape == (2, 2048)
        assert y2.dtype == self.dtype
        x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
        result = self.link.extract([x3], layers=['res2'], size=None)
        assert len(result) == 1
        y3 = cuda.to_cpu(result['res2'].data)
        assert y3.shape == (1, 256, 20, 15)
        assert y3.dtype == self.dtype
Example 10: check_forward
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def check_forward(self, theta, output_shape):
    grid = functions.spatial_transformer_grid(theta, output_shape).data
    theta = cuda.to_cpu(theta)
    B = theta.shape[0]
    H, W = output_shape
    expected = []
    for b in range(B):
        for i in numpy.linspace(-1., 1., H):
            for j in numpy.linspace(-1., 1., W):
                coord = numpy.array([j, i, 1])
                expected.append(self.theta[b].dot(coord))
    expected = numpy.array(
        expected).reshape(B, H, W, 2).transpose(0, 3, 1, 2)
    testing.assert_allclose(grid, expected, **self.check_forward_options)
    self.assertEqual(grid.dtype, self.dtype)
Example 11: check_forward_no_reduction
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def check_forward_no_reduction(self, x_data, t_data):
    x_val = chainer.Variable(x_data)
    t_val = chainer.Variable(t_data)
    loss = functions.sigmoid_cross_entropy(
        x_val, t_val, self.normalize, reduce='no')
    self.assertEqual(loss.data.shape, self.x.shape)
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)
    # Compute expected value
    if not getattr(self, 'ignore_all', False):
        for i in six.moves.range(self.x.shape[0]):
            for j in six.moves.range(self.x.shape[1]):
                xd, td = self.x[i, j], self.t[i, j]
                if td == -1:
                    loss_expect = 0
                else:
                    loss_expect = -(
                        xd * (td - (xd >= 0)) -
                        math.log(1 + math.exp(-numpy.abs(xd))))
                self.assertAlmostEqual(
                    loss_expect, loss_value[i, j], places=self.places)
Example 12: check_forward
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def check_forward(self, x_data, t_data):
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)
    loss = functions.huber_loss(x, t, delta=1, reduce=self.reduce)
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)
    diff_data = cuda.to_cpu(x_data) - cuda.to_cpu(t_data)
    loss_expect = numpy.zeros(self.shape)
    mask = numpy.abs(diff_data) < 1
    loss_expect[mask] = 0.5 * diff_data[mask] ** 2
    loss_expect[~mask] = numpy.abs(diff_data[~mask]) - 0.5
    if self.reduce == 'sum_along_second_axis':
        loss_expect = numpy.sum(loss_expect, axis=1)
    testing.assert_allclose(
        loss_value, loss_expect, **self.forward_options)
Example 13: check_forward
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def check_forward(self, a_data, p_data, n_data):
    a_val = chainer.Variable(a_data)
    p_val = chainer.Variable(p_data)
    n_val = chainer.Variable(n_data)
    loss = functions.triplet(a_val, p_val, n_val, self.margin, self.reduce)
    if self.reduce == 'mean':
        self.assertEqual(loss.data.shape, ())
    else:
        self.assertEqual(loss.data.shape, (self.batchsize,))
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)
    #
    # Compute expected value
    #
    loss_expect = numpy.empty((self.a.shape[0],), dtype=self.dtype)
    for i in six.moves.range(self.a.shape[0]):
        ad, pd, nd = self.a[i], self.p[i], self.n[i]
        dp = numpy.sum((ad - pd) ** 2)
        dn = numpy.sum((ad - nd) ** 2)
        loss_expect[i] = max((dp - dn + self.margin), 0)
    if self.reduce == 'mean':
        loss_expect = loss_expect.mean()
    numpy.testing.assert_allclose(
        loss_expect, loss_value, **self.check_forward_options)
Example 14: _check
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def _check(self, backend_config):
    mask = self.mask if self.specify_mask else None
    x, mask = backend_config.get_array((self.x, mask))
    with chainer.using_config('train', self.train), backend_config:
        out, out_mask = functions.dropout(
            x, 0.5, mask=mask, return_mask=True)
    if self.train:
        assert isinstance(out_mask, type(out.array))
        if mask is None:
            assert out_mask.shape == out.array.shape
        else:
            assert out_mask is mask
    else:
        assert out_mask is None
    with chainer.using_config('train', self.train):
        out2 = functions.dropout(self.x, 0.5, mask=cuda.to_cpu(out_mask))
    testing.assert_allclose(out.array, out2.array)
Example 15: to_cpu
# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_cpu [as alias]
def to_cpu(array):
    if isinstance(array, cp.ndarray):
        return cuda.to_cpu(array)
    return array
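Note that the helper in Example 15 expects `import cupy as cp` elsewhere in its source module: it copies CuPy arrays back to the host and returns anything else (plain numpy.ndarray objects, Python scalars) unchanged. A short usage sketch, assuming CuPy is installed and imported as cp:

import numpy as np
import cupy as cp  # assumption: CuPy is available

a = np.zeros((2, 2), dtype=np.float32)
assert to_cpu(a) is a                     # NumPy input passes through untouched

b = cp.zeros((2, 2), dtype=cp.float32)    # device array
assert isinstance(to_cpu(b), np.ndarray)  # copied back to host memory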