This article collects typical usage examples of chainer.cuda in Python. If you have been wondering what chainer.cuda is for, how to use it, or what it looks like in real code, the curated examples below may help. You can also look further into usage examples for the enclosing chainer package.
The following presents 15 code examples involving chainer.cuda, sorted by popularity by default.
Example 1: out_generated_image
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def out_generated_image(gen, dis, rows, cols, seed, dst):
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        # Generate a batch in test mode and move it to the host.
        with chainer.using_config('train', False):
            x = gen(z)
        x = chainer.cuda.to_cpu(x.array)
        np.random.seed()

        # Tile the batch into a single (rows * H, cols * W, 3) image grid.
        x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
        _, _, H, W = x.shape
        x = x.reshape((rows, cols, 3, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * H, cols * W, 3))

        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir +\
            '/image{:0>8}.png'.format(trainer.updater.iteration)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
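The extension returned above is typically hooked into a Chainer trainer. A minimal, hypothetical registration sketch follows; the names `trainer`, `gen`, `dis`, and the output directory are assumptions for illustration.

# Hypothetical registration of the extension defined above.
trainer.extend(
    out_generated_image(gen, dis, rows=5, cols=5, seed=0, dst='result'),
    trigger=(1, 'epoch'))  # render a preview grid once per epoch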
Example 2: _check_list_tuple
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def _check_list_tuple(self, typ):
    assert typ in (list, tuple)
    a = numpy.random.uniform(-1, 1, (0,))
    b = numpy.random.uniform(-1, 1, (2, 3))
    c = cuda.cupy.random.uniform(-1, 1, (0,))
    d = cuda.cupy.random.uniform(-1, 1, (2, 2))
    xs = typ([a, b, c, d, None, a, b, None, c, d])
    xs_cpu = cuda.to_cpu(xs)
    assert isinstance(xs_cpu, typ)
    assert len(xs) == len(xs_cpu)
    for i in (0, 1, 2, 3, 5, 6, 8, 9):
        assert isinstance(xs_cpu[i], numpy.ndarray)
        cuda.cupy.testing.assert_array_equal(xs[i], xs_cpu[i])
    assert xs_cpu[0] is a
    assert xs_cpu[1] is b
    assert xs_cpu[2] is xs_cpu[8]
    assert xs_cpu[3] is xs_cpu[9]
    assert xs_cpu[4] is None
    assert xs_cpu[5] is a
    assert xs_cpu[6] is b
    assert xs_cpu[7] is None
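The behavior exercised by this test can be summarized in a short sketch. It assumes CuPy and a CUDA device are available and uses chainer.backends.cuda directly:

import numpy
from chainer.backends import cuda

xs = [numpy.zeros((2,)), cuda.cupy.ones((3,)), None]
ys = cuda.to_cpu(xs)                     # container type (list) is preserved
assert ys[0] is xs[0]                    # NumPy input comes back as-is, no copy
assert isinstance(ys[1], numpy.ndarray)  # CuPy input is copied to the host
assert ys[2] is None                     # None entries pass through unchanged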
Example 3: test_numpy_scalar
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def test_numpy_scalar(self):
    dtype = self.dtype
    if dtype is numpy.bool_:
        x = dtype(True)
    elif issubclass(dtype, numpy.complex_):
        x = dtype(3.2 - 2.4j)
    elif issubclass(dtype, numpy.integer):
        x = dtype(3)
    elif issubclass(dtype, numpy.floating):
        x = dtype(3.2)
    else:
        assert False
    y = cuda.to_gpu(x)
    assert isinstance(y, cuda.ndarray)
    assert y.shape == ()
    assert y.dtype == dtype
    assert y == x
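A minimal sketch of the behavior this test checks (it assumes CuPy and a CUDA device): NumPy scalars are promoted to zero-dimensional arrays on the GPU.

y = cuda.to_gpu(numpy.float32(3.2))
assert isinstance(y, cuda.ndarray) and y.shape == ()
assert y.dtype == numpy.float32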
Example 4: _get_device_or_current
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def _get_device_or_current(
        device: tp.Optional[types.CudaDeviceSpec]
) -> Device:
    # Returns cuda.Device.
    # - If cuda.Device instance, it's returned intact.
    # - If None, the current device is returned.
    # - If non-negative integer, cuda.Device is returned.
    # - Otherwise: error.
    if device is None:
        return cuda.Device()
    if isinstance(device, Device):
        return device
    if not (isinstance(device, _integer_types) and device >= 0):
        raise ValueError('Invalid CUDA device specifier: {}'.format(device))
    return cuda.Device(int(device))


# ------------------------------------------------------------------------------
# cupy.ndarray allocation and copy
# ------------------------------------------------------------------------------
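A few hypothetical calls to the helper above, for illustration only (they assume a CUDA runtime is available):

dev = _get_device_or_current(None)   # current device, e.g. cuda.Device(0)
dev0 = _get_device_or_current(0)     # explicit device id -> cuda.Device(0)
# _get_device_or_current(-1) would raise ValueError (invalid specifier)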
Example 5: to_cpu
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def to_cpu(array, stream=None):
    """Copies the given GPU array to host CPU.

    Args:
        array (*array*, None, list or tuple):
            Array or arrays to be sent to CPU.
        stream (cupy.cuda.Stream): CUDA stream.

    Returns:
        numpy.ndarray, list or tuple: Array on CPU.

        If some of the arrays are already on CPU, then this function just
        returns those arrays without performing any copy.

        If input arrays include `None`, it is returned as `None` as is.
    """
    return _backend._convert_arrays(
        array, lambda arr: _array_to_cpu(arr, stream))
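A minimal usage sketch for the single-array case (it assumes CuPy is importable and a CUDA device is present):

import cupy

gpu_arr = cupy.arange(6, dtype=cupy.float32).reshape(2, 3)
cpu_arr = to_cpu(gpu_arr)                  # copied to host memory
assert isinstance(cpu_arr, numpy.ndarray)
assert to_cpu(cpu_arr) is cpu_arr          # already on CPU: returned as-is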
Example 6: reduce
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def reduce(in_params, out_params, map_expr, reduce_expr, post_map_expr,
           identity, name, **kwargs):
    """Creates a global reduction kernel function.

    This function uses :func:`~chainer.backends.cuda.memoize` to cache the
    resulting kernel object, i.e. the resulting kernel object is cached for
    each argument combination and CUDA device.

    The arguments are the same as those for
    :class:`cupy.ReductionKernel`, except that the ``name`` argument is
    mandatory.
    """
    check_cuda_available()
    return cupy.ReductionKernel(
        in_params, out_params, map_expr, reduce_expr, post_map_expr,
        identity, name, **kwargs)
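A hypothetical usage sketch of the wrapper above; the kernel name and expressions are made up for illustration and follow the cupy.ReductionKernel argument order (assumes CuPy and a CUDA device):

import cupy

sum_of_squares = reduce(
    'T x',             # input parameter
    'T y',             # output parameter
    'x * x',           # map expression applied to each element
    'a + b',           # reduce expression combining mapped values
    'y = a',           # post-map expression writing the result
    '0',               # identity element
    'sum_of_squares')  # kernel name (mandatory here)

data = cupy.arange(5, dtype=cupy.float32)
assert float(sum_of_squares(data)) == 30.0  # 0 + 1 + 4 + 9 + 16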
Example 7: raw
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def raw(code, name, *args, **kwargs):
    """Creates a raw kernel function.

    This function uses :func:`~chainer.backends.cuda.memoize` to cache the
    resulting kernel object, i.e. the resulting kernel object is cached for
    each argument combination and CUDA device.

    The arguments are the same as those for :class:`cupy.RawKernel`.
    """
    check_cuda_available()
    return cupy.RawKernel(code, name, *args, **kwargs)


# ------------------------------------------------------------------------------
# numpy/cupy compatible coding
# ------------------------------------------------------------------------------
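A hypothetical usage sketch of `raw`; the kernel source and its name are invented for illustration (assumes CuPy and a CUDA device):

import cupy

add_one = raw(r'''
extern "C" __global__ void add_one(const float* x, float* y, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) y[i] = x[i] + 1.0f;
}
''', 'add_one')

x = cupy.arange(8, dtype=cupy.float32)
y = cupy.empty_like(x)
add_one((1,), (8,), (x, y, cupy.int32(x.size)))  # grid, block, kernel args
assert float(y[7]) == 8.0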
Example 8: sample_generate_light
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def sample_generate_light(gen, dst, rows=5, cols=5, seed=0):
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            x = gen(z, stage=trainer.updater.stage)
        x = chainer.cuda.to_cpu(x.data)
        np.random.seed()

        x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
        _, _, H, W = x.shape
        x = x.reshape((rows, cols, 3, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * H, cols * W, 3))

        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir + '/image_latest.png'
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
Example 9: sample_generate
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def sample_generate(gen, dst, rows=10, cols=10, seed=0):
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            x = gen(z, stage=trainer.updater.stage)
        x = chainer.cuda.to_cpu(x.data)
        np.random.seed()

        x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
        _, _, h, w = x.shape
        x = x.reshape((rows, cols, 3, h, w))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * h, cols * w, 3))

        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir + '/image{:0>8}.png'.format(trainer.updater.iteration)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
Example 10: sample_generate_light
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def sample_generate_light(gen, dst, rows=5, cols=5, seed=0):
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            x = gen(z)
        x = chainer.cuda.to_cpu(x.data)
        np.random.seed()

        x = np.asarray(np.clip(x * 127.5 + 127.5, 0.0, 255.0), dtype=np.uint8)
        _, _, H, W = x.shape
        x = x.reshape((rows, cols, 3, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * H, cols * W, 3))

        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir + '/image_latest.png'
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
Example 11: test_forward_consistency
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def test_forward_consistency(self, nobias=False):
    x_cpu = chainer.Variable(self.x)
    W_cpu = chainer.Variable(self.W)
    b_cpu = None if nobias else chainer.Variable(self.b)
    func_cpu = graph_convolution.GraphConvolutionFunction(self.L, self.K)
    func_cpu.to_cpu()
    args_cpu = (x_cpu, W_cpu)
    if b_cpu is not None:
        args_cpu += (b_cpu,)
    y_cpu = func_cpu(*args_cpu)

    x_gpu = chainer.Variable(cuda.to_gpu(self.x))
    W_gpu = chainer.Variable(cuda.to_gpu(self.W))
    b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
    func_gpu = graph_convolution.GraphConvolutionFunction(self.L, self.K)
    func_gpu.to_gpu()
    args_gpu = (x_gpu, W_gpu)
    if b_gpu is not None:
        args_gpu += (b_gpu,)
    y_gpu = func_gpu(*args_gpu)

    testing.assert_allclose(
        y_cpu.data, y_gpu.data.get(), **self.check_forward_options)
Example 12: forward_gpu
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def forward_gpu(self, inputs):
    x, W = inputs[:2]
    n_batch, c_in, N = x.shape
    b = inputs[2] if len(inputs) == 3 else None
    xp = cuda.get_array_module(x)
    with cuda.get_device(x.data):
        K = self.K
        LmI_data, LmI_indices, LmI_indptr = self.LmI_tuple

        if x.dtype != LmI_data.dtype:
            LmI_data = LmI_data.astype(x.dtype)

        # Fill C with the K Chebyshev basis terms of the (shifted) graph
        # Laplacian applied to x, computed on the GPU.
        C = xp.empty((K, N, c_in, n_batch), dtype=x.dtype)
        chebyshev_matvec_gpu(C, x, K, n_batch,
                             LmI_data, LmI_indices, LmI_indptr)
        C = C.transpose((3, 2, 0, 1))
        self.C = C
        # Contract the Chebyshev terms with the filter weights W.
        y = xp.tensordot(C, W, ((1, 2), (1, 2)))
        if b is not None:
            y += b
        return xp.rollaxis(y, 2, 1),  # y.shape = (n_batch, c_out, N)
Example 13: forward_gpu
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def forward_gpu(self, inputs):
    x = inputs[0]
    xp = cuda.get_array_module(x)
    n_batch, c, N = x.shape
    N_coarse = len(self.pooling_inds)

    with cuda.get_device(x.data):
        x = x.transpose((2, 1, 0))
        p_dim = self.pooling_inds.shape[1]
        y = xp.empty((N_coarse, c, n_batch), dtype=x.dtype)
        self.max_inds = xp.empty((N_coarse, c, n_batch), dtype=np.int32)
        pooling_inds = cuda.to_gpu(self.pooling_inds)
        # Take the channel-wise maximum over each node's pooling neighborhood;
        # the argmax indices are kept in self.max_inds for the backward pass.
        gpu_graphpool_fwd(N_coarse, p_dim, pooling_inds,
                          x, y, self.max_inds)
        y = y.transpose((2, 1, 0))

    return y,
Example 14: classify
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def classify(self, x=None):
    if x is None:
        x = Tensor.context
    if not isinstance(x, ImageTensor):
        x = Input(x)

    xp = Deel.xp
    x_data = xp.asarray(self.x_batch)
    # volatile=True disables graph construction (Chainer v1 API).
    xv = chainer.Variable(x.value, volatile=True)

    # Run the detection network: class scores and bounding-box regressions.
    h, w = xv.data.shape[2:]
    cls_score, bbox_pred = self.func(xv, np.array([[h, w, x.im_scale]]))

    draw_rois(x.content, x.im_scale, self.func.rois, bbox_pred, cls_score.data)
    if Deel.gpu >= 0:
        # Copy the GPU results back to host memory before drawing.
        cls_score = chainer.cuda.cupy.asnumpy(cls_score)
        bbox_pred = chainer.cuda.cupy.asnumpy(bbox_pred)
    result = draw_result(x.content, 1.0, cls_score.data, bbox_pred, 0.3, 0.8)
    cv.imshow("res", result)
    cv.waitKey(0)
Example 15: sample_generate_light
# Required module: import chainer [as alias]
# Or: from chainer import cuda [as alias]
def sample_generate_light(gen, mapping, dst, rows=8, cols=8, z=None, seed=0, subdir='preview'):
    @chainer.training.make_extension()
    def make_image(trainer):
        nonlocal rows, cols, z
        if trainer.updater.stage > 15:
            rows = min(rows, 2)
            cols = min(cols, 2)
        elif trainer.updater.stage > 13:
            rows = min(rows, 3)
            cols = min(cols, 3)
        elif trainer.updater.stage > 11:
            rows = min(rows, 4)
            cols = min(cols, 4)

        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        if z is None:
            z = Variable(xp.asarray(mapping.make_hidden(n_images)))
        else:
            z = z[:n_images]
        with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
            x = gen(mapping(z), stage=trainer.updater.stage)
        x = chainer.cuda.to_cpu(x.data)
        np.random.seed()

        x = convert_batch_images(x, rows, cols)

        preview_dir = '{}/{}'.format(dst, subdir)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        preview_path = preview_dir + '/image_latest.png'
        Image.fromarray(x).save(preview_path)
        preview_path = preview_dir + '/image{:0>8}.png'.format(trainer.updater.iteration)
        Image.fromarray(x).save(preview_path)
    return make_image