

Python cuda.to_gpu Method Code Examples

This article collects typical usage examples of the chainer.backends.cuda.to_gpu method in Python. If you are wondering how to call cuda.to_gpu, what it does, or what real-world uses look like, the curated code examples below should help. You can also browse further usage examples for the containing module, chainer.backends.cuda.


The following presents 15 code examples of the cuda.to_gpu method, ordered by popularity by default.
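Before the examples, a minimal sketch of what cuda.to_gpu does may help: it copies a host NumPy array to the current (or an explicitly specified) GPU and returns a cupy.ndarray, while cuda.to_cpu performs the reverse transfer. The array contents and the device index below are purely illustrative, and a CUDA-capable GPU with CuPy installed is assumed.

import numpy as np
from chainer.backends import cuda

x_cpu = np.arange(6, dtype=np.float32).reshape(2, 3)
x_gpu = cuda.to_gpu(x_cpu)             # copy to the current GPU; returns a cupy.ndarray
x_gpu0 = cuda.to_gpu(x_cpu, device=0)  # copy to an explicit device
x_back = cuda.to_cpu(x_gpu)            # copy back to host memory as a numpy.ndarray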

Example 1: test_forward_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_forward_gpu(self):
        self.f.add_hook(self.h)
        self.f(chainer.Variable(cuda.to_gpu(self.x)),
               chainer.Variable(cuda.to_gpu(self.x)))
        expect = r'''^function\tDummyFunction
input data
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy.core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy.core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None$
'''
        actual = self.io.getvalue()
        self.assertTrue(re.match(expect, actual), actual) 
Author: chainer, Project: chainer, Lines: 25, Source: test_debug_print.py
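In the test above, self.f is a DummyFunction and self.h is a debug-printing hook created in the test's setUp (not shown in this excerpt). A minimal, hypothetical sketch of the same idea with a real function and chainer.function_hooks.PrintHook, where the array shape and the choice of relu are assumptions rather than part of the original test:

import numpy as np
import chainer
from chainer import function_hooks
from chainer.backends import cuda

x = np.random.uniform(-1, 1, (3, 5)).astype(np.float32)
with function_hooks.PrintHook():
    # Prints a per-variable report (device, backend, shape, dtype, statistics, grad)
    # similar to the one the regular expression above matches.
    chainer.functions.relu(chainer.Variable(cuda.to_gpu(x)))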

Example 2: test_inconsistent_input_backends

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_inconsistent_input_backends(self):
        class FunctionNode(chainer.FunctionNode):

            def forward(self, inputs):
                return inputs

        f = FunctionNode()

        # Cause inconsistency between inputs
        x1 = cuda.to_gpu(self.x1)

        x1 = chainer.Variable(x1)
        x2 = chainer.Variable(self.x2)

        with self.assertRaises(TypeError):
            f.apply((x1, x2)) 
Author: chainer, Project: chainer, Lines: 18, Source: test_function_node.py

Example 3: to_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def to_gpu(self):
        self.parameters.to_gpu() 
Author: musyoku, Project: chainer-gqn, Lines: 4, Source: model.py

Example 4: compute_observation_representation

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def compute_observation_representation(self, images, viewpoints):
        batch_size = images.shape[0]
        num_views = images.shape[1]

        # (batch, views, channels, height, width) -> (batch * views, channels, height, width)
        images = images.reshape((batch_size * num_views, ) + images.shape[2:])
        viewpoints = viewpoints.reshape((batch_size * num_views, 7, 1, 1))

        # Transfer to gpu
        xp = self.parameters.xp
        if xp is cupy:
            images = cuda.to_gpu(images)
            viewpoints = cuda.to_gpu(viewpoints)

        # Add noise
        # images += xp.random.uniform(
        #     0, 1.0 / 256.0, size=images.shape).astype(xp.float32)

        r = self.representation_network(images, viewpoints)

        # (batch * views, channels, height, width) -> (batch, views, channels, height, width)
        r = r.reshape((batch_size, num_views) + r.shape[1:])

        # Sum element-wise across views
        r = cf.sum(r, axis=1)

        return r 
Author: musyoku, Project: chainer-gqn, Lines: 29, Source: model.py

Example 5: argsort

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def argsort(x):
    xp = cuda.get_array_module(x)
    i = np.argsort(cuda.to_cpu(x))
    if xp is np:
        return i
    else:
        return cuda.to_gpu(i)


# to avoid out of memory 
Author: pfnet-research, Project: chainer-compiler, Lines: 12, Source: misc.py

Example 6: choice

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def choice(x, size):
    xp = cuda.get_array_module(x)
    y = np.random.choice(cuda.to_cpu(x), size, replace=False)
    if xp is np:
        return y
    else:
        return cuda.to_gpu(y) 
Author: pfnet-research, Project: chainer-compiler, Lines: 9, Source: misc.py
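Examples 5 and 6 follow the same round-trip pattern: detect the input's array module with cuda.get_array_module, do the work in NumPy on the host, and call cuda.to_gpu only when the input was a GPU array. A hedged usage sketch of the argsort helper from Example 5, assuming it is in scope and that CuPy and a GPU are available:

import numpy as np
from chainer.backends import cuda

x_cpu = np.array([3.0, 1.0, 2.0], dtype=np.float32)
print(type(argsort(x_cpu)))   # numpy.ndarray: indices stay on the host
x_gpu = cuda.to_gpu(x_cpu)
print(type(argsort(x_gpu)))   # cupy.ndarray: indices are copied back to the GPU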

Example 7: __call__

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def __call__(self, inputs, device=None):
        """Convert DALI arrays to Numpy/CuPy arrays"""

        xp = chainer.backend.get_array_module(self.perturbation)
        if xp is not cuda.cupy:
            self.perturbation = cuda.to_gpu(self.perturbation, device)

        outputs = []
        for i in range(len(inputs)):
            x = inputs[i].as_tensor()
            if (isinstance(x, dali.backend_impl.TensorCPU)):
                x = np.array(x)
                if x.ndim == 2 and x.shape[1] == 1:
                    x = x.squeeze(axis=1)
                if device is not None and device >= 0:
                    x = cuda.to_gpu(x, device)
            elif (isinstance(x, dali.backend_impl.TensorGPU)):
                x_cupy = cuda.cupy.empty(shape=x.shape(), dtype=x.dtype())
                # Synchronization is necessary here to avoid data corruption
                # because DALI and CuPy will use different CUDA streams.
                cuda.cupy.cuda.runtime.deviceSynchronize()
                # copy data from DALI array to CuPy array
                x.copy_to_external(ctypes.c_void_p(x_cupy.data.ptr))
                cuda.cupy.cuda.runtime.deviceSynchronize()
                x = x_cupy.astype(chainer.get_dtype())
                if self.perturbation is not None:
                    x = x - self.perturbation
                if device is not None and device < 0:
                    x = cuda.to_cpu(x)
            else:
                raise ValueError('Unexpected object')
            outputs.append(x)
        return tuple(outputs) 
Author: chainer, Project: chainer, Lines: 35, Source: dali_util.py

Example 8: dali_converter

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def dali_converter(inputs, device=None):
    """Convert DALI arrays to Numpy/CuPy arrays"""

    outputs = []
    for i in range(len(inputs)):
        x = inputs[i].as_tensor()
        if (isinstance(x, dali.backend_impl.TensorCPU)):
            x = np.array(x)
            if x.ndim == 2 and x.shape[1] == 1:
                x = x.squeeze(axis=1)
            if device is not None and device >= 0:
                x = cuda.to_gpu(x, device)
        elif (isinstance(x, dali.backend_impl.TensorGPU)):
            x_cupy = cuda.cupy.empty(shape=x.shape(), dtype=x.dtype())
            # Synchronization is necessary here to avoid data corruption
            # because DALI and CuPy will use different CUDA streams.
            cuda.cupy.cuda.runtime.deviceSynchronize()
            # copy data from DALI array to CuPy array
            x.copy_to_external(ctypes.c_void_p(x_cupy.data.ptr))
            cuda.cupy.cuda.runtime.deviceSynchronize()
            x = x_cupy.astype(chainer.get_dtype())
            if device is not None and device < 0:
                x = cuda.to_cpu(x)
        else:
            raise ValueError('Unexpected object')
        outputs.append(x)
    return tuple(outputs) 
Author: chainer, Project: chainer, Lines: 29, Source: dali_util.py
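The dali_converter above follows Chainer's converter(batch, device) calling convention, so it can be passed wherever a converter is accepted. A hypothetical wiring sketch, where train_iter, optimizer, and the device index are placeholders and train_iter is assumed to yield DALI pipeline outputs:

from chainer import training

# train_iter: placeholder for an iterator producing DALI tensors
# optimizer: placeholder for an already-configured chainer optimizer
updater = training.updaters.StandardUpdater(
    train_iter, optimizer, converter=dali_converter, device=0)
trainer = training.Trainer(updater, (10, 'epoch'))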

Example 9: test_forward_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x)) 
Author: chainer, Project: chainer, Lines: 4, Source: test_cuda_profile.py

Example 10: test_backward_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy)) 
Author: chainer, Project: chainer, Lines: 4, Source: test_cuda_profile.py

Example 11: test_forward_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_forward_gpu(self):
        with testing.assert_warns(DeprecationWarning):
            self.layer.to_gpu()
        self.check_forward(cuda.to_gpu(self.x)) 
Author: chainer, Project: chainer, Lines: 6, Source: test_timer.py

Example 12: test_forward_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_forward_gpu(self):
        with testing.assert_warns(DeprecationWarning):
            self.l.to_gpu()
        self.check_forward(cuda.to_gpu(self.x)) 
Author: chainer, Project: chainer, Lines: 6, Source: test_cupy_memory_profile.py

Example 13: test_backward_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_backward_gpu(self):
        with testing.assert_warns(DeprecationWarning):
            self.l.to_gpu()
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy)) 
Author: chainer, Project: chainer, Lines: 6, Source: test_cupy_memory_profile.py

Example 14: setUp

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def setUp(self):
        cuda.memory_pool.free_all_blocks()
        self.h = function_hooks.CupyMemoryProfileHook()
        f1 = functions.exp
        f2 = functions.relu
        self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
        x = cuda.to_gpu(self.x)
        with self.h:
            f1(chainer.Variable(x))
            f1(chainer.Variable(x))
            f2(chainer.Variable(x))
            f2(chainer.Variable(x)) 
Author: chainer, Project: chainer, Lines: 14, Source: test_cupy_memory_profile.py

Example 15: test_backward_gpu

# Required import: from chainer.backends import cuda [as alias]
# Or: from chainer.backends.cuda import to_gpu [as alias]
def test_backward_gpu(self):
        y = self.f(chainer.Variable(cuda.to_gpu(self.x)),
                   chainer.Variable(cuda.to_gpu(self.x)))
        y.grad = cuda.to_gpu(self.gy)
        self.f.add_hook(self.h)
        y.backward()
        expect = r'''^function\tDummyFunction
input data
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy.core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None
\(removed\)
output gradient
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy.core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: mean=[0-9.\-e]+, std=[0-9.\-e]+$
'''
        actual = self.io.getvalue()
        self.assertTrue(re.match(expect, actual), actual) 
Author: chainer, Project: chainer, Lines: 29, Source: test_debug_print.py


Note: The chainer.backends.cuda.to_gpu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce this article without permission.