

Python chainer.backends Code Examples

This article collects typical usage examples of chainer.backends in Python (strictly speaking, chainer.backends is a subpackage rather than a method, most often accessed through chainer.backends.cuda). If you are unsure how chainer.backends is used in practice, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the chainer package that provides it.


The following presents 15 code examples that use chainer.backends, sorted by popularity by default.
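Before the collected examples, here is a minimal sketch of the pattern that recurs throughout them: dispatching between NumPy and CuPy through chainer.backends.cuda. The helper name normalize and the sample data are illustrative assumptions, not taken from any example below; the GPU branch only runs when CUDA and CuPy are available.

import numpy as np
import chainer
from chainer.backends import cuda

def normalize(x):
    # get_array_module returns numpy for CPU arrays and cupy for GPU arrays,
    # so the same arithmetic works on either backend.
    xp = cuda.get_array_module(x)
    return (x - xp.mean(x)) / (xp.std(x) + 1e-8)

x = np.random.randn(4, 3).astype(np.float32)
y_cpu = normalize(x)                    # runs on NumPy
if chainer.backends.cuda.available:     # guard: requires CUDA + CuPy
    y_gpu = normalize(cuda.to_gpu(x))   # same function, now runs on CuPy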

Example 1: forward_expected

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.convolution_2d itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        with chainer.using_config('use_ideep', 'never'):
            y_expected = F.convolution_2d(
                x, W, b, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
        if self.old_numpy_fp16:
            return y_expected.array * 0,
        return y_expected.array, 
Developer: chainer, Project: chainer, Lines: 22, Source: test_convolution_2d.py

Example 2: forward_expected

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.convolution_nd itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        y_expected = F.convolution_nd(
            x, W, b, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all, dilate=self.dilate,
            groups=self.groups)
        return y_expected.array, 
Developer: chainer, Project: chainer, Lines: 19, Source: test_convolution_nd.py

Example 3: forward_expected

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.deconvolution_nd itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        y_expected = F.deconvolution_nd(
            x, W, b, stride=self.stride, pad=self.pad,
            outsize=self.outsize, dilate=self.dilate,
            groups=self.groups)
        return y_expected.array, 
Developer: chainer, Project: chainer, Lines: 19, Source: test_deconvolution_nd.py

Example 4: forward_expected

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.deconvolution_2d itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        y_expected = F.deconvolution_2d(
            x, W, b, stride=self.stride, pad=self.pad,
            outsize=self.outsize, dilate=self.dilate,
            groups=self.groups)
        return y_expected.array, 
Developer: chainer, Project: chainer, Lines: 19, Source: test_deconvolution_2d.py

Example 5: setup_communicator

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def setup_communicator(gpu):
    if gpu:
        communicator = chainermn.create_communicator('flat')
        chainer.backends.cuda.get_device_from_id(
            communicator.intra_rank).use()
    else:
        communicator = chainermn.create_communicator('naive')

    if communicator.size < 2:
        pytest.skip('This test is for multinode only')

    rank_next = communicator.rank + 1
    rank_prev = communicator.rank - 1

    if rank_prev < 0:
        rank_prev = None

    if rank_next >= communicator.size:
        rank_next = None

    return communicator, rank_prev, rank_next 
Developer: chainer, Project: chainer, Lines: 23, Source: test_n_step_rnn.py

Example 6: check_crossing_model

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def check_crossing_model(gpu, param):
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10
    X = np.random.randn(n, d).astype(param.dtype)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    with chainer.using_config('dtype', param.dtype):
        if communicator.rank == 0:
            model = L.Classifier(Cross0(
                d, communicator, rank_next, rank_prev))
        else:
            model = L.Classifier(Cross1(
                d, communicator, rank_next, rank_prev))

        if gpu:
            model.to_device(cupy.cuda.Device())
            X = chainer.backends.cuda.to_gpu(X)
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward() 
Developer: chainer, Project: chainer, Lines: 25, Source: test_multi_node_chain_list.py

Example 7: reduce

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def reduce(in_params, out_params, map_expr, reduce_expr, post_map_expr,
           identity, name, **kwargs):
    """Creates a global reduction kernel function.

    This function uses :func:`~chainer.backends.cuda.memoize` to cache the
    resulting kernel object, i.e. the resulting kernel object is cached for
    each argument combination and CUDA device.

    The arguments are the same as those for
    :class:`cupy.ReductionKernel`, except that the ``name`` argument is
    mandatory.

    """
    check_cuda_available()
    return cupy.ReductionKernel(
        in_params, out_params, map_expr, reduce_expr, post_map_expr,
        identity, name, **kwargs) 
Developer: chainer, Project: chainer, Lines: 19, Source: cuda.py
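As a usage note for Example 7: the sketch below parameterizes the reduce helper the same way a cupy.ReductionKernel is normally written. The kernel name simple_sum and the input array are illustrative assumptions, and running it requires a CUDA device with CuPy installed.

import cupy
from chainer.backends import cuda

# Build a cached element-sum kernel; the arguments mirror cupy.ReductionKernel.
sum_kernel = cuda.reduce(
    'T x',         # in_params
    'T y',         # out_params
    'x',           # map_expr: value fed into the reduction
    'a + b',       # reduce_expr
    'y = a',       # post_map_expr
    '0',           # identity
    'simple_sum')  # name (mandatory, used as part of the cache key)
print(sum_kernel(cupy.arange(10, dtype=cupy.float32)))  # prints 45.0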

Example 8: _list_to_flat

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def _list_to_flat(array_list):
    xp = chainer.backends.cuda.get_array_module(array_list[0])

    indices = xp.concatenate(
        [i * xp.ones((len(array),), dtype=np.int32) for
         i, array in enumerate(array_list)], axis=0)
    flat = xp.concatenate(array_list, axis=0)
    return flat, indices 
Developer: pfnet-research, Project: chainer-compiler, Lines: 10, Source: faster_rcnn.py
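A quick illustration of Example 8, assuming _list_to_flat is in scope and the inputs are plain NumPy arrays (shapes chosen purely for illustration); for CPU arrays get_array_module simply resolves to numpy.

import numpy as np

flat, indices = _list_to_flat([np.zeros((2, 4)), np.ones((3, 4))])
print(flat.shape)  # (5, 4): the two arrays concatenated along axis 0
print(indices)     # [0 0 1 1 1]: which source array each row came from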

Example 9: decode

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def decode(self, segms, bboxes, labels, sizes):
        """Decodes back to masks.

        Args:
            segms (iterable of arrays): An iterable of arrays of
                shape :math:`(R_n, n\_class, M, M)`.
            bboxes (iterable of arrays): An iterable of arrays of
                shape :math:`(R_n, 4)`.
            labels (iterable of arrays): An iterable of arrays of
                shape :math:`(R_n,)`.
            sizes (list of tuples of two ints): A list of
                :math:`(H_n, W_n)`, where :math:`H_n` and :math:`W_n`
                are height and width of the :math:`n`-th image.

        Returns:
            list of arrays:
            This list contains instance segmentation for each image
            in the batch.
            More precisely, this is a list of boolean arrays of shape
            :math:`(R'_n, H_n, W_n)`, where :math:`R'_n` is the number of
            bounding boxes in the :math:`n`-th image.
        """

        xp = chainer.backends.cuda.get_array_module(*segms)
        if xp != np:
            raise ValueError(
                'MaskHead.decode only supports numpy inputs for now.')
        masks = []
        for bbox, segm, label, size in zip(
                bboxes, segms, labels, sizes):
            if len(segm) > 0:
                masks.append(
                    segm_to_mask(segm[np.arange(len(label)), label + 1],
                                 bbox, size))
            else:
                masks.append(np.zeros((0,) + size, dtype=np.bool))
        return masks 
Developer: pfnet-research, Project: chainer-compiler, Lines: 39, Source: mask_head.py

Example 10: check_backward

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def check_backward(self, x_data, y_grad):
        slices = []
        for i, s in enumerate(self.slices):
            if isinstance(s, numpy.ndarray):
                s = chainer.backends.cuda.cupy.array(s)
            if isinstance(s, list):
                s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
            slices.append(s)
        slices = tuple(slices)

        def f(x):
            return functions.get_item(x, slices)

        gradient_check.check_backward(
            f, (x_data,), y_grad, dtype='d') 
Developer: chainer, Project: chainer, Lines: 17, Source: test_get_item.py

Example 11: test_forward_int

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def test_forward_int(self, src_backend_config, dst_backend_config):
        assert dst_backend_config.xp is not chainerx
        src_device = src_backend_config.device
        dst_device = dst_backend_config.device
        if dst_device.xp is numpy:
            dst_device_spec = -1
        elif dst_device.xp is chainer.backends.cuda.cupy:
            dst_device_spec = dst_device.device.id
        else:
            assert False, dst_device

        self.check_forward(
            dst_device_spec,
            src_device,
            dst_device) 
Developer: chainer, Project: chainer, Lines: 17, Source: test_copy.py

Example 12: test_forward_str

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def test_forward_str(self, src_backend_config, dst_backend_config):
        assert dst_backend_config.xp is not chainerx
        src_device = src_backend_config.device
        dst_device = dst_backend_config.device
        if dst_device.xp is numpy:
            dst_device_spec = '@numpy'
        elif dst_device.xp is chainer.backends.cuda.cupy:
            dst_device_spec = '@cupy:{}'.format(dst_device.device.id)
        else:
            assert False, dst_device

        self.check_forward(
            dst_device_spec,
            src_device,
            dst_device) 
Developer: chainer, Project: chainer, Lines: 17, Source: test_copy.py
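Examples 11 and 12 build the device specifiers that test_copy.py feeds to F.copy: -1 or '@numpy' for the NumPy backend, and a GPU id or '@cupy:<id>' for the CuPy backend. Below is a minimal sketch along those lines, assuming only that chainer and numpy are installed and that F.copy accepts these specifiers as exercised by those tests (the GPU branch is guarded).

import numpy
import chainer
import chainer.functions as F

x = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)

# Integer and string specifiers denote the same CPU backend.
y_int = F.copy(x, -1)
y_str = F.copy(x, '@numpy')
assert (y_int.array == x).all() and (y_str.array == x).all()

if chainer.backends.cuda.available:
    # With CuPy installed, '@cupy:0' (or a GPU id) selects the CUDA backend.
    y_gpu = F.copy(x, '@cupy:0')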

Example 13: test_call_cudnn_forward

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def test_call_cudnn_forward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with testing.patch(
                    'chainer.backends.cuda.get_cudnn_dropout_states') as func:
                self.forward()
                assert func.called == (self.use_cudnn == 'always') 
Developer: chainer, Project: chainer, Lines: 8, Source: test_dropout.py

Example 14: test_call_cudnn_backward

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def test_call_cudnn_backward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            y = self.forward()
            y.grad = self.gy
            with testing.patch(
                    'chainer.backends.cuda.get_cudnn_dropout_states') as func:
                y.backward()
                assert func.called == (self.use_cudnn == 'always') 
Developer: chainer, Project: chainer, Lines: 10, Source: test_dropout.py

Example 15: test_invalid

# Required import: import chainer [as alias]
# Or: from chainer import backends [as alias]
def test_invalid(self):
        eps = -0.1
        if chainer.backends.cuda.libcudnn.get_build_version() < 7500:
            eps = 2e-6
        with self.assertRaises(RuntimeError):
            functions.fixed_batch_normalization(*self.args, eps=eps) 
Developer: chainer, Project: chainer, Lines: 8, Source: test_batch_normalization.py


Note: The chainer.backends examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.