

Python cuda.CudaNdarrayType Method Code Examples

This article collects typical usage examples of the Python method theano.sandbox.cuda.CudaNdarrayType. If you are unsure what cuda.CudaNdarrayType does or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, theano.sandbox.cuda.


The following shows 15 code examples of the cuda.CudaNdarrayType method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
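Before the excerpts, here is a minimal orientation sketch of how CudaNdarrayType is typically instantiated. This is our own illustration, not taken from any of the projects below, and it assumes the legacy theano.sandbox.cuda backend is installed and enabled (device=gpu, float32):

# Minimal sketch; assumes the legacy theano.sandbox.cuda backend is available.
from theano.sandbox.cuda import CudaNdarrayType

# A type for 4-d float32 GPU tensors; each entry of `broadcastable` says
# whether the corresponding dimension is broadcastable (i.e. has length 1).
gpu_tensor4 = CudaNdarrayType(broadcastable=(False, False, False, False))

# Calling the type instance creates a symbolic variable of that type,
# just as the examples below do with `otype()` or `targets_type()`.
x = gpu_tensor4('x')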

Example 1: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, *inputs):
        assert self.nout == 1
        assert len(inputs) == 2  # TODO remove
        _inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
        if self.nin > 0 and len(_inputs) != self.nin:
            raise TypeError('Wrong argument count', (self.nin, len(_inputs)))
        for i in _inputs[1:]:
            if i.type.ndim != inputs[0].type.ndim:
                raise TypeError('different ranks among inputs')

        if any([any(i.type.broadcastable) for i in inputs]):
            raise Exception("pycuda does not support broadcasted dimensions")

        otype = CudaNdarrayType(broadcastable=[False] * _inputs[0].type.ndim)
        out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)])
        return out_node 
Developer: muhanzhang | Project: D-VAE | Lines: 18 | Source: pycuda_example.py
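For context, the sketch below shows how an Op whose make_node looks like Example 1 would typically be applied; MyPycudaElemwiseOp is a hypothetical stand-in for the Op class the method belongs to, which is not shown in the excerpt:

# Hedged usage sketch; `MyPycudaElemwiseOp` is hypothetical and stands in for
# the elementwise Op class whose make_node is shown above.
from theano.sandbox.cuda import CudaNdarrayType

x = CudaNdarrayType(broadcastable=(False, False))('x')
y = CudaNdarrayType(broadcastable=(False, False))('y')
# Applying the Op invokes make_node: both inputs are wrapped with
# gpu_contiguous(as_cuda_ndarray_variable(...)) and the single output is a
# non-broadcastable CudaNdarray variable of the same rank.
# z = MyPycudaElemwiseOp(...)(x, y)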

Example 2: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, pvals, unis):
        assert pvals.dtype == 'float32'
        assert unis.dtype == 'float32'
        if not isinstance(pvals.type, CudaNdarrayType):
            raise TypeError('pvals must be cudandarray', pvals)
        if not isinstance(unis.type, CudaNdarrayType):
            raise TypeError('unis must be cudandarray', unis)
        if self.odtype == 'auto':
            odtype = pvals.dtype
        else:
            odtype = self.odtype
        if odtype != pvals.dtype:
            raise NotImplementedError(
                'GpuMultinomialFromUniform works only if '
                'self.odtype == pvals.dtype', odtype, pvals.dtype)
        br = (pvals.broadcastable[1], pvals.broadcastable[0])
        out = CudaNdarrayType(broadcastable=br)()
        return Apply(self, [pvals, unis], [out]) 
Developer: muhanzhang | Project: D-VAE | Lines: 20 | Source: multinomial.py
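Note that the output's broadcastable pattern is (pvals.broadcastable[1], pvals.broadcastable[0]), which suggests the GPU op returns its samples transposed relative to pvals. A small sketch of building compatible inputs follows; the variable names and shapes are our assumptions:

# Hedged sketch of inputs accepted by the make_node above.
from theano.sandbox.cuda import CudaNdarrayType

pvals = CudaNdarrayType(broadcastable=(False, False))('pvals')  # e.g. (batch, n_outcomes)
unis = CudaNdarrayType(broadcastable=(False,))('unis')          # uniform draws, one per row (our assumption)
# The Apply node built above has one output whose broadcastable pattern
# swaps the two dimensions of pvals.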

Example 3: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, V, W, b, d):
        """

        Parameters
        ----------
        V
            Visible unit, input.
        W
            Weights, filter.
        b
            Bias.
        d
            Strides when moving the filter over the input.

        """
        V_ = as_cuda_ndarray_variable(V)
        W_ = as_cuda_ndarray_variable(W)
        b_ = as_cuda_ndarray_variable(b)
        d_ = T.as_tensor_variable(d)
        broad = (V_.broadcastable[0], W_.broadcastable[0], False, False, False)
        return theano.Apply(self, inputs=[V_, W_, b_, d_],
                            outputs=[CudaNdarrayType(dtype=V_.dtype,
                                                     broadcastable=broad)()]) 
Developer: muhanzhang | Project: D-VAE | Lines: 25 | Source: GpuConv3D.py
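A brief hedged sketch of declaring inputs for the make_node above; the 5-d layout shown in the comments follows the convention of Theano's Conv3D/GpuConv3D and is stated here as an assumption:

# Hedged sketch; the layout comments are assumptions based on Theano's Conv3D docs.
import theano.tensor as T
from theano.sandbox.cuda import CudaNdarrayType

V = CudaNdarrayType(broadcastable=(False,) * 5)('V')  # input, e.g. (batch, row, col, time, channel)
W = CudaNdarrayType(broadcastable=(False,) * 5)('W')  # filter bank
b = CudaNdarrayType(broadcastable=(False,))('b')      # one bias per output channel
d = T.as_tensor_variable((1, 1, 1))                   # filter strides along the three dimensions
# make_node would then produce an output of type
# CudaNdarrayType(broadcastable=(V.broadcastable[0], W.broadcastable[0], False, False, False)).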

Example 4: speed_elemwise_collapse

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def speed_elemwise_collapse():
    """ used to time if the collapse of ccontiguous dims are useful """

    shape = (30, 40, 50, 600)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, ::2, :, :]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, ::2, :, :]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        # let debugmode catch errors
        f(v)
    t2 = time.time() 
Developer: muhanzhang | Project: D-VAE | Lines: 23 | Source: test_basic_ops.py

Example 5: speed_elemwise_collapse2

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def speed_elemwise_collapse2():
    """ used to test the speed up of the generalised collapse of
    ccontiguous dims"""

    shape = (30, 40, 50, 600)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, :, :, ::2]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, :, :, ::2]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        # let debugmode catch errors
        f(v)
    t2 = time.time() 
Developer: muhanzhang | Project: D-VAE | Lines: 24 | Source: test_basic_ops.py

Example 6: test_elemwise_collapse2

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse2():
    """ Test when only one inputs have one broadcastable dimension """

    shape = (4, 5, 9)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse to 3 dimensions" 
Developer: muhanzhang | Project: D-VAE | Lines: 22 | Source: test_basic_ops.py

Example 7: test_elemwise_collapse3

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse3():
    """ Test when only one inputs have two broadcastable dimension at each ends """

    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape),
                        dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)

    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    # print "Expected collapse to 3 dimensions" 
Developer: muhanzhang | Project: D-VAE | Lines: 24 | Source: test_basic_ops.py

Example 8: test_elemwise_collapse4

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse4():
    """ Test when only one inputs have two broadcastable dimension at
    each ends and we add a scalar"""

    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    # print "Expected collapse to 3 dimensions" 
Developer: muhanzhang | Project: D-VAE | Lines: 23 | Source: test_basic_ops.py

Example 9: test_elemwise_collapse5

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse5():
    """ Test when only one inputs have two broadcastable dimension at
    the beginning and we add a scalar"""

    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, 4, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)

    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v + 2)
    # print "Expected collapse to 2 dimensions" 
Developer: muhanzhang | Project: D-VAE | Lines: 24 | Source: test_basic_ops.py

Example 10: test_elemwise_collapse6

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse6():
    """ Test when all inputs have two broadcastable dimension at the
    beginning"""

    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    # print "Expected collapse to c contiguous" 
Developer: muhanzhang | Project: D-VAE | Lines: 22 | Source: test_basic_ops.py

Example 11: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, pvals, unis):
        assert pvals.dtype == 'float32'
        assert unis.dtype == 'float32'
        if not isinstance(pvals.type, CudaNdarrayType):
            raise TypeError('pvals must be cudandarray', pvals)
        if not isinstance(unis.type, CudaNdarrayType):
            raise TypeError('unis must be cudandarray', unis)
        if self.odtype == 'auto':
            odtype = pvals.dtype
        else:
            odtype = self.odtype
        if odtype != pvals.dtype:
            raise NotImplementedError('GpuMultinomialFromUniform2 works only if '
                'self.odtype == pvals.dtype', odtype, pvals.dtype)
        br = (unis.broadcastable[0], unis.broadcastable[1])
        out = CudaNdarrayType(broadcastable=br)()
        return Apply(self, [pvals, unis], [out]) 
Developer: sordonia | Project: hred-qs | Lines: 19 | Source: theano_extensions.py

Example 12: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images, filters):
        """
        .. todo::

            WRITEME
        """
        ibcast = images.broadcastable
        fbcast = filters.broadcastable
        igroups, icolors_per_group, irows, icols, icount = ibcast
        fmodulesR, fmodulesC, fcolors, frows, fcols = fbcast[:-2]
        fgroups, filters_per_group = fbcast[-2:]
        hbcast = (fgroups, filters_per_group, fmodulesR, fmodulesC, icount)
        if not isinstance(images.type, CudaNdarrayType):
            raise TypeError('gpu_filter_acts requires CudaNdarray images',
                    images)
        if not isinstance(filters.type, CudaNdarrayType):
            raise TypeError('gpu_filter_acts requires CudaNdarray filters',
                    filters)
        htype = CudaNdarrayType(broadcastable=hbcast)
        return theano.gof.Apply(self,
                [images, filters],
                [htype()]) 
Developer: zchengquan | Project: TextDetector | Lines: 24 | Source: gpu_unshared_conv.py

Example 13: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images):
        """
        .. todo::

            WRITEME
        """
        images = as_cuda_ndarray_variable(images)

        assert images.ndim == 4

        channels_broadcastable = images.type.broadcastable[0]
        batch_broadcastable = images.type.broadcastable[3]

        rows_broadcastable = False
        cols_broadcastable = False

        targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                cols_broadcastable, batch_broadcastable)
        targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
        targets = targets_type()
        seed = self.seed_state
        seed = as_cuda_ndarray_variable(seed)
        return Apply(self, [images, seed], [targets]) 
Developer: zchengquan | Project: TextDetector | Lines: 25 | Source: stochastic_pool.py

Example 14: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images):
        """
        .. todo::

            WRITEME
        """
        images = as_cuda_ndarray_variable(images)

        assert images.ndim == 4

        channels_broadcastable = images.type.broadcastable[0]
        batch_broadcastable = images.type.broadcastable[3]

        rows_broadcastable = False
        cols_broadcastable = False

        targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                cols_broadcastable, batch_broadcastable)
        targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
        targets = targets_type()

        return Apply(self, [images], [targets]) 
Developer: zchengquan | Project: TextDetector | Lines: 24 | Source: pool.py

Example 15: make_node

# Required module import: from theano.sandbox import cuda [as alias]
# Alternatively: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images):
        """
        .. todo::

            WRITEME
        """
        if not isinstance(images.type, CudaNdarrayType):
            raise TypeError("CrossMapNorm: expected images.type to be CudaNdarrayType, "
                    "got " + str(images.type))

        assert images.ndim == 4

        targets_broadcastable = images.type.broadcastable
        targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
        denoms = targets_type()
        targets = targets_type()

        return Apply(self, [images], [targets, denoms]) 
Developer: zchengquan | Project: TextDetector | Lines: 20 | Source: response_norm.py


Note: The theano.sandbox.cuda.CudaNdarrayType method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.