This article collects typical usage examples of the Python method theano.sandbox.cuda.CudaNdarrayType. If you have been wondering what cuda.CudaNdarrayType does, or how to use it in practice, the curated code samples below may help. You can also read further about the module it belongs to, theano.sandbox.cuda.
Below are 15 code examples of cuda.CudaNdarrayType, sorted by popularity by default. Upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
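Before diving into the examples, a minimal sketch of the method itself may help. Note that theano.sandbox.cuda is the legacy GPU backend (later deprecated in favor of theano.gpuarray), and the samples below are Python 2 era code; the sketch assumes such an environment with a CUDA device configured.

from theano.sandbox.cuda import CudaNdarrayType

# A CudaNdarrayType is parameterised by a `broadcastable` pattern, one flag
# per dimension; the dtype is always float32. Calling the type object
# yields a fresh symbolic GPU variable of that rank.
ttype = CudaNdarrayType(broadcastable=(False, False, False, False))
x = ttype()             # symbolic 4-D GPU tensor
assert x.type.ndim == 4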
Example 1: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, *inputs):
    assert self.nout == 1
    assert len(inputs) == 2  # TODO remove
    _inputs = [gpu_contiguous(as_cuda_ndarray_variable(i)) for i in inputs]
    if self.nin > 0 and len(_inputs) != self.nin:
        raise TypeError('Wrong argument count', (self.nin, len(_inputs)))
    for i in _inputs[1:]:
        if i.type.ndim != inputs[0].type.ndim:
            raise TypeError('different ranks among inputs')
    if any(any(i.type.broadcastable) for i in inputs):
        raise Exception("pycuda doesn't support broadcasted dimensions")
    # The output is a dense GPU array of the same rank as the first input.
    otype = CudaNdarrayType(broadcastable=[False] * _inputs[0].type.ndim)
    out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)])
    return out_node
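A note on the output type above: the all-False broadcastable pattern declares a dense output of the same rank as the first input, and the guard rejects any input with a broadcastable dimension, since the underlying PyCUDA kernel cannot broadcast (the `xrange` call also marks this as Python 2 code). A small illustration, not part of the op, for a hypothetical 3-D case:

# Dense pattern accepted by the guard above: no True flags anywhere.
dense = CudaNdarrayType(broadcastable=(False, False, False))
assert not any(dense.broadcastable)
# The matching output type is built the same way, rank copied from input.
otype = CudaNdarrayType(broadcastable=[False] * dense.ndim)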
Example 2: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, pvals, unis):
    assert pvals.dtype == 'float32'
    assert unis.dtype == 'float32'
    if not isinstance(pvals.type, CudaNdarrayType):
        raise TypeError('pvals must be cudandarray', pvals)
    if not isinstance(unis.type, CudaNdarrayType):
        raise TypeError('unis must be cudandarray', unis)
    if self.odtype == 'auto':
        odtype = pvals.dtype
    else:
        odtype = self.odtype
    if odtype != pvals.dtype:
        raise NotImplementedError(
            'GpuMultinomialFromUniform works only if '
            'self.odtype == pvals.dtype', odtype, pvals.dtype)
    br = (pvals.broadcastable[1], pvals.broadcastable[0])
    out = CudaNdarrayType(broadcastable=br)()
    return Apply(self, [pvals, unis], [out])
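Note the swapped indices when building `br`: the GPU kernel writes its result transposed relative to `pvals`, so the output type swaps pvals' two broadcastable flags and the surrounding graph is expected to transpose the result back (my reading of the Theano source; treat it as an assumption). An illustration with hypothetical flags:

pv_t = CudaNdarrayType(broadcastable=(False, True))
out_t = CudaNdarrayType(broadcastable=(pv_t.broadcastable[1],
                                       pv_t.broadcastable[0]))
assert out_t.broadcastable == (True, False)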
Example 3: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, V, W, b, d):
    """
    Parameters
    ----------
    V
        Visible unit, input.
    W
        Weights, filter.
    b
        Bias.
    d
        Strides when moving the filter over the input.
    """
    V_ = as_cuda_ndarray_variable(V)
    W_ = as_cuda_ndarray_variable(W)
    b_ = as_cuda_ndarray_variable(b)
    d_ = T.as_tensor_variable(d)
    broad = (V_.broadcastable[0], W_.broadcastable[0], False, False, False)
    return theano.Apply(self, inputs=[V_, W_, b_, d_],
                        outputs=[CudaNdarrayType(dtype=V_.dtype,
                                                 broadcastable=broad)()])
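The `broad` tuple mixes inherited and fixed flags: the batch flag comes from V, the filter-count flag from W, and the three remaining output dimensions are declared non-broadcastable. The same bookkeeping with concrete, hypothetical 5-D types:

# Hypothetical dense 5-D input and filter types for the 3-D conv op.
V_t = CudaNdarrayType(broadcastable=(False,) * 5)
W_t = CudaNdarrayType(broadcastable=(False,) * 5)
broad = (V_t.broadcastable[0], W_t.broadcastable[0], False, False, False)
H_t = CudaNdarrayType(dtype='float32', broadcastable=broad)
assert H_t.broadcastable == (False,) * 5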
Example 4: speed_elemwise_collapse
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def speed_elemwise_collapse():
    """Used to time whether the collapse of c-contiguous dims is useful."""
    shape = (30, 40, 50, 600)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    # note: `a` is immediately rebound to a host array below
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, ::2, :, :]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, ::2, :, :]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        # let debugmode catch errors
        f(v)
    t2 = time.time()
Example 5: speed_elemwise_collapse2
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def speed_elemwise_collapse2():
    """Used to test the speed-up of the generalised collapse of
    c-contiguous dims."""
    shape = (30, 40, 50, 600)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, :, :, ::2]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, :, :, ::2]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        # let debugmode catch errors
        f(v)
    t2 = time.time()
Example 6: test_elemwise_collapse2
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse2():
    """Test when only one input has a broadcastable dimension."""
    shape = (4, 5, 9)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse to 3 dimensions"
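For readers without a GPU, the arithmetic being verified is plain broadcasting; here is a NumPy-only analogy of the same computation (not the GPU code path):

import numpy

shape = (4, 5, 9)
a = numpy.random.rand(*shape).astype('float32')
v = numpy.random.rand(shape[0], 5, shape[1], shape[2]).astype('float32')
# dimshuffle(0, 'x', 1, 2) amounts to inserting a length-1 axis at
# position 1; broadcasting then expands it against v's 5 entries.
out = a.reshape(shape[0], 1, shape[1], shape[2]) + v
assert out.shape == (4, 5, 5, 9)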
Example 7: test_elemwise_collapse3
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse3():
    """Test when only one input has two broadcastable dimensions, one at
    each end."""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape),
                        dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    # print "Expected collapse to 3 dimensions"
Example 8: test_elemwise_collapse4
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse4():
    """Test when only one input has two broadcastable dimensions, one at
    each end, and we add a scalar."""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    # print "Expected collapse to 3 dimensions"
Example 9: test_elemwise_collapse5
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse5():
    """Test when only one input has two broadcastable dimensions at the
    beginning and we add a scalar."""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, 4, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v + 2)
    # print "Expected collapse to 2 dimensions"
Example 10: test_elemwise_collapse6
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def test_elemwise_collapse6():
    """Test when all inputs have two broadcastable dimensions at the
    beginning."""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    # print "Expected collapse to c contiguous"
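Here both leading flags are True, so the compiled function only accepts values whose first two axes have length 1, and the whole expression can collapse to a single c-contiguous elementwise loop. The NumPy analogue of that shape contract (illustration only):

import numpy

shape = (4, 5)
a = numpy.random.rand(*shape).astype('float32')
v = numpy.random.rand(1, 1, shape[0], shape[1]).astype('float32')
out = a.reshape(1, 1, shape[0], shape[1]) + v  # size-1 axes broadcast
assert out.shape == (1, 1, 4, 5)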
Example 11: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, pvals, unis):
    assert pvals.dtype == 'float32'
    assert unis.dtype == 'float32'
    if not isinstance(pvals.type, CudaNdarrayType):
        raise TypeError('pvals must be cudandarray', pvals)
    if not isinstance(unis.type, CudaNdarrayType):
        raise TypeError('unis must be cudandarray', unis)
    if self.odtype == 'auto':
        odtype = pvals.dtype
    else:
        odtype = self.odtype
    if odtype != pvals.dtype:
        raise NotImplementedError('GpuMultinomialFromUniform2 works only if '
                                  'self.odtype == pvals.dtype',
                                  odtype, pvals.dtype)
    br = (unis.broadcastable[0], unis.broadcastable[1])
    out = CudaNdarrayType(broadcastable=br)()
    return Apply(self, [pvals, unis], [out])
Example 12: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images, filters):
    """
    .. todo::

        WRITEME
    """
    ibcast = images.broadcastable
    fbcast = filters.broadcastable
    igroups, icolors_per_group, irows, icols, icount = ibcast
    fmodulesR, fmodulesC, fcolors, frows, fcols = fbcast[:-2]
    fgroups, filters_per_group = fbcast[-2:]
    hbcast = (fgroups, filters_per_group, fmodulesR, fmodulesC, icount)
    if not isinstance(images.type, CudaNdarrayType):
        raise TypeError('gpu_filter_acts requires CudaNdarray images',
                        images)
    if not isinstance(filters.type, CudaNdarrayType):
        raise TypeError('gpu_filter_acts requires CudaNdarray filters',
                        filters)
    htype = CudaNdarrayType(broadcastable=hbcast)
    return theano.gof.Apply(self,
                            [images, filters],
                            [htype()])
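The broadcastable patterns are unpacked here purely as positional bookkeeping: judging from the variable names (an inference, not confirmed by the source), images are laid out as (groups, colors_per_group, rows, cols, count) and filters as (modulesR, modulesC, colors, frows, fcols, groups, filters_per_group). The output pattern can be assembled the same way:

# Hypothetical dense 5-D image and 7-D filter types.
im_t = CudaNdarrayType(broadcastable=(False,) * 5)
f_t = CudaNdarrayType(broadcastable=(False,) * 7)
hbcast = (f_t.broadcastable[-2:] + f_t.broadcastable[:2]
          + im_t.broadcastable[-1:])
h_t = CudaNdarrayType(broadcastable=hbcast)
assert h_t.ndim == 5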
Example 13: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images):
    """
    .. todo::

        WRITEME
    """
    images = as_cuda_ndarray_variable(images)
    assert images.ndim == 4
    channels_broadcastable = images.type.broadcastable[0]
    batch_broadcastable = images.type.broadcastable[3]
    rows_broadcastable = False
    cols_broadcastable = False
    targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                             cols_broadcastable, batch_broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    seed = self.seed_state
    seed = as_cuda_ndarray_variable(seed)
    return Apply(self, [images, seed], [targets])
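The axis choices suggest the cuda-convnet c01b layout, images as (channels, rows, cols, batch) (an inference from reading axis 0 as channels and axis 3 as batch): the output's rows and cols are pinned non-broadcastable while channel and batch flags are inherited, and the op threads its seed state through the graph as a second CUDA input. A flag-level illustration with a hypothetical single-example batch:

im_t = CudaNdarrayType(broadcastable=(False, False, False, True))
tb = (im_t.broadcastable[0], False, False, im_t.broadcastable[3])
t_t = CudaNdarrayType(broadcastable=tb)
assert t_t.broadcastable == (False, False, False, True)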
Example 14: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images):
    """
    .. todo::

        WRITEME
    """
    images = as_cuda_ndarray_variable(images)
    assert images.ndim == 4
    channels_broadcastable = images.type.broadcastable[0]
    batch_broadcastable = images.type.broadcastable[3]
    rows_broadcastable = False
    cols_broadcastable = False
    targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                             cols_broadcastable, batch_broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    return Apply(self, [images], [targets])
Example 15: make_node
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import CudaNdarrayType [as alias]
def make_node(self, images):
    """
    .. todo::

        WRITEME
    """
    if not isinstance(images.type, CudaNdarrayType):
        raise TypeError("CrossMapNorm: expected images.type to be "
                        "CudaNdarrayType, got " + str(images.type))
    assert images.ndim == 4
    targets_broadcastable = images.type.broadcastable
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    denoms = targets_type()
    targets = targets_type()
    return Apply(self, [images], [targets, denoms])
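CrossMapNorm returns two outputs sharing one type: calling the type object twice yields two distinct symbolic variables of equal type, which is the idiom used for `targets` and `denoms` above. A quick illustration (assumes the legacy CUDA backend, as before):

t_type = CudaNdarrayType(broadcastable=(False,) * 4)
denoms, targets = t_type(), t_type()
assert denoms is not targets
assert denoms.type == targets.type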