This article collects typical usage examples of the Python function theano.sandbox.cuda.gpu_from_host. If you are wondering how gpu_from_host is used in practice, the selected code examples below may help.
The following shows 15 code examples of the gpu_from_host function, sorted by popularity by default.
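Before the examples, here is a minimal usage sketch of the host-to-GPU transfer pattern that all of the snippets below build on. It is illustrative only: it assumes an old Theano release where the legacy theano.sandbox.cuda backend is importable and a CUDA device is configured, and the variable names are made up for the example.

import numpy
import theano
import theano.tensor as tensor
from theano.sandbox import cuda  # legacy CUDA backend (pre-gpuarray Theano)

# Build a float32 graph on the host, then explicitly request GPU transfers.
x = tensor.fmatrix('x')             # host input
y = tensor.exp(x).sum(axis=1)       # some host-side computation
y_gpu = cuda.gpu_from_host(y)       # result as a CudaNdarray living on the GPU
y_cpu = cuda.host_from_gpu(y_gpu)   # transfer back so the compiled function returns a numpy array

f = theano.function([x], y_cpu)
print(f(numpy.ones((2, 3), dtype='float32')))

When a graph is compiled with the GPU enabled, Theano's optimizer normally inserts or removes these transfer ops on its own; writing gpu_from_host by hand, as in the test cases below, mostly serves to force a value onto the GPU or to check that an op has a GPU implementation.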
Example 1: test_reject_rect
def test_reject_rect():
    for cls in (FilterActs, ImageActs):
        # Tests that running FilterActs with a non-square
        # kernel is an error
        rng = np.random.RandomState([2012, 10, 9])
        batch_size = 5
        rows = 10
        cols = 9
        channels = 3
        filter_rows = 4
        filter_cols = filter_rows + 1
        num_filters = 6

        images = shared(rng.uniform(-1., 1., (channels, rows, cols,
                                              batch_size)).astype('float32'),
                        name='images')
        filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                                               filter_cols, num_filters)).astype('float32'),
                         name='filters')

        gpu_images = gpu_from_host(images)
        gpu_filters = gpu_from_host(filters)

        if cls is ImageActs:
            output = cls()(gpu_images, gpu_filters,
                           as_tensor_variable((rows, cols)))
        else:
            output = cls()(gpu_images, gpu_filters)

        f = function([], output)

        try:
            output = f()
        except ValueError:
            continue

        assert False
Example 2: local_to_gpu
def local_to_gpu(node):
    """
    op(host_from_gpu()) -> host_from_gpu(op)
    gpu_from_host(op) -> op(gpu_from_host)
    """
    if isinstance(node.op, op):
        # op(host_from_gpu()) -> host_from_gpu(op)
        # If any of the inputs that go to the GPU are already on the GPU,
        # move the op to the GPU.
        if any(node.inputs[idx].owner and
               isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)
               for idx in to_gpu):
            new_inp = list(node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            return [cuda.host_from_gpu(op()(*new_inp))]
    if node.op == cuda.gpu_from_host:
        # gpu_from_host(op) -> op(gpu_from_host)
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op,
                                           op):
            op_node = host_input.owner
            new_inp = list(op_node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            return [op()(*new_inp)]
    return False
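The local_to_gpu closure above refers to two free variables, op (the CPU Op to lift) and to_gpu (the indices of the inputs to transfer), so in practice it is produced by a small factory and registered as a local optimizer. The sketch below is hypothetical wiring, not code from the original source: it assumes the register_opt decorator from theano.sandbox.cuda.opt and local_optimizer from theano.gof, which existed in old Theano releases, and the factory name make_gpu_local_opt is made up.

import theano.sandbox.cuda as cuda
from theano.gof import local_optimizer
from theano.sandbox.cuda.opt import register_opt


def make_gpu_local_opt(op, to_gpu):
    # Wrap the local_to_gpu pattern for one CPU op class and register it
    # with the legacy GPU optimization database (hypothetical helper name).
    @register_opt()
    @local_optimizer([op, cuda.gpu_from_host])
    def local_to_gpu(node):
        # Body as in the example above, closing over `op` and `to_gpu`;
        # returning False tells the optimizer not to rewrite this node.
        return False
    return local_to_gpu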
Example 3: local_to_gpu
def local_to_gpu(node):
    """
    op(host_from_gpu()) -> host_from_gpu(op)
    gpu_from_host(op) -> op(gpu_from_host)
    """
    if isinstance(node.op, op):
        # op(host_from_gpu()) -> host_from_gpu(op)
        # If any of the inputs that go to the GPU are already on the GPU,
        # move the op to the GPU.
        if any(node.inputs[idx].owner and
               isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)
               for idx in to_gpu):
            new_inp = list(node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            result_node = op()(*new_inp)
            copy_stack_trace(node.outputs[0], result_node)
            transfer_node = result_node.transfer('cpu')
            copy_stack_trace(node.outputs[0], transfer_node)
            return [transfer_node]
    if node.op == cuda.gpu_from_host:
        # gpu_from_host(op) -> op(gpu_from_host)
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op,
                                           op):
            op_node = host_input.owner
            new_inp = list(op_node.inputs)
            for idx in to_gpu:
                new_inp[idx] = cuda.gpu_from_host(new_inp[idx])
            new_node = op()(*new_inp)
            copy_stack_trace(host_input, new_node)
            return [new_node]
    return False
Example 4: test_gpu_opt
def test_gpu_opt():
    if not cuda.cuda_available:
        # Skip test if cuda_ndarray is not available.
        from nose.plugins.skip import SkipTest
        raise SkipTest('Optional package cuda not available')

    # We test the case where we put the op on the gpu when the output
    # is moved to the gpu.
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([p, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    # Test with a row, it was failing in the past.
    r = tensor.frow()
    m = multinomial.MultinomialFromUniform('auto')(r, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([r, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval2 = f(pval, uval)
Example 5: test_alloc_memset_0
def test_alloc_memset_0():
    i = tensor.iscalar()
    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
    ones = numpy.ones((2,), dtype='float32')

    # Test with 0
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 0).all()

    # Test with 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 1).all()

    # Test with 1, 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(2)) == 1).all()
Example 6: test_reject_bad_filt_number
def test_reject_bad_filt_number():
    for cls in (FilterActs, ImageActs):
        # Tests that running FilterActs with a # of filters per
        # group that is not 16 is an error
        rng = np.random.RandomState([2012, 10, 9])
        batch_size = 5
        rows = 10
        cols = 9
        channels = 3
        filter_rows = 4
        filter_cols = filter_rows
        num_filters = 6

        images = shared(rng.uniform(-1., 1., (channels, rows, cols,
                                              batch_size)).astype('float32'),
                        name='images')
        filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                                               filter_cols, num_filters)).astype('float32'),
                         name='filters')

        gpu_images = gpu_from_host(images)
        gpu_filters = gpu_from_host(filters)

        output = cls()(gpu_images, gpu_filters)
        f = function([], output)

        try:
            output = f()
        except ValueError:
            continue

        assert False
Example 7: test_grad
def test_grad():
    rng = np.random.RandomState([2012, 10, 9])

    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16

    images = shared(rng.uniform(-1.0, 1.0, (channels, rows, cols, batch_size)).astype("float32"),
                    name="images")
    filters = shared(
        rng.uniform(-1.0, 1.0, (channels, filter_rows, filter_cols, num_filters)).astype("float32"),
        name="filters"
    )

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # XXX: use verify_grad
    output_grad = grad(output.sum(), images)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]

    output_conv2d = conv2d(images_bc01, filters_bc01, border_mode="valid")
    output_conv2d = output_conv2d.dimshuffle(1, 2, 3, 0)
    # XXX: use verify_grad
    output_conv2d_grad = grad(output_conv2d.sum(), images)

    f = function([], [output_grad, output_conv2d_grad])

    output_grad, output_conv2d_grad = f()

    warnings.warn(
        """test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
        One possibility is that theano is numerically unstable and Alex's code is better.
        Probably theano CPU 64 bit is OK but it's worth checking the others."""
    )

    if np.abs(output_grad - output_conv2d_grad).max() > 7.7e-6:
        assert type(output_grad) == type(output_conv2d_grad)
        assert output_grad.dtype == output_conv2d_grad.dtype
        if output_grad.shape != output_conv2d_grad.shape:
            print "cuda-convnet shape: ", output_grad.shape
            print "theano shape: ", output_conv2d_grad.shape
            assert False
        err = np.abs(output_grad - output_conv2d_grad)

        print "absolute error range: ", (err.min(), err.max())
        print "mean absolute error: ", err.mean()
        print "cuda-convnet value range: ", (output_grad.min(), output_grad.max())
        print "theano value range: ", (output_conv2d_grad.min(), output_conv2d_grad.max())
        assert False
Example 8: test_match_valid_conv_strided
def test_match_valid_conv_strided():
    # Tests that running FilterActs with stride is the same as running
    # theano's conv2D in valid mode and then downsampling
    rng = np.random.RandomState([2012, 10, 9])

    batch_size = 5
    rows = 9
    cols = 9
    channels = 3
    filter_rows = 3
    filter_cols = filter_rows
    stride = 3
    num_filters = 16

    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
                                          batch_size)).astype('float32'),
                    name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                                           filter_cols, num_filters)).astype('float32'),
                     name='filters')

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs(stride=stride)(gpu_images, gpu_filters)
    output = host_from_gpu(output)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]

    output_conv2d = conv2d(images_bc01, filters_bc01,
                           border_mode='valid', subsample=(stride, stride))
    output_conv2d_orig = output_conv2d.dimshuffle(1, 2, 3, 0)
    output_conv2d = output_conv2d_orig  # [:, ::stride, ::stride, :]

    f = function([], [output, output_conv2d, output_conv2d_orig])

    output, output_conv2d, output_conv2d_orig = f()

    warnings.warn("""test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
                  One possibility is that theano is numerically unstable and Alex's code is better.
                  Probably theano CPU 64 bit is OK but it's worth checking the others.""")

    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ', output.shape
            print 'theano shape: ', output_conv2d.shape
            assert False
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
        assert False
Example 9: test_match_valid_conv
def test_match_valid_conv():
    # Tests that running FilterActs with no padding is the same as running
    # theano's conv2D in valid mode
    rng = np.random.RandomState([2012, 10, 9])

    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16

    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
                                          batch_size)).astype('float32'),
                    name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
                                           filter_cols, num_filters)).astype('float32'),
                     name='filters')

    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)

    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)

    images_bc01 = images.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters.dimshuffle(3, 0, 1, 2)
    filters_bc01 = filters_bc01[:, :, ::-1, ::-1]

    output_conv2d = conv2d(images_bc01, filters_bc01,
                           border_mode='valid')
    output_conv2d = output_conv2d.dimshuffle(1, 2, 3, 0)

    try:
        f = function([], [output, output_conv2d])
    except:
        raise KnownFailureTest("cuda-convnet code depends on an unmerged theano feature.")

    output, output_conv2d = f()

    warnings.warn("test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?")

    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ', output.shape
            print 'theano shape: ', output_conv2d.shape
            assert False
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
        assert False
Example 10: insert_gpu_filter_acts
def insert_gpu_filter_acts(node):
    if isinstance(node.op, FilterActs):
        images, filters = node.inputs
        if any_from_gpu(images, filters) or any_gpu_client(*node.outputs):
            gpu_filter_acts = GpuFilterActs(
                module_stride=node.op.module_stride,
                partial_sum=1)
            return [host_from_gpu(gpu_filter_acts(
                gpu_from_host(images),
                gpu_from_host(filters)))]
Example 11: insert_gpu_weight_acts
def insert_gpu_weight_acts(node):
    if isinstance(node.op, WeightActs):
        images, hidacts, frows, fcols = node.inputs
        if any_from_gpu(images, hidacts) or any_gpu_client(*node.outputs):
            gpu_weight_acts = GpuWeightActs(
                module_stride=node.op.module_stride,
                partial_sum=1)
            return [host_from_gpu(gpu_weight_acts(
                gpu_from_host(images),
                gpu_from_host(hidacts),
                frows,
                fcols,
            ))]
Example 12: insert_gpu_img_acts
def insert_gpu_img_acts(node):
    if isinstance(node.op, ImgActs):
        filters, hidacts, irows, icols = node.inputs
        if any_from_gpu(filters, hidacts) or any_gpu_client(*node.outputs):
            gpu_img_acts = GpuImgActs(
                module_stride=node.op.module_stride,
                partial_sum=1)
            return [host_from_gpu(gpu_img_acts(
                gpu_from_host(filters),
                gpu_from_host(hidacts),
                irows,
                icols,
            ))]
Example 13: traverse
def traverse(out, x, x_copy, d, visited=None):
    ''' Function used by scan to parse the tree and figure out which nodes
    it needs to replace. There are two options:
    1) x and x_copy are on the host, then you would replace x with x_copy
    2) x is on the gpu, x_copy on the host, then you need to replace
       host_from_gpu(x) with x_copy
    This happens because initially shared variables are on the GPU, which is
    fine for the main computational graph but confuses things a bit for the
    inner graph of scan '''
    # ``visited`` is a set of nodes that are already known and don't need to be
    # checked again, speeding up the traversal of multiply-connected graphs.
    # if a ``visited`` set is given, it will be updated in-place so the caller
    # knows which nodes we have seen.
    if visited is None:
        visited = set()
    if out in visited:
        return d
    visited.add(out)
    import theano.sandbox.cuda as cuda
    if out == x:
        d[out] = cuda.gpu_from_host(x_copy)
        return d
    elif out.owner is None:
        return d
    elif (cuda.cuda_available and
          out.owner.op == cuda.host_from_gpu and
          out.owner.inputs == [x]):
        d[out] = tensor.as_tensor_variable(x_copy)
        return d
    else:
        for inp in out.owner.inputs:
            d = traverse(inp, x, x_copy, d, visited)
        return d
Example 14: lmul
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")
    # TODO Why is it CPU??
    print "Por que?!?!", type(x)
    cpu = "Cuda" not in str(type(x))
    if cpu:
        x = gpu_from_host(x)

    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5

    op_axes = ("c", 0, 1, "t", "b")

    if tuple(x_axes) != op_axes:
        print "ssssssssssssssss"
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    _x_4d_shape = (
        self.signal_shape[0],
        self.signal_shape[1],
        self.signal_shape[2],
        self.signal_shape[3] * self.signal_shape[4],
    )

    x = x.reshape(_x_4d_shape)
    x = gpu_contiguous(x)

    rval = FilterActs(self.pad, self.partial_sum, self.kernel_stride[0])(x, self._filters)

    if cpu:
        rval = host_from_gpu(rval)

    rval = rval.reshape(
        (
            self.filter_shape[3],
            self.filter_shape[4],
            rval.shape[1],
            rval.shape[2],
            self.signal_shape[3],
            self.signal_shape[4],
        )
    )

    rval = diagonal_subtensor(rval, 4, 0).sum(axis=0)

    # Format the output based on the output space
    rval_axes = self.output_axes
    assert len(rval_axes) == 5

    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis) for axis in rval_axes])

    return rval
Example 15: lmul
def lmul(self, x):
    """
    dot(x, A)
    aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")
    cpu = 'Cuda' not in str(type(x))
    # assert cpu
    if cpu:
        x = gpu_from_host(x)

    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5

    # x = shapeprint(x)
    op_axes = ('b', 'c', 0, 1, 't')
    print x_axes, op_axes
    # if tuple(x_axes) != op_axes:
    #     x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    # x = shapeprint(x)
    # self._filters = shapeprint(self._filters)

    rval = cuda.blas.GpuCorr3dMM(border_mode='valid',
                                 subsample=tuple(self.kernel_stride),
                                 pad=tuple(self.pad))(x, self._filters)

    # rval = conv3d(im, filt, None, None, (self.kernel_stride[0], self.kernel_stride[1]))
    # rval = rval.dimshuffle(0, 4, 1, 2, 3)
    # print "hello"
    return rval