This article collects typical usage examples of theano.sandbox.cuda.GpuOp in Python. If you are wondering how cuda.GpuOp is used in practice, the selected code examples here may help. You can also look further into usage examples for the containing module, theano.sandbox.cuda.
Two code examples that use cuda.GpuOp are shown below.
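Both examples are Theano local graph optimizers that rely on GpuOp being the common base class of the old CUDA backend's ops, so an isinstance check is enough to tell GPU ops from CPU ops. As a minimal, hedged sketch of that pattern (the optimizer name my_local_check is made up for illustration, and it assumes the legacy theano.sandbox.cuda backend together with theano.gof.local_optimizer):

from theano.gof import local_optimizer
from theano.sandbox.cuda import GpuOp

@local_optimizer(None)  # None: try this optimizer on every node
def my_local_check(node):
    # GpuOp is the base class of all ops in the old CUDA backend, so an
    # isinstance check distinguishes GPU ops from CPU ops.
    if not isinstance(node.op, GpuOp):
        return False  # no replacement proposed for CPU ops
    # A real optimizer would return a list of replacement outputs here.
    return False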
Example 1: local_assert_no_cpu_op
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import GpuOp [as alias]
def local_assert_no_cpu_op(node):
    # Fire when a CPU op (not a GpuOp) has all of its inputs coming from
    # the GPU and at least one output that is transferred back to the GPU.
    if (not isinstance(node.op, GpuOp) and
            all([var.owner and isinstance(var.owner.op, HostFromGpu)
                 for var in node.inputs]) and
            any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
                 for var in node.outputs])):
        if config.assert_no_cpu_op == "warn":
            _logger.warning(("CPU op %s is detected in the computational"
                             " graph") % node)
        elif config.assert_no_cpu_op == "raise":
            raise AssertionError("The op %s is on CPU." % node)
        elif config.assert_no_cpu_op == "pdb":
            pdb.set_trace()
    return None
# Register the local_assert_no_cpu_op:
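The excerpt stops at the comment about registration; the registration call itself is not shown. As a hedged sketch of what typically follows (the database name 'assert_no_cpu_op' and the position 99.9 are illustrative assumptions, not taken from the Theano source):

# Sketch only: wrap the local optimizer and register it on the global
# optimization database; the name and position below are assumptions.
import theano
from theano.compile import optdb
from theano.tensor.opt import in2out

assert_no_cpu_op = in2out(local_assert_no_cpu_op,
                          name='assert_no_cpu_op')
# Register late so it runs after the GPU transfer optimizations.
optdb.register('assert_no_cpu_op', assert_no_cpu_op, 99.9,
               'assert_no_cpu_op')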
Example 2: local_gpu_subtensor
# Required import: from theano.sandbox import cuda [as alias]
# Or: from theano.sandbox.cuda import GpuOp [as alias]
def local_gpu_subtensor(node):
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if host_input.owner and \
           isinstance(host_input.owner.op, tensor.Subtensor):
            subt = host_input.owner.op
            x = host_input.owner.inputs[0]
            if len(x.clients) == 1:
                # The input of the subtensor is used only by the subtensor
                # itself; we do not want to move the subtensor to the GPU
                # in that case.
                return
            coords = host_input.owner.inputs[1:]
            return [GpuSubtensor(subt.idx_list)(as_cuda_ndarray_variable(x),
                                                *coords)]
    if isinstance(node.op, tensor.Subtensor):
        x = node.inputs[0]
        if (x.owner and
                isinstance(x.owner.op, HostFromGpu) and
                x.dtype == "float32"):
            gpu_x = x.owner.inputs[0]
            if (gpu_x.owner and
                    isinstance(gpu_x.owner.op, GpuFromHost) and
                    # And it is a shared var or an input of the graph.
                    not gpu_x.owner.inputs[0].owner):
                if len(x.clients) == 1:
                    if any([n == 'output' or isinstance(n.op, GpuOp)
                            for n, _ in node.outputs[0].clients]):
                        return
                    else:
                        return [host_from_gpu(as_cuda_ndarray_variable(
                            node.outputs[0]))]
                    return
            gpu_x, = x.owner.inputs
            coords = node.inputs[1:]
            return [host_from_gpu(GpuSubtensor(
                node.op.idx_list)(gpu_x, *coords))]
    return False
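To see this rewrite in action, one can slice a float32 shared variable and inspect the compiled graph. A small sketch, assuming Theano is configured for the legacy CUDA backend (device=gpu, floatX=float32); the variable names are illustrative:

import numpy
import theano

# With device=gpu, a float32 shared variable is stored on the GPU, so the
# graph around it contains HostFromGpu/GpuFromHost transfer nodes.
W = theano.shared(numpy.zeros((64, 32), dtype='float32'), name='W')

# Slicing W first builds a host-side Subtensor; local_gpu_subtensor can
# replace it with GpuSubtensor applied directly to the GPU variable,
# avoiding a transfer of the full tensor.
f = theano.function([], W[:16])
theano.printing.debugprint(f)  # check whether GpuSubtensor appears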