This article collects typical usage examples of the pygpu.empty function in Python. If you are wondering how the Python empty function is used in practice, or are looking for working examples of it, the hand-picked code samples below should help.
A total of 15 code examples of the empty function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
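Before diving into the examples, here is a minimal orientation sketch: pygpu.empty allocates an uninitialized array in GPU memory, the device-side counterpart of numpy.empty. The device string 'cuda0' below is an assumption; substitute whatever device pygpu.init accepts on your machine.

import numpy
import pygpu

ctx = pygpu.init('cuda0')  # assumed device name; 'openclN:M' would target OpenCL

# Like numpy.empty, the result is uninitialized: read it only after writing.
g = pygpu.empty((2, 3), dtype='float32', context=ctx)
g[:] = numpy.arange(6, dtype='float32').reshape(2, 3)
print(numpy.asarray(g))  # copy back to host for inspection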
Example 1: init_dev
def init_dev(dev, name=None):
    global pygpu_activated
    if not config.cxx:
        raise RuntimeError("The new gpu-backend needs a C++ compiler.")
    if (pygpu.version.major, pygpu.version.minor) < (0, 6):
        raise ValueError("Your installed version of pygpu is too old, please upgrade to 0.6 or later")
    # This is for the C headers API
    if pygpu.gpuarray.api_version()[0] < 0:
        raise ValueError("Your installed libgpuarray is too old, please update")
    if dev not in init_dev.devmap:
        context = pygpu.init(
            dev,
            disable_alloc_cache=config.gpuarray.preallocate < 0,
            single_stream=config.gpuarray.single_stream,
            sched=config.gpuarray.sched,
        )
        context.dev = dev
        init_dev.devmap[dev] = context
        reg_context(name, context)
        if dev.startswith("cuda"):
            avail = dnn.dnn_available(name)
            if avail:
                context.cudnn_handle = dnn._make_handle(context)
            if config.print_active_device:
                if avail:
                    print("Using cuDNN version %d on context %s" % (dnn.version(), name), file=sys.stderr)
                else:
                    print("Can not use cuDNN on context %s: %s" % (name, dnn.dnn_available.msg), file=sys.stderr)
        if config.gpuarray.preallocate < 0:
            print("Disabling allocation cache on %s" % (dev,))
        elif config.gpuarray.preallocate > 0:
            MB = 1024 * 1024
            if config.gpuarray.preallocate <= 1:
                gmem = min(config.gpuarray.preallocate, 0.95) * context.total_gmem
            else:
                gmem = config.gpuarray.preallocate * MB
            if gmem > context.free_gmem - 50 * MB:
                print("WARNING: Preallocating too much memory can prevent cudnn and cublas from working properly")
            # This will allocate and immediately free an object of size gmem,
            # which will reserve that amount of memory on the GPU.
            pygpu.empty((gmem,), dtype="int8", context=context)
            if config.print_active_device:
                print(
                    "Preallocating %d/%d Mb (%f) on %s"
                    % (gmem // MB, context.total_gmem // MB, gmem / context.total_gmem, dev),
                    file=sys.stderr,
                )
    else:
        context = init_dev.devmap[dev]
        # This will map the context name to the real context object.
    if config.print_active_device:
        try:
            pcibusid = "(" + context.pcibusid + ")"
        except pygpu.gpuarray.UnsupportedException:
            pcibusid = ""
        print("Mapped name %s to device %s: %s %s" % (name, dev, context.devname, pcibusid), file=sys.stderr)
    pygpu_activated = True
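The pygpu.empty((gmem,), ...) call above is the whole preallocation mechanism: with libgpuarray's allocation cache enabled, allocating one large int8 buffer and letting it be freed immediately leaves that memory parked in the cache, reserved for later requests. A standalone sketch of the same trick, assuming a device named 'cuda0' and an illustrative 50% reservation:

import pygpu

ctx = pygpu.init('cuda0')  # the allocation cache is enabled by default
MB = 1024 * 1024
gmem = int(0.5 * ctx.total_gmem)  # reserve about half the card, as an example
# Allocate and immediately drop a gmem-byte buffer; the freed memory
# stays in the allocation cache instead of returning to the driver.
pygpu.empty((gmem,), dtype='int8', context=ctx)
print('cached roughly %d MB' % (gmem // MB))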
Example 2: init_dev
def init_dev(dev, name=None):
    v = pygpu.gpuarray.api_version()
    expected = -9997
    if v[0] != expected:
        raise RuntimeError("Wrong major API version for gpuarray:", v[0],
                           "Make sure Theano and libgpuarray/pygpu "
                           "are in sync. Expected", expected)
    if v[1] < 0:
        raise RuntimeError("Wrong minor API version for gpuarray:", v[1],
                           "Please update libgpuarray/pygpu.")
    if len(v) < 3:
        vpy = -1
    else:
        vpy = v[2]
    vpye = 0
    if vpy < vpye:
        print("Wrong python API version for gpuarray:", vpy, "expected:", vpye,
              "Some python ops may not work correctly and/or crash. "
              "Consider updating pygpu.", file=sys.stderr)
    global pygpu_activated
    if dev not in init_dev.devmap:
        ctx = pygpu.init(dev,
                         disable_alloc_cache=config.gpuarray.preallocate < 0,
                         single_stream=config.gpuarray.single_stream,
                         sched=config.gpuarray.sched)
        init_dev.devmap[dev] = ctx
        if config.gpuarray.preallocate > 0:
            MB = (1024 * 1024)
            if config.gpuarray.preallocate <= 1:
                gmem = min(config.gpuarray.preallocate, 0.95) * ctx.total_gmem
            else:
                gmem = config.gpuarray.preallocate * MB
            # This will allocate and immediately free an object of size gmem,
            # which will reserve that amount of memory on the GPU.
            pygpu.empty((gmem,), dtype='int8', context=ctx)
            if config.print_active_device:
                print("Preallocating %d/%d Mb (%f) on %s" %
                      (gmem // MB, ctx.total_gmem // MB, gmem / ctx.total_gmem, dev),
                      file=sys.stderr)
    context = init_dev.devmap[dev]
    # This will map the context name to the real context object.
    reg_context(name, context)
    if config.print_active_device:
        print("Mapped name %s to device %s: %s" %
              (name, dev, context.devname),
              file=sys.stderr)
    pygpu_activated = True
    if dev.startswith('cuda'):
        try:
            cudnn_version = dnn.version()
            # 5200 should not print a warning with cudnn 5.1 final.
            if cudnn_version >= 5200:
                warnings.warn("Your cuDNN version is more recent than Theano."
                              " If you see problems, try updating Theano or"
                              " downgrading cuDNN to version 5.1.")
            if config.print_active_device:
                print("Using cuDNN version %d on context %s" %
                      (cudnn_version, name), file=sys.stderr)
        except Exception:
            pass
Example 3: init_dev
def init_dev(dev, name=None):
    v = pygpu.gpuarray.api_version()
    if v[0] != -10000:
        raise RuntimeError("Wrong major API version for gpuarray:", v[0],
                           "Make sure Theano and libgpuarray/pygpu "
                           "are in sync.")
    if v[1] < 0:
        raise RuntimeError("Wrong minor API version for gpuarray:", v[1],
                           "Please update libgpuarray/pygpu.")
    global pygpu_activated
    if dev not in init_dev.devmap:
        ctx = pygpu.init(dev)
        init_dev.devmap[dev] = ctx
        if config.gpuarray.preallocate != 0:
            if config.gpuarray.preallocate < 1:
                gmem = min(config.gpuarray.preallocate, 0.98) * ctx.total_gmem
            else:
                gmem = config.gpuarray.preallocate * (1024 * 1024)
            # This will allocate and immediately free an object of size gmem,
            # which will reserve that amount of memory on the GPU.
            pygpu.empty((gmem,), dtype='int8', context=ctx)
    context = init_dev.devmap[dev]
    # This will map the context name to the real context object.
    reg_context(name, context)
    pygpu_activated = True
    if config.print_active_device:
        print("Mapped name %s to device %s: %s" % (name, dev, context.devname),
              file=sys.stderr)
Example 4: perform
def perform(self, node, inputs, outputs):
    x, y = inputs
    out = pygpu.empty((x.shape[0], y.shape[1]), dtype=x.dtype,
                      context=x.context)
    outputs[0][0] = blas.gemm(1., x, y, 0., out,
                              overwrite_c=True)
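Example 4 shows a common pattern: preallocate the GEMM result with pygpu.empty and let the BLAS call fill it in place via overwrite_c=True. A self-contained version of the same pattern might look like this (the device name and matrix sizes are assumptions):

import numpy
import pygpu
from pygpu import blas

ctx = pygpu.init('cuda0')  # assumed device
x = pygpu.asarray(numpy.random.rand(4, 5).astype('float32'), context=ctx)
y = pygpu.asarray(numpy.random.rand(5, 3).astype('float32'), context=ctx)

# Uninitialized (4, 3) output; gemm overwrites it, so there is no need to zero it.
out = pygpu.empty((x.shape[0], y.shape[1]), dtype=x.dtype, context=x.context)
blas.gemm(1., x, y, 0., out, overwrite_c=True)  # out = 1*x.dot(y) + 0*out
print(numpy.asarray(out))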
Example 5: test_hash
def test_hash():
    g = pygpu.empty((2, 3), context=ctx)
    exc = None
    try:
        h = hash(g)
    except TypeError as e:
        exc = e
    assert exc is not None
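The same check reads more directly with pytest's raises helper; a sketch assuming pytest is in use and that ctx is the module-level context used throughout these tests:

import pytest

def test_hash():
    g = pygpu.empty((2, 3), context=ctx)
    # GpuArray is mutable, so, as with numpy.ndarray, hashing must fail.
    with pytest.raises(TypeError):
        hash(g)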
Example 6: init_dev
def init_dev(dev, name=None):
    v = pygpu.gpuarray.api_version()
    if v[0] != -10000:
        raise RuntimeError("Wrong major API version for gpuarray:", v[0],
                           "Make sure Theano and libgpuarray/pygpu "
                           "are in sync.")
    if v[1] < 0:
        raise RuntimeError("Wrong minor API version for gpuarray:", v[1],
                           "Please update libgpuarray/pygpu.")
    global pygpu_activated
    if dev not in init_dev.devmap:
        ctx = pygpu.init(dev)
        init_dev.devmap[dev] = ctx
        if config.gpuarray.preallocate != 0:
            if config.gpuarray.preallocate < 1:
                gmem = min(config.gpuarray.preallocate, 0.98) * ctx.total_gmem
            else:
                gmem = config.gpuarray.preallocate * (1024 * 1024)
            # This will allocate and immediately free an object of size gmem,
            # which will reserve that amount of memory on the GPU.
            pygpu.empty((gmem,), dtype='int8', context=ctx)
    context = init_dev.devmap[dev]
    # This will map the context name to the real context object.
    reg_context(name, context)
    pygpu_activated = True
    if config.print_active_device:
        warn = None
        cudnn_version = ""
        if dev.startswith('cuda'):
            cudnn_version = " (CuDNN not available)"
            try:
                cudnn_version = dnn.version()
                # 4100 should not print a warning with cudnn 4 final.
                if cudnn_version > 4100:
                    warn = ("Your CuDNN version is more recent than Theano."
                            " If you see problems, try updating Theano or"
                            " downgrading CuDNN to version 4.")
                cudnn_version = " (CuDNN version %s)" % cudnn_version
            except Exception:
                pass
        print("Mapped name %s to device %s: %s%s" % (
              name, dev, context.devname, cudnn_version),
              file=sys.stderr)
        if warn:
            warnings.warn(warn)
Example 7: ensure_allocated
def ensure_allocated(storage, shape, dtype):
    odat = storage[0]
    if odat is not None:
        if odat.shape != shape:
            # It is unsafe to try to resize odat,
            # we have to allocate new output storage.
            odat = None
    if odat is None:
        odat = pygpu.empty(shape, dtype=dtype)
    storage[0] = odat
    return odat
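ensure_allocated keeps a one-slot output cache in storage[0] and reallocates only when the requested shape changes. Note that it calls pygpu.empty without a context, so it relies on a default context having been set. A hypothetical usage sketch:

storage = [None]                                   # one-slot output cache
a = ensure_allocated(storage, (4, 4), 'float32')   # first call: allocates
b = ensure_allocated(storage, (4, 4), 'float32')   # same shape: reuses the buffer
assert a is b
c = ensure_allocated(storage, (2, 8), 'float32')   # new shape: reallocates
assert c is not a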
Example 8: init_dev
def init_dev(dev, name=None, preallocate=None):
    global pygpu_activated
    if not config.cxx:
        raise RuntimeError("The new gpu-backend needs a C++ compiler.")
    if (pygpu.version.major, pygpu.version.minor, pygpu.version.patch) < (0, 6, 1):
        raise ValueError(
            "Your installed version of pygpu is too old, please upgrade to 0.6.1 or later")
    # This is for the C headers API, we need to match the exact version.
    if pygpu.gpuarray.api_version()[0] != 1:
        raise ValueError(
            "Your installed libgpuarray is not in sync, please make sure to have the appropriate version")
    if dev not in init_dev.devmap:
        if config.gpuarray.cache_path != '':
            os.environ['GPUARRAY_CACHE_PATH'] = config.gpuarray.cache_path
        if preallocate is None:
            preallocate = config.gpuarray.preallocate
        context = pygpu.init(
            dev,
            disable_alloc_cache=preallocate < 0,
            single_stream=config.gpuarray.single_stream,
            sched=config.gpuarray.sched)
        context.dev = dev
        init_dev.devmap[dev] = context
        reg_context(name, context)
        if dev.startswith('cuda'):
            avail = dnn.dnn_available(name)
            if avail:
                context.cudnn_handle = dnn._make_handle(context)
            elif config.dnn.enabled == 'True':
                raise RuntimeError(
                    "You enabled cuDNN, but we aren't able to use it: %s" %
                    dnn.dnn_available.msg)
            if config.print_active_device:
                if avail:
                    print("Using cuDNN version %d on context %s" % (dnn.version(), name),
                          file=sys.stderr)
                else:
                    print("Can not use cuDNN on context %s: %s" % (name, dnn.dnn_available.msg),
                          file=sys.stderr)
        if preallocate < 0:
            print("Disabling allocation cache on %s" % (dev,))
        elif preallocate > 0:
            MB = (1024 * 1024)
            if preallocate <= 1:
                gmem = min(preallocate, 0.95) * context.total_gmem
            else:
                gmem = preallocate * MB
            if gmem > context.free_gmem - 50 * MB:
                print(
                    "WARNING: Preallocating too much memory can prevent cudnn and cublas from working properly")
            # This will allocate and immediately free an object of size gmem,
            # which will reserve that amount of memory on the GPU.
            pygpu.empty((gmem,), dtype='int8', context=context)
            if config.print_active_device:
                print("Preallocating %d/%d Mb (%f) on %s" %
                      (gmem // MB, context.total_gmem // MB,
                       gmem / context.total_gmem, dev),
                      file=sys.stderr)
        # Initialise the blas kernels. We do this after the
        # preallocation to not fragment the heap accidentally.
        tmp = pygpu.empty((2, 2), dtype='float32', context=context)
        if dev.startswith('cuda'):
            # In OpenCL, BLAS isn't always available
            pygpu.blas.gemm(0, tmp, tmp, 0, tmp, overwrite_c=True)
        del tmp
    else:
        context = init_dev.devmap[dev]
        # This will map the context name to the real context object.
    if config.print_active_device:
        try:
            pcibusid = '(' + context.pcibusid + ')'
        except pygpu.gpuarray.UnsupportedException:
            pcibusid = ''
        print("Mapped name %s to device %s: %s %s" %
              (name, dev, context.devname, pcibusid),
              file=sys.stderr)
    pygpu_activated = True
Example 9: perform
def perform(self, node, inputs, out_):
    out = out_[0]
    sh = [int(i) for i in inputs]
    if out[0] is None or out[0].shape != sh:
        out[0] = pygpu.empty(sh, dtype=self.dtype)
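Like Example 7, this perform only reallocates when the cached output has the wrong shape. Note that the pygpu.empty call omits context=, so it presumably relies on a process-wide default context having been registered beforehand (pygpu provides set_default_context for this purpose).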
Example 10: test_empty_no_params
def test_empty_no_params():
    try:
        pygpu.empty()
        assert False
    except TypeError:
        pass
Example 11: test_empty_no_dtype
def test_empty_no_dtype():
    x = pygpu.empty((), context=ctx)  # no dtype and order params
    y = numpy.empty(())
    check_meta(x, y)
Example 12: empty
def empty(shp, order, dtype):
    x = pygpu.empty(shp, dtype, order, context=ctx)
    y = numpy.empty(shp, dtype, order)
    check_meta(x, y)
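This helper is clearly meant to be driven over many shape/order/dtype combinations. A plausible driver, assuming ctx and check_meta from the surrounding test module (the particular combinations below are illustrative):

def test_empty():
    for shp in [(), (5,), (2, 3)]:
        for order in ['C', 'F']:
            for dtype in ['float32', 'int8']:
                empty(shp, order, dtype)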
Example 13: print
try:
    sock.bind('tcp://*:{0}'.format(sock_data))
except zmq.error.ZMQError:
    import os
    print('[load] %s port %d zmq error' % (os.getpid(), sock_data))
    sock.close()
    zmq.Context().term()
    raise
finally:
    pass

shape, dtype, h = sock.recv_pyobj()
if verbose: print('[load] 1. shared_x information received')

gpu_data_remote_b = pygpu.gpuarray.open_ipc_handle(ctx, h, np.prod(shape) * dtype.itemsize)
gpu_data_remote = pygpu.gpuarray.from_gpudata(gpu_data_remote_b, 0, dtype, shape, ctx)
gpu_data = pygpu.empty(shape, dtype, context=ctx)

# img_mean = icomm.recv(source=MPI.ANY_SOURCE, tag=66)
# if verbose: print '[load] 2. img_mean received'
import os
print('loading %s started' % os.getpid())
count = 0
mode = None
import time
while True:
    # 3. load the very first filename in 'train' or 'val' mode
    message = icomm.recv(source=0, tag=40)
    if message == 'stop':
        # ... (remainder truncated in the source listing) ...
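For orientation: this fragment appears to come from a multi-process data loader. A peer process sends the shape, dtype, and an IPC handle for its GPU buffer over ZeroMQ; open_ipc_handle and from_gpudata map that remote buffer into this process, and pygpu.empty then allocates a local staging buffer of matching shape and dtype that the loader can fill before publishing data to the shared array.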
Example 14: init_dev
def init_dev(dev, name=None, preallocate=None):
    global pygpu_activated
    if not config.cxx:
        raise RuntimeError("The new gpu-backend needs a C++ compiler.")
    pygpu_version = pygpu_parse_version(pygpu.__version__)
    if (pygpu_version.major != 0 or pygpu_version.minor != 7 or
            pygpu_version.patch < 0):
        raise ValueError(
            "Your installed version of pygpu(%s) is too old, please upgrade to 0.7.0 or later" %
            pygpu_version.fullversion)
    # This is for the C headers API, we need to match the exact version.
    gpuarray_version_major_supported = 2
    gpuarray_version_major_detected = pygpu.gpuarray.api_version()[0]
    if gpuarray_version_major_detected != gpuarray_version_major_supported:
        raise ValueError(
            "Your installed version of libgpuarray is not in sync with the current Theano"
            " version. The installed libgpuarray version supports API version %d,"
            " while current Theano supports API version %d. Change the version of"
            " libgpuarray or Theano to fix this problem.",
            gpuarray_version_major_detected,
            gpuarray_version_major_supported)
    if dev not in init_dev.devmap:
        args = dict()
        if config.gpuarray.cache_path != '':
            args['kernel_cache_path'] = config.gpuarray.cache_path
        if preallocate is None:
            preallocate = config.gpuarray.preallocate
        if preallocate < 0:
            args['max_cache_size'] = 0
        else:
            args['initial_cache_size'] = preallocate
        context = pygpu.init(
            dev,
            sched=config.gpuarray.sched,
            single_stream=config.gpuarray.single_stream,
            **args)
        context.dev = dev
        init_dev.devmap[dev] = context
        reg_context(name, context)
        MB = (1024 * 1024)
        if dev.startswith('cuda'):
            avail = dnn.dnn_available(name)
            # If we try to enable cudnn and there isn't enough GPU
            # memory, there will be an unclear error message. So do
            # not even try; raise a clear error instead.
            if avail and context.free_gmem < 75 * MB:
                raise RuntimeError(
                    "Can not enable cuDNN as there is only %d MB of free GPU memory." %
                    (context.free_gmem / MB))
            elif avail:
                context.cudnn_handle = dnn._make_handle(context)
            elif config.dnn.enabled == 'True':
                raise RuntimeError(
                    "You enabled cuDNN, but we aren't able to use it: %s" %
                    dnn.dnn_available.msg)
            if config.print_active_device:
                if avail:
                    print("Using cuDNN version %d on context %s" % (dnn.version(), name),
                          file=sys.stderr)
                else:
                    print("Can not use cuDNN on context %s: %s" % (name, dnn.dnn_available.msg),
                          file=sys.stderr)
        if preallocate < 0:
            print("Disabling allocation cache on %s" % (dev,))
        elif preallocate > 0:
            if preallocate <= 1:
                gmem = min(preallocate, 0.95) * context.total_gmem
            else:
                gmem = preallocate * MB
            if gmem > context.free_gmem:
                raise RuntimeError(
                    "Trying to preallocate %d MB of GPU memory while only"
                    " %d MB are available." % (gmem / MB,
                                               context.free_gmem / MB))
            elif gmem > context.free_gmem - 50 * MB:
                print(
                    "WARNING: Preallocating too much memory can prevent cudnn and cublas from working properly")
            # This will allocate and immediately free an object of size gmem,
            # which will reserve that amount of memory on the GPU.
            pygpu.empty((gmem,), dtype='int8', context=context)
            if config.print_active_device:
                print("Preallocating %d/%d Mb (%f) on %s" %
                      (gmem // MB, context.total_gmem // MB,
                       gmem / context.total_gmem, dev),
                      file=sys.stderr)
        # Initialise the blas kernels. We do this after the
        # preallocation to not fragment the heap accidentally.
        tmp = pygpu.empty((2, 2), dtype='float32', context=context)
        if dev.startswith('cuda'):
            # In OpenCL, BLAS isn't always available
            pygpu.blas.gemm(0, tmp, tmp, 0, tmp, overwrite_c=True)
        del tmp
    else:
        context = init_dev.devmap[dev]
        # This will map the context name to the real context object.
    if config.print_active_device:
        try:
            # ... (the rest of the code is omitted here) ...
Example 15: __init__
def __init__(self, size, dtype):
    self.clary = pygpu.empty((size,), dtype=dtype, cls=elemary)
    super(Base, self).__init__(size, dtype)
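The cls argument makes pygpu.empty return an instance of a GpuArray subclass; in pygpu's own test suite, elemary is imported as from pygpu._array import ndgpuarray as elemary, a subclass that adds numpy-style elementwise operators. A short sketch under that assumption (the device name is also assumed):

import numpy
import pygpu
from pygpu._array import ndgpuarray as elemary  # assumed import, as in pygpu's tests

ctx = pygpu.init('cuda0')
a = pygpu.empty((16,), dtype='float32', context=ctx, cls=elemary)
a[:] = numpy.ones(16, dtype='float32')
b = a + a  # elementwise ops come from the ndgpuarray subclass
print(numpy.asarray(b))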