This article collects typical usage examples of Python's numpy.memmap. If you have been wondering what numpy.memmap does, how to call it, or where it is useful, the curated examples below may help. You can also explore further usage examples from the numpy namespace this method belongs to.
The 15 code examples of numpy.memmap shown below are sorted by popularity by default.
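Before the collected examples, here is a minimal, self-contained sketch of the basic numpy.memmap round trip: create a disk-backed array, write to it, flush, and map it again read-only. The file name and sizes are illustrative only.

import numpy as np

# Create a small memory-mapped array backed by a scratch file (file name is illustrative).
mm = np.memmap('scratch.dat', dtype='float32', mode='w+', shape=(3, 4))
mm[:] = np.arange(12, dtype='float32').reshape(3, 4)
mm.flush()                      # push the changes out to disk

# Re-map the same file read-only; the data is paged in on demand rather than loaded at once.
ro = np.memmap('scratch.dat', dtype='float32', mode='r', shape=(3, 4))
print(ro[1, 2])                 # -> 6.0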
Example 1: walk
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def walk(self, size):
if self.eof: return None
end_point = self.offset + 4 * size
assert end_point <= self.size, \
'Over-read {}'.format(self.path)
float32_1D_array = np.memmap(
self.path, shape = (), mode = 'r',
offset = self.offset,
dtype='({})float32,'.format(size)
)
self.offset = end_point
if end_point == self.size:
self.eof = True
return float32_1D_array
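The dtype string '({})float32,'.format(size) declares a single record holding size float32 values, so the zero-dimensional memmap covers exactly 4 * size bytes starting at self.offset. A hedged standalone sketch that reads the same bytes with an explicit 1-D shape instead (the helper name and arguments are illustrative, not part of the original class):

import numpy as np

def read_block(path, offset, size):
    # Map `size` float32 values starting at byte `offset` (illustrative helper).
    return np.memmap(path, dtype='float32', mode='r',
                     offset=offset, shape=(size,))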
Example 2: flush
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def flush(self):
"""
Write any changes in the array to the file on disk.
For further information, see `memmap`.
Parameters
----------
None
See Also
--------
memmap
"""
if self.base is not None and hasattr(self.base, 'flush'):
self.base.flush()
Example 3: __init__
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def __init__(self, sglx_file):
self.file_bin = Path(sglx_file)
self.nbytes = self.file_bin.stat().st_size
file_meta_data = Path(sglx_file).with_suffix('.meta')
if not file_meta_data.exists():
self.file_meta_data = None
self.meta = None
self.channel_conversion_sample2v = 1
_logger.warning(str(sglx_file) + " : no metadata file found. Very limited support")
return
# normal case we continue reading and interpreting the metadata file
self.file_meta_data = file_meta_data
self.meta = read_meta_data(file_meta_data)
self.channel_conversion_sample2v = _conversion_sample2v_from_meta(self.meta)
# if we are not looking at a compressed file, use a memmap, otherwise instantiate mtscomp
if self.is_mtscomp:
self.data = mtscomp.Reader()
self.data.open(self.file_bin, self.file_bin.with_suffix('.ch'))
else:
if self.nc * self.ns * 2 != self.nbytes:
_logger.warning(str(sglx_file) + " : meta data and filesize do not checkout")
self.data = np.memmap(sglx_file, dtype='int16', mode='r', shape=(self.ns, self.nc))
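Because the uncompressed branch memory-maps the binary file, slicing the reader's data only touches the pages that hold the requested samples. A hedged usage sketch, where sr stands for an instance of the class above and the sampling rate is an assumed value rather than one read from the metadata:

import numpy as np

fs = 30000   # assumed sampling rate, normally taken from the metadata
# First second of data for the first 10 channels; only these pages are read from disk.
chunk = np.asarray(sr.data[:fs, :10], dtype=np.float32)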
Example 4: _readData1
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def _readData1(self, fd, meta, mmap=False, **kwds):
## Read array data from the file descriptor for MetaArray v1 files
## read in axis values for any axis that specifies a length
frameSize = 1
for ax in meta['info']:
if 'values_len' in ax:
ax['values'] = np.fromstring(fd.read(ax['values_len']), dtype=ax['values_type'])
frameSize *= ax['values_len']
del ax['values_len']
del ax['values_type']
self._info = meta['info']
if not kwds.get("readAllData", True):
return
## the remaining data is the actual array
if mmap:
subarr = np.memmap(fd, dtype=meta['type'], mode='r', shape=meta['shape'])
else:
subarr = np.fromstring(fd.read(), dtype=meta['type'])
subarr.shape = meta['shape']
self._data = subarr
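np.fromstring is deprecated for binary input in recent NumPy releases; np.frombuffer reads the same bytes without a deprecation warning, but returns a read-only view, so the result is copied here in case the array is modified later. A sketch of the equivalent calls:

# equivalents of the two fromstring calls above (sketch)
ax['values'] = np.frombuffer(fd.read(ax['values_len']), dtype=ax['values_type']).copy()
subarr = np.frombuffer(fd.read(), dtype=meta['type']).reshape(meta['shape']).copy()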
Example 5: __init__
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def __init__(self, path):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
offset=offset + self._sizes.nbytes)
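The index built here is normally paired with a second memmap over the companion data file, with each record cut out via its pointer and size. A hedged sketch of that lookup (the helper function and the data-file path are illustrative, not part of the original class):

import numpy as np

def get_record(index, pointers, sizes, dtype, data_path):
    # data_path is a hypothetical companion .bin file, mapped read-only.
    buf = memoryview(np.memmap(data_path, mode='r', order='C'))
    return np.frombuffer(buf, dtype=dtype,
                         count=sizes[index], offset=pointers[index])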
Example 6: perform
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def perform(self, node, inp, out):
input, = inp
storage, = out
# drop
res = input
if type(res) != numpy.ndarray and type(res) != numpy.memmap:
raise TypeError(res)
# transpose
res = res.transpose(self.shuffle + self.drop)
# augment
shape = list(res.shape[:len(self.shuffle)])
for augm in self.augment:
shape.insert(augm, 1)
res = res.reshape(shape)
# copy (if not inplace)
if not self.inplace:
res = numpy.copy(res)
storage[0] = numpy.asarray(res) # asarray puts scalars back into array
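The explicit type check above accepts numpy.memmap alongside numpy.ndarray, and the final numpy.asarray call strips the memmap subclass before the result is stored. A small illustration of both points (the file name is illustrative):

import numpy as np

mm = np.memmap('tmp_check.dat', dtype='float32', mode='w+', shape=(2, 3))
print(type(mm) is np.memmap)                # True, so the exact-type check above passes
print(isinstance(mm, np.ndarray))           # True: memmap is an ndarray subclass
print(type(np.asarray(mm)) is np.ndarray)   # True: asarray returns a base-class view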
Example 7: test_allocate_arrays
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def test_allocate_arrays(self):
shape = (30, 1000)
dtype = 'int16'
arr_in_memory = self.RX.allocate_array(shape=shape, dtype=dtype, memmap=False)
arr_memmap = self.RX.allocate_array(shape=shape, dtype=dtype, memmap=True)
assert isinstance(arr_in_memory, np.ndarray)
assert isinstance(arr_memmap, np.memmap)
assert arr_in_memory.shape == shape
assert arr_memmap.shape == shape
assert arr_in_memory.dtype == dtype
assert arr_memmap.dtype == dtype
arr_in_memory = self.SX.allocate_array(shape=shape, dtype=dtype, memmap=False)
arr_memmap = self.SX.allocate_array(shape=shape, dtype=dtype, memmap=True)
assert isinstance(arr_in_memory, np.ndarray)
assert isinstance(arr_memmap, np.memmap)
assert arr_in_memory.shape == shape
assert arr_memmap.shape == shape
assert arr_in_memory.dtype == dtype
assert arr_memmap.dtype == dtype
Example 8: makeMemMapRaw
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def makeMemMapRaw(binFullPath, meta):
nChan = int(meta['nSavedChans'])
nFileSamp = int(int(meta['fileSizeBytes'])/(2*nChan))
print("nChan: %d, nFileSamp: %d" % (nChan, nFileSamp))
rawData = np.memmap(binFullPath, dtype='int16', mode='r',
shape=(nChan, nFileSamp), offset=0, order='F')
return rawData
# Return an array [lines X timepoints] of uint8 values for a
# specified set of digital lines.
#
# - dwReq is the zero-based index into the saved file of the
# 16-bit word that contains the digital lines of interest.
# - dLineList is a zero-based list of one or more lines/bits
# to scan from word dwReq.
#
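The comment above introduces a digital-line extraction helper that is not included in the snippet. A minimal sketch of the idea using NumPy bit operations; the function name and exact signature are assumptions, not the original code:

import numpy as np

def extract_digital_lines(raw_data, dw_req, d_line_list):
    # raw_data: the (nChan, nFileSamp) int16 memmap returned by makeMemMapRaw
    # dw_req: zero-based row index of the 16-bit word holding the digital lines
    # d_line_list: zero-based bits to pull out of that word
    word = raw_data[dw_req, :].astype(np.uint16)          # reinterpret the bit pattern
    lines = np.zeros((len(d_line_list), word.size), dtype=np.uint8)
    for i, bit in enumerate(d_line_list):
        lines[i, :] = (word >> bit) & 1                   # isolate one bit per requested line
    return lines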
Example 9: load
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def load(self, path, num_examples_limit: Optional[int] = None):
with PathManager.open(path, "rb") as f:
npz = np.load(f)
# For big input data, we don't want the cpu to OOM.
# Therefore, we are loading the huge buffer array into disc
# and reading it from disc instead of memory.
if npz["buffer"].nbytes > ARRAY_SIZE_LIMIT_FOR_MEMORY:
self.buffer = np.memmap(
tempfile.NamedTemporaryFile().name,
dtype="float32",
mode="w+",
shape=npz["buffer"].shape,
)
self.buffer[:] = npz["buffer"][:]
else:
self.buffer = npz["buffer"]
self.offsets = npz["offsets"]
if num_examples_limit is not None and len(self.offsets) > num_examples_limit:
self.offsets = self.offsets[: num_examples_limit + 1]
self.buffer = self.buffer[: self.offsets[-1]]
self.sizes = self.offsets[1:] - self.offsets[:-1]
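The pattern above, copying a large in-memory array into a disk-backed buffer so it can be dropped from RAM, is reusable on its own. A hedged sketch with an explicit temporary path (the helper name is illustrative):

import os
import tempfile
import numpy as np

def to_disk_backed(arr, dir=None):
    # Copy `arr` into a float32 memmap stored in a named temporary file (sketch).
    fd, path = tempfile.mkstemp(suffix='.mmap', dir=dir)
    os.close(fd)                               # np.memmap reopens the path itself
    mm = np.memmap(path, dtype='float32', mode='w+', shape=arr.shape)
    mm[:] = arr
    mm.flush()
    return mm, path                            # the caller is responsible for deleting `path`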
Example 10: __init__
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def __init__(self, hash_name='md5', coerce_mmap=False):
"""
Parameters
----------
hash_name: string
The hash algorithm to be used
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
objects.
"""
self.coerce_mmap = coerce_mmap
Hasher.__init__(self, hash_name=hash_name)
# delayed import of numpy, to avoid tight coupling
import numpy as np
self.np = np
if hasattr(np, 'getbuffer'):
self._getbuffer = np.getbuffer
else:
self._getbuffer = memoryview
Example 11: hash
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def hash(obj, hash_name='md5', coerce_mmap=False):
""" Quick calculation of a hash to identify uniquely Python objects
containing numpy arrays.
Parameters
-----------
hash_name: 'md5' or 'sha1'
Hashing algorithm used. sha1 is supposedly safer, but md5 is
faster.
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
"""
if 'numpy' in sys.modules:
hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
else:
hasher = Hasher(hash_name=hash_name)
return hasher.hash(obj)
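A usage sketch of the function above, assuming the module-level hash defined here is in scope; with coerce_mmap=True a memmap and a plain array with the same contents should hash identically, while the default False keeps them distinct. The file name is illustrative:

import numpy as np

a = np.arange(10, dtype='int64')
m = np.memmap('hash_demo.dat', dtype='int64', mode='w+', shape=(10,))  # illustrative file name
m[:] = a
m.flush()
print(hash(a, coerce_mmap=True) == hash(m, coerce_mmap=True))   # expected True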
Example 12: _gen_prediction_array
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def _gen_prediction_array(self, task, job, threading):
"""Generate prediction array either in-memory or persist to disk."""
shape = task.shape(job)
if threading:
self.job.predict_out = np.zeros(shape, dtype=_dtype(task))
else:
f = os.path.join(self.job.dir, '%s_out_array.mmap' % task.name)
try:
self.job.predict_out = np.memmap(
filename=f, dtype=_dtype(task), mode='w+', shape=shape)
except Exception as exc:
raise OSError(
"Cannot create prediction matrix of shape ("
"%i, %i), size %i MBs, for %s.\n Details:\n%r" %
(shape[0], shape[1], 8 * shape[0] * shape[1] / (1024 ** 2),
task.name, exc))
Example 13: __init__
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def __init__(self, path):
    self.eof = False                 # end-of-file flag
    self.path = path                 # path to the weights file
    if path is None:
        self.eof = True
        return
    else:
        self.size = os.path.getsize(path)   # total file size in bytes
        # the file starts with a 16-byte header of four int32 values
        major, minor, revision, seen = np.memmap(path,
            shape=(), mode='r', offset=0,
            dtype='({})i4,'.format(4))
        self.transpose = major > 1000 or minor > 1000
        self.offset = 16             # current read position, just past the header
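The 16-byte header read above can also be written with an explicit 1-D shape, which may be easier to follow than the record-dtype string; path here is the same weights-file path handed to __init__:

import numpy as np

header = np.memmap(path, dtype='int32', mode='r', offset=0, shape=(4,))
major, minor, revision, seen = (int(v) for v in header)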
Example 14: raw_data_from_fileobj
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def raw_data_from_fileobj(self, fileobj):
"""Returns memmap array of raw unscaled image data.
Array axes correspond to x,y,z,t.
"""
# memmap the data -- it is guaranteed to be uncompressed and all
# properties are known
# read in Fortran order to have spatial axes first
data = np.memmap(fileobj,
dtype=self.get_data_dtype(),
mode='c', # copy-on-write
shape=self.get_data_shape_in_file(),
order='F')
return data
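Copy-on-write mode ('c') keeps the underlying file untouched even if the returned array is modified, which is why it is chosen here. A small illustration with an illustrative file name:

import numpy as np

base = np.memmap('img.raw', dtype='uint8', mode='w+', shape=(4,))
base[:] = [1, 2, 3, 4]
base.flush()

cow = np.memmap('img.raw', dtype='uint8', mode='c', shape=(4,))
cow[0] = 99                  # the change stays in memory only
print(np.memmap('img.raw', dtype='uint8', mode='r', shape=(4,))[0])   # still 1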
Example 15: memmap
# Required import: import numpy [as alias]
# Or: from numpy import memmap [as alias]
def memmap(self, dtype=np.uint8, mode=None, offset=None, shape=None,
order='C'):
"""Map part of the file in memory.
Note that the map cannot span multiple underlying files.
Parameters are as for `~numpy.memmap`.
"""
if self.closed:
raise ValueError('memmap of closed file.')
dtype = np.dtype(dtype)
if mode is None:
mode = self.mode.replace('b', '')
if offset is not None and offset != self.tell():
# seek will fail for SequentialFileWriter, so we try to avoid it.
self.seek(offset)
elif self.fh.tell() == self._file_sizes[self.file_nr]:
self._open(self.file_nr + 1)
if shape is None:
count = self.size - self.tell()
if count % dtype.itemsize:
raise ValueError("size of available data is not a "
"multiple of the data-type size.")
shape = (count // dtype.itemsize,)
else:
if not isinstance(shape, tuple):
shape = (shape,)
count = dtype.itemsize
for k in shape:
count *= k
if self.fh.tell() + count > self._file_sizes[self.file_nr]:
raise ValueError('mmap length exceeds individual file size')
file_offset = self.fh.tell()
mm = np.memmap(self.fh, dtype, mode, file_offset, shape, order)
self.fh.seek(file_offset + count)
return mm
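A hedged usage sketch of the method above, assuming sf is an already-open instance of the sequential-file class; the dtype and sample count are illustrative:

import numpy as np

# Map the next 1024 int16 samples starting at the current file position.
block = sf.memmap(dtype=np.int16, shape=(1024,))
# The position has advanced past the mapped bytes, so a second call maps the following block.
block2 = sf.memmap(dtype=np.int16, shape=(1024,))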