This article collects typical usage examples of the mmap.ALLOCATIONGRANULARITY attribute in Python. If you have been wondering what exactly mmap.ALLOCATIONGRANULARITY is, or how to use it, the hand-picked attribute examples below may help. You can also look further into usage examples of the mmap module, where this attribute is defined.
Six code examples of the mmap.ALLOCATIONGRANULARITY attribute are shown below, sorted by popularity by default.
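Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all build on: mmap.mmap() only accepts offsets that are a multiple of mmap.ALLOCATIONGRANULARITY, so an arbitrary target position is rounded down and the remainder is applied when indexing the mapping. The temporary file and target position are invented for illustration.

import mmap
import tempfile

with tempfile.TemporaryFile() as f:
    f.write(b"x" * (mmap.ALLOCATIONGRANULARITY * 2))     # file spanning two granules
    f.flush()
    target = mmap.ALLOCATIONGRANULARITY + 123            # position we actually care about
    aligned = (target // mmap.ALLOCATIONGRANULARITY) * mmap.ALLOCATIONGRANULARITY
    delta = target - aligned                             # distance past the aligned start
    # map one granule starting at the aligned offset and index relative to it
    with mmap.mmap(f.fileno(), mmap.ALLOCATIONGRANULARITY, access=mmap.ACCESS_READ,
                   offset=aligned) as m:
        assert m[delta:delta + 1] == b"x"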
Example 1: tofile
# Required import: import mmap [as alias]
# Or: from mmap import ALLOCATIONGRANULARITY [as alias]
def tofile(self, dbfile):
    """Save the address set to a file

    :param dbfile: an open file object where the set is saved (overwriting it)
    :type dbfile: io.FileIO or file
    """
    if dbfile.tell() % mmap.ALLOCATIONGRANULARITY != 0:
        print("AddressSet: warning: if header position in file isn't a multiple of {}, it probably can't be loaded with fromfile()"
              .format(mmap.ALLOCATIONGRANULARITY), file=sys.stderr)
    if "b" not in dbfile.mode:
        raise ValueError("must open file in binary mode")
    # Windows Python 2 file objects can't handle writes >= 4GiB. Objects returned
    # by io.open() work around this issue, see https://bugs.python.org/issue9611
    if not isinstance(dbfile, io.BufferedIOBase) and self._table_bytes >= 1 << 32:
        raise ValueError("must open file with io.open if size >= 4GiB")
    dbfile.truncate(dbfile.tell() + self.HEADER_LEN + self._table_bytes)
    dbfile.write(self._header())
    dbfile.write(self._data)
Example 2: test_mmap_offset_greater_than_allocation_granularity
# Required import: import mmap [as alias]
# Or: from mmap import ALLOCATIONGRANULARITY [as alias]
def test_mmap_offset_greater_than_allocation_granularity(self):
    size = 5 * mmap.ALLOCATIONGRANULARITY
    offset = mmap.ALLOCATIONGRANULARITY + 1
    fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
    assert_(fp.offset == offset)
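For context, and as an assumption about numpy internals rather than part of the test above: numpy.memmap aligns the underlying mmap offset down to ALLOCATIONGRANULARITY itself, so callers may pass any byte offset, and the requested value is preserved in the array's .offset attribute, which is what the test asserts. A hypothetical standalone version:

import mmap
import tempfile
import numpy as np

with tempfile.NamedTemporaryFile() as tmp:
    offset = mmap.ALLOCATIONGRANULARITY + 1              # deliberately unaligned
    fp = np.memmap(tmp, dtype=np.uint8, shape=5 * mmap.ALLOCATIONGRANULARITY,
                   mode='w+', offset=offset)
    assert fp.offset == offset                           # the unaligned request is preserved
    del fp                                               # release the mapping before the file is removed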
Example 3: strings
# Required import: import mmap [as alias]
# Or: from mmap import ALLOCATIONGRANULARITY [as alias]
import re
from mmap import mmap, ACCESS_READ, ALLOCATIONGRANULARITY
from elftools.elf.elffile import ELFFile  # pyelftools


def strings(file_name, sections=None, min_length=4):
    """
    Finds all strings in a file; if it's an ELF file, you can specify where (in which sections) to
    look for the strings.

    :param file_name: name of the file to be examined
    :param sections: a list of strings which identify the ELF sections; should be used only with ELF files
    :param min_length: minimum number of consecutive printable ASCII characters for a match
    :return: a generator yielding each string found, decoded as UTF-8
    """
    pattern = '([\x20-\x7E]{' + str(min_length) + '}[\x20-\x7E]*)'  # printable ASCII, from space to tilde
    pattern = pattern.encode()
    regexp = re.compile(pattern)

    if not sections:
        with open(file_name, 'rb') as f, mmap(f.fileno(), 0, access=ACCESS_READ) as m:
            for match in regexp.finditer(m):
                yield str(match.group(0), 'utf-8')
    else:
        with open(file_name, 'rb') as f:
            elffile = ELFFile(f)
            for section in sections:
                try:
                    sec = elffile.get_section_by_name(section)
                except AttributeError:
                    # section not found
                    continue
                # skip section if missing in elf file
                if not sec:
                    continue
                offset = sec['sh_offset']
                size = sec['sh_size']
                if offset is None or size is None:
                    continue
                # mmap requires the offset to be a multiple of ALLOCATIONGRANULARITY:
                # round it down and enlarge the mapping by the same amount so the whole
                # section stays covered, then start the search at the section itself
                delta = offset % ALLOCATIONGRANULARITY
                with mmap(f.fileno(), size + delta, access=ACCESS_READ, offset=offset - delta) as m:
                    for match in regexp.finditer(m, delta):
                        yield str(match.group(0), 'utf-8')
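A hypothetical call to the generator above; the binary path and section names are just examples, and without the sections argument the whole file is scanned:

for s in strings("/bin/ls", sections=[".rodata", ".comment"], min_length=6):
    print(s)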
Example 4: align_to_mmap
# Required import: import mmap [as alias]
# Or: from mmap import ALLOCATIONGRANULARITY [as alias]
def align_to_mmap(num, round_up):
    """
    Align the given integer number to the closest page offset, which usually is 4096 bytes.

    :param round_up: if True, the next higher multiple of page size is used, otherwise
        the lower page_size will be used (i.e. if True, 1 becomes 4096, otherwise it becomes 0)
    :return: num rounded to closest page"""
    res = (num // ALLOCATIONGRANULARITY) * ALLOCATIONGRANULARITY
    if round_up and (res != num):
        res += ALLOCATIONGRANULARITY
    # END handle size
    return res
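A quick illustration of the rounding behaviour; the asserts below hold for any ALLOCATIONGRANULARITY value:

assert align_to_mmap(0, round_up=True) == 0
assert align_to_mmap(1, round_up=False) == 0
assert align_to_mmap(1, round_up=True) == ALLOCATIONGRANULARITY                       # 4096 on most systems
assert align_to_mmap(ALLOCATIONGRANULARITY, round_up=True) == ALLOCATIONGRANULARITY   # already aligned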
Example 5: mmap_move
# Required import: import mmap [as alias]
# Or: from mmap import ALLOCATIONGRANULARITY [as alias]
def mmap_move(fileobj, dest, src, count):
    """Mmaps the file object if possible and moves 'count' data
    from 'src' to 'dest'. All data has to be inside the file size
    (enlarging the file through this function isn't possible)

    Will adjust the file offset.

    Args:
        fileobj (fileobj)
        dest (int): The destination offset
        src (int): The source offset
        count (int): The amount of data to move
    Raises:
        mmap.error: In case move failed
        IOError: In case an operation on the fileobj fails
        ValueError: In case invalid parameters were given
    """
    assert mmap is not None, "no mmap support"

    if dest < 0 or src < 0 or count < 0:
        raise ValueError("Invalid parameters")

    try:
        fileno = fileobj.fileno()
    except (AttributeError, IOError):
        raise mmap.error(
            "File object does not expose/support a file descriptor")

    fileobj.seek(0, 2)
    filesize = fileobj.tell()
    length = max(dest, src) + count

    if length > filesize:
        raise ValueError("Not in file size boundary")

    offset = ((min(dest, src) // mmap.ALLOCATIONGRANULARITY) *
              mmap.ALLOCATIONGRANULARITY)
    assert dest >= offset
    assert src >= offset
    assert offset % mmap.ALLOCATIONGRANULARITY == 0

    # Windows doesn't handle empty mappings, add a fast path here instead
    if count == 0:
        return

    # fast path
    if src == dest:
        return

    fileobj.flush()
    file_map = mmap.mmap(fileno, length - offset, offset=offset)
    try:
        file_map.move(dest - offset, src - offset, count)
    finally:
        file_map.close()
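A hypothetical round trip through mmap_move(), using an invented temporary file to show the effect of shifting a region towards the start of the file:

import tempfile

with tempfile.TemporaryFile() as f:
    f.write(b"0123456789")
    mmap_move(f, dest=0, src=5, count=5)   # copy bytes 5..9 over bytes 0..4
    f.seek(0)
    assert f.read() == b"5678956789"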
Example 6: fromfile
# Required import: import mmap [as alias]
# Or: from mmap import ALLOCATIONGRANULARITY [as alias]
def fromfile(cls, dbfile, mmap_access=mmap.ACCESS_READ, preload=True):
    """Load the address set from a file

    :param dbfile: an open file object from which the set is loaded;
        it will be closed by AddressSet when no longer needed
    :type dbfile: io.FileIO or file
    :param mmap_access: mmap.ACCESS_READ, .ACCESS_WRITE, or .ACCESS_COPY
    :type mmap_access: int
    :param preload: True to preload the entire address set, False to load on demand
    :type preload: bool
    """
    if "b" not in dbfile.mode:
        raise ValueError("must open file in binary mode")
    header_pos = dbfile.tell()
    if header_pos % mmap.ALLOCATIONGRANULARITY != 0:
        raise ValueError("header position in file must be a multiple of {}"
                         .format(mmap.ALLOCATIONGRANULARITY))
    #
    # Read in the header safely (ast.literal_eval() is safe for untrusted data)
    header = dbfile.read(cls.HEADER_LEN)
    if not header.startswith(cls.MAGIC):
        raise ValueError("unrecognized file format (invalid magic)")
    magic_len = len(cls.MAGIC)
    config_end = header.find(b"\0", magic_len, cls.HEADER_LEN)
    assert config_end > 0
    config = ast.literal_eval(header[magic_len:config_end])
    if config["version"] != cls.VERSION:
        raise ValueError("can't load address database version {} (only supports {})"
                         .format(config["version"], cls.VERSION))
    #
    # Create an AddressSet object and replace its attributes
    self = cls(1)  # (size is irrelevant since it's getting replaced)
    cls._remove_nonheader_attribs(self.__dict__)
    for attr in self.__dict__.keys():  # only load expected attributes from untrusted data
        self.__dict__[attr] = config[attr]
    self._mmap_access = mmap_access
    #
    # The hash table is memory-mapped directly from the file instead of being loaded
    self._data = mmap.mmap(dbfile.fileno(), self._table_bytes, access=mmap_access,
                           offset=header_pos + cls.HEADER_LEN)
    if mmap_access == mmap.ACCESS_WRITE:
        dbfile.seek(header_pos)  # prepare for writing an updated header in close()
    else:
        dbfile.close()
    self._dbfile = dbfile
    #
    # Most of the time it makes sense to load the file serially instead of letting
    # the OS load each page as it's touched in random order, especially with HDDs;
    # reading a byte from each page is sufficient (CPython doesn't optimize this away)
    if preload:
        for i in xrange(self._table_bytes // mmap.PAGESIZE):
            self._data[i * mmap.PAGESIZE]
    #
    return self
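Finally, a heavily hypothetical sketch of how tofile() (Example 1) and fromfile() (Example 6) fit together, assuming the AddressSet class they are defined on, that fromfile() is a classmethod (its first parameter is cls), and that the constructor takes a table size as in the cls(1) call above; the database file name is invented:

import io

addr_set = AddressSet(1 << 20)                     # assumed: size of the underlying table
with io.open("addresses.db", "wb") as dbfile:      # io.open avoids the >= 4 GiB write issue
    addr_set.tofile(dbfile)                        # header position 0 is granularity-aligned

loaded = AddressSet.fromfile(io.open("addresses.db", "rb"))  # read-only: fromfile() closes the file itself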