本文整理汇总了Python中bitstring.BitStream.overwrite方法的典型用法代码示例。如果您正苦于以下问题:Python BitStream.overwrite方法的具体用法?Python BitStream.overwrite怎么用?Python BitStream.overwrite使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类bitstring.BitStream
的用法示例。
在下文中一共展示了BitStream.overwrite方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: pack_pak
# 需要导入模块: from bitstring import BitStream [as 别名]
# 或者: from bitstring.BitStream import overwrite [as 别名]
def pack_pak(dir, file_list = None, align_toc = 16, align_files = 16, eof = False):
    """Pack the entries of *dir* into a PAK archive image.

    Layout: a table of contents of little-endian uint32 byte offsets
    (slot 0 holds the file count), followed by each file's data padded
    to *align_files* bytes.

    Args:
      dir:         directory whose entries are packed.
      file_list:   explicit ordering of entries; defaults to the sorted
                   directory listing.
      align_toc:   byte alignment for the end of the table of contents.
      align_files: byte alignment applied to each packed file.
      eof:         when True, reserve one extra TOC slot that records the
                   total archive size (end-of-file marker).

    Returns:
      A BitStream containing the complete archive image.
    """
    if file_list is None:
        file_list = sorted(os.listdir(dir))

    num_files = len(file_list)

    # One uint32 per file, plus one for the file count itself.
    toc_length = (num_files + 1) * 4

    if eof:
        toc_length += 1  # Extra slot for the end-of-file offset.

    if toc_length % align_toc > 0:
        toc_length += align_toc - (toc_length % align_toc)

    archive_data = BitStream(uintle = 0, length = toc_length * 8)
    archive_data.overwrite(bitstring.pack("uintle:32", num_files), 0)

    for file_num, item in enumerate(file_list):
        full_path = os.path.join(dir, item)

        if os.path.isfile(full_path):
            data = pack_file(full_path)
        else:
            # Sub-directories become nested PAK archives.
            data = pack_dir(full_path, align_toc, align_files, eof)

        # Floor division keeps the byte count an int on Python 3
        # (lengths are bit counts, always a multiple of 8 here).
        file_size = data.len // 8
        padding = 0

        # Only append padding when it is actually needed; a zero-length
        # uintle BitStream is not constructible.
        if file_size % align_files > 0:
            padding = align_files - (file_size % align_files)
            data.append(BitStream(uintle = 0, length = padding * 8))

        file_pos = archive_data.len // 8
        archive_data.overwrite(bitstring.pack("uintle:32", file_pos), (file_num + 1) * 32)
        archive_data.append(data)
        del data

    if eof:
        # The reserved slot records the total archive size in bytes.
        archive_data.overwrite(bitstring.pack("uintle:32", archive_data.len // 8), (num_files + 1) * 32)

    return archive_data
示例2: create_archives
# 需要导入模块: from bitstring import BitStream [as 别名]
# 或者: from bitstring.BitStream import overwrite [as 别名]
#.........这里部分代码省略.........
csv_template_f = open(archive["csv"], "rb")
csv_template = csv.reader(csv_template_f)
csv_out_path = os.path.join(temp_dir, "cpk.csv")
csv_out_f = open(csv_out_path, "wb")
csv_out = csv.writer(csv_out_f)
for row in csv_template:
if len(row) < 4:
continue
base_path = row[0]
real_path = os.path.join(archive["dir"], base_path)
out_path = os.path.join(temp_dir, archive["name"], base_path)
self.progress.setValue(self.progress.value() + 1)
self.progress.setLabelText("Reading...\n%s" % real_path)
# All items in the CPK list should be files.
# Therefore, if we have a directory, then it needs to be packed.
if os.path.isdir(real_path):
if self.__cache_outdated(real_path, out_path):
out_dir = os.path.dirname(out_path)
try:
os.makedirs(out_dir)
except:
pass
data = pack_dir(real_path)
with open(out_path, "wb") as out_file:
data.tofile(out_file)
del data
elif os.path.isfile(real_path):
# If it's a file, though, we can just use it directly.
out_path = real_path
row[0] = out_path
csv_out.writerow(row)
csv_template_f.close()
csv_out_f.close()
self.__pack_cpk(csv_out_path, archive["cpk"])
# We're playing fast and loose with the file count anyway, so why not?
self.file_count += 1
self.progress.setValue(self.file_count)
self.progress.setLabelText("Saving " + archive["name"] + "...")
if archive["toc"]:
for entry in table_of_contents:
if not entry in toc_info:
_LOGGER.warning("%s missing from %s table of contents." % (entry, archive["name"]))
continue
file_pos = table_of_contents[entry]["pos"]
file_size = table_of_contents[entry]["size"]
eboot.overwrite(BitStream(uintle = file_pos, length = 32), toc_info[entry][0] * 8)
eboot.overwrite(BitStream(uintle = file_size, length = 32), toc_info[entry][1] * 8)
del table_of_contents
self.progress.setWindowTitle("Building...")
self.progress.setLabelText("Saving EBOOT.BIN...")
self.progress.setValue(self.progress.maximum())
with open(eboot_path, "wb") as f:
eboot.tofile(f)
# Text replacement
to_replace = eboot_text.get_eboot_text()
for replacement in to_replace:
orig = bytearray(replacement.orig, encoding = replacement.enc)
# If they left something blank, write the original text back.
if len(replacement.text) == 0:
data = orig
else:
data = bytearray(replacement.text, encoding = replacement.enc)
pos = replacement.pos.int + eboot_offset
padding = len(orig) - len(data)
if padding > 0:
# Null bytes to fill the rest of the space the original took.
data.extend(bytearray(padding))
data = ConstBitStream(bytes = data)
eboot.overwrite(data, pos * 8)
eboot_out = os.path.join(common.editor_config.iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN")
with open(eboot_out, "wb") as f:
eboot.tofile(f)
self.progress.close()
示例3: GmoFile
# 需要导入模块: from bitstring import BitStream [as 别名]
# 或者: from bitstring.BitStream import overwrite [as 别名]
#.........这里部分代码省略.........
def save(self, filename):
    """Write the GMO data out to *filename*."""
    out = open(filename, "wb")
    try:
        self.data.tofile(out)
    finally:
        out.close()
def __find_gims(self):
    """Scan self.data for embedded GIM images.

    Populates self.__gim_files with one (byte offset, byte size) tuple
    per GIM magic found. No-op when no data is loaded.
    """
    if self.data is None:
        return

    self.__gim_files = []

    for gim_start in self.data.findall(GIM_MAGIC, bytealigned = True):
        gim_size_pos = gim_start + (GIM_SIZE_OFFSET * 8)  # Bit pos.
        gim_size = self.data[gim_size_pos : gim_size_pos + 32].uintle + GIM_SIZE_DIFF

        # findall yields bit positions; turn it into a byte position.
        # Floor division keeps this an int on Python 3.
        gim_start //= 8

        self.__gim_files.append((gim_start, gim_size))
def gim_count(self):
    """Return how many GIM files were found in this GMO."""
    found = self.__gim_files
    return len(found)
def get_gim(self, gim_id):
    """Return a copy of the data for GIM number *gim_id*.

    Raises GimIndexError when gim_id is out of range.
    """
    if not gim_id < self.gim_count():
        raise GimIndexError("Invalid GIM ID.")

    start, size = self.__gim_files[gim_id]
    # Offsets/sizes are stored in bytes; the data is indexed in bits.
    return self.data[start * 8 : (start + size) * 8]
def replace_png_file(self, gim_id, filename, quantize_to_fit = True):
    """Convert the PNG *filename* to GIM and insert it as GIM *gim_id*.

    When quantize_to_fit is True, the PNG is re-converted at
    progressively lower color depths until the resulting GIM fits in
    the space occupied by the original GIM.

    Raises GimIndexError if gim_id is invalid; logs an error (and gives
    up) if no quantization level produces a small enough GIM.
    """
    if quantize_to_fit:
        quantize_order = [QuantizeType.auto, QuantizeType.index8, QuantizeType.index4]
    else:
        quantize_order = [QuantizeType.auto]
    quantize_id = 0

    (fd, temp_gim) = tempfile.mkstemp(suffix = ".gim", prefix = "sdse-")
    os.close(fd)  # Don't need the open file handle.

    while True:
        self.gimconv.png_to_gim(filename, temp_gim, quantize_order[quantize_id])

        try:
            self.replace_gim_file(gim_id, temp_gim)
        except GimSizeError:
            quantize_id += 1
        except GimIndexError:
            os.remove(temp_gim)
            raise
        else:
            # If we didn't except, that means we succeeded, so we can leave.
            _LOGGER.debug("Quantized PNG to %s" % quantize_order[quantize_id])
            break

        # Fix: use >= so we stop once every quantize level has been tried.
        # The original checked "> len(quantize_order)", which allowed
        # quantize_id to run one past the list and raise IndexError on
        # the next pass instead of logging the failure.
        if quantize_id >= len(quantize_order):
            _LOGGER.error("Unable to convert %s into a GIM small enough to insert." % filename)
            break

    os.remove(temp_gim)
def replace_gim_file(self, gim_id, filename):
    """Load *filename* from disk and insert it as GIM number *gim_id*."""
    self.replace_gim(gim_id, BitStream(filename = filename))
def replace_gim(self, gim_id, gim_data):
    """Overwrite GIM number *gim_id* in place with *gim_data*.

    Raises:
      GimIndexError: gim_id is not a valid GIM index.
      GimSizeError:  gim_data is larger than the GIM it replaces.
    """
    if gim_id >= self.gim_count():
        raise GimIndexError("Invalid GIM ID.")

    gim_start, gim_size = self.__gim_files[gim_id]

    # Floor division keeps the byte count an int on Python 3.
    if gim_data.len // 8 > gim_size:
        raise GimSizeError("GIM too large. %d bytes > %d bytes" % (gim_data.len // 8, gim_size))

    self.data.overwrite(gim_data, gim_start * 8)

    # Leave the length alone, though, because we know we have that much space
    # to work with from the original GIM file that was there, and there's no
    # point in shrinking that down if someone happens to want to re-replace
    # this GIM file without reloading the whole thing.
def extract(self, directory, to_png = False):
    """Dump every GIM in this GMO into *directory* as NNNN.gim files.

    With to_png = True, each GIM is converted to NNNN.png instead and
    the intermediate .gim file is deleted.
    """
    if not os.path.isdir(directory):
        os.makedirs(directory)

    for gim_id in range(self.gim_count()):
        gim_data = self.get_gim(gim_id)
        gim_path = os.path.join(directory, "%04d.gim" % gim_id)
        png_path = os.path.join(directory, "%04d.png" % gim_id)

        with open(gim_path, "wb") as gim_file:
            gim_data.tofile(gim_file)

        if to_png:
            self.gimconv.gim_to_png(gim_path, png_path)
            os.remove(gim_path)
示例4: ModelPak
# 需要导入模块: from bitstring import BitStream [as 别名]
# 或者: from bitstring.BitStream import overwrite [as 别名]
class ModelPak():
def __init__(self, filename = None):
    """Create an empty ModelPak, optionally loading *filename* from disk."""
    self.__gmo_files = []
    self.__data = None

    if filename:
        self.load_file(filename)
def load_file(self, filename):
    """Read *filename* from disk and parse it as a model PAK."""
    stream = BitStream(filename = filename)
    self.load_data(stream)
def load_data(self, data):
    """Parse an in-memory model PAK and build the internal GMO list.

    Args:
      data: BitStream holding the whole PAK file.

    Logs an error and leaves the pak unloaded if the archive holds
    fewer than four files.
    """
    files = [entry_data for (entry_name, entry_data) in get_pak_files(data)]

    # There are always at least four files in a model pak.
    # The first three I don't know a lot about, and then
    # the GMO files come after that.
    if len(files) < 4:
        _LOGGER.error("Invalid model PAK. %d files found, but at least 4 needed." % len(files))
        return

    # The name pak contains a list of null-terminated names for
    # each of the models, stored in our standard pak format.
    name_pak = files[0]
    names = [entry_data.bytes.strip('\0') for (entry_name, entry_data) in get_pak_files(name_pak)]

    # Most of the model paks in SDR2 have a fourth unknown file before the models
    # start, so we'll just take everything from the back end and call it a day.
    models = files[-len(names):]

    # Now, we don't get file positions from the unpacker, so let's find those
    # and start filling out our internal list of GMO files.
    file_starts, file_ends = parse_pak_toc(data)
    model_starts = file_starts[-len(names):]

    for i, model in enumerate(models):
        # First of all, not all of the "models" present are actually GMO files.
        # It's rare, but there is the occasional other unknown format.
        # So let's make sure we have a GMO file.
        if model[:GMO_MAGIC.len] != GMO_MAGIC:
            continue

        name = names[i]
        gmo = GmoFile(data = model)
        # Bits -> bytes; floor division keeps this an int on Python 3.
        size = model.len // 8
        start = model_starts[i]

        self.__gmo_files.append({
          _NAME:  name,
          _START: start,
          _SIZE:  size,
          _DATA:  gmo,
        })

    self.__data = BitStream(data)
def save(self, filename):
    """Flush all GMO edits into the archive and write it to *filename*."""
    self.__update_data()
    out = open(filename, "wb")
    try:
        self.__data.tofile(out)
    finally:
        out.close()
def __update_data(self):
    """Copy each (possibly modified) GMO back over its slot in the archive."""
    for entry in self.__gmo_files:
        self.__data.overwrite(entry[_DATA].data, entry[_START] * 8)
def get_data(self):
    """Return the archive data with all GMO edits applied."""
    self.__update_data()
    payload = self.__data
    return payload
def gmo_count(self):
    """Return the number of GMO models held in this pak."""
    models = self.__gmo_files
    return len(models)
def get_gmo(self, index):
    """Return the GmoFile at *index*, or None (with a logged error) if invalid.

    The None test must run first: the original compared index against the
    count before checking for None, so a None index reached
    "%d" % None (TypeError), and on Python 3 the >= comparison itself
    raises TypeError.
    """
    if index is None or index >= self.gmo_count():
        _LOGGER.error("Invalid GMO ID %s." % index)
        return None

    return self.__gmo_files[index][_DATA]
def get_gmos(self):
    """Return every GmoFile in this pak, in archive order."""
    return [entry[_DATA] for entry in self.__gmo_files]
def get_name(self, index):
    """Return the model name at *index*, or None (with a logged error) if invalid.

    Adds the None guard that sibling get_gmo performs, so a None index is
    reported instead of raising from the comparison/format/subscript.
    """
    if index is None or index >= self.gmo_count():
        _LOGGER.error("Invalid GMO ID %s." % index)
        return None

    return self.__gmo_files[index][_NAME]
def get_names(self):
    """Return every model name in this pak, in archive order."""
    names = []
    for entry in self.__gmo_files:
        names.append(entry[_NAME])
    return names
def id_from_name(self, name):
for i in range(self.gmo_count()):
#.........这里部分代码省略.........