This page collects typical usage examples of the Python method msgpack.dump. If you have been wondering what exactly msgpack.dump does, or how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the msgpack module itself.
The following presents 13 code examples of msgpack.dump, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: save_to_file
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def save_to_file(self, filename):
    """Save only the bare minimum needed to reconstruct this CoverageDB.

    This serializes the data to a single file and can reduce the disk footprint of
    block coverage significantly (depending on overlap and number of files)."""
    if file_backing_disabled:
        raise Exception("[!] Can't save/load coverage db files without msgpack. Try `pip install msgpack`")
    save_dict = dict()
    save_dict["version"] = 1  # serialized covdb version
    save_dict["module_name"] = self.module_name
    save_dict["module_base"] = self.module_base
    save_dict["coverage_files"] = self.coverage_files
    # Save a tighter version of the block dict: {int: [int]} instead of {int: [str]}
    block_dict_to_save = {}
    file_index_map = {filepath: self.coverage_files.index(filepath) for filepath in self.coverage_files}
    for block, trace_list in self.block_dict.items():
        trace_id_list = [file_index_map[name] for name in trace_list]
        block_dict_to_save[block] = trace_id_list
    save_dict["block_dict"] = block_dict_to_save
    # Write the packed version to file
    with open(filename, "wb") as f:
        msgpack.dump(save_dict, f)
    self.filename = filename
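For reference, a loader that inverts the index mapping above might look like the following. This is a minimal sketch assuming the exact save format produced by save_to_file; the function name and body are illustrative, not part of the original project:

def load_from_file(self, filename):
    # Unpack the saved dict and map trace indices back to file paths
    with open(filename, "rb") as f:
        save_dict = msgpack.load(f, raw=False)
    self.module_name = save_dict["module_name"]
    self.module_base = save_dict["module_base"]
    self.coverage_files = save_dict["coverage_files"]
    self.block_dict = {block: [self.coverage_files[i] for i in trace_ids]
                       for block, trace_ids in save_dict["block_dict"].items()}
    self.filename = filename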
Example 2: load_embeddings
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def load_embeddings(self):
    """Generate embeddings suited for the current vocab, or load previously cached ones."""
    assert self.args.pretrained_embeddings
    embedding_file = os.path.join(self.args.output_dir, 'embedding.msgpack')
    if not os.path.exists(embedding_file):
        embeddings = load_embeddings(self.args.pretrained_embeddings, self.vocab,
                                     self.args.embedding_dim, mode=self.args.embedding_mode,
                                     lower=self.args.lower_case)
        with open(embedding_file, 'wb') as f:
            msgpack.dump(embeddings, f)
    else:
        with open(embedding_file, 'rb') as f:
            embeddings = msgpack.load(f)
    return embeddings
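The cache path relies on msgpack round-tripping the embedding data unchanged. A standalone round-trip sketch with stand-in data (the file name and values are illustrative; note that msgpack deserializes sequences as plain lists):

import msgpack

embeddings = [[0.1, 0.2], [0.3, 0.4]]  # stand-in for real embedding rows
with open('embedding.msgpack', 'wb') as f:
    msgpack.dump(embeddings, f)
with open('embedding.msgpack', 'rb') as f:
    restored = msgpack.load(f)
assert restored == embeddings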
Example 3: dump
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def dump(filepath, data, options=None):
    """Dump the output to disk (JSON, msgpack, etc.).

    :param filepath: output file path
    :param data: serializable data to write to disk
    :param options: serialization options (default: None)
    :type options: dict
    """
    options = options or {}
    logger.debug("io.dump(%s, data, options=%s)", filepath, options)
    compress = options.get(constants.COMPRESSION, constants.NONE)
    if compress == constants.MSGPACK:
        try:
            import msgpack
        except ImportError:
            logger.error("msgpack module not found")
            raise
        logger.info("Dumping to msgpack")
        func = lambda x, y: msgpack.dump(x, y)
        mode = 'wb'
    else:
        round_off = options.get(constants.ENABLE_PRECISION)
        if round_off:
            _json.ROUND = options[constants.PRECISION]
        else:
            _json.ROUND = None
        indent = options.get(constants.INDENT, True)
        indent = 4 if indent else None
        logger.info("Dumping to JSON")
        func = lambda x, y: _json.json.dump(x, y, indent=indent)
        mode = 'w'
    logger.info("Writing to %s", filepath)
    with open(filepath, mode=mode) as stream:
        func(data, stream)
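Two hypothetical calls to the function above, assuming the `constants` module from the same project is importable; the option keys follow the branches in the code, while the paths, payloads, and precision value are illustrative:

options = {constants.COMPRESSION: constants.MSGPACK}
dump("/tmp/scene.msgpack", {"a": 1, "b": [2, 3]}, options=options)

# JSON branch with rounding enabled (precision value is illustrative)
options = {constants.ENABLE_PRECISION: True, constants.PRECISION: 3, constants.INDENT: True}
dump("/tmp/scene.json", {"pi": 3.14159265}, options=options)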
Example 4: save_yaml
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def save_yaml(cls, filename, data):
    """Save data into a YAML file.

    Parameters
    ----------
    filename : str
        Filename path
    data :
        Data to be stored

    Returns
    -------
    None
    """
    try:
        import yaml
    except ImportError:
        message = '{name}: Unable to import yaml module. You can install it with `pip install pyyaml`.'.format(
            name=cls.__class__.__name__
        )
        cls.logger().exception(message)
        raise ImportError(message)
    with open(filename, 'w') as outfile:
        outfile.write(yaml.dump(data, default_flow_style=False))
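The matching read side would typically use yaml.safe_load, which refuses to construct arbitrary Python objects from untrusted input; a minimal sketch (the method name is illustrative, not from the original class):

def load_yaml(cls, filename):
    import yaml
    with open(filename, 'r') as infile:
        # safe_load restricts the input to standard YAML tags
        return yaml.safe_load(infile)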
Example 5: save_cpickle
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def save_cpickle(cls, filename, data):
    """Save data into a CPICKLE file.

    Parameters
    ----------
    filename : str
        Filename path
    data :
        Data to be stored

    Returns
    -------
    None
    """
    try:
        import cPickle as pickle  # Python 2 C-accelerated pickle
    except ImportError:
        try:
            import pickle  # Python 3: pickle is C-accelerated by default
        except ImportError:
            message = '{name}: Unable to import pickle module.'.format(
                name=cls.__class__.__name__
            )
            cls.logger().exception(message)
            raise ImportError(message)
    pickle.dump(data, open(filename, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
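A loader following the same fallback convention could be sketched as below (illustrative, not part of the original class):

def load_cpickle(cls, filename):
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(filename, 'rb') as infile:
        return pickle.load(infile)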
Example 6: save_json
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def save_json(cls, filename, data):
    """Save data into a JSON file.

    Parameters
    ----------
    filename : str
        Filename path
    data :
        Data to be stored

    Returns
    -------
    None
    """
    try:
        import ujson as json
    except ImportError:
        try:
            import json
        except ImportError:
            message = '{name}: Unable to import json module. You can install it with `pip install ujson`.'.format(
                name=cls.__class__.__name__
            )
            cls.logger().exception(message)
            raise ImportError(message)
    json.dump(data, open(filename, 'w'))
Example 7: save_msgpack
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def save_msgpack(cls, filename, data):
    """Save data into a MSGPACK file.

    Parameters
    ----------
    filename : str
        Filename path
    data :
        Data to be stored

    Returns
    -------
    None
    """
    try:
        import msgpack
    except ImportError:
        message = '{name}: Unable to import msgpack module. You can install it with `pip install msgpack-python`.'.format(
            name=cls.__class__.__name__
        )
        cls.logger().exception(message)
        raise ImportError(message)
    msgpack.dump(data, open(filename, 'wb'), use_bin_type=True)
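On the read side the decoding options should mirror the write side; a minimal sketch, assuming the file was written with use_bin_type=True as above (the method name is illustrative):

def load_msgpack(cls, filename):
    import msgpack
    with open(filename, 'rb') as infile:
        # raw=False decodes msgpack strings back to Python str,
        # mirroring use_bin_type=True on the write side
        return msgpack.load(infile, raw=False)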
Example 8: save_marshal
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def save_marshal(cls, filename, data):
    """Save data into a MARSHAL file.

    Parameters
    ----------
    filename : str
        Filename path
    data :
        Data to be stored

    Returns
    -------
    None
    """
    try:
        import marshal
    except ImportError:
        # marshal ships with the Python standard library, so this fallback should rarely trigger
        message = '{name}: Unable to import marshal module.'.format(
            name=cls.__class__.__name__
        )
        cls.logger().exception(message)
        raise ImportError(message)
    marshal.dump(data, open(filename, 'wb'))
Example 9: to_msgpack
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def to_msgpack(self, *args, **kwargs):
    return self.__dict__  # msgpack.dump(self.to_dict(*args, **kwargs))
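The method above returns the raw __dict__ and leaves the actual packing commented out. A hypothetical completed variant, assuming to_dict returns a msgpack-serializable dict (this body is our illustration, not the original project's code):

def to_msgpack(self, *args, **kwargs):
    # packb returns the packed bytes directly, with no file involved
    return msgpack.packb(self.to_dict(*args, **kwargs), use_bin_type=True)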
Example 10: load_embeddings
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def load_embeddings(self):
    """Generate embeddings suited for the current vocab, or load previously cached ones."""
    embedding_file = os.path.join(self.args.output_dir, 'embedding.msgpack')
    if not os.path.exists(embedding_file):
        embeddings = load_embeddings(self.args.pretrained_embeddings, self.vocab,
                                     self.args.embedding_dim, mode=self.args.embedding_mode,
                                     lower=self.args.lower_case)
        with open(embedding_file, 'wb') as f:
            msgpack.dump(embeddings, f)
    else:
        with open(embedding_file, 'rb') as f:
            embeddings = msgpack.load(f)
    return embeddings
Example 11: _retrieve_data
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def _retrieve_data(shelf_name, shelve_stacks, tid, chrom, key, score, logger,
                   merged_transcripts, chains, monoexonic_tree):
    shelf = shelve_stacks[shelf_name]
    strand, dumped = next(shelf["cursor"].execute("select strand, features from dump where tid = ?", (tid,)))
    dumped = json.loads(dumped)
    try:
        features = dumped["features"]
        exon_set = tuple(sorted([(exon[0], exon[1], strand) for exon in features["exon"]],
                                key=operator.itemgetter(0, 1)))
        if len(exon_set) > 1:
            introns = tuple([(_[0] + 1, _[1] - 1) for _ in zip([_[1] for _ in exon_set][:-1],
                                                               [_[0] for _ in exon_set][1:])])
        else:
            introns = tuple([tuple([exon_set[0][0], exon_set[0][1]])])
        cds_set = tuple(sorted([(exon[0], exon[1]) for exon in features.get("CDS", [])],
                               key=operator.itemgetter(0, 1)))
        monoexonic = not (len(exon_set) > 1)
        data = dict()
        data["introns"], data["strand"], data["score"] = introns, strand, score
        data["monoexonic"] = monoexonic
        data["is_reference"], data["keep_redundant"] = dumped["is_reference"], dumped["keep_redundant"]
        data["start"], data["end"], data["cds_set"] = key[0], key[1], cds_set
        data["key"] = (tuple([tid, shelf_name]), chrom, (data["start"], data["end"]))
        if data["monoexonic"] is True:
            # Additional check at the end because the intervaltree class does not support item removal yet.
            caught = dict((i.value, merged_transcripts[i.value])
                          for i in monoexonic_tree.find(data["start"], data["end"])
                          if i.value in merged_transcripts)
        else:
            caught = dict((i, merged_transcripts[i]) for i in chains.get(data["introns"], []))
        return data, caught
    except (TypeError, IndexError, ValueError, KeyError) as exc:
        logger.error("Error in analysing %s. Skipping. Error: %s", tid, exc)
        return None, None
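The intron computation above pairs each exon's end with the next exon's start. A small worked example with illustrative coordinates:

exon_set = ((100, 200, "+"), (300, 400, "+"), (500, 600, "+"))
introns = tuple((end + 1, start - 1)
                for end, start in zip([e[1] for e in exon_set][:-1],
                                      [e[0] for e in exon_set][1:]))
assert introns == ((201, 299), (401, 499))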
Example 12: store_transcripts
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def store_transcripts(shelf_stacks, logger, seed=None):
    """
    Function that analyses the exon lines from the original file
    and organises the data into a proper dictionary.

    :param shelf_stacks: dictionary containing the name and the handles of the shelf DBs
    :type shelf_stacks: dict
    :param logger: logger instance.
    :type logger: logging.Logger
    :param seed: optional seed for the NumPy random number generator.
    :type seed: (int|None)
    :return: yields the results of _analyse_chrom, one chromosome at a time.
    """
    transcripts = collections.defaultdict(dict)
    # Read the indices (chrom, start, end, strand and transcript ID) from the
    # temporary databases, then store them.
    for shelf_name in shelf_stacks:
        shelf_score = shelf_stacks[shelf_name]["score"]
        is_reference = shelf_stacks[shelf_name]["is_reference"]
        redundants_to_keep = shelf_stacks[shelf_name]["keep_redundant"]
        try:
            for values in shelf_stacks[shelf_name]["cursor"].execute("SELECT chrom, start, end, strand, tid FROM dump"):
                chrom, start, end, strand, tid = values
                if (start, end) not in transcripts[chrom]:
                    transcripts[chrom][(start, end)] = list()
                transcripts[chrom][(start, end)].append((tid, shelf_name, shelf_score, is_reference,
                                                         redundants_to_keep))
        except sqlite3.OperationalError as exc:
            raise sqlite3.OperationalError("dump not found in {}; exception: {}".format(shelf_name, exc))
    np.random.seed(seed)
    for chrom in sorted(transcripts.keys()):
        logger.debug("Starting with %s (%d positions)",
                     chrom,
                     len(transcripts[chrom]))
        yield from _analyse_chrom(chrom, transcripts[chrom], shelve_stacks=shelf_stacks,
                                  logger=logger)
Example 13: test_normal
# Required import: import msgpack [as alias]
# Alternatively: from msgpack import dump [as alias]
def test_normal(self):
    logger, listener, logging_queue = self.create_logger("test_normal")
    with self.assertLogs(logger=logger, level="DEBUG") as cmo:
        # FASTA out and GTF out are just the file names, without the temporary directory.
        # Moreover, they will be complemented by the identifier!
        batch_file = tempfile.NamedTemporaryFile(mode="wb", delete=False)
        proc = ProcRunner(checking.CheckingProcess,
                          batch_file,
                          logging_queue,
                          shelve_stacks=[],
                          fasta=self.fasta,
                          identifier=0,
                          fasta_out=self.fasta_out,
                          gtf_out=self.gtf_out,
                          seed=None,
                          tmpdir=tempfile.gettempdir(),
                          log_level="DEBUG")
        import msgpack
        msgpack.dump([], batch_file)
        proc.start()
        time.sleep(0.1)  # Necessary, otherwise the check might be too fast for the file system
        self.assertEqual(proc.func.fasta_out, os.path.join(tempfile.gettempdir(), self.fasta_out + "-0"))
        self.assertTrue(os.path.exists(proc.func.fasta_out), proc.func.fasta_out)
        self.assertEqual(proc.func.gtf_out, os.path.join(tempfile.gettempdir(), self.gtf_out + "-0"))
        self.assertTrue(os.path.exists(proc.func.gtf_out), proc.func.gtf_out)
        self.submission_queue.put(("EXIT", None, None, None))
        time.sleep(0.1)
        proc.stop()
        os.remove(proc.func.fasta_out)
        os.remove(proc.func.gtf_out)
        batch_file.close()
        assert not proc.is_alive()
        self.maxDiff = 10000
        # self.assertEqual(cmo.output, [
        #     "DEBUG:Checker-0:Starting Checker-0",
        #     "DEBUG:Checker-0:Created output FASTA {} and GTF {}".format(proc.func.fasta_out, proc.func.gtf_out),
        #     "DEBUG:Checker-0:(('GT', 'AG'), ('GC', 'AG'), ('AT', 'AC'))",
        #     "DEBUG:Checker-0:Finished for Checker-0"])
        self.assertIsInstance(proc.func, mp.Process)
        with self.assertRaises(TypeError):
            _ = pickle.dumps(proc.func)
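As the test shows, msgpack.dump accepts any writable binary file-like object, not just a path. A condensed standalone version of that round trip (the data and file handling are illustrative):

import msgpack
import tempfile

batch_file = tempfile.NamedTemporaryFile(mode="wb", delete=False)
msgpack.dump([], batch_file)  # pack an empty batch into the temporary file
batch_file.close()

with open(batch_file.name, "rb") as f:
    assert msgpack.load(f) == []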