本文整理汇总了Python中dill.dumps方法的典型用法代码示例。如果您正苦于以下问题:Python dill.dumps方法的具体用法?Python dill.dumps怎么用?Python dill.dumps使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类dill的用法示例。
在下文中一共展示了dill.dumps方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _to_java
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def _to_java(self):
    """
    Serialize this instance into a Java StopWordsRemover carrier object.

    The instance is dill-dumped, zlib-compressed, and encoded as a single
    comma-separated string of byte values.  That string plus a marker id
    are stored as dummy "stop words" so the Java side can carry the
    Python payload.

    :return: Java object equivalent to this instance.
    """
    compressed = zlib.compress(dill.dumps(self))
    sc = SparkContext._active_spark_context
    # Encode every byte as "<int>," and join into one big string.
    encoded = ''.join(str(b) + ',' for b in bytearray(compressed))
    # The marker id lets PysparkPipelineWrapper recognize this carrier.
    payload = [encoded, PysparkObjId._getPyObjId()]
    string_cls = sc._gateway.jvm.java.lang.String
    java_array = sc._gateway.new_array(string_cls, len(payload))
    java_array[0:2] = payload[0:2]
    _java_obj = JavaParams._new_java_obj(PysparkObjId._getCarrierClass(javaName=True), self.uid)
    _java_obj.setStopWords(java_array)
    return _java_obj
示例2: serialize_torch_obj
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def serialize_torch_obj(
    model: nn.Module,
    criterion: Any,
    optimizer: Type[Optimizer],
    **kwargs
) -> str:
    """Serialize a torch model, loss and optimizer class into a JSON string.

    The triple (plus optimizer keyword arguments) is wrapped in a TorchObj
    and encoded via ``torch_encoder``; the parameter shapes are recorded
    alongside so the receiver can rebuild the weight tensors.
    """
    torch_obj = TorchObj(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        optimizer_params=kwargs,
        is_lazy=False,
        model_parameters=None,
    )
    shapes = [list(p.shape) for p in model.parameters()]
    return json.dumps({
        'torch_obj': torch_encoder(torch_obj),
        'shapes': shapes
    })
示例3: update_table
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def update_table(self, job_status_list=None):
    """
    Update the pyiron table object: add new columns if a new function was
    added, and add new rows for new jobs.

    Args:
        job_status_list (list/None): job states to include in the table;
            defaults to ["finished"] when None.
    """
    status_filter = ["finished"] if job_status_list is None else job_status_list
    # Record the start time in the project database before rebuilding.
    self.project.db.item_update({"timestart": datetime.now()}, self.job_id)
    with self.project_hdf5.open("input") as hdf5_input:
        self._pyiron_table.create_table(
            enforce_update=self._enforce_update,
            file=hdf5_input,
            level=self._project_level,
            job_status_list=status_filter,
        )
    self.to_hdf()
    # Persist the table both as CSV on disk and as JSON inside the HDF5 file.
    csv_path = os.path.join(self.working_directory, "pyirontable.csv")
    self._pyiron_table._df.to_csv(csv_path, index=False)
    with self.project_hdf5.open("output") as hdf5_output:
        hdf5_output["table"] = json.dumps(self.pyiron_table._df.to_dict())
    self.project.db.item_update(self._runtime(), self.job_id)
示例4: _run_several_circuits
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def _run_several_circuits(
    self,
    circuits: List[QuantumCircuit],
    output_name: str = None,
    callback: Callable = None
) -> List[QuantumCircuit]:
    """Run all the passes on the specified ``circuits``.

    Args:
        circuits: Circuits to transform via all the registered passes.
        output_name: The output circuit name. If ``None``, it will be set
            to the same as the input circuit name.
        callback: A callback function that will be called after each pass
            execution.

    Returns:
        The transformed circuits.
    """
    # TODO support for List(output_name) and List(callback)
    del output_name
    del callback
    # The pass manager is dill-serialized once and shipped to each worker.
    serialized_manager = dill.dumps(self)
    return parallel_map(PassManager._in_parallel, circuits,
                        task_kwargs={'pm_dill': serialized_manager})
示例5: invoke
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def invoke(self, event, context, inv_type=None, log_type='None', version=None):
    """Invoke the lambda function. This is basically a low-level lambda
    interface; in most cases, you won't need to use this by yourself.

    :param event: lambda input
    :param context: lambda execution client context
    :param inv_type: invocation type
    :param log_type: log type
    :param version: version
    """
    # Lazily push pending creates/updates before the first invocation.
    needs_update = (not self._was_updated
                    and self.create_options & UPDATE_LAZY == UPDATE_LAZY)
    if needs_update:
        self._create_or_update()
    params = {
        'FunctionName': self.name,
        'InvocationType': inv_type or self._inv_type,
        'LogType': log_type,
        'ClientContext': json.dumps(context),
        'Payload': json.dumps(event),
    }
    if version:
        params['Qualifier'] = version
    return self.client.invoke(**params)
示例6: _register_dill
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def _register_dill(self):
    """Register a dill-backed 'dill' serializer pair with the registry."""

    def encode(obj, dumper=dill_dumps):
        # Bind the dumper as a default so the registry closure stays cheap.
        return dumper(obj, protocol=pickle_protocol)

    def decode(s):
        return pickle_loads(str_to_bytes(s), load=dill_load)

    registry.register(
        name='dill',
        encoder=encode,
        decoder=decode,
        content_type='application/x-python-serialize',
        content_encoding='binary',
    )
# the same as upstream, but we need to copy it here so we can access it
示例7: add_function
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def add_function(self, cond, name, func):
    """Add a new function that will be executed when intercepting packets.

    Parameters
    ----------
    cond : :obj:`str`
        Name of the condition set (preconditions, postconditions,
        executions).
    name : :obj:`str`
        Name to identify the function in the `Template`.
    func : :obj:`function`
        Pointer to a function.
    """
    # Store the function as a hex string of its dill serialization so the
    # template stays text-serializable.
    self._functions[cond][name] = dill.dumps(func).hex()
示例8: create
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def create(cls, function, *args, **kwargs):
    """
    Capture the given callable and arguments as an instance of this class.

    :param callable function: The deferred action to take in the form of a function
    :param tuple args: Non-keyword arguments to the function
    :param dict kwargs: Keyword arguments to the function
    """
    # Serialize as early as possible and deserialize as late as possible:
    # the function is only unpickled when invoked, which avoids redundant
    # deserialization when the cache state is loaded from disk for
    # concurrently running jobs.
    serialized = [dill.dumps(part) for part in (function, args, kwargs)]
    return cls(*serialized,
               name=function.__name__,
               module=ModuleDescriptor.forModule(function.__module__).globalize())
示例9: safe_call
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def safe_call(self, method, *args, **kwargs):
    """
    A safe call to a method: the call is simply sent to be executed by the
    main thread.

    Parameters
    ----------
    method : str
        Method name to be executed by the main thread.
    *args : arguments
        Method arguments.
    *kwargs : keyword arguments
        Method keyword arguments.
    """
    if not self._running:
        raise RuntimeError(
            'Agent must be running to safely execute methods!'
        )
    # Ship the call over the in-process loopback socket for the main thread.
    payload = cloudpickle.dumps((method, args, kwargs))
    return self._loopback_reqrep('inproc://_loopback_safe', payload)
示例10: __init__
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def __init__(
    self,
    name='',
    nsaddr=None,
    addr=None,
    serializer=None,
    transport=None,
    base=Agent,
    attributes=None,
):
    """Set up the process wrapper around an agent of class ``base``."""
    super().__init__()
    self.name = name
    self._daemon = None
    host, port = address_to_host_port(addr)
    self._host = host
    # Port 0 lets the OS pick a free port when none was requested.
    self.port = 0 if port is None else port
    self.nsaddr = nsaddr
    self._serializer = serializer
    self._transport = transport
    # Pickle the base class so it can cross the process boundary.
    self.base = cloudpickle.dumps(base)
    self._shutdown_event = multiprocessing.Event()
    self._queue = multiprocessing.Queue()
    self._sigint = False
    self.attributes = attributes
示例11: to_json
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def to_json(self):
    """
    Convert this Prediction Problem to JSON.  The representation captures
    the operations, column names and table_meta, but not the
    cutoff_strategy.

    Returns
    -------
    json: JSON representation of the Prediction Problem.
    """
    meta_json = self.table_meta.to_json() if self.table_meta else None
    operations = [json.loads(op_to_json(op)) for op in self.operations]
    return json.dumps({
        "operations": operations,
        "entity_id_col": self.entity_id_col,
        "label_col": self.label_col,
        "table_meta": meta_json,
    })
示例12: load_async
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def load_async(pool: Pool, fn: Callable, *args, callback: Callable = None, **kwargs) -> Any:
    """
    Load data asynchronously and serialize data via dill.

    Args:
        pool: multiprocessing pool to use for :func:`apply_async`
        fn: function to load a single sample
        *args: positional arguments to dump with dill
        callback: optional callback. defaults to None.
        **kwargs: keyword arguments to dump with dill

    Returns:
        Any: reference to obtain data with :func:`get`
    """
    if DILL_AVAILABLE:
        # dill can serialize closures/lambdas that pickle cannot.
        blob = dill.dumps((fn, args, kwargs))
        return pool.apply_async(dill_helper, (blob,), callback=callback)
    raise RuntimeError('dill is not installed. For async loading '
                       'please install it')
示例13: load_plugins
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def load_plugins(self):
    """Discover, instantiate and register every configured Machine plugin."""
    with indent(4):
        logger.debug("PLUGINS: %s", self._settings['PLUGINS'])
        for plugin in self._settings['PLUGINS']:
            for class_name, cls in import_string(plugin):
                # Only concrete subclasses of MachineBasePlugin are plugins.
                if not issubclass(cls, MachineBasePlugin) or cls is MachineBasePlugin:
                    continue
                logger.debug("Found a Machine plugin: {}".format(plugin))
                storage = PluginStorage(class_name)
                instance = cls(SlackClient(), self._settings, storage)
                missing_settings = self._register_plugin(class_name, instance)
                if missing_settings:
                    show_invalid(class_name)
                    with indent(4):
                        error_msg = "The following settings are missing: {}".format(
                            ", ".join(missing_settings)
                        )
                        puts(colored.red(error_msg))
                        puts(colored.red("This plugin will not be loaded!"))
                    del instance
                else:
                    instance.init()
                    show_valid(class_name)
        # Persist the collected help texts for the manual command.
        self._storage.set('manual', dill.dumps(self._help))
示例14: test_blue_io
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def test_blue_io(self):
# Verify that dill can round-trip both the dispatcher and its Blueprint,
# and that neither building the blueprint nor running either one mutates
# the serialized state.
import dill
# Baseline solution computed before any serialization.
s0 = self.dsp()
pre_dsp = dill.dumps(self.dsp)
blue = self.dsp.blue()
# Creating the blueprint must not change the dispatcher's pickle.
self.assertEqual(pre_dsp, dill.dumps(self.dsp))
pre = dill.dumps(blue), pre_dsp
sol = blue()
post = dill.dumps(blue), dill.dumps(self.dsp)
# Running the blueprint must leave both pickles unchanged.
self.assertEqual(pre, post)
s = self.dsp()
post = dill.dumps(blue), dill.dumps(self.dsp)
# Running the dispatcher directly must also leave the pickles unchanged.
self.assertEqual(pre, post)
# All execution paths must agree on the same solution.
self.assertEqual(s, sol)
self.assertEqual(s0, sol)
# Size ordering: blueprint pickle < dispatcher pickle < solution pickle.
self.assertLess(*map(len, post))
self.assertLess(len(post[1]), len(dill.dumps(s)))
# Deserialized objects behave like the originals (dsp comes back unsolved).
blue, dsp = list(map(dill.loads, post))
self.assertEqual(dsp.solution, {})
self.assertEqual(s, dsp())
self.assertEqual(s, blue())
示例15: __init__
# 需要导入模块: import dill [as 别名]
# 或者: from dill import dumps [as 别名]
def __init__(self, algo, domain, trials, rstate, asynchronous=None,
max_queue_len=1,
poll_interval_secs=1.0,
max_evals=sys.maxsize,
verbose=0,
):
self.algo = algo
self.domain = domain
self.trials = trials
if asynchronous is None:
self.asynchronous = trials.asynchronous
else:
self.asynchronous = asynchronous
self.poll_interval_secs = poll_interval_secs
self.max_queue_len = max_queue_len
self.max_evals = max_evals
self.rstate = rstate
if self.asynchronous:
if 'FMinIter_Domain' in trials.attachments:
logger.warn('over-writing old domain trials attachment')
msg = pickler.dumps(domain)
# -- sanity check for unpickling
pickler.loads(msg)
trials.attachments['FMinIter_Domain'] = msg