

Python cloudpickle.dumps Method Code Examples

This article collects typical usage examples of the cloudpickle.dumps method in Python. If you are wondering what cloudpickle.dumps does, how to call it, or what idiomatic usage looks like, the curated examples below may help. You can also explore further usage examples from the cloudpickle module, where this method lives.


The following presents 15 code examples of the cloudpickle.dumps method, sorted by popularity by default.

Example 1: dumps

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def dumps(obj, protocol=None):
    """Serialize obj as a string of bytes allocated in memory

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
    between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python.
    """
    file = BytesIO()  # pickle output is bytes, so a binary buffer is needed
    try:
        cp = CloudPickler(file, protocol=protocol)
        cp.dump(obj)
        return file.getvalue()
    finally:
        file.close()


# Include pickle's unpickling functions (load, loads) in this namespace
Developer: pywren, Project: pywren-ibm-cloud, Lines: 22, Source: cloudpickle.py
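
For reference, a minimal round-trip through the public API looks like this (a sketch, assuming cloudpickle is installed; cloudpickle.loads is an alias for pickle.loads):

import cloudpickle

# Lambdas are not serializable with the standard pickle module,
# but cloudpickle pickles them by value.
square = lambda x: x * x
payload = cloudpickle.dumps(square)    # bytes
restored = cloudpickle.loads(payload)
assert restored(4) == 16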

Example 2: _run_on_cluster

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def _run_on_cluster(
    experiment_fn: ExperimentFn,
    skein_cluster: SkeinCluster,
    eval_monitor_log_thresholds: Optional[Dict[str, Tuple[float, float]]] = None,
    n_try: int = 0
) -> Optional[metrics.Metrics]:
    def _new_experiment_fn():
        return _add_monitor_to_experiment(experiment_fn())
    new_experiment_fn = _new_experiment_fn

    # Attempt serialization early to avoid allocating unnecessary resources
    serialized_fn = cloudpickle.dumps(new_experiment_fn)
    with skein_cluster.client:
        return _execute_and_await_termination(
            skein_cluster,
            serialized_fn,
            eval_monitor_log_thresholds,
            n_try=n_try
        ) 
Developer: criteo, Project: tf-yarn, Lines: 21, Source: client.py

Example 3: get_safe_experiment_fn

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def get_safe_experiment_fn(full_fn_name: str, *args):
    """
    tf-yarn serializes the provided experiment function with cloudpickle.dumps.
    This is good for interactive experiments but can sometimes fail
    because the function is not serializable.
    You can use this wrapper function
    if you ship your experiment function (via conda, pex) manually to the workers.

    full_fn_name
        the name of the function ( with the full path to package and module)
        i.e. tf_yarn.my_module.my_experiment_fn

    args
        arguments to be provided to this function

    """
    module_name, fn_name = full_fn_name.rsplit('.', 1)
    module = importlib.import_module(module_name)
    experiment_fn = getattr(module, fn_name)
    return partial(experiment_fn, *args) 
Developer: criteo, Project: tf-yarn, Lines: 22, Source: client.py
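
A hypothetical call, assuming a module my_pkg.trainers that defines build_experiment and is already shipped to the workers (both names are illustrative):

# Returns a picklable partial that resolves the function by import path
# on the worker side instead of serializing the function body.
safe_fn = get_safe_experiment_fn("my_pkg.trainers.build_experiment", "hdfs://data/train")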

Example 4: test__prepare_container

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def test__prepare_container():
    with contextlib.ExitStack() as stack:
        # mock modules
        mocked_client_call = stack.enter_context(
            patch(f"{MODULE_TO_TEST}.skein.ApplicationClient.from_current"))
        mocked_logs = stack.enter_context(patch(f'{MODULE_TO_TEST}._setup_container_logs'))
        mocked_cluster_spec = stack.enter_context(patch(f'{MODULE_TO_TEST}.cluster.start_cluster'))

        # fill client mock
        mocked_client = mock.MagicMock(spec=skein.ApplicationClient)
        host_port = ('localhost', 1234)
        instances = [('worker', 10), ('chief', 1)]
        mocked_client.kv.wait.return_value = json.dumps(instances).encode()
        mocked_client_call.return_value = mocked_client
        (client, cluster_spec, cluster_tasks) = _prepare_container(host_port)

        # checks
        mocked_logs.assert_called_once()
        mocked_cluster_spec.assert_called_once_with(host_port, mocked_client, cluster_tasks)
        assert client == mocked_client
        assert cluster_tasks == list(iter_tasks(instances)) 
Developer: criteo, Project: tf-yarn, Lines: 23, Source: test__task_commons.py

Example 5: as_dict

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def as_dict(self):
        """Convert this objet in a json seriable dict (can be use in __init__)
        """

        # Get the properties inherited from OptiGenAlg
        OptiGenAlgNsga2Deap_dict = super(OptiGenAlgNsga2Deap, self).as_dict()
        if self.toolbox is None:
            OptiGenAlgNsga2Deap_dict["toolbox"] = None
        else:  # Store the cloudpickle-serialized data as a str so it survives JSON save files
            OptiGenAlgNsga2Deap_dict["toolbox"] = {
                "__class__": str(type(self._toolbox)),
                "__repr__": str(self._toolbox.__repr__()),
                "serialized": dumps(self._toolbox).decode("ISO-8859-2"),
            }
        # The class name is added to the dict for deserialization purposes
        # Overwrite the mother class name
        OptiGenAlgNsga2Deap_dict["__class__"] = "OptiGenAlgNsga2Deap"
        return OptiGenAlgNsga2Deap_dict 
Developer: Eomys, Project: pyleecan, Lines: 20, Source: OptiGenAlgNsga2Deap.py
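
Reading such a dict back implies the reverse transformation; a minimal sketch (not the pyleecan implementation):

from cloudpickle import loads

toolbox_dict = OptiGenAlgNsga2Deap_dict["toolbox"]
if toolbox_dict is not None:
    # ISO-8859-2 maps every byte to a code point, so encode() recovers
    # the exact byte string that cloudpickle.dumps produced.
    toolbox = loads(toolbox_dict["serialized"].encode("ISO-8859-2"))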

Example 6: test_pickle_meta_evaluator

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def test_pickle_meta_evaluator():
    set_seed(100)
    tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
    max_path_length = 200
    env = GarageEnv(PointEnv())
    n_traj = 3
    with tempfile.TemporaryDirectory() as log_dir_name:
        runner = LocalRunner(
            SnapshotConfig(snapshot_dir=log_dir_name,
                           snapshot_mode='last',
                           snapshot_gap=1))
        meta_eval = MetaEvaluator(test_task_sampler=tasks,
                                  max_path_length=max_path_length,
                                  n_test_tasks=10,
                                  n_exploration_traj=n_traj)
        policy = RandomPolicy(env.spec.action_space)
        algo = MockAlgo(env, policy, max_path_length, n_traj, meta_eval)
        runner.setup(algo, env)
        log_file = tempfile.NamedTemporaryFile()
        csv_output = CsvOutput(log_file.name)
        logger.add_output(csv_output)
        meta_eval.evaluate(algo)
        meta_eval_pickle = cloudpickle.dumps(meta_eval)
        meta_eval2 = cloudpickle.loads(meta_eval_pickle)
        meta_eval2.evaluate(algo) 
Developer: rlworkgroup, Project: garage, Lines: 27, Source: test_meta_evaluator.py

Example 7: tcp_send_object

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def tcp_send_object(sock, obj, compress=False, pre_pickled=False):
    """Sends any python object over TCP using cloud-pickle with optional LZ4
    compression. Returns True if sent, False if connection closed"""
    data = cloudpickle.dumps(obj) if not pre_pickled else obj
    if compress:
        import lz4.frame
        data = lz4.frame.compress(data)

    # Send metadata to receiver: Size of the data buffer and whether
    # compression is enabled
    sock.send(struct.pack("II", len(data), 1 if compress else 0))
    sent = sock.send(data)
    if not sent:
        return False
    # Assumes either the connection closed (sent == 0) or the full buffer was
    # sent; a partial send interrupted mid-transfer would trip this assertion.
    assert sent == len(data)
    return True 
Developer: opherlieber, Project: rltime, Lines: 20, Source: utils.py
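
A matching receive-side helper is not shown in the source; a sketch under the same framing assumptions (an 8-byte "II" header, then the payload):

import struct
import cloudpickle

def tcp_recv_object(sock):
    """Receives an object sent with tcp_send_object. Returns None if the
    connection closed before a full message arrived."""
    header_size = struct.calcsize("II")
    header = sock.recv(header_size)
    if len(header) < header_size:
        return None
    size, compressed = struct.unpack("II", header)
    chunks = []
    while size > 0:
        chunk = sock.recv(size)
        if not chunk:
            return None
        chunks.append(chunk)
        size -= len(chunk)
    data = b"".join(chunks)
    if compressed:
        import lz4.frame
        data = lz4.frame.decompress(data)
    return cloudpickle.loads(data)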

Example 8: __init__

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def __init__(self, history_cls, history_args):
        super().__init__()

        self._last_needed_feed_count = 0
        self.results = {}
        self.pending_counts = {}

        # Make sure to use 'spawn' and not 'fork' to allow shared CUDA tensors
        # on Linux
        ctx = mp.get_context('spawn')
        self.close_event = ctx.Event()
        self.qevent = ctx.Event()
        # Queue for requests, such as getting training data
        self.request_queue = ctx.Queue(10)
        # Queue for updates like new acting samples and priority updates
        self.update_queue = ctx.Queue(10)
        # Queue for sending back request results
        self.result_queue = ctx.Queue()

        self._process = ctx.Process(
            target=self.run,
            args=(history_cls, cloudpickle.dumps(history_args)))

        self._process.start() 
Developer: opherlieber, Project: rltime, Lines: 26, Source: parallel_history.py

Example 9: run_with_logger

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def run_with_logger(thunk, logdir):
    from epg.launching import logger
    from mpi4py import MPI
    rank = MPI.COMM_WORLD.Get_rank()
    if rank == 0:
        os.makedirs(logdir, exist_ok=True)
    try:
        with logger.scoped_configure(dir=logdir, format_strs=None if rank == 0 else []):
            retval = thunk()
            if rank == 0:
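                # protocol=-1 selects pickle.HIGHEST_PROTOCOL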
                atomic_write(pickle.dumps(retval, protocol=-1), os.path.join(logdir, 'retval.pkl'))
            return retval
    except Exception as e:
        with open(os.path.join(logdir, "exception%i.txt" % rank), 'wt') as fh:
            fh.write(traceback.format_exc())
        raise e 
Developer: openai, Project: EPG, Lines: 18, Source: launcher.py

Example 10: __init__

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def __init__(self, make_env, observation_space):
        self.observation_space = observation_space
        if isinstance(observation_space, gym.spaces.Box):
            num_elems = len(np.array(observation_space.low).flatten())
            zeros = [0] * num_elems
            self._obs_buf = Array('b', zeros)
        else:
            self._obs_buf = None
        self._pipe, other_end = Pipe()
        self._proc = Process(target=self._worker,
                             args=(other_end,
                                   self._obs_buf,
                                   cloudpickle.dumps(make_env)),
                             daemon=True)
        self._proc.start()
        self._running_cmd = None
        other_end.close()
        self._pipe.send(('action_space', None))
        self.action_space = self._get_response() 
Developer: flyyufelix, Project: sonic_contest, Lines: 21, Source: gym.py

Example 11: create_worker

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def create_worker(
        worker_id: int,
        step_queue: Queue,
        env_factory: Callable[[int, List[SideChannel]], BaseEnv],
        engine_configuration: EngineConfig,
    ) -> UnityEnvWorker:
        parent_conn, child_conn = Pipe()

        # Need to use cloudpickle for the env factory function since function objects aren't picklable
        # on Windows as of Python 3.6.
        pickled_env_factory = cloudpickle.dumps(env_factory)
        child_process = Process(
            target=worker,
            args=(
                child_conn,
                step_queue,
                pickled_env_factory,
                worker_id,
                engine_configuration,
                logger.level,
            ),
        )
        child_process.start()
        return UnityEnvWorker(child_process, worker_id, parent_conn) 
Developer: StepNeverStop, Project: RLs, Lines: 26, Source: subprocess_env_manager.py

Example 12: store

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def store(key, value, chunksize=950000):
    serialized = cloudpickle.dumps(value, 2)
    values = {}
    for i in range(0, len(serialized), chunksize):
        values['%s.%s' % (key, i // chunksize)] = serialized[i:i + chunksize]
    return memcache.set_multi(values) 
Developer: doitintl, Project: iris, Lines: 8, Source: main.py
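
A plausible retrieval counterpart (illustrative, not part of the source project) that reassembles the chunks with memcache.get_multi; max_chunks is an assumed upper bound on the number of stored chunks:

def retrieve(key, max_chunks=64):
    # Request more chunk keys than can exist; missing keys are simply absent
    # from the result dict, so len(result) is the actual chunk count.
    keys = ['%s.%s' % (key, i) for i in range(max_chunks)]
    result = memcache.get_multi(keys)
    serialized = b''.join(result['%s.%s' % (key, i)] for i in range(len(result)))
    return cloudpickle.loads(serialized)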

Example 13: __getstate__

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x) 
Developer: Hwhitetooth, Project: lirpg, Lines: 5, Source: __init__.py
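
The counterpart is not shown in the snippet; a minimal sketch of the usual pattern, restoring the wrapped attribute on unpickling:

def __setstate__(self, state):
    import cloudpickle
    self.x = cloudpickle.loads(state)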

Example 14: _setup_cluster_spec

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def _setup_cluster_spec(
    task_instances: List[Tuple[str, int]],
    app: skein.ApplicationClient
) -> tf.train.ClusterSpec:
    tasks_not_in_cluster = ['evaluator', 'tensorboard']
    cluster_instances = [t for t in task_instances if t[0] not in tasks_not_in_cluster]
    app.kv[constants.KV_CLUSTER_INSTANCES] = json.dumps(cluster_instances).encode()
    return tf.train.ClusterSpec(
        cluster.aggregate_spec(app, list(_internal.iter_tasks(cluster_instances)))
    ) 
Developer: criteo, Project: tf-yarn, Lines: 12, Source: client.py

Example 15: _send_config_proto

# Required import: import cloudpickle [as alias]
# Or: from cloudpickle import dumps [as alias]
def _send_config_proto(
        skein_cluster: SkeinCluster,
        tf_session_config: tf.compat.v1.ConfigProto):
    serialized_fn = cloudpickle.dumps(tf_session_config)
    skein_cluster.app.kv[constants.KV_TF_SESSION_CONFIG] = serialized_fn 
Developer: criteo, Project: tf-yarn, Lines: 7, Source: client.py


Note: The cloudpickle.dumps examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code. Do not reproduce without permission.