

Python distributed.Client Method Code Examples

This article collects typical code examples of the Python dask.distributed.Client method. If you are wondering how distributed.Client is used in practice, or are looking for concrete examples, the curated samples below may help. You can also explore further usage examples from the dask.distributed module, where this method lives.


Below are 15 code examples of the distributed.Client method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
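
Before the examples, here is a minimal sketch of the pattern most of them share: start a (local) cluster, attach a Client to it, and close both when done. The worker counts below are illustrative and not taken from any of the examples.

from dask.distributed import Client, LocalCluster

# Start a local cluster and attach a client to it.
cluster = LocalCluster(n_workers=2, threads_per_worker=1)
client = Client(cluster)
print(client)  # prints the scheduler address and worker summary

# ... submit work via client.submit(), client.compute(), or the collections API ...

client.close()
cluster.close()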

Example 1: LaunchDaskDistributedClient

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def LaunchDaskDistributedClient(self, scheduler_ip=None, scheduler_port=None):

        if self.parallel and self.parallel_model == "dask" and self.is_dask_scheduler_initialised is False:

            from multiprocessing.pool import ThreadPool
            try:
                import dask
                from dask.distributed import Client, LocalCluster
            except ImportError:
                raise ImportError("dask is not installed. Install it 'using pip install dask[complete]'")

            dask.config.set(pool=ThreadPool(self.no_of_cpu_cores))
            # INITIALISE CLUSTER
            if scheduler_ip is None:
                cluster = LocalCluster(n_workers=self.no_of_cpu_cores, processes=False, threads_per_worker=None)
                client = Client(cluster)
            else:
                client = Client(scheduler_ip)

            self.dask_client = client

            self.is_dask_scheduler_initialised = True 
Developer ID: romeric, Project: florence, Lines of code: 24, Source file: FEMSolver.py
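
A note on Example 1: processes=False makes LocalCluster run its workers in the current process (as threads) rather than as separate worker processes, which avoids inter-process serialization but leaves pure-Python work subject to the GIL; with threads_per_worker=None the cluster picks the thread count itself.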

Example 2: dask_client_create

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def dask_client_create(**kwargs):
    """
    Create a Dask client object. The function is trivial; it exists so that
    the Dask client is created in a uniform way throughout the program.

    Parameters
    ----------
    kwargs: dict, optional
        kwargs will be passed to the Dask client constructor

    Returns
    -------
    client: dask.distributed.Client
        Dask client object
    """
    _kwargs = {"processes": True, "silence_logs": logging.ERROR}
    _kwargs.update(kwargs)
    client = Client(**_kwargs)
    dask.config.set(shuffle="disk")
    path_dask_data = os.path.expanduser("~/.dask")
    dask.config.set({"temporary_directory": path_dask_data})
    return client 
Developer ID: NSLS-II, Project: PyXRF, Lines of code: 24, Source file: map_processing.py
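
A hypothetical call of the helper in Example 2, assuming it is importable from the project's map_processing module. Because every keyword argument is forwarded to the Client constructor, the defaults can be overridden at the call site; the worker counts are illustrative.

client = dask_client_create(n_workers=4, threads_per_worker=2)
# ... run map processing ...
client.close()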

Example 3: activate_client

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def activate_client(self,
                        LSF = True,
                        num_processes = 2,
                        adapt = False):

        if LSF:
            from dask_jobqueue import LSFCluster
            cluster = LSFCluster()
            self._adapt = adapt
            self.num_processes = num_processes

            if self._adapt:
                _logger.debug(f"adapting cluster from 1 to {self.num_processes} processes")
                cluster.adapt(minimum = 2, maximum = self.num_processes, interval = "1s")
            else:
                _logger.debug(f"scaling cluster to {self.num_processes} processes")
                cluster.scale(self.num_processes)

            _logger.debug(f"scheduling cluster with client")
            self.client = distributed.Client(cluster)
        else:
            self.client = None
            self._adapt = False
            self.num_processes = 0 
Developer ID: choderalab, Project: perses, Lines of code: 26, Source file: relative_setup.py

Example 4: setup_dask

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def setup_dask(scheduler, retries=-1):
    if scheduler is None or scheduler == "{scheduler}":
        print("Setting up local cluster...")
        return Client()
    succeeded = False
    try_num = 0
    while not succeeded:
        try_num += 1
        if try_num == retries:
            raise Exception("Failed to connect to Dask client")
        try:
            client = Client(scheduler, timeout=60)
            succeeded = True
        except Exception as e:  # pylint: disable=broad-except
            print(e)
            time.sleep(15)  # wait before the next connection attempt

    return client 
Developer ID: crisbodnar, Project: TensorFlow-NEAT, Lines of code: 20, Source file: dask_helpers.py
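
A hypothetical call of the helper in Example 4: passing None (or the unexpanded "{scheduler}" placeholder) falls back to a local cluster, while a scheduler address makes it retry the connection; with the default retries=-1 it keeps retrying indefinitely. The address and retry count below are illustrative.

client = setup_dask(None)  # start a local cluster
# client = setup_dask("tcp://10.0.0.5:8786", retries=5)  # remote scheduler, up to 5 attempts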

Example 5: main

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def main():
    client = Client()  # noqa

    categories = ["category_%d" % i for i in range(26)]
    columns = ["click"] + ["numeric_%d" % i for i in range(13)] + categories

    df = dd.read_csv("day_1", sep="\t", names=columns, header=None)

    encoding = {c: "bytes" for c in categories}
    fixed = {c: 8 for c in categories}
    df.to_parquet(
        "day-1-bytes.parquet",
        object_encoding=encoding,
        fixed_text=fixed,
        compression="SNAPPY",
    ) 
Developer ID: dask, Project: dask-ml, Lines of code: 18, Source file: make_parquet.py
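
A note on Example 5: object_encoding and fixed_text are options of the fastparquet engine that dd.DataFrame.to_parquet forwards to it, so this snippet assumes fastparquet is the Parquet engine in use; the pyarrow engine does not accept these arguments.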

Example 6: run_search

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def run_search():

    from dask.distributed import Client, LocalCluster
    import joblib
    import hypertunity as ht

    #client = Client(scheduler_file='scheduler.json')
    client = Client()
    print(client)

    domain = ht.Domain({
                    "cost_rate": set([-.8])
    })

    # with joblib.parallel_backend('dask'):
    #     with joblib.Parallel() as parallel:
    #         print("Doing the work ... ")
    #         results = parallel(joblib.delayed(run_games)(*domain.sample().as_namedtuple()) for s in range(1))
    #
    # print(results)
    run_games(-.8) 
Developer ID: PrincetonUniversity, Project: PsyNeuLink, Lines of code: 23, Source file: predator_prey_dmt.py

Example 7: create_cluster

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def create_cluster(self):
        self.cluster = LocalCluster(
            n_workers=1, processes=False, silence_logs=logging.DEBUG)
        self.client = Client(self.cluster) 
Developer ID: int-brain-lab, Project: ibllib, Lines of code: 6, Source file: iblpipe.py

Example 8: test_dask_multiprocessing

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def test_dask_multiprocessing(tmpdir):
    """
    Test that dask multiprocessing works on Python 3.
    """
    # Command to start the kernel
    cmd = "from spyder_kernels.console import start; start.main()"

    with setup_kernel(cmd) as client:
        # Remove all variables
        client.execute("%reset -f")
        client.get_shell_msg(block=True, timeout=TIMEOUT)

        # Write multiprocessing code to a file
        # Runs two times to verify that in the second case it doesn't break
        code = """
from dask.distributed import Client

if __name__=='__main__':
    client = Client()
    client.close()
    x = 'hello'
"""
        p = tmpdir.join("mp-test.py")
        p.write(code)

        # Run code two times
        client.execute("runfile(r'{}')".format(to_text_string(p)))
        client.get_shell_msg(block=True, timeout=TIMEOUT)

        client.execute("runfile(r'{}')".format(to_text_string(p)))
        client.get_shell_msg(block=True, timeout=TIMEOUT)

        # Verify that the `x` variable is defined
        client.inspect('x')
        msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
        content = msg['content']
        assert content['found'] 
Developer ID: spyder-ide, Project: spyder-kernels, Lines of code: 39, Source file: test_console_kernel.py

Example 9: test_cross_val_score_client

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def test_cross_val_score_client(trend):
    "Test the deprecated dask Client interface"
    coords, data = trend[:2]
    model = Trend(degree=1)
    nsplits = 5
    cross_validator = ShuffleSplit(n_splits=nsplits, random_state=0)
    client = Client(processes=False)
    futures = cross_val_score(model, coords, data, cv=cross_validator, client=client)
    scores = [future.result() for future in futures]
    client.close()
    assert len(scores) == nsplits
    npt.assert_allclose(scores, 1) 
Developer ID: fatiando, Project: verde, Lines of code: 14, Source file: test_model_selection.py

Example 10: setup

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def setup(self, *args, **kwargs):
        """Benchmark time and peak memory of `compute_hindcast` and
        `bootstrap_hindcast`. This executes the same tests as `Compute` but
        on chunked data with dask.distributed.Client."""
        requires_dask()
        # magic taken from
        # https://github.com/pydata/xarray/blob/stable/asv_bench/benchmarks/rolling.py
        super().setup(**kwargs)
        self.client = Client() 
Developer ID: bradyrx, Project: climpred, Lines of code: 11, Source file: benchmarks_hindcast.py

Example 11: setup

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def setup(self, *args, **kwargs):
        """Benchmark time and peak memory of `compute_perfect_model` and
        `bootstrap_perfect_model`. This executes the same tests as `Compute` but
        on chunked data with dask.distributed.Client."""
        requires_dask()
        # magic taken from
        # https://github.com/pydata/xarray/blob/stable/asv_bench/benchmarks/rolling.py
        super().setup(**kwargs)
        self.client = Client() 
Developer ID: bradyrx, Project: climpred, Lines of code: 11, Source file: benchmarks_perfect_model.py

Example 12: test_run_dask

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def test_run_dask(fix_task_env):
    import numpy as np
    from dask import delayed as dl
    from dask.distributed import Client

    dc = Client(processes=False)

    input_task_example, gathered_task_example, post_processing_task_example = (
        fix_task_env
    )
    parts = {"a": [0.0, 1.0, 2.0], "b": [-3.0, 10.0, 2.0], "c": [20.0]}
    numpoints = 20
    prefactor = 0.1

    input_delayed = dl(input_task_example)(parts)
    gathered_delayed = dl(gathered_task_example, nout=1)([input_delayed], [numpoints])[
        0
    ]
    post_proc_delayed = dl(post_processing_task_example)(
        input_delayed, gathered_delayed, prefactor
    )
    input_future = dc.compute(input_delayed)
    gathered_future = dc.compute(gathered_delayed)
    post_proc_future = dc.compute(post_proc_delayed)
    input_data = input_future.result()
    gathered_data = gathered_future.result()
    post_proc_data = post_proc_future.result()

    assert input_data == parts
    gather_results = {}
    for part in parts:
        gather_results[part] = np.linspace(0.0, 1.0, numpoints)
    for part in gather_results:
        assert np.all(gathered_data[part] == gather_results[part])
    post_proc_results = 0.0
    for part in parts:
        post_proc_results += (
            prefactor * np.sum(input_data[part]) * np.sum(gather_results[part])
        )
    assert post_proc_data == post_proc_results 
Developer ID: microsoft, Project: qmt, Lines of code: 42, Source file: test_tasks.py
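
A note on the pattern in Example 12: Client.compute returns a Future for each delayed object immediately, and calling .result() blocks until that task graph has finished on the cluster; dask.compute, by contrast, blocks and returns the concrete values directly.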

Example 13: apply

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def apply(
        self,
        df: dd.DataFrame,
        scheduler: Scheduler = "processes",
        fault_tolerant: bool = False,
    ) -> np.ndarray:
        """Label Dask DataFrame of data points with LFs.

        Parameters
        ----------
        df
            Dask DataFrame containing data points to be labeled by LFs
        scheduler
            A Dask scheduling configuration: either a string option or
            a ``Client``. For more information, see
            https://docs.dask.org/en/stable/scheduling.html#
        fault_tolerant
            Output ``-1`` if LF execution fails?

        Returns
        -------
        np.ndarray
            Matrix of labels emitted by LFs
        """
        f_caller = _FunctionCaller(fault_tolerant)
        apply_fn = partial(apply_lfs_to_data_point, lfs=self._lfs, f_caller=f_caller)
        map_fn = df.map_partitions(lambda p_df: p_df.apply(apply_fn, axis=1))
        labels = map_fn.compute(scheduler=scheduler)
        labels_with_index = rows_to_triplets(labels)
        return self._numpy_from_row_data(labels_with_index) 
Developer ID: snorkel-team, Project: snorkel, Lines of code: 32, Source file: dask.py
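
A hedged sketch of how the apply method in Example 13 might be called; the applier class name, the labeling functions, and the DataFrame contents are illustrative and not taken from the excerpt.

import pandas as pd
import dask.dataframe as dd

# Build a small Dask DataFrame of data points to be labeled.
pdf = pd.DataFrame({"text": ["big discount offer", "meeting at noon"]})
ddf = dd.from_pandas(pdf, npartitions=2)

# applier = DaskLFApplier(lfs=[lf_contains_offer])   # hypothetical applier and labeling function
# L = applier.apply(ddf, scheduler="processes")      # (n_points, n_lfs) matrix of int labels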

Example 14: main

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def main():
    # Setup logging on the main process:
    _start_logging()

    # Start three worker processes on the local machine:
    client = Client(n_workers=3, threads_per_worker=1)

    # Setup Eliot logging on each worker process:
    client.run(_start_logging)

    # Run the Dask computation in the worker processes:
    result = main_computation()
    print("Result:", result) 
Developer ID: itamarst, Project: eliot, Lines of code: 15, Source file: dask_eliot.py
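
A note on Example 14: client.run(fn) executes fn once on every currently connected worker (it does not go through the task graph), which is why it is used here to initialise logging in each worker process before the computation runs.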

Example 15: test_future

# Required import: from dask import distributed [as alias]
# Or: from dask.distributed import Client [as alias]
def test_future(self):
        """compute_with_trace() can handle Futures."""
        client = Client(processes=False)
        self.addCleanup(client.shutdown)
        [bag] = dask.persist(from_sequence([1, 2, 3]))
        bag = bag.map(lambda x: x * 5)
        result = dask.compute(bag)
        self.assertEqual(result, ([5, 10, 15],))
        self.assertEqual(result, compute_with_trace(bag)) 
Developer ID: itamarst, Project: eliot, Lines of code: 11, Source file: test_dask.py


Note: the dask.distributed.Client examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open source code and documentation platforms. The snippets were selected from open source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not republish without permission.