

Python Client.shutdown Method Code Examples

This article collects typical usage examples of the Python method ipyparallel.Client.shutdown. If you have been wondering exactly what Client.shutdown does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, ipyparallel.Client.


Four code examples of the Client.shutdown method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
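Before the extracted examples, here is a minimal, self-contained sketch of the typical Client.shutdown lifecycle: start a local cluster, run a trivial task, then shut everything down. This is not taken from the examples below; it assumes ipyparallel is installed with a default IPython profile on a POSIX system, and the sleep is a crude stand-in for a proper readiness check.

# Minimal sketch (not from the examples below); assumes a default profile.
import subprocess
import time

from ipyparallel import Client

# Start two engines in the background; --daemonize returns immediately (POSIX only).
subprocess.check_call('ipcluster start -n 2 --daemonize', shell=True)
time.sleep(10)  # crude wait for the controller and engines to come up

c = Client()  # connect to the default profile's controller
print('engines:', c.ids)
print(c[:].map(lambda x: x * x, range(4), block=True))

c.shutdown(hub=True)  # stop all engines AND the hub/controller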

Example 1: stop_server

# Module to import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import shutdown [as alias]
import glob
import os
import shutil
import subprocess
import sys
import time


def stop_server(is_slurm=False):
    '''
    Programmatically stops the ipyparallel server.
    '''
    sys.stdout.write("Stopping cluster...\n")
    sys.stdout.flush()

    
    if is_slurm:
        from ipyparallel import Client
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        c = Client(ipython_dir=pdir, profile=profile)        
        ee = c[:]
        ne = len(ee)
        print('Shutting down %d engines.' % ne)
        c.shutdown(hub=True)
        shutil.rmtree('profile_' + str(profile))
        try:
            shutil.rmtree('./log/')
        except OSError:
            print('creating log folder')
            
        files = glob.glob('*.log')
        os.mkdir('./log')
        
        for fl in files:
            shutil.move(fl, './log/')
    
    else:
        
        proc = subprocess.Popen("ipcluster stop", shell=True, stderr=subprocess.PIPE)
        # readline() returns bytes under Python 3; decode before substring checks
        line_out = proc.stderr.readline().decode()
        if 'CRITICAL' in line_out:
            sys.stdout.write("No cluster to stop...")
            sys.stdout.flush()
        elif 'Stopping' in line_out:
            st = time.time()
            sys.stdout.write('Waiting for cluster to stop...')
            while (time.time() - st) < 4:
                sys.stdout.write('.')
                sys.stdout.flush()
                time.sleep(1)
        else:
            print('**** Unrecognized syntax in ipcluster output, waiting for server to stop anyway ****')
    
        

    sys.stdout.write(" done\n")
Developer ID: valentina-s, Project: Constrained_NMF, Lines of code: 50, Source file: utilities.py
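A hypothetical invocation of the stop_server above in SLURM mode: IPPPDIR and IPPPROFILE are environment variables specific to Constrained_NMF's start-up scripts, not part of ipyparallel itself, and the values below are illustrative only.

# Hypothetical usage of the stop_server() defined above.
import os

os.environ['IPPPDIR'] = os.path.expanduser('~/.ipython')  # illustrative value
os.environ['IPPPROFILE'] = 'slurm'                        # illustrative value
stop_server(is_slurm=True)  # shuts down all engines and the hub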

Example 2: gibbs

# Module to import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import shutdown [as alias]
def gibbs(table_fp, mapping_fp, output_dir, loo, jobs, alpha1, alpha2, beta,
          source_rarefaction_depth, sink_rarefaction_depth,
          restarts, draws_per_restart, burnin, delay, cluster_start_delay,
          source_sink_column, source_column_value, sink_column_value,
          source_category_column):
    '''Gibbs sampler for Bayesian estimation of microbial sample sources.

    For details, see the project README file.
    '''
    # Create results directory. Click has already checked if it exists, and
    # failed if so.
    os.mkdir(output_dir)

    # Load the mapping file and biom table and remove samples which are not
    # shared.
    # Universal-newline mode is the default in Python 3, so plain 'r' suffices.
    with open(mapping_fp, 'r') as o:
        sample_metadata_lines = o.readlines()

    sample_metadata, biom_table = \
        _cli_sync_biom_and_sample_metadata(
            parse_mapping_file(sample_metadata_lines),
            load_table(table_fp))

    # If biom table has fractional counts, it can produce problems in indexing
    # later on.
    biom_table.transform(lambda data, id, metadata: np.ceil(data))

    # If biom table has sample metadata, there will be pickling errors when
    # submitting multiple jobs. We remove the metadata by making a copy of the
    # table without metadata.
    biom_table = Table(biom_table._data.toarray(),
                       biom_table.ids(axis='observation'),
                       biom_table.ids(axis='sample'))

    # Parse the mapping file and options to get the samples requested for
    # sources and sinks.
    source_samples, sink_samples = sinks_and_sources(
        sample_metadata, column_header=source_sink_column,
        source_value=source_column_value, sink_value=sink_column_value)

    # If we have no source samples, neither normal operation nor loo will
    # work, and we would also likely get strange errors.
    if len(source_samples) == 0:
        raise ValueError('Mapping file or biom table passed contain no '
                         '`source` samples.')

    # Prepare the 'sources' matrix by collapsing the `source_samples` by their
    # metadata values.
    sources_envs, sources_data = collapse_sources(source_samples,
                                                  sample_metadata,
                                                  source_category_column,
                                                  biom_table, sort=True)

    # Rarefy data if requested.
    sources_data, biom_table = \
        subsample_sources_sinks(sources_data, sink_samples, biom_table,
                                source_rarefaction_depth,
                                sink_rarefaction_depth)

    # Build a function that requires only a single parameter -- sample -- to
    # enable parallel processing if requested.
    if loo:
        f = partial(_cli_loo_runner, source_category=source_category_column,
                    alpha1=alpha1, alpha2=alpha2, beta=beta,
                    restarts=restarts, draws_per_restart=draws_per_restart,
                    burnin=burnin, delay=delay,
                    sample_metadata=sample_metadata,
                    sources_data=sources_data, sources_envs=sources_envs,
                    biom_table=biom_table, output_dir=output_dir)
        sample_iter = source_samples
    else:
        f = partial(_cli_sink_source_prediction_runner, alpha1=alpha1,
                    alpha2=alpha2, beta=beta, restarts=restarts,
                    draws_per_restart=draws_per_restart, burnin=burnin,
                    delay=delay, sources_data=sources_data,
                    biom_table=biom_table, output_dir=output_dir)
        sample_iter = sink_samples

    if jobs > 1:
        # Launch the ipcluster and wait for it to come up.
        subprocess.Popen('ipcluster start -n %s --quiet' % jobs, shell=True)
        time.sleep(cluster_start_delay)
        c = Client()
        c[:].map(f, sample_iter, block=True)
        # Shut the cluster down. Answer taken from SO:
        # http://stackoverflow.com/questions/30930157/stopping-ipcluster-engines-ipython-parallel
        c.shutdown(hub=True)
    else:
        for sample in sample_iter:
            f(sample)

    # Format results for output.
    samples = []
    samples_data = []
    for sample_fp in glob.glob(os.path.join(output_dir, '*')):
        samples.append(sample_fp.strip().split('/')[-1].split('.txt')[0])
        samples_data.append(np.loadtxt(sample_fp, delimiter='\t'))
    mp, mps = _cli_collate_results(samples, samples_data, sources_envs)

#.........the rest of the code is omitted here.........
Developer ID: mortonjt, Project: sourcetracker2, Lines of code: 103, Source file: gibbs.py
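The jobs > 1 branch above uses a common ipyparallel pattern: freeze every fixed argument with functools.partial so the mapped function takes exactly one item, then map it over a DirectView. A minimal sketch with a hypothetical worker function and toy arguments; in real code the worker would live in an importable module so the engines can unpickle the partial.

# Sketch of the partial + DirectView.map pattern from gibbs() above.
# process_one and its keyword arguments are hypothetical stand-ins.
import subprocess
import time
from functools import partial

from ipyparallel import Client

def process_one(sample, scale=1.0):
    return sample, scale

f = partial(process_one, scale=2.0)  # leave a single free parameter: sample

subprocess.Popen('ipcluster start -n 2 --quiet', shell=True)
time.sleep(10)  # analogous to cluster_start_delay in the example
c = Client()
results = c[:].map(f, ['s1', 's2', 's3'], block=True)
c.shutdown(hub=True)  # same teardown as in the example
print(results)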

Example 3: gibbs_cli

# Module to import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import shutdown [as alias]

#.........part of the code is omitted here.........
    # Load the metadata file and feature table.
    sample_metadata = parse_sample_metadata(open(mapping_fp, 'U'))
    feature_table = biom_to_df(load_table(table_fp))

    # Do high level check on feature data.
    feature_table = validate_gibbs_input(feature_table)

    # Remove samples not shared by both feature and metadata tables and order
    # rows equivalently.
    sample_metadata, feature_table = \
        intersect_and_sort_samples(sample_metadata, feature_table)

    # Identify source and sink samples.
    source_samples = get_samples(sample_metadata, source_sink_column,
                                 source_column_value)
    sink_samples = get_samples(sample_metadata, source_sink_column,
                               sink_column_value)

    # If we have no source samples, neither normal operation nor loo will
    # work, and we would also likely get strange errors.
    if len(source_samples) == 0:
        raise ValueError(('You passed %s as the `source_sink_column` and %s '
                          'as the `source_column_value`. There are no samples '
                          'which are sources under these values. Please see '
                          'the help documentation and check your mapping '
                          'file.') % (source_sink_column, source_column_value))

    # Prepare the 'sources' matrix by collapsing the `source_samples` by their
    # metadata values.
    csources = collapse_source_data(sample_metadata, feature_table,
                                    source_samples, source_category_column,
                                    'mean')

    # Rarefy collapsed source data if requested.
    if source_rarefaction_depth > 0:
        d = (csources.sum(1) >= source_rarefaction_depth)
        if not d.all():
            count_too_shallow = (~d).sum()
            shallowest = csources.sum(1).min()
            raise ValueError(('You requested rarefaction of source samples at '
                              '%s, but there are %s collapsed source samples '
                              'that have fewer sequences than that. The '
                              'shallowest of these is %s sequences.') %
                             (source_rarefaction_depth, count_too_shallow,
                              shallowest))
        else:
            csources = subsample_dataframe(csources, source_rarefaction_depth,
                                           replace=sample_with_replacement)

    # Prepare to rarefy sink data if we are not doing LOO. If we are doing
    # LOO, we skip the rarefaction and set sinks to `None`.
    if not loo:
        sinks = feature_table.loc[sink_samples, :]
        if sink_rarefaction_depth > 0:
            d = (sinks.sum(1) >= sink_rarefaction_depth)
            if not d.all():
                count_too_shallow = (~d).sum()
                shallowest = sinks.sum(1).min()
                raise ValueError(('You requested rarefaction of sink samples '
                                  'at %s, but there are %s sink samples that '
                                  'have fewer sequences than that. The '
                                  'shallowest of these is %s sequences.') %
                                 (sink_rarefaction_depth, count_too_shallow,
                                  shallowest))
            else:
                sinks = subsample_dataframe(sinks, sink_rarefaction_depth,
                                            replace=sample_with_replacement)
    else:
        sinks = None

    # If we've been asked to do multiple jobs, we need to spin up a cluster.
    if jobs > 1:
        # Launch the ipcluster and wait for it to come up.
        subprocess.Popen('ipcluster start -n %s --quiet' % jobs, shell=True)
        time.sleep(cluster_start_delay)
        cluster = Client()
    else:
        cluster = None

    # Run the computations.
    mpm, mps, fas = gibbs(csources, sinks, alpha1, alpha2, beta, restarts,
                          draws_per_restart, burnin, delay, cluster=cluster,
                          create_feature_tables=per_sink_feature_assignments)

    # If we started a cluster, shut it down.
    if jobs > 1:
        cluster.shutdown(hub=True)

    # Write results.
    mpm.to_csv(os.path.join(output_dir, 'mixing_proportions.txt'), sep='\t')
    mps.to_csv(os.path.join(output_dir, 'mixing_proportions_stds.txt'),
               sep='\t')
    if per_sink_feature_assignments:
        for sink, fa in zip(mpm.index, fas):
            fa.to_csv(os.path.join(output_dir, sink + '.feature_table.txt'),
                      sep='\t')

    # Plot contributions.
    fig, ax = plot_heatmap(mpm)
    fig.savefig(os.path.join(output_dir, 'mixing_proportions.pdf'), dpi=300)
Developer ID: biota, Project: sourcetracker2, Lines of code: 104, Source file: gibbs.py
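Example 3 differs from Example 2 in that it hands the Client object to a library function as an optional cluster argument instead of mapping over it directly. A minimal sketch of that pattern with a hypothetical run_chunks function; only the teardown call mirrors the example.

# Sketch of the optional-cluster pattern used by gibbs_cli above.
from ipyparallel import Client

def run_chunks(chunks, cluster=None):
    """Run serially when cluster is None, else fan out over the engines."""
    if cluster is None:
        return [sum(chunk) for chunk in chunks]
    return cluster[:].map(sum, chunks, block=True)

# Serial path needs no cluster at all:
print(run_chunks([[1, 2], [3, 4]]))

# Parallel path (assumes an ipcluster is already running):
# cluster = Client()
# print(run_chunks([[1, 2], [3, 4]], cluster=cluster))
# cluster.shutdown(hub=True)  # same teardown as in the example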

Example 4: stop_server

# Module to import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import shutdown [as alias]
import glob
import logging
import os
import shlex
import shutil
import subprocess
import sys
import time

logger = logging.getLogger(__name__)


def stop_server(ipcluster='ipcluster', pdir=None, profile=None, dview=None):
    """
    Programmatically stops the ipyparallel server.

    Parameters
    ----------
    ipcluster : str
        ipcluster binary file name; requires 4 path separators on Windows.
        Default: "ipcluster"
    """
    if 'multiprocessing' in str(type(dview)):
        dview.terminate()
    else:
        logger.info("Stopping cluster...")
        # Detect a SLURM setup from the environment without clobbering the
        # pdir/profile arguments that may have been passed in.
        if 'IPPPDIR' in os.environ and 'IPPPROFILE' in os.environ:
            is_slurm = True
        else:
            logger.debug('stop_server: not a slurm cluster')
            is_slurm = False

        if is_slurm:
            if pdir is None and profile is None:
                pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
            c = Client(ipython_dir=pdir, profile=profile)
            ee = c[:]
            ne = len(ee)
            print('Shutting down %d engines.' % ne)
            # Send the shutdown request while the client's sockets are still
            # open, then close the client.
            c.shutdown(hub=True)
            c.close()
            shutil.rmtree('profile_' + str(profile))
            try:
                shutil.rmtree('./log/')
            except OSError:
                print('creating log folder')

            files = glob.glob('*.log')
            os.mkdir('./log')

            for fl in files:
                shutil.move(fl, './log/')

        else:
            if ipcluster == "ipcluster":
                proc = subprocess.Popen(
                    "ipcluster stop", shell=True, stderr=subprocess.PIPE,
                    close_fds=(os.name != 'nt'))
            else:
                # shlex.split yields an argument list, so run it without a
                # shell; a list combined with shell=True would drop the args.
                proc = subprocess.Popen(shlex.split(ipcluster + " stop"),
                                        stderr=subprocess.PIPE,
                                        close_fds=(os.name != 'nt'))

            line_out = proc.stderr.readline()
            if b'CRITICAL' in line_out:
                logger.info("No cluster to stop...")
            elif b'Stopping' in line_out:
                st = time.time()
                logger.debug('Waiting for cluster to stop...')
                while (time.time() - st) < 4:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                    time.sleep(1)
            else:
                print(line_out)
                print(
                    '**** Unrecognized syntax in ipcluster output, waiting for server to stop anyway ****')

            proc.stderr.close()

    logger.info("stop_cluster(): done")
Developer ID: Peichao, Project: Constrained_NMF, Lines of code: 71, Source file: cluster.py
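All four examples call c.shutdown(hub=True) to tear everything down, but Client.shutdown can also stop a subset of engines while leaving the hub and the remaining engines alive. A short sketch, assuming a cluster with at least two engines is already running:

# Sketch: targeted shutdown (assumes a running cluster with >= 2 engines).
from ipyparallel import Client

c = Client()
print('engines before:', c.ids)
c.shutdown(targets=c.ids[:1])  # stop only the first engine; hub stays up
# c.shutdown(hub=True)         # would stop every engine and the controller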


Note: The ipyparallel.Client.shutdown method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers, and the source code copyright belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.