

Python MPI.Get_processor_name Method Code Examples

This article collects typical usage examples of the mpi4py.MPI.Get_processor_name method in Python. If you are unsure what MPI.Get_processor_name does or how to use it in practice, the curated examples below should help. You can also explore further usage examples from its containing module, mpi4py.MPI.


The following presents 9 code examples of the MPI.Get_processor_name method, sorted by popularity by default.
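Before the project examples, here is a minimal, self-contained sketch of what the method returns (the script name and launch command are illustrative; run under an MPI launcher, e.g. mpiexec -n 4 python hello_mpi.py):

from mpi4py import MPI

comm = MPI.COMM_WORLD
# Get_processor_name() returns the hostname of the node this rank is running on
name = MPI.Get_processor_name()
print("Rank %d of %d is running on %s" % (comm.Get_rank(), comm.Get_size(), name))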

Example 1: __init__

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
def __init__(self):
    # Initialize communicator and obtain standard MPI variables
    comm = MPI.COMM_WORLD

    self.comm = comm
    self.rank = comm.Get_rank()
    self.num_procs = comm.Get_size()
    self.name = MPI.Get_processor_name()

    # Define master rank
    self.master_rank = 0

    # Define message tags for task, result, and announce
    self.task_tag = 10
    self.result_tag = 20
    self.announce_tag = 30

    # create an empty message buffer
    messages = []
Developer ID: westpa, Project: westpa, Lines of code: 21, Source file: mpi.py

Example 2: worker_process

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
def worker_process(comm, rank, tags, status):
    # Worker processes execute code below
    name = MPI.Get_processor_name()
    print("I am a worker with rank %d on %s." % (rank, name))
    comm.send(None, dest=0, tag=tags.READY)

    while True:
        print('Receiving ...')
        task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        print('received!')

        tag = status.Get_tag()

        if tag == tags.START:
            # Do the work here
            result = task + 1
            print('attempting to send ...')
            comm.send(result, dest=0, tag=tags.DONE)
            print('sending worked ...')
        elif tag == tags.EXIT:
            print('went through exit')
            break 
Developer ID: hvasbath, Project: beat, Lines of code: 24, Source file: pt_toy_example.py
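The tags object that worker_process expects is not shown in the snippet; the sketch below is a hypothetical master side that drives it (the Tags enum, the task value 41, and the two-rank layout are assumptions, not taken from the beat project). Run with mpiexec -n 2.

from enum import IntEnum
from mpi4py import MPI

class Tags(IntEnum):  # hypothetical stand-in for the project's tags object
    READY = 0
    START = 1
    DONE = 2
    EXIT = 3

comm = MPI.COMM_WORLD
status = MPI.Status()
if comm.rank == 0:
    comm.recv(source=1, tag=Tags.READY, status=status)       # wait until the worker is ready
    comm.send(41, dest=1, tag=Tags.START)                     # hand out one task
    print(comm.recv(source=1, tag=Tags.DONE, status=status))  # worker returns 42
    comm.send(None, dest=1, tag=Tags.EXIT)                    # tell the worker to stop
else:
    worker_process(comm, comm.rank, Tags, status)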

Example 3: get_id_within_node

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
def get_id_within_node(comm=None):
    from mpi4py import MPI
    if comm is None:
        comm = MPI.COMM_WORLD
    rank = comm.rank
    nodename = MPI.Get_processor_name()
    nodelist = comm.allgather(nodename)
    # count how many lower-ranked processes share this node name
    return len([i for i in nodelist[:rank] if i == nodename])
Developer ID: SheffieldML, Project: PyDeepGP, Lines of code: 9, Source file: parallel.py
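A common use of this per-node index is device pinning, one GPU per rank; a minimal sketch under that assumption (CUDA_VISIBLE_DEVICES is the standard CUDA environment variable and must be set before any CUDA library is initialized):

import os

local_id = get_id_within_node()
# restrict this process to the GPU matching its position on the node
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_id)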

Example 4: _init_gpu

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
# (this snippet also assumes a pycuda import, e.g.: from pycuda import driver)
def _init_gpu(comm):
    """ Chooses a gpu and creates a context on it. """
    # Find out how many GPUs are available to us on this node.
    driver.init()
    num_gpus = driver.Device.count()

    # Figure out the names of the other hosts.
    rank = comm.Get_rank()  # Find out which process I am.
    name = MPI.Get_processor_name()  # The name of my node.
    hosts = comm.allgather(name)  # Get the names of all the other hosts.

    # Find out which GPU to take (by precedence).
    gpu_id = hosts[0:rank].count(name)
    if gpu_id >= num_gpus:
        raise TypeError('No GPU available.')

    # Create a context on the appropriate device.
    for k in range(num_gpus):
        try:
            device = driver.Device((gpu_id + k) % num_gpus)
            context = device.make_context()
        except driver.Error:  # device busy or unavailable; try the next one
            continue
        else:
            # print("On %s: process %d taking gpu %d of %d." % (name, rank, gpu_id + k, num_gpus))
            break
    else:
        raise RuntimeError('Could not create a context on any GPU.')

    return device, context  # Return device and context.

# Global variable for the global space.
# The leading double underscore should prevent outside modules from accessing
# this variable.
Developer ID: stanfordnqp, Project: maxwell-b, Lines of code: 36, Source file: space.py
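A pycuda context created this way must be released by the caller; a hedged usage sketch, assuming pycuda is installed and at most one rank per GPU:

from mpi4py import MPI

device, context = _init_gpu(MPI.COMM_WORLD)
try:
    pass  # ... allocate device memory and launch kernels here ...
finally:
    context.pop()  # release the context so the process can exit cleanly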

Example 5: get_host

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
def get_host():
    """Get the hostname that this task is running on"""
    return MPI.Get_processor_name() 
Developer ID: IanLee1521, Project: utilities, Lines of code: 5, Source file: mpi.py

Example 6: _get_tasks

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
def _get_tasks(self):
    """
    Internal generator that yields the next available task sent to this
    worker by the master rank
    """
    if self.is_root():
        raise RuntimeError("Root rank mistakenly told to await tasks")

    # logging info
    if self.comm.rank == 0:
        args = (self.rank, MPI.Get_processor_name(), self.comm.size)
        self.logger.debug("worker master rank is %d on %s with %d processes available" % args)

    # continuously loop and wait for instructions
    while True:
        args = None
        tag = -1

        # have the master rank of the subcomm ask for a task and then broadcast
        if self.comm.rank == 0:
            self.basecomm.send(None, dest=0, tag=self.tags.READY)
            args = self.basecomm.recv(source=0, tag=MPI.ANY_TAG, status=self.status)
            tag = self.status.Get_tag()

        # bcast to everyone in the worker subcomm
        args = self.comm.bcast(args)  # args is [task_number, task_value]
        tag = self.comm.bcast(tag)

        # yield the task
        if tag == self.tags.START:

            # yield the task value
            yield args

            # wait for everyone in the task group before telling master this task is done
            self.comm.Barrier()
            if self.comm.rank == 0:
                self.basecomm.send([args[0], None], dest=0, tag=self.tags.DONE)

        # see ya later
        elif tag == self.tags.EXIT:
            break

    # wait for everyone in the task group and exit
    self.comm.Barrier()
    if self.comm.rank == 0:
        self.basecomm.send(None, dest=0, tag=self.tags.EXIT)

    # debug logging
    self.logger.debug("rank %d process is done waiting" % self.rank)
Developer ID: bccp, Project: nbodykit, Lines of code: 51, Source file: batch.py

Example 7: worker_process

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
# (logger, num, tconfig and sample_pt_chain are defined elsewhere in the beat project)
def worker_process(comm, tags, status):
    """
    Worker processes, that do the actual sampling.
    They receive all arguments from the master process.

    Parameters
    ----------
    comm : mpi.communicator
    tags : message tags
    status : mpi.status object
    """
    name = MPI.Get_processor_name()
    logger.debug(
        "Entering worker process with rank %d on %s." % (comm.rank, name))
    comm.send(None, dest=0, tag=tags.READY)

    logger.debug('Worker %i receiving work package ...' % comm.rank)
    kwargs = comm.recv(source=0, tag=tags.INIT, status=status)
    logger.debug('Worker %i received package!' % comm.rank)

    try:
        step = kwargs['step']
    except KeyError:
        raise ValueError('Step method not defined!')

    # do initial sampling
    result = sample_pt_chain(**kwargs)
    comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE)

    # enter repeated sampling
    while True:
        # TODO: make transd-compatible
        data = num.empty(step.lordering.size, dtype=tconfig.floatX)
        comm.Recv([data, MPI.DOUBLE],
                  tag=MPI.ANY_TAG, source=0, status=status)

        tag = status.Get_tag()
        if tag == tags.SAMPLE:
            lpoint = step.lij.a2l(data)
            start = step.lij.l2d(lpoint)
            kwargs['start'] = start
            # overwrite previous point in case got swapped
            kwargs['step'].chain_previous_lpoint[comm.rank] = lpoint
            result = sample_pt_chain(**kwargs)

            logger.debug('Worker %i attempting to send ...' % comm.rank)
            comm.Send([result, MPI.DOUBLE], dest=0, tag=tags.DONE)
            logger.debug('Worker %i sent message successfully ...' % comm.rank)

        elif tag == tags.BETA:
            logger.debug(
                'Worker %i received beta: %f' % (comm.rank, data[0]))
            kwargs['step'].beta = data[0]

        elif tag == tags.EXIT:
            logger.debug('Worker %i went through EXIT!' % comm.rank)
            break 
Developer ID: hvasbath, Project: beat, Lines of code: 59, Source file: pt.py

Example 8: get_device

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
# (this snippet also assumes: import logging)
def get_device(comm, num_masters=1, gpu_limit=-1, gpu_for_master=False):
    """Arguments:
        comm: MPI intracommunicator containing all processes
        num_masters: number of processes that will be assigned as masters
        gpu_limit: maximum number of gpus to use on one host
        gpu_for_master: whether master processes should be given a gpu
       Returns device name 'cpu' or 'gpuN' appropriate for use with theano"""

    def get_gpu_list(mem_lim=2000):
        import gpustat
        stats = gpustat.GPUStatCollection.new_query()
        ids = list(map(lambda gpu: int(gpu.entry['index']), stats))
        free = list(map(lambda gpu: float(gpu.entry['memory.total']) - float(gpu.entry['memory.used']), stats))
        # keep only GPUs with more than mem_lim MB of free memory
        unused_gpu = list(filter(lambda x: x[1] > mem_lim, zip(ids, free)))
        return [x[0] for x in unused_gpu]

    # Get the ranks of the other processes that share the same host
    # and determine which GPU to take on the host
    if gpu_limit == 0:
        logging.info("requested not to use a gpu")
        dev = 'cpu'
        return dev

    rank = comm.Get_rank()
    host = MPI.Get_processor_name()
    hosts = comm.allgather(host)
    workers_sharing_host = [i for i in range(comm.Get_size()) if hosts[i] == host]
    if rank in workers_sharing_host:
        worker_id = workers_sharing_host.index(rank)
    else:
        worker_id = -1

    # query the GPUs one rank at a time so gpustat sees up-to-date usage
    for inode in range(comm.Get_size()):
        if rank == inode:
            gpu_list = get_gpu_list()
            if gpu_limit >= 0:
                gpu_list = gpu_list[:gpu_limit]  # limit the number of gpus
            if len(gpu_list) == 0:
                logging.info("No free GPU available. Using CPU instead.")
                dev = 'cpu'
            elif worker_id < 0:
                # alone on that machine
                logging.info("Alone on the node and taking the last gpu")
                dev = 'gpu%d' % (gpu_list[-1])
            else:
                logging.debug("Sharing a node and taking a gpu")
                dev = 'gpu%d' % (gpu_list[worker_id % len(gpu_list)])
            logging.debug("rank %d can have %s", rank, dev)
        comm.Barrier()
    return dev
Developer ID: vlimant, Project: mpi_learn, Lines of code: 54, Source file: manager.py
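A hedged usage sketch: the returned 'gpuN'/'cpu' name follows Theano's old device syntax (as the docstring states), so it would typically be written into THEANO_FLAGS before Theano is first imported; the floatX setting here is illustrative:

import os
from mpi4py import MPI

dev = get_device(MPI.COMM_WORLD, num_masters=1, gpu_limit=1)
os.environ['THEANO_FLAGS'] = 'device=%s,floatX=float32' % dev
# ... 'import theano' must come after THEANO_FLAGS is set ...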

Example 9: spawn_load

# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import Get_processor_name [as alias]
def spawn_load(self):
    '''spawn a parallel loading process using MPI'''

    if not para_load:  # para_load is a module-level flag defined elsewhere in the project
        return

    from mpi4py import MPI
    import os
    import sys

    hostname = MPI.Get_processor_name()
    mpiinfo = MPI.Info.Create()

    # using key='host' can trigger an "all nodes filled" error, because spawning
    # needs an additional slot; also, the hostname must exactly match the names
    # shown in the output of --display-allocation
    if hostname != hostname.split('.')[0]:
        hostname = hostname.split('.')[0]
    mpiinfo.Set(key='add-host', value=hostname)

    num_spawn = 1

    if "CPULIST_train" in os.environ:
        # see https://gist.github.com/lebedov/eadce02a320d10f0e81c
        envstr = ''
        envstr += 'CPULIST_train=%s\n' % os.environ['CPULIST_train']
        mpiinfo.Set(key='env', value=envstr)

    ninfo = mpiinfo.Get_nkeys()  # number of info keys set (debugging aid)

    mpicommand = sys.executable

    file_dir = os.path.dirname(os.path.realpath(__file__))  # the dir of imagenet.py

    self.icomm = MPI.COMM_SELF.Spawn(mpicommand,
            args=[file_dir + '/proc_load_mpi.py'],
            info=mpiinfo, maxprocs=num_spawn)
Developer ID: uoguelph-mlrg, Project: Theano-MPI, Lines of code: 44, Source file: imagenet.py
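On the spawned side, the child script (proc_load_mpi.py above) typically reconnects to its parent through an intercommunicator; a minimal hedged sketch of that pattern, not taken from the project:

from mpi4py import MPI

icomm = MPI.Comm.Get_parent()  # intercommunicator back to the spawning process
# ... receive work, load and preprocess data, send results over icomm ...
icomm.Disconnect()  # detach cleanly before exiting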


Note: The mpi4py.MPI.Get_processor_name examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and any distribution or use of the code should follow the corresponding project's License. Do not reproduce without permission.