

Python sharedctypes.RawArray Method Code Examples

This article collects typical usage examples of Python's multiprocessing.sharedctypes.RawArray method. If you are wondering what sharedctypes.RawArray does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore the other usage examples available for the multiprocessing.sharedctypes module.


Below are 13 code examples of the sharedctypes.RawArray method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
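Before the examples, here is a minimal, self-contained sketch of the core pattern most of them share: allocate a RawArray, pass it to a child process, and view it as a NumPy array on both sides (the function and variable names here are illustrative):

import multiprocessing as mp
import numpy as np
from multiprocessing.sharedctypes import RawArray

def fill(shared, n):
    # Re-wrap the inherited buffer as a NumPy view and write through it.
    view = np.frombuffer(shared, dtype=np.float64, count=n)
    view[:] = np.arange(n)

if __name__ == '__main__':
    n = 4
    shared = RawArray('d', n)  # unsynchronized shared memory
    p = mp.Process(target=fill, args=(shared, n))
    p.start()
    p.join()
    print(list(shared))  # [0.0, 1.0, 2.0, 3.0]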

Example 1: create_execution_context

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def create_execution_context(self):
        pools = {}
        shared_data_lists = {}
        for worker_id in self._worker_ids:
            shared_data_list = []
            shared_data_lists[worker_id] = shared_data_list

        # For each worker_id, allocate one shared buffer per item in a batch;
        # the buffers are filled concurrently by the pool's worker processes.
            for _ in range(self.batch_size):
                shared_arr = RawArray(ctypes.c_float, self._expected_data_size)
                shared_data_list.append(shared_arr)

            pools[worker_id] = Pool(
                processes=self._num_processes,
                initializer=self._init_pool,
                initargs=(
                    shared_data_list,
                )
            )
        self.pools = pools
        self.shared_data_lists = shared_data_lists
        logger.info('execution_context created...')
        logger.info('pools: {}'.format(pools))
        logger.info('shared_data_lists: {}'.format(shared_data_lists)) 
Developer: facebookresearch, Project: video-long-term-feature-banks, Lines: 27, Source: execution_context.py
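The snippet does not include _init_pool. In this pattern the pool initializer typically stashes the inherited shared arrays in a module-level global so the worker functions can reach them; a hedged sketch of such an initializer (the global name is an assumption, not from the project):

shared_data_list = None  # module-level slot, populated in each worker process

def _init_pool(data_list):
    # Runs once per worker: publish the inherited RawArrays so the
    # functions executed by the pool can find them.
    global shared_data_list
    shared_data_list = data_list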

Example 2: RawArray

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer) 
Developer: war-and-code, Project: jawfish, Lines: 8, Source: __init__.py
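This mirrors the multiprocessing package's own top-level __init__.py: RawArray is exposed as a thin forwarding function so that multiprocessing.sharedctypes is only imported on first use, which keeps the package's import time down. Calling it is identical to calling the real thing, e.g. RawArray('i', 10) returns a shared array of ten C ints.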

Example 3: __init__

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def __init__(self, floating=None, shared_memory=False, numpy_dtype=None):
        if numpy_dtype:
            log.debug('Using numpy')
            if numpy_dtype in NUMPY_DEFAULTS:
                numpy_dtype = 'float32'
            if numpy_dtype not in numpy.sctypeDict:
                raise ValueError(BAD_NUMPY_TYPE_ERROR % numpy_dtype)

        if shared_memory and numpy_dtype:
            log.error('Shared memory for numpy arrays is not yet supported.')
            numpy_dtype = None

        if floating is None:
            floating = not shared_memory

        c_type = c_float if floating else c_uint8

        if shared_memory:
            self.bytes = lambda size: RawArray(c_uint8, size)
            self.color_list = lambda size: RawArray(3 * c_type, size)
            # Note https://stackoverflow.com/questions/37705974/

        elif numpy_dtype:
            self.bytes = bytearray
            self.color_list = lambda size: numpy.zeros((size, 3), numpy_dtype)

        else:
            self.bytes = bytearray
            self.color_list = lambda size: [(0, 0, 0)] * size 
Developer: ManiacalLabs, Project: BiblioPixel, Lines: 31, Source: data_maker.py
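The Stack Overflow link above concerns RawArrays whose elements are themselves small ctypes arrays, as with the RGB triples in color_list. A minimal sketch of viewing such a buffer as an (n, 3) NumPy array (the NumPy view is an illustration; the class above deliberately keeps shared memory and NumPy separate):

import numpy as np
from ctypes import c_float
from multiprocessing.sharedctypes import RawArray

colors = RawArray(3 * c_float, 5)  # five RGB triples in shared memory
view = np.frombuffer(colors, dtype=np.float32).reshape(-1, 3)
view[0] = (1.0, 0.5, 0.25)         # writes land in the shared buffer
print(colors[0][:])                # [1.0, 0.5, 0.25]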

Example 4: testInitLockFalse

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def testInitLockFalse(self):
        buffer = SharedBuffer(array_len=self.array_len,
                              array_type=self.array_type,
                              np_array_type=self.np_array_type,
                              array_lock=False)
        # Test array types are correct
        self.assertEqual(len(buffer._data_buffer), self.buffer_len)
        self.assertIsInstance(buffer._data_buffer[0], np.ndarray)
        self.assertIs(buffer._data_buffer[0].dtype, np.dtype(self.np_array_type))
        self.assertIsInstance(buffer._data_buffer[0].base,
                              type(Array(self.array_type, self.array_len).get_obj()))
        self.assertIsInstance(buffer._timestamp_buffer,
                              type(RawArray("d", self.buffer_len)))
        self.assertIsInstance(buffer._index_buffer,
                              type(RawArray("l", self.buffer_len))) 
Developer: kindredresearch, Project: SenseAct, Lines: 17, Source: test_sharedbuffer.py
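These isinstance checks work because ctypes caches its dynamically created array types: every RawArray('d', n) with the same typecode and length is an instance of the same class, so comparing against the type of a throwaway instance is reliable. A quick illustration:

from multiprocessing.sharedctypes import RawArray

a = RawArray('d', 16)
b = RawArray('d', 16)
assert type(a) is type(b)  # both are c_double_Array_16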

Example 5: create

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def create(shape, dtype='d', alignment=32):
    '''Create an uninitialised shared array. Avoid object arrays, as these
    will almost certainly break as the objects themselves won't be stored in
    shared memory, only the pointers
    '''
    shape = numpy.atleast_1d(shape).astype('i')
    dtype = numpy.dtype(dtype)

    # we're going to use a flat ctypes array
    N = numpy.prod(shape) + alignment
    # The upper bound of size we want to allocate to be certain
    #  that we can take an aligned array of the right size from it.
    N_bytes_big = N * dtype.itemsize
    # The final (= right) size of the array
    N_bytes_right = numpy.prod(shape) * dtype.itemsize
    dt = 'b'

    # We create the big array first
    a = sharedctypes.RawArray(dt, int(N_bytes_big))
    sa = shmarray(a, (N_bytes_big,), dt)

    # We pick the first index of the new array that is aligned
    # If the address of the first element is 1 and we want 8-alignment, the
    #  first aligned index of the array is going to be 7 == -1 % 8
    start_index = -sa.ctypes.data % alignment
    # Finally, we take the (aligned) subarray and reshape it.
    sa = sa[start_index:start_index + N_bytes_right].view(dtype).reshape(shape)

    return sa 
Developer: friends-of-freeswitch, Project: switchio, Lines: 31, Source: shmarray.py
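A short usage sketch of the helper above, checking that the returned view is shaped and aligned as requested (assuming create and the shmarray class live in an importable module named shmarray):

from shmarray import create  # hypothetical import path for the module above

sa = create((16, 16), dtype='d', alignment=32)
assert sa.shape == (16, 16)
assert sa.ctypes.data % 32 == 0  # first element sits on a 32-byte boundary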

Example 6: zeros

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def zeros(shape, dtype='d'):
    """Create an shared array initialised to zeros. Avoid object arrays, as these
    will almost certainly break as the objects themselves won't be stored in
    shared memory, only the pointers
    """
    sa = create(shape, dtype=dtype)
    # contrary to the documentation, sharedctypes.RawArray does NOT always
    # return an array which is initialised to zero - do it ourselves
    # http://code.google.com/p/python-multiprocessing/issues/detail?id=25
    sa[:] = numpy.zeros(1, dtype)
    return sa 
Developer: friends-of-freeswitch, Project: switchio, Lines: 13, Source: shmarray.py
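For context: current CPython's sharedctypes.RawArray does memset a size-allocated block to zero, so the explicit re-zeroing above is mainly defensive against the older interpreters the linked issue was filed for. It is cheap insurance either way:

from multiprocessing.sharedctypes import RawArray

buf = RawArray('d', 3)
assert list(buf) == [0.0, 0.0, 0.0]  # zero-filled on current CPython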

Example 7: create

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def create(shape, dtype='d', alignment=32):
    """Create an uninitialised shared array. Avoid object arrays, as these
    will almost certainly break as the objects themselves won't be stored in shared
    memory, only the pointers"""
    shape = numpy.atleast_1d(shape).astype('i')
    dtype = numpy.dtype(dtype)

    # we're going to use a flat ctypes array
    N = int(numpy.prod(shape) + alignment)
    # The upper bound of size we want to allocate to be certain
    #  that we can take an aligned array of the right size from it.
    N_bytes_big = int(N * dtype.itemsize)
    # The final (= right) size of the array
    N_bytes_right = int(numpy.prod(shape) * dtype.itemsize)

    dt = 'b'

    # We create the big array first
    a = sharedctypes.RawArray(dt, N_bytes_big)

    sa = shmarray(a, (N_bytes_big,), dt)

    # We pick the first index of the new array that is aligned
    # If the address of the first element is 1 and we want 8-alignment, the
    #  first aligned index of the array is going to be 7 == -1 % 8
    start_index = -sa.ctypes.data % alignment
    # Finally, we take the (aligned) subarray and reshape it.
    sa = sa[start_index:start_index + N_bytes_right].view(dtype).reshape(shape)

    return sa 
Developer: vacancy, Project: Jacinle, Lines: 32, Source: shmarray.py

Example 8: zeros

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def zeros(shape, dtype='d'):
    """Create an shared array initialised to zeros. Avoid object arrays, as these
    will almost certainly break as the objects themselves won't be stored in shared
    memory, only the pointers"""
    sa = create(shape, dtype=dtype)
    # contrary to the documentation, sharedctypes.RawArray does NOT always return
    # an array which is initialised to zero - do it ourselves
    # http://code.google.com/p/python-multiprocessing/issues/detail?id=25
    sa[:] = numpy.zeros(1, dtype)
    return sa 
Developer: vacancy, Project: Jacinle, Lines: 12, Source: shmarray.py

Example 9: __init__

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def __init__(self, shape, dtype=np.float32):
        # Compute the total number of elements; int() is needed because
        # RawArray requires a plain Python int, not a numpy integer, as size
        size = int(np.prod(shape))
        # Get the size of element
        if dtype == np.float32:
            typecode = 'f'
        elif dtype == np.float64:
            typecode = 'd'
        else:
            assert False, 'Unknown dtype.'
        self.data = sharedctypes.RawArray(typecode, size)
        self.shape = shape
        self.dtype = dtype 
Developer: xdshang, Project: VidVRD-helper, Lines: 15, Source: feature.py
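Only the constructor is shown; a consumer would typically rebuild a NumPy view over data using the stored shape and dtype. A hedged sketch of such a helper (the function name is illustrative, not from the project):

import numpy as np

def as_ndarray(shared_feature):
    # View the flat shared buffer with the stored dtype/shape; no copy is
    # made, so writes are visible to every process sharing the buffer.
    return np.frombuffer(shared_feature.data,
                         dtype=shared_feature.dtype).reshape(shared_feature.shape)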

Example 10: make_shared

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def make_shared(n_envs, obs_space):
    shape = (n_envs, ) + obs_space.shape
    raw = RawArray(to_ctype(obs_space.dtype), int(np.prod(shape)))
    return np.frombuffer(raw, dtype=obs_space.dtype).reshape(shape) 
Developer: inoryy, Project: reaver, Lines: 6, Source: shm_multiproc.py
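to_ctype is not shown in the snippet; it presumably maps a NumPy dtype to the matching ctypes type. A hedged sketch of what such a mapping could look like (the table's coverage is an assumption; the project's actual version may differ):

import ctypes
import numpy as np

# Hypothetical dtype -> ctypes mapping; extend as needed.
_CTYPES = {
    np.dtype(np.float32): ctypes.c_float,
    np.dtype(np.float64): ctypes.c_double,
    np.dtype(np.int32): ctypes.c_int32,
    np.dtype(np.int64): ctypes.c_int64,
    np.dtype(np.uint8): ctypes.c_uint8,
}

def to_ctype(dtype):
    return _CTYPES[np.dtype(dtype)]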

Example 11: aucell4r

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def aucell4r(df_rnk: pd.DataFrame, signatures: Sequence[Type[GeneSignature]],
             auc_threshold: float = 0.05, noweights: bool = False, normalize: bool = False,
             num_workers: int = cpu_count()) -> pd.DataFrame:
    """
    Calculate enrichment of gene signatures for single cells.

    :param df_rnk: The rank matrix (n_cells x n_genes).
    :param signatures: The gene signatures or regulons.
    :param auc_threshold: The fraction of the ranked genome to take into account for the calculation of the
        Area Under the recovery Curve.
    :param noweights: Should the weights of the genes part of a signature be used in calculation of enrichment?
    :param normalize: Normalize the AUC values to a maximum of 1.0 per regulon.
    :param num_workers: The number of cores to use.
    :return: A dataframe with the AUCs (n_cells x n_modules).
    """
    if num_workers == 1:
        # Show progress bar ...
        aucs = pd.concat([enrichment4cells(df_rnk,
                                     module.noweights() if noweights else module,
                                     auc_threshold=auc_threshold) for module in tqdm(signatures)]).unstack("Regulon")
        aucs.columns = aucs.columns.droplevel(0)
    else:
        # Decompose the rankings dataframe: the index and columns are shared with the child processes via pickling.
        genes = df_rnk.columns.values
        cells = df_rnk.index.values
        # The actual rankings are shared directly. This is possible because during a fork from a parent process the
        # child process inherits the memory of the parent process. A RawArray is used instead of a synchronized Array
        # because these rankings are read-only.
        shared_ro_memory_array = RawArray(DTYPE_C, mul(*df_rnk.shape))
        array = np.frombuffer(shared_ro_memory_array, dtype=DTYPE)
        # Copy the contents of df_rank into this shared memory block using row-major ordering.
        array[:] = df_rnk.values.flatten(order='C')

        # The resulting AUCs are returned via a synchronized array.
        auc_mtx = Array('d', len(cells) * len(signatures))  # Double precision floats.

        # Convert the modules to modules with uniform weights if necessary.
        if noweights:
            signatures = list(map(lambda m: m.noweights(), signatures))

        # Do the analysis in separate child processes.
        chunk_size = ceil(float(len(signatures)) / num_workers)
        processes = [Process(target=_enrichment, args=(shared_ro_memory_array, chunk,
                                                       genes, cells, auc_threshold,
                                                       auc_mtx, (chunk_size*len(cells))*idx))
                     for idx, chunk in enumerate(chunked(signatures, chunk_size))]
        for p in processes:
            p.start()
        for p in processes:
            p.join()

        # Reconstitute the results array. Using C or row-major ordering.
        aucs = pd.DataFrame(data=np.ctypeslib.as_array(auc_mtx.get_obj()).reshape(len(signatures), len(cells)),
                            columns=pd.Index(data=cells, name='Cell'),
                            index=pd.Index(data=list(map(attrgetter("name"), signatures)), name='Regulon')).T
    return aucs/aucs.max(axis=0) if normalize else aucs 
Developer: aertslab, Project: pySCENIC, Lines: 58, Source: aucell.py
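_enrichment itself is not part of the snippet; in this pattern each worker re-wraps both shared blocks as NumPy arrays before computing. A hedged sketch of the reconstruction step only (the signature mirrors the Process call above; DTYPE stands in for the module-level constant, and the AUC computation is replaced by a placeholder):

import numpy as np

DTYPE = 'uint32'  # assumed stand-in for the module's DTYPE constant

def _enrichment(shm, modules, genes, cells, auc_threshold, auc_mtx, offset):
    # Rebuild a no-copy view over the shared rankings, row-major to match
    # the flatten(order='C') in the parent process.
    rankings = np.frombuffer(shm, dtype=DTYPE).reshape(len(cells), len(genes))
    # View the synchronized output buffer; this worker fills the slice
    # starting at `offset`.
    out = np.frombuffer(auc_mtx.get_obj(), dtype='d')
    for i, module in enumerate(modules):
        scores = np.zeros(len(cells))  # placeholder for the real AUC per cell
        out[offset + i * len(cells):offset + (i + 1) * len(cells)] = scores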

Example 12: __init__

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def __init__(self, array_len, array_type, np_array_type, buffer_len=DEFAULT_BUFFER_LEN, array_lock=True):
        """Inits the SharedBuffer object with size and data type.

        Args:
            buffer_len: An integer size of the buffer
            array_len: An integer size of each buffer element (usually numpy array)
            array_type: A ctypes data type of buffer elements, e.g. 'd'
            np_array_type: A numpy data type of buffer elements, e.g. 'float64'
            array_lock: A bool specifying whether the buffer will be used with Lock

        """
        self.array_len = array_len
        self.np_array_type = np_array_type
        self._buffer_len = buffer_len
        self._array_type = array_type

        # Data is stored in a circular buffer of shared arrays
        self._data_buffer = []
        if array_lock:
            for _ in range(self._buffer_len):
                self._data_buffer.append(np.frombuffer(Array(self._array_type, self.array_len).get_obj(),
                                                       dtype=self.np_array_type))
            # We also store time stamps corresponding to each array record
            self._timestamp_buffer = Array('d', self._buffer_len)
            # We also store the index corresponding to each array record
            self._index_buffer = Array('l', self._buffer_len)
        else:
            # use RawArray without internal lock if needed
            for _ in range(self._buffer_len):
                self._data_buffer.append(np.frombuffer(RawArray(self._array_type, self.array_len),
                                                       dtype=self.np_array_type))
            self._timestamp_buffer = RawArray('d', self._buffer_len)
            self._index_buffer = RawArray('l', self._buffer_len)
        # Value of `index_buffer` is always set to `self._counter`, which is then increased
        self._counter = 0
        # buffer_p is a pointer which always points to the next available slot in `data_buffer`
        # where the newest data array can be stored
        self._buffer_p = Value('i', 0)
        # This variable is set to 1 when a new array is stored and
        # set to 0 when a new array is read
        self._data_updated = Value('i', 0)

        # Lock to ensure that changing the `data_updated` as well as the data itself is atomic
        self._access_lock = Lock() 
Developer: kindredresearch, Project: SenseAct, Lines: 46, Source: sharedbuffer.py
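The two branches trade safety for speed: multiprocessing.Array wraps the same ctypes buffer in a SynchronizedArray whose internal lock is taken on every access, while RawArray returns the bare ctypes array with no synchronization at all. Since the class serializes access through its own _access_lock, the per-access locking of Array is redundant in the array_lock=False configuration. The difference in miniature:

from multiprocessing import Array
from multiprocessing.sharedctypes import RawArray

locked = Array('d', 4)   # SynchronizedArray: offers get_lock()/get_obj()
raw = RawArray('d', 4)   # bare ctypes array: no lock, index it directly

with locked.get_lock():  # synchronized access
    locked[0] = 1.0
raw[0] = 1.0             # unsynchronized access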

Example 13: open

# Required imports: from multiprocessing import sharedctypes [as alias]
# or alternatively: from multiprocessing.sharedctypes import RawArray [as alias]
def open(self, path):
        self.path = path
        try:
            self.locs, self.info = io.load_locs(path, qt_parent=self)
        except io.NoMetadataFileError:
            return
        groups = np.unique(self.locs.group)
        n_groups = len(groups)
        n_locs = len(self.locs)
        self.group_index = scipy.sparse.lil_matrix(
            (n_groups, n_locs), dtype=bool  # np.bool was removed in NumPy 1.24
        )
        progress = lib.ProgressDialog(
            "Creating group index", 0, len(groups), self
        )
        progress.set_value(0)
        for i, group in enumerate(groups):
            index = np.where(self.locs.group == group)[0]
            self.group_index[i, index] = True
            progress.set_value(i + 1)
        progress = lib.ProgressDialog(
            "Aligning by center of mass", 0, len(groups), self
        )
        progress.set_value(0)
        for i in range(n_groups):
            index = self.group_index[i, :].nonzero()[1]
            self.locs.x[index] -= np.mean(self.locs.x[index])
            self.locs.y[index] -= np.mean(self.locs.y[index])
            progress.set_value(i + 1)
        self.r = 2 * np.sqrt(np.mean(self.locs.x ** 2 + self.locs.y ** 2))
        self.update_image()
        status = lib.StatusDialog("Starting parallel pool...", self.window)
        global pool, x, y
        try:
            pool.close()
        except NameError:
            pass
        x = sharedctypes.RawArray("f", self.locs.x)
        y = sharedctypes.RawArray("f", self.locs.y)
        n_workers = max(1, int(0.75 * multiprocessing.cpu_count()))
        pool = multiprocessing.Pool(
            n_workers, init_pool, (x, y, self.group_index)
        )
        self.window.status_bar.showMessage("Ready for processing!")
        status.close() 
Developer: jungmannlab, Project: picasso, Lines: 47, Source: average.py
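init_pool is not shown here; as in Example 1, it would run once per worker to publish the inherited shared arrays as globals. A hedged sketch (the names mirror the call above, but the project's real function may differ):

import numpy as np

def init_pool(x_, y_, group_index_):
    # Executed at pool start-up in each worker: expose the shared
    # coordinate buffers and the group index to the worker functions.
    global x, y, group_index
    x = np.frombuffer(x_, dtype=np.float32)
    y = np.frombuffer(y_, dtype=np.float32)
    group_index = group_index_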


Note: The multiprocessing.sharedctypes.RawArray examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors; please refer to the corresponding project's license before distributing or reusing it. Reproduction without permission is prohibited.