

Python nd.empty Method Code Examples

This article collects typical usage examples of the Python method mxnet.nd.empty, drawn from open-source projects. If you are unsure what nd.empty does, how to call it, or what it looks like in practice, the hand-picked examples below should help. You can also explore other usage examples from the mxnet.nd module.


Four code examples of nd.empty are shown below, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code samples.
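Before the full examples, here is a minimal sketch of what nd.empty itself does: it allocates an uninitialized NDArray with a given shape, dtype, and context, which is then typically filled in place, for instance through the out= argument of another operator. The shapes and values below are illustrative only.

import mxnet as mx
from mxnet import nd

# allocate an uninitialized 2x3 buffer on the CPU; its contents are arbitrary
# until something writes into it, so do not read it before filling it
buf = nd.empty((2, 3), dtype='float32', ctx=mx.cpu())

# common pattern: reuse the pre-allocated buffer as the output of an operator
a = nd.ones((2, 3))
b = nd.ones((2, 3))
nd.elemwise_add(a, b, out=buf)
print(buf)  # buf now holds a + b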

Example 1: default_mp_pad_batchify_fn

# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import empty [as alias]
import numpy as np
from mxnet import context, nd

def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        # pre-allocate an uninitialized shared-memory buffer and fill it with nd.stack
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0))
Developer ID: dmlc, Project: gluon-cv, Lines of code: 19, Source file: dataloader.py
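For orientation, here is a small hypothetical call exercising the NDArray branch above, where nd.empty pre-allocates a shared-memory buffer that nd.stack then fills; the sample shapes are made up for illustration.

from mxnet import nd

samples = [nd.ones((3, 4)), nd.zeros((3, 4))]  # two same-shaped samples
batch = default_mp_pad_batchify_fn(samples)
print(batch.shape)  # (2, 3, 4), stored in the 'cpu_shared' context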

Example 2: __next__

# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import empty [as alias]
# method of a shuffled-iterator class in parallelized_loader.py; itertools,
# random and DataEntry are available at module level there
def __next__(self) -> DataEntry:
    # if the buffer is empty, fill it first
    # (this should only be executed in the first round)
    if not self.shuffle_buffer:
        self.shuffle_buffer = list(
            itertools.islice(
                self.base_iterator, self.shuffle_buffer_length
            )
        )
    # if the buffer is still empty, every element has been used;
    # signal the end of the iterator
    if not self.shuffle_buffer:
        raise StopIteration
    # pick an element at a random index to return, and refill that slot
    # from the sequential base iterator
    idx = random.randint(0, len(self.shuffle_buffer) - 1)
    next_sample = self.shuffle_buffer[idx]

    # replace the chosen slot with the next element if the underlying
    # iterator has not finished; otherwise delete the slot
    try:
        self.shuffle_buffer[idx] = next(self.base_iterator)
    except StopIteration:
        del self.shuffle_buffer[idx]

    return next_sample
Developer ID: awslabs, Project: gluon-ts, Lines of code: 28, Source file: parallelized_loader.py
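This method belongs to a shuffled-iterator class in gluon-ts; the following is a minimal self-contained sketch of the same shuffle-buffer idea. The class and parameter names here are illustrative, not the library's API.

import itertools
import random

class ShuffleBufferIterator:
    """Simplified stand-in for the shuffle-buffer pattern shown above."""

    def __init__(self, base_iterable, buffer_length):
        self.base_iterator = iter(base_iterable)
        self.shuffle_buffer_length = buffer_length
        self.shuffle_buffer = []

    def __iter__(self):
        return self

    def __next__(self):
        # fill the buffer on first use
        if not self.shuffle_buffer:
            self.shuffle_buffer = list(
                itertools.islice(self.base_iterator, self.shuffle_buffer_length)
            )
        if not self.shuffle_buffer:
            raise StopIteration
        # return a random buffered element and refill its slot
        idx = random.randint(0, len(self.shuffle_buffer) - 1)
        next_sample = self.shuffle_buffer[idx]
        try:
            self.shuffle_buffer[idx] = next(self.base_iterator)
        except StopIteration:
            del self.shuffle_buffer[idx]
        return next_sample

print(list(ShuffleBufferIterator(range(10), buffer_length=4)))  # 0..9, pseudo-shuffled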

Example 3: clip_grad

# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import empty [as alias]
from typing import Generator, List, Tuple, Union

from mxnet import nd
from mxnet.ndarray import NDArray

# GradientClippingMethod and global_norm are defined elsewhere in coach's utils.py

def clip_grad(
        grads: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]],
        clip_method: GradientClippingMethod,
        clip_val: float,
        inplace=True) -> List[NDArray]:
    """
    Clip gradient values, optionally in place.
    :param grads: gradients to be clipped
    :param clip_method: clipping method
    :param clip_val: clipping value, interpreted differently depending on the clipping method
    :param inplace: modify grads in place if True, otherwise write the results into new NDArrays
    :return: clipped gradients
    """
    output = list(grads) if inplace else list(nd.empty(g.shape) for g in grads)
    if clip_method == GradientClippingMethod.ClipByGlobalNorm:
        norm_unclipped_grads = global_norm(grads)
        scale = clip_val / (norm_unclipped_grads.asscalar() + 1e-8)  # todo: use branching operators?
        if scale < 1.0:
            for g, o in zip(grads, output):
                nd.broadcast_mul(g, nd.array([scale]), out=o)
    elif clip_method == GradientClippingMethod.ClipByValue:
        for g, o in zip(grads, output):
            g.clip(-clip_val, clip_val, out=o)
    elif clip_method == GradientClippingMethod.ClipByNorm:
        for g, o in zip(grads, output):
            nd.broadcast_mul(g, nd.minimum(1.0, clip_val / (g.norm() + 1e-8)), out=o)
    else:
        raise KeyError('Unsupported gradient clipping method')
    return output
Developer ID: NervanaSystems, Project: coach, Lines of code: 31, Source file: utils.py
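The GradientClippingMethod enum and the global_norm helper come from the coach project itself. As a rough standalone sketch, the ClipByGlobalNorm branch does essentially the following with plain mxnet; the threshold and gradient values here are arbitrary.

from mxnet import nd

grads = [nd.array([3.0, 4.0]), nd.array([0.0, 12.0])]  # made-up gradients
clip_val = 5.0

# global norm = sqrt of the sum of squared elements across all gradients
global_grad_norm = nd.sqrt(sum((g ** 2).sum() for g in grads))
scale = clip_val / (global_grad_norm.asscalar() + 1e-8)
if scale < 1.0:
    grads = [g * scale for g in grads]  # rescale so the global norm is at most clip_val

print([g.asnumpy() for g in grads])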

Example 4: _worker_fn

# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import empty [as alias]
import io
import itertools
import pickle
import time
from multiprocessing.reduction import ForkingPickler
from typing import Callable

# DType, MPWorkerInfo, _WorkerData and _worker_reset_iterator are provided
# elsewhere in gluon-ts's parallelized_loader module

def _worker_fn(
    batch_size: int,
    batchify_fn: Callable,
    dtype: DType,
    is_train: bool,
    cyclic: bool,
    cycle_num: int,
    shuffle_buffer_length: int,
):
    """Function for processing data in a worker process."""

    # initialize, or reset, the iterator at each cycle
    if (_WorkerData.iterator_latest_reset_cycle < cycle_num) and (
        _WorkerData.iterator_latest_reset_cycle == 0 or not cyclic
    ):
        _worker_reset_iterator(
            is_train, cyclic, cycle_num, shuffle_buffer_length
        )

    # retrieve the samples that will be batched
    batch_samples = list(
        itertools.islice(_WorkerData.dataset_iterator, batch_size)
    )
    # batch the samples, if there were any
    if batch_samples:
        success = True
        batch = batchify_fn(
            data=batch_samples, dtype=dtype, multi_processing=True
        )
    else:
        # the second time the worker cannot provide a batch, delay calling it again;
        # on the first exhaustion it must not be delayed, since it needs to signal depletion.
        # don't make the penalty too high, since that delays rescheduling of non-empty iterators
        if _WorkerData.iterator_exhausted_indicator:
            time.sleep(0.05)
        else:
            _WorkerData.iterator_exhausted_indicator = True
        success = False
        batch = None

    buf = io.BytesIO()
    ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(
        (success, MPWorkerInfo.worker_id, batch)
    )
    return buf.getvalue()
Developer ID: awslabs, Project: gluon-ts, Lines of code: 47, Source file: parallelized_loader.py
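The worker returns its result as raw pickled bytes; the consuming process can recover the (success, worker_id, batch) triple with a plain pickle.loads. Below is a small self-contained sketch of that round trip; the payload is a made-up stand-in for a real batch, and the actual consuming code in gluon-ts is more involved.

import io
import pickle
from multiprocessing.reduction import ForkingPickler

# simulate what _worker_fn returns: a pickled (success, worker_id, batch) triple
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump((True, 0, {"target": [1.0, 2.0]}))
payload = buf.getvalue()

# the receiving process recovers the triple with a plain pickle.loads
success, worker_id, batch = pickle.loads(payload)
print(success, worker_id, batch)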


Note: The mxnet.nd.empty examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when redistributing or using the code; do not reproduce this article without permission.