

Python utils.split_and_load Method Code Examples

This article collects typical usage examples of the Python method mxnet.gluon.utils.split_and_load from open-source code. If you are unsure what utils.split_and_load does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the enclosing module, mxnet.gluon.utils.


The following presents 14 code examples of the utils.split_and_load method, drawn from different open-source projects and ordered by popularity by default.
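Before the repository examples, a minimal self-contained sketch of the method itself may help. It assumes only stock MXNet and substitutes two CPU contexts for GPUs: split_and_load slices a batch along batch_axis and copies one shard to each context.

import mxnet as mx
from mxnet.gluon.utils import split_and_load

# Two CPU contexts stand in for multiple GPUs in this sketch.
ctx_list = [mx.cpu(0), mx.cpu(1)]

# A batch of 8 samples is split evenly along axis 0 into
# one 4-sample shard per context.
batch = mx.nd.random.uniform(shape=(8, 3, 32, 32))
shards = split_and_load(batch, ctx_list, batch_axis=0)
for shard in shards:
    print(shard.shape, shard.context)  # (4, 3, 32, 32) on cpu(0), then cpu(1)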

Example 1: split_load_kwargs

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def split_load_kwargs(inputs, kwargs, ctx_list, batch_axis=0):
    r"""Split with support for kwargs dictionary"""

    def split_map(obj):
        if isinstance(obj, NDArray):
            return split_and_load(obj, ctx_list, batch_axis, even_split=False)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(split_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(split_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(split_map, obj.items()))))
        return [obj for _ in ctx_list]

    inputs = split_map(inputs) if inputs else []
    kwargs = split_map(kwargs) if kwargs else []
    if len(inputs) < len(kwargs):
        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
    elif len(kwargs) < len(inputs):
        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
    inputs = tuple(inputs)
    kwargs = tuple(kwargs)
    return inputs, kwargs 
Developer: dmlc, Project: gluon-cv, Lines: 25, Source: parallel.py
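A hedged usage sketch for the function above; the toy tensors and CPU contexts are illustrative assumptions, not gluon-cv code. Positional arguments come back as one tuple per context and keyword arguments as one matching dict per context, ready to be dispatched to model replicas.

import mxnet as mx

ctx_list = [mx.cpu(0), mx.cpu(1)]
data = mx.nd.random.uniform(shape=(8, 3))
mask = mx.nd.ones((8, 1))

inputs, kwargs = split_load_kwargs((data,), {'mask': mask}, ctx_list)
for args, kw in zip(inputs, kwargs):
    print(args[0].shape, kw['mask'].context)  # (4, 3) on cpu(0), then cpu(1)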

Example 2: split_load_kwargs

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def split_load_kwargs(inputs, kwargs, ctx_list, batch_axis=0):
    r"""Split with support for kwargs dictionary"""
    def split_map(obj):
        if isinstance(obj, NDArray):
            return split_and_load(obj, ctx_list, batch_axis, even_split=False)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(split_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(split_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(split_map, obj.items()))))
        return [obj for _ in ctx_list]
    inputs = split_map(inputs) if inputs else []
    kwargs = split_map(kwargs) if kwargs else []
    if len(inputs) < len(kwargs):
        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
    elif len(kwargs) < len(inputs):
        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
    inputs = tuple(inputs)
    kwargs = tuple(kwargs)
    return inputs, kwargs 
Developer: Angzz, Project: panoptic-fpn-gluon, Lines: 23, Source: parallel.py

Example 3: test

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def test(test_net, ctx, test_loader, iteration, logger):
    # print("Start testing iter %d." % iteration)
    Loss = gloss.SoftmaxCrossEntropyLoss()
    metric = mx.metric.Accuracy()
    metric_top5 = mx.metric.TopKAccuracy(5)
    test_loss = mx.metric.Loss()
    for batch in test_loader:
        trans = gutils.split_and_load(batch[0], ctx)
        labels = gutils.split_and_load(batch[1], ctx)
        outputs = [test_net(tran) for tran in trans]
        losses = [Loss(output, label) for output, label in zip(outputs, labels)]
        test_loss.update(0, losses)
        metric.update(labels, outputs)
        metric_top5.update(labels, outputs)
    _, test_top1_acc = metric.get()
    _, test_top5_acc = metric_top5.get()
    _, test_loss = test_loss.get()

    if test_top1_acc >= 0.7:
        test_net.save_parameters('imagenet_param/test_iter%d_%.5f.param' % (iteration, test_top1_acc))
    test_str = ("test_Loss: %f, test top1-acc %f, test top5-acc %f." % (test_loss, test_top1_acc, test_top5_acc))
    logger.info(test_str) 
Developer: PistonY, Project: ResidualAttentionNetwork, Lines: 24, Source: train_imagenet.py

Example 4: split_load_kwargs

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def split_load_kwargs(inputs, kwargs, ctx_list, batch_axis=0):
    r"""Split with support for kwargs dictionary"""
    def split_map(obj):
        if isinstance(obj, NDArray):
            return split_and_load(obj, ctx_list, batch_axis, even_split=False)
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(split_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(split_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(split_map, obj.items()))))
        return [obj for _ in ctx_list]
    inputs = split_map(inputs) if inputs else []
    kwargs = split_map(kwargs) if kwargs else []
    if len(inputs) < len(kwargs):
        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
    elif len(kwargs) < len(inputs):
        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
    inputs = tuple(inputs)
    kwargs = tuple(kwargs)
    return inputs, kwargs 
Developer: zzdang, Project: cascade_rcnn_gluon, Lines: 23, Source: parallel.py

Example 5: train_epoch

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def train_epoch(self, inference, trainer, **kwargs):
        """
        Training with the dSNE loss
        :param inference: inference
        :param trainer: trainer of inference
        :return:
        """

        for Xs, Ys, Xt, Yt, _ in self.train_src_loader:
            Xs_lst = split_and_load(Xs, self.args.ctx, even_split=False)
            Ys_lst = split_and_load(Ys, self.args.ctx, even_split=False)
            Xt_lst = split_and_load(Xt, self.args.ctx, even_split=False)
            Yt_lst = split_and_load(Yt, self.args.ctx, even_split=False)

            if self.args.train_src:
                self.train_batch(Xs_lst, Ys_lst, Xt_lst, Yt_lst, inference, target=False)
                trainer.step(Xs.shape[0])

            self.train_batch(Xt_lst, Yt_lst, Xs_lst, Ys_lst, inference, target=True)

            trainer.step(Xt.shape[0])

            if self.args.log_itv > 0 and self.cur_iter % self.args.log_itv == 0:
                self.log_iter()
                if self.args.eval:
                    self.eval(inference, self.test_tgt_loader, target=True, epoch=False)

        self.log_epoch()
        if self.args.eval and self.cur_epoch > self.args.eval_epoch:
            self.eval(inference, self.test_tgt_loader, target=True, epoch=True) 
Developer: aws-samples, Project: d-SNE, Lines: 32, Source: training_sda.py

Example 6: batch_fn

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def batch_fn(batch, ctx):
    data_src = split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
    data_dst = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
    label = split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
    return data_src, data_dst, label 
Developer: osmr, Project: imgclsmob, Lines: 7, Source: eval_gl_mch.py
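A quick hedged call of this batch_fn; the zero tensors and CPU contexts are illustrative assumptions standing in for a real matching-dataset batch.

import mxnet as mx

ctx = [mx.cpu(0), mx.cpu(1)]
batch = (mx.nd.zeros((4, 3, 224, 224)),  # source images
         mx.nd.zeros((4, 3, 224, 224)),  # destination images
         mx.nd.zeros((4,)))              # match labels
data_src, data_dst, label = batch_fn(batch, ctx)  # each a list of two shards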

Example 7: get_batch_fn

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def get_batch_fn(ds_metainfo):
    """
    Get function for splitting data after extraction from data loader.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.

    Returns
    -------
    func
        Desired function.
    """
    if ds_metainfo.use_imgrec:
        def batch_fn(batch, ctx):
            data = split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            label = split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
            return data, label

        return batch_fn
    else:
        def batch_fn(batch, ctx):
            data = split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
            label = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
            return data, label

        return batch_fn 
Developer: osmr, Project: imgclsmob, Lines: 30, Source: dataset_utils.py
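A brief sketch of calling get_batch_fn; the SimpleNamespace is a hypothetical stand-in for imgclsmob's real DatasetMetaInfo class, used only so the snippet runs on its own.

from types import SimpleNamespace
import mxnet as mx

ds_metainfo = SimpleNamespace(use_imgrec=False)  # hypothetical stand-in
batch_fn = get_batch_fn(ds_metainfo)

ctx = [mx.cpu(0), mx.cpu(1)]
batch = (mx.nd.zeros((8, 3, 224, 224)), mx.nd.zeros((8,)))
data, label = batch_fn(batch, ctx)  # two shards of four samples each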

Example 8: _eval

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def _eval(self):
        n_correct = 0
        edit_dis = 0
        for images, labels in tqdm(self.validate_loader, desc='test model'):
            gpu_images = gutils.split_and_load(images, self.ctx)
            gpu_labels = gutils.split_and_load(labels, self.ctx)
            preds = [self.model(x)[0] for x in gpu_images]
            batch_dict = self.accuracy_batch(preds, gpu_labels, phase='VAL')
            n_correct += batch_dict['n_correct']
            edit_dis += batch_dict['edit_dis']
        return {'n_correct': n_correct, 'edit_dis': edit_dis} 
Developer: WenmuZhou, Project: crnn.gluon, Lines: 13, Source: trainer.py

Example 9: _get_batch

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def _get_batch(batch, ctx):
    """Return features and labels on ctx."""
    features, labels = batch
    if labels.dtype != features.dtype:
        labels = labels.astype(features.dtype)
    return (gutils.split_and_load(features, ctx),
            gutils.split_and_load(labels, ctx), features.shape[0]) 
Developer: d2l-ai, Project: d2l-zh, Lines: 9, Source: utils.py
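A sketch of how _get_batch is typically consumed in a d2l-style accuracy loop; net and data_iter are assumed placeholders for a trained Block and a DataLoader, not part of the snippet above.

import mxnet as mx

def evaluate_accuracy(data_iter, net, ctx):
    acc_sum, n = mx.nd.array([0.0]), 0
    for batch in data_iter:
        features, labels, batch_size = _get_batch(batch, ctx)
        for X, y in zip(features, labels):
            # accumulate correct predictions on the CPU
            acc_sum += (net(X).argmax(axis=1) == y).sum().copyto(mx.cpu())
        n += batch_size
    return acc_sum.asscalar() / n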

Example 10: _check_batchnorm_result

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def _check_batchnorm_result(input, num_devices=1, cuda=False):
    from mxnet.gluon.utils import split_and_load
    def _find_bn(module):
        if isinstance(module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):
            return module
        elif isinstance(module.module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):
            return module.module

        raise RuntimeError('BN not found')

    def _syncParameters(bn1, bn2, ctx):
        ctx = input.context
        bn2.gamma.set_data(bn1.gamma.data(ctx))
        bn2.beta.set_data(bn1.beta.data(ctx))
        bn2.running_mean.set_data(bn1.running_mean.data(ctx))
        bn2.running_var.set_data(bn1.running_var.data(ctx))

    input1 = input.copy()
    input2 = input.copy()

    if cuda:
        input1 = input.as_in_context(mx.gpu(0))
        ctx_list = [mx.gpu(i) for i in range(num_devices)]
    else:
        ctx_list = [mx.cpu(0) for _ in range(num_devices)]

    nch = input.shape[1]
    bn1 = mx.gluon.nn.BatchNorm(in_channels=nch)
    bn2 = mx.gluon.contrib.nn.SyncBatchNorm(in_channels=nch, num_devices=num_devices)

    bn1.initialize(ctx=ctx_list[0])
    bn2.initialize(ctx=ctx_list)

    # using the same values for gamma and beta
    #_syncParameters(_find_bn(bn1), _find_bn(bn2), ctx_list[0])

    input1.attach_grad()
    inputs2 = split_and_load(input2, ctx_list, batch_axis=0)
    for xi in inputs2:
        xi.attach_grad()

    with mx.autograd.record():
        output1 = bn1(input1)
        output2 = [bn2(xi) for xi in inputs2]
        loss1 = (output1 ** 2).sum()
        loss2 = [(output ** 2).sum() for output in output2]
        mx.autograd.backward(loss1)
        mx.autograd.backward(loss2)

    output2 = mx.nd.concat(*[output.as_in_context(input.context) for output in output2], dim=0)
    # assert forwarding
    assert_almost_equal(input1.asnumpy(), input2.asnumpy(), atol=1e-3, rtol=1e-3)
    assert_almost_equal(output1.asnumpy(), output2.asnumpy(), atol=1e-3, rtol=1e-3)
    assert_almost_equal(_find_bn(bn1).running_mean.data(ctx_list[0]).asnumpy(),
                        _find_bn(bn2).running_mean.data(ctx_list[0]).asnumpy(),
                        atol=1e-3, rtol=1e-3)
    assert_almost_equal(_find_bn(bn1).running_var.data(ctx_list[0]).asnumpy(),
                        _find_bn(bn2).running_var.data(ctx_list[0]).asnumpy(),
                        atol=1e-3, rtol=1e-3)
    input2grad = mx.nd.concat(*[output.grad.as_in_context(input.context) for output in inputs2], dim=0)
    assert_almost_equal(input1.grad.asnumpy(), input2grad.asnumpy(), atol=1e-3, rtol=1e-3) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 63, Source: test_gluon_gpu.py
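A minimal invocation sketch; the random input is an assumption, and a single CPU device keeps the check self-contained (GPU runs would pass cuda=True with num_devices > 1). The helper above also expects assert_almost_equal from mxnet.test_utils to be in scope.

import mxnet as mx
from mxnet.test_utils import assert_almost_equal

# Assumed toy input: batch of 8, 3 channels, 16x16 spatial size.
x = mx.nd.random.uniform(shape=(8, 3, 16, 16))
_check_batchnorm_result(x, num_devices=1, cuda=False)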

Example 11: eval

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def eval(self, inference, val_loader, log=True, target=True, epoch=True):
        """
        Evaluate the model
        :param inference: network
        :param val_loader: data loader
        :param log: log flag
        :param target: target flag for updating the record and log
        :param epoch: epoch flag for updating the record and log
        :return:
        """
        mtc_acc = Accuracy()
        mtc_acc.reset()
        # val_loader.reset()

        feature_nest, y_nest, y_hat_nest = [], [], []
        for X, Y in val_loader:
            X_lst = split_and_load(X, self.args.ctx, even_split=False)
            Y_lst = split_and_load(Y, self.args.ctx, even_split=False)

            for x, y in zip(X_lst, Y_lst):
                y_hat, features = inference(x)
                # update metric
                mtc_acc.update([y], [y_hat])

                y_nest.extend(y.asnumpy())
                feature_nest.extend(features.asnumpy())
                y_hat_nest.extend(y_hat.asnumpy())

        feature_nest = np.array(feature_nest)
        y_nest = np.array(y_nest).astype(int)
        y_hat_nest = np.array(y_hat_nest)

        if log:
            target_key = 'Tgt' if target else 'Src'
            epoch_key = 'Epoch' if epoch else 'Iter'
            record = self.cur_epoch if epoch else self.cur_iter

            if mtc_acc.get()[1] > self.records[epoch_key]['%s-Acc' % target_key]:
                if target:
                    self.records[epoch_key][epoch_key] = record
                self.records[epoch_key]['%s-Acc' % target_key] = mtc_acc.get()[1]
                self.records[epoch_key]['%s-label' % target_key] = y_nest
                self.records[epoch_key]['%s-preds' % target_key] = y_hat_nest
                self.records[epoch_key]['%s-features' % target_key] = feature_nest

                self.save_params(inference, 0, epoch_key)

            self.logger.update_scalar('%s [%d]: Eval-Acc-%s' % (epoch_key, record, target_key), mtc_acc.get()[1])
            if self.sw:
                self.sw.add_scalar('Acc/Eval-%s-Acc-%s' % (epoch_key, target_key), mtc_acc.get()[1], global_step=record)  # epoch_key ('Epoch'/'Iter'), not the boolean epoch flag

        return mtc_acc.get()[1], y_nest, y_hat_nest, feature_nest 
Developer: aws-samples, Project: d-SNE, Lines: 54, Source: training_sda.py

Example 12: _extract_features

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def _extract_features(self, preprocessed_data, verbose=True):
        """
        Parameters
        ----------
        preprocessed_data : SArray

        Returns
        -------
        numpy array containing the deep features
        """
        last_progress_update = _time.time()
        progress_header_printed = False

        deep_features = _tc.SArrayBuilder(_np.ndarray)

        if _mac_ver() < (10, 14):
            # Use MXNet
            preprocessed_data = mx.nd.array(preprocessed_data)

            ctx_list = self.ctx
            if len(preprocessed_data) < len(ctx_list):
                ctx_list = ctx_list[:len(preprocessed_data)]
            batches = utils.split_and_load(preprocessed_data, ctx_list=ctx_list, even_split=False)

            for i, cur_batch in enumerate(batches):
                y = self.vggish_model.forward(cur_batch).asnumpy()
                for j in y:
                    deep_features.append(j)

                # If `verbose` is set, print a progress update about every 20s
                if verbose and _time.time() - last_progress_update >= 20:
                    if not progress_header_printed:
                        print("Extracting deep features -")
                        progress_header_printed = True
                    print("Extracted {} of {} batches".format(i, len(batches)))
                    last_progress_update = _time.time()
            if progress_header_printed:
                print("Extracted {} of {} batches\n".format(len(batches), len(batches)))

        else:
            # Use Core ML
            for i, cur_example in enumerate(preprocessed_data):
                for cur_frame in cur_example:
                    x = {'input1': [cur_frame]}
                    y = self.vggish_model.predict(x)
                    deep_features.append(y['output1'])

                # If `verbose` is set, print a progress update about every 20s
                if verbose and _time.time() - last_progress_update >= 20:
                    if not progress_header_printed:
                        print("Extracting deep features -")
                        progress_header_printed = True
                    print("Extracted {} of {}".format(i, len(preprocessed_data)))
                    last_progress_update = _time.time()
            if progress_header_printed:
                print("Extracted {} of {}\n".format(len(preprocessed_data), len(preprocessed_data)))

        return deep_features.close() 
Developer: yulingtianxia, Project: AudioEmotion, Lines: 60, Source: _audio_feature_extractor.py

Example 13: _train_epoch

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def _train_epoch(self, epoch):
        epoch_start = time.time()
        batch_start = time.time()
        train_loss = 0.
        for i, (images, labels) in enumerate(self.train_loader):
            if i >= self.train_loader_len:
                break
            self.global_step += 1
            cur_batch_size = images.shape[0]
            # split the images and ground-truth labels across the GPUs
            gpu_images = gutils.split_and_load(images, self.ctx)
            gpu_labels = gutils.split_and_load(labels, self.ctx)
            # the data has now been converted and moved to the GPUs

            # forward
            with autograd.record():
                preds = [self.model(x)[0] for x in gpu_images]
                ls = [self.criterion(pred, gpu_y) for pred, gpu_y in zip(preds, gpu_labels)]
            # backward
            for l in ls:
                l.backward()
            self.trainer.step(cur_batch_size)

            # record loss and accuracy for logging
            loss = sum([x.sum().asscalar() for x in ls]) / sum([x.shape[0] for x in ls])
            train_loss += loss

            batch_dict = self.accuracy_batch(preds, gpu_labels, phase='TRAIN')
            acc = batch_dict['n_correct'] / cur_batch_size
            edit_dis = batch_dict['edit_dis'] / cur_batch_size

            if self.tensorboard_enable:
                # write tensorboard
                self.writer.add_scalar('TRAIN/ctc_loss', loss, self.global_step)
                self.writer.add_scalar('TRAIN/acc', acc, self.global_step)
                self.writer.add_scalar('TRAIN/edit_distance', edit_dis, self.global_step)
                self.writer.add_scalar('TRAIN/lr', self.trainer.learning_rate, self.global_step)

            if (i + 1) % self.display_interval == 0:
                batch_time = time.time() - batch_start
                self.logger.info(
                    '[{}/{}], [{}/{}], global_step: {}, Speed: {:.1f} samples/sec, acc:{:.4f}, loss:{:.4f}, edit_dis:{:.4f} lr:{}, time:{:.2f}'.format(
                        epoch, self.epochs, i + 1, self.train_loader_len, self.global_step, self.display_interval * cur_batch_size / batch_time,
                        acc, loss, edit_dis, self.trainer.learning_rate, batch_time))
                batch_start = time.time()
        return {'train_loss': train_loss / self.train_loader_len, 'time': time.time() - epoch_start, 'epoch': epoch} 
Developer: WenmuZhou, Project: crnn.gluon, Lines: 48, Source: trainer.py

Example 14: train

# Required import: from mxnet.gluon import utils [as alias]
# Or: from mxnet.gluon.utils import split_and_load [as alias]
def train(train_net, iterations, trainer, ctx, lr_period: tuple, lr_decay, train_loader, test_loader, cat_interval):
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_file_path = 'Attention56_train.log'
    fh = logging.FileHandler(log_file_path)
    logger.addHandler(fh)

    train_net.collect_params().reset_ctx(ctx)
    train_gen = inf_train_gen(train_loader)
    Loss = gloss.SoftmaxCrossEntropyLoss()
    metric = mx.metric.Accuracy()
    metric_top5 = mx.metric.TopKAccuracy(5)
    train_loss = mx.metric.Loss()
    prev_time = datetime.datetime.now()

    metric.reset()
    train_loss.reset()

    for iteration in range(int(iterations)):
        batch = next(train_gen)
        trans = gutils.split_and_load(batch.data[0], ctx)
        labels = gutils.split_and_load(batch.label[0], ctx)

        with autograd.record():
            outputs = [train_net(tran) for tran in trans]
            losses = [Loss(output, label) for output, label in zip(outputs, labels)]

        for loss in losses:
            loss.backward()

        trainer.step(batch_size)  # batch_size is a module-level global in the original script
        train_loss.update(0, losses)
        metric.update(labels, outputs)
        metric_top5.update(labels, outputs)
        if iteration % cat_interval == cat_interval - 1:
            cur_time = datetime.datetime.now()
            time_str = format_time(prev_time, cur_time)
            _, top1_acc = metric.get()
            _, top5_acc = metric_top5.get()
            _, epoch_loss = train_loss.get()
            metric.reset()
            metric_top5.reset()
            train_loss.reset()
            epoch_str = ("Iter %d. Loss: %.5f, Train top1-acc %f, Train top5-acc %f."
                         % (iteration, epoch_loss, top1_acc, top5_acc))
            prev_time = cur_time
            logger.info(epoch_str + time_str + 'lr ' + str(trainer.learning_rate))
            test(train_net, ctx, test_loader, iteration, logger)
        if iteration in lr_period:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay) 
Developer: PistonY, Project: ResidualAttentionNetwork, Lines: 54, Source: train_imagenet.py


Note: The mxnet.gluon.utils.split_and_load examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from community-contributed open-source projects, and copyright remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.