

Python utils.Logger Method Code Examples

This article collects and summarizes typical usage examples of the utils.Logger method in Python. If you are wondering how to use the Python utils.Logger method, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the utils module in which the method is defined.


The following presents 11 code examples of the utils.Logger method, sorted by popularity by default. Each example is taken from an open-source project, which is credited at the end of the snippet.
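Note that utils.Logger is not part of the standard library; every project below defines its own Logger class in a local utils module, so the constructor arguments and methods vary from example to example. As a rough orientation, here is a minimal sketch of the simplest variant that appears later on (Examples 8 and 11), where Logger(path) opens a log file and write(msg) both prints a message and appends it to that file. The class body is an illustrative assumption, not the implementation used by any particular project.

import os

class Logger:
    """Minimal file-and-console logger (illustrative sketch only)."""
    def __init__(self, output_path):
        # Create parent directories if needed, then open the log file in append mode.
        os.makedirs(os.path.dirname(output_path) or '.', exist_ok=True)
        self.log_file = open(output_path, 'a')

    def write(self, msg):
        # Echo to stdout and persist to the log file.
        print(msg)
        self.log_file.write(msg + '\n')
        self.log_file.flush()

# Usage mirroring Examples 8 and 11:
# logger = Logger(os.path.join(output, 'log.txt'))
# logger.write('epoch %d, time: %.2f' % (epoch, time.time() - t))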

Example 1: main

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def main(config, resume):
    train_logger = Logger()

    # DATA LOADERS
    train_loader = get_instance(dataloaders, 'train_loader', config)
    val_loader = get_instance(dataloaders, 'val_loader', config)

    # MODEL
    model = get_instance(models, 'arch', config, train_loader.dataset.num_classes)
    print(f'\n{model}\n')

    # LOSS
    loss = getattr(losses, config['loss'])(ignore_index = config['ignore_index'])

    # TRAINING
    trainer = Trainer(
        model=model,
        loss=loss,
        resume=resume,
        config=config,
        train_loader=train_loader,
        val_loader=val_loader,
        train_logger=train_logger)

    trainer.train() 
Developer ID: yassouali, Project: pytorch_segmentation, Lines of code: 27, Source file: train.py
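In this example, as in Examples 6 and 10, Logger() is constructed with no arguments and handed to the Trainer as train_logger, which suggests an in-memory record of per-epoch results rather than a file writer. A minimal sketch of such a logger is shown below; the entry format is an assumption, and the actual class in pytorch_segmentation may differ.

import json

class Logger:
    """In-memory training log that stores one entry per epoch (illustrative sketch)."""
    def __init__(self):
        self.entries = {}

    def add_entry(self, entry):
        # Entries are keyed by insertion order (1-based), e.g. {'epoch': 3, 'val_loss': 0.21}.
        self.entries[len(self.entries) + 1] = entry

    def __str__(self):
        # Pretty-print the whole training history as JSON.
        return json.dumps(self.entries, sort_keys=True, indent=4)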

Example 2: __init__

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def __init__(self, hps, data_loader, g_mode, enc_mode, log_dir='./log/'):
		self.hps = hps
		self.data_loader = data_loader
		self.model_kept = []
		self.max_keep = hps.max_to_keep
		self.logger = Logger(log_dir)
		self.g_mode = g_mode
		self.enc_mode = enc_mode
		if self.g_mode != 'naive':
			self.shift_c = to_var(torch.from_numpy(np.array(
				[int(hps.n_speakers - hps.n_target_speakers) for _ in range(hps.batch_size)])),
				requires_grad=False)
		self.build_model() 
Developer ID: andi611, Project: ZeroSpeech-TTS-without-T, Lines of code: 14, Source file: trainer.py
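Here Logger(log_dir) receives a directory and stays attached to the trainer for the whole run, which is the typical signature of a TensorBoard-style scalar writer. The sketch below is a guess at that interface using torch.utils.tensorboard; the real class in ZeroSpeech-TTS-without-T may wrap a different backend (for example tensorboardX) and expose other methods.

from torch.utils.tensorboard import SummaryWriter

class Logger:
    """Thin wrapper around a TensorBoard SummaryWriter (illustrative sketch)."""
    def __init__(self, log_dir='./log/'):
        self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        # Record a scalar so it can be plotted over training iterations.
        self.writer.add_scalar(tag, value, step)
        self.writer.flush()

# Hypothetical usage inside the trainer:
# self.logger.scalar_summary('loss/G', g_loss.item(), iteration)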

Example 3: start_up

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def start_up(*args):
        logger = Logger("local_ems_start_up")
        # Obtain static information of the local ems
        # Update the local EMS parameters
        local_models = {"DG": generators.Generator_AC.copy(),
                        "UG": generators.Generator_AC.copy(),
                        "Load_ac": loads.Load_AC.copy(),
                        "Load_uac": loads.Load_AC.copy(),
                        "BIC": convertors.BIC.copy(),
                        "ESS": energy_storage_systems.BESS.copy(),
                        "PV": generators.Generator_RES.copy(),
                        "WP": generators.Generator_RES.copy(),
                        "Load_dc": loads.Load_DC.copy(),
                        "Load_udc": loads.Load_DC.copy(),
                        "PMG": 0,
                        "V_DC": 0}
        local_models["PV"]["NPV"] = local_models["PV"]["PMAX"]
        local_models["WP"]["NWP"] = local_models["WP"]["PMAX"]

        T_short = default_look_ahead_time_step["Look_ahead_time_opf_time_step"]  # Look-ahead time step for short-term operation
        T_middle = default_look_ahead_time_step["Look_ahead_time_ed_time_step"]  # Look-ahead time step for middle-term operation
        T_long = default_look_ahead_time_step["Look_ahead_time_uc_time_step"]  # Look-ahead time step for long-term operation
        # Update information
        local_model_short = deepcopy(local_models)
        local_model_middle = deepcopy(local_models)
        local_model_long = deepcopy(local_models)

        # Generate the middle-term operation model for the local EMS; this information should be updated from the resource manager's database
        local_model_middle["UG"]["GEN_STATUS"] = [local_model_middle["UG"]["GEN_STATUS"]] * T_middle
        local_model_middle["DG"]["GEN_STATUS"] = [local_model_middle["DG"]["GEN_STATUS"]] * T_middle
        local_model_middle["PV"]["NPV"] = [local_model_middle["PV"]["NPV"]] * T_middle
        local_model_middle["PV"]["PMAX"] = [local_model_middle["PV"]["PMAX"]] * T_middle
        local_model_middle["WP"]["NWP"] = [local_model_middle["WP"]["NWP"]] * T_middle
        local_model_middle["WP"]["PMAX"] = [local_model_middle["WP"]["PMAX"]] * T_middle
        local_model_middle["Load_ac"]["STATUS"] = [local_model_middle["Load_ac"]["STATUS"]] * T_middle
        local_model_middle["Load_uac"]["STATUS"] = [local_model_middle["Load_uac"]["STATUS"]] * T_middle
        local_model_middle["Load_dc"]["STATUS"] = [local_model_middle["Load_dc"]["STATUS"]] * T_middle
        local_model_middle["Load_udc"]["STATUS"] = [local_model_middle["Load_udc"]["STATUS"]] * T_middle
        local_model_middle["BIC"]["STATUS"] = [local_model_middle["BIC"]["STATUS"]] * T_middle
        local_model_middle["ESS"]["STATUS"] = [local_model_middle["ESS"]["STATUS"]] * T_middle
        # Generate the long-term operation model for the local EMS
        local_model_long["UG"]["GEN_STATUS"] = [local_model_long["UG"]["GEN_STATUS"]] * T_long
        local_model_long["DG"]["GEN_STATUS"] = [local_model_long["DG"]["GEN_STATUS"]] * T_long
        local_model_long["PV"]["NPV"] = [local_model_long["PV"]["NPV"]] * T_long
        local_model_long["PV"]["PMAX"] = [local_model_long["PV"]["PMAX"]] * T_long
        local_model_long["WP"]["NWP"] = [local_model_long["WP"]["NWP"]] * T_long
        local_model_long["WP"]["PMAX"] = [local_model_long["WP"]["PMAX"]] * T_long
        local_model_long["Load_ac"]["STATUS"] = [local_model_long["Load_ac"]["STATUS"]] * T_long
        local_model_long["Load_uac"]["STATUS"] = [local_model_long["Load_uac"]["STATUS"]] * T_long
        local_model_long["Load_dc"]["STATUS"] = [local_model_long["Load_dc"]["STATUS"]] * T_long
        local_model_long["Load_udc"]["STATUS"] = [local_model_long["Load_udc"]["STATUS"]] * T_long
        local_model_long["BIC"]["STATUS"] = [local_model_long["BIC"]["STATUS"]] * T_long
        local_model_long["ESS"]["STATUS"] = [local_model_long["ESS"]["STATUS"]] * T_long


        return local_model_short, local_model_middle, local_model_long 
Developer ID: Matrixeigs, Project: energy_management_system, Lines of code: 58, Source file: start_up_lems.py
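In this example the Logger is constructed with a name ("local_ems_start_up") rather than a path, which reads like a named application logger. The sketch below assumes it wraps Python's standard logging module; that is only an assumption, and the actual utils.Logger in energy_management_system may configure handlers and formats differently.

import logging

class Logger:
    """Named console logger built on the standard logging module (illustrative sketch)."""
    def __init__(self, name, level=logging.INFO):
        self.logger = logging.getLogger(name)
        self.logger.setLevel(level)
        if not self.logger.handlers:
            # Attach a single console handler with a timestamped format.
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s'))
            self.logger.addHandler(handler)

    def info(self, msg):
        self.logger.info(msg)

    def error(self, msg):
        self.logger.error(msg)

# Hypothetical usage: logger = Logger("local_ems_start_up"); logger.info("Local EMS models initialised")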

Example 4: __init__

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def __init__(self, type, dataset, split, lr, diter, vis_screen, save_path, l1_coef, l2_coef, pre_trained_gen, pre_trained_disc, batch_size, num_workers, epochs):
        with open('config.yaml', 'r') as f:
            config = yaml.load(f, Loader=yaml.SafeLoader)  # explicit Loader avoids warnings/errors on newer PyYAML

        self.generator = torch.nn.DataParallel(gan_factory.generator_factory(type).cuda())
        self.discriminator = torch.nn.DataParallel(gan_factory.discriminator_factory(type).cuda())

        if pre_trained_disc:
            self.discriminator.load_state_dict(torch.load(pre_trained_disc))
        else:
            self.discriminator.apply(Utils.weights_init)

        if pre_trained_gen:
            self.generator.load_state_dict(torch.load(pre_trained_gen))
        else:
            self.generator.apply(Utils.weights_init)

        if dataset == 'birds':
            self.dataset = Text2ImageDataset(config['birds_dataset_path'], split=split)
        elif dataset == 'flowers':
            self.dataset = Text2ImageDataset(config['flowers_dataset_path'], split=split)
        else:
            print('Dataset not supported, please select either birds or flowers.')
            exit()

        self.noise_dim = 100
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.lr = lr
        self.beta1 = 0.5
        self.num_epochs = epochs
        self.DITER = diter

        self.l1_coef = l1_coef
        self.l2_coef = l2_coef

        self.data_loader = DataLoader(self.dataset, batch_size=self.batch_size, shuffle=True,
                                num_workers=self.num_workers)

        self.optimD = torch.optim.Adam(self.discriminator.parameters(), lr=self.lr, betas=(self.beta1, 0.999))
        self.optimG = torch.optim.Adam(self.generator.parameters(), lr=self.lr, betas=(self.beta1, 0.999))

        self.logger = Logger(vis_screen)
        self.checkpoints_path = 'checkpoints'
        self.save_path = save_path
        self.type = type 
Developer ID: aelnouby, Project: Text-to-Image-Synthesis, Lines of code: 48, Source file: trainer.py
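Logger(vis_screen) takes what appears to be the name of a visdom environment, so the logger presumably pushes loss curves (and possibly generated images) to a visdom server; that reading is an assumption based on the parameter name. A minimal sketch of such a logger is given below; the plotting helpers in Text-to-Image-Synthesis are likely different.

import numpy as np
from visdom import Visdom

class Logger:
    """Streams GAN training curves to a visdom environment (illustrative sketch)."""
    def __init__(self, vis_screen):
        self.viz = Visdom(env=vis_screen)
        self.loss_win = None

    def plot_losses(self, d_loss, g_loss, epoch):
        # Append one point per epoch to a two-line plot: discriminator vs. generator loss.
        y = np.array([[d_loss, g_loss]])
        x = np.array([[epoch, epoch]])
        if self.loss_win is None:
            self.loss_win = self.viz.line(Y=y, X=x, opts=dict(legend=['D loss', 'G loss']))
        else:
            self.viz.line(Y=y, X=x, win=self.loss_win, update='append')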

Example 5: get_val_utils

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def get_val_utils(opt):
    normalize = get_normalize_method(opt.mean, opt.std, opt.no_mean_norm,
                                     opt.no_std_norm)
    spatial_transform = [
        Resize(opt.sample_size),
        CenterCrop(opt.sample_size),
        ToTensor()
    ]
    if opt.input_type == 'flow':
        spatial_transform.append(PickFirstChannels(n=2))
    spatial_transform.extend([ScaleValue(opt.value_scale), normalize])
    spatial_transform = Compose(spatial_transform)

    temporal_transform = []
    if opt.sample_t_stride > 1:
        temporal_transform.append(TemporalSubsampling(opt.sample_t_stride))
    temporal_transform.append(
        TemporalEvenCrop(opt.sample_duration, opt.n_val_samples))
    temporal_transform = TemporalCompose(temporal_transform)

    val_data, collate_fn = get_validation_data(opt.video_path,
                                               opt.annotation_path, opt.dataset,
                                               opt.input_type, opt.file_type,
                                               spatial_transform,
                                               temporal_transform)
    if opt.distributed:
        val_sampler = torch.utils.data.distributed.DistributedSampler(
            val_data, shuffle=False)
    else:
        val_sampler = None
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=(opt.batch_size //
                                                         opt.n_val_samples),
                                             shuffle=False,
                                             num_workers=opt.n_threads,
                                             pin_memory=True,
                                             sampler=val_sampler,
                                             worker_init_fn=worker_init_fn,
                                             collate_fn=collate_fn)

    if opt.is_master_node:
        val_logger = Logger(opt.result_path / 'val.log',
                            ['epoch', 'loss', 'acc'])
    else:
        val_logger = None

    return val_loader, val_logger 
Developer ID: kenshohara, Project: 3D-ResNets-PyTorch, Lines of code: 49, Source file: main.py

Example 6: __init__

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def __init__(self, model='xception', n_classes=10, img_size=(224, 224), data_dir = None,
                    pretrained=False, pretrained_path="./pretrained/", n_gpu=1,
                    default_init=True):
        """ 初始化時隻初始化model model可以是string 也可以是自己創建的model """
        super(classifier, self).__init__()

        self.resume = None
        self.data_loader = None
        self.valid_data_loader = None
        self.train_logger = Logger()

        if isinstance(model, str):
            arch = {
                    "type": model, 
                    "args": {"n_class": n_classes, "img_size": img_size, 
                    "pretrained": pretrained, "pretrained_path": pretrained_path} 
                    }
            self.config = {"name": model, "arch": arch, "n_gpu":n_gpu}
            self.model = get_instance(model_zoo, 'arch', self.config)       
            # self.model = getattr(model_zoo, model)(n_classes, img_size, pretrained, pretrained_path)
        elif callable(model):
            model_name = model.__class__.__name__
            arch = {
                    "type": model_name, 
                    "args": {"n_class": n_classes, "img_size": img_size} 
                    }
            self.config = {"name": model_name, "arch": arch, "n_gpu":n_gpu}
            self.model = model         
        else:
            self.logger.info("input type is invalid, please set model as str or a callable object")
            raise Exception("model: wrong input error")

        if default_init:
            # self.loss = torch.nn.CrossEntropyLoss()  # has the same effect
            self.config["loss"] = "cls_loss"
            self.loss = getattr(module_loss, self.config["loss"])
            
            self.config["metrics"] = ["accuracy"]
            self.metrics = [getattr(module_metric, met) for met in self.config['metrics']]

            # build optimizer
            self.config["optimizer"] = {"type": "Adam", "args":{"lr": 0.0003, "weight_decay": 0.00003}}
            optimizer_params = filter(lambda p: p.requires_grad, self.model.parameters())
            self.optimizer = get_instance(torch.optim, 'optimizer', self.config, optimizer_params)

            self.config["lr_scheduler"] = {"type": "StepLR", "args": {"step_size": 50, "gamma": 0.2 }}
            self.lr_scheduler = get_instance(torch.optim.lr_scheduler, 'lr_scheduler', 
                self.config, self.optimizer)

            self.set_trainer()

        if data_dir:
            self.autoset_dataloader(data_dir, batch_size=64, shuffle=True, validation_split=0.2, 
                num_workers=4, transform = None) 
Developer ID: daili0015, Project: ModelFeast, Lines of code: 56, Source file: classifier.py

Example 7: main

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def main(args):
    train_loader, test_loader = load_dataset(args.label, args.batch_size)
    model = ShakePyramidNet(depth=args.depth, alpha=args.alpha, label=args.label)
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True

    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=0.9,
                    weight_decay=args.weight_decay,
                    nesterov=args.nesterov)
    scheduler = optim.lr_scheduler.MultiStepLR(opt, [args.epochs // 2, args.epochs * 3 // 4])
    loss_func = nn.CrossEntropyLoss().cuda()

    headers = ["Epoch", "LearningRate", "TrainLoss", "TestLoss", "TrainAcc.", "TestAcc."]
    logger = utils.Logger(args.checkpoint, headers)
    for e in range(args.epochs):
        scheduler.step()
        model.train()
        train_loss, train_acc, train_n = 0, 0, 0
        bar = tqdm(total=len(train_loader), leave=False)
        for x, t in train_loader:
            x, t = Variable(x.cuda()), Variable(t.cuda())
            y = model(x)
            loss = loss_func(y, t)
            opt.zero_grad()
            loss.backward()
            opt.step()

            train_acc += utils.accuracy(y, t).item()
            train_loss += loss.item() * t.size(0)
            train_n += t.size(0)
            bar.set_description("Loss: {:.4f}, Accuracy: {:.2f}".format(
                train_loss / train_n, train_acc / train_n * 100), refresh=True)
            bar.update()
        bar.close()

        model.eval()
        test_loss, test_acc, test_n = 0, 0, 0
        for x, t in tqdm(test_loader, total=len(test_loader), leave=False):
            with torch.no_grad():
                x, t = Variable(x.cuda()), Variable(t.cuda())
                y = model(x)
                loss = loss_func(y, t)
                test_loss += loss.item() * t.size(0)
                test_acc += utils.accuracy(y, t).item()
                test_n += t.size(0)

        if (e + 1) % args.snapshot_interval == 0:
            torch.save({
                "state_dict": model.state_dict(),
                "optimizer": opt.state_dict()
            }, os.path.join(args.checkpoint, "{}.tar".format(e + 1)))

        lr = opt.param_groups[0]["lr"]
        logger.write(e+1, lr, train_loss / train_n, test_loss / test_n,
                     train_acc / train_n * 100, test_acc / test_n * 100) 
Developer ID: owruby, Project: shake-drop_pytorch, Lines of code: 59, Source file: train.py
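The Logger here is created with the checkpoint directory and a list of column headers, and logger.write(...) is then called once per epoch with one positional value per header. Below is a minimal sketch of a compatible tab-separated logger; the fixed 'log.tsv' filename is an assumption for illustration and is not taken from the shake-drop_pytorch repository.

import os

class Logger:
    """Writes one tab-separated row per epoch under fixed column headers (illustrative sketch)."""
    def __init__(self, checkpoint_dir, headers, filename='log.tsv'):
        os.makedirs(checkpoint_dir, exist_ok=True)
        self.headers = headers
        self.f = open(os.path.join(checkpoint_dir, filename), 'w')
        self.f.write('\t'.join(headers) + '\n')
        self.f.flush()

    def write(self, *values):
        # One value per header, e.g. logger.write(epoch, lr, train_loss, test_loss, train_acc, test_acc).
        assert len(values) == len(self.headers)
        row = '\t'.join('{:.6g}'.format(v) if isinstance(v, float) else str(v) for v in values)
        print(row)
        self.f.write(row + '\n')
        self.f.flush()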

Example 8: train

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def train(model, train_loader, eval_loader, num_epochs, output, opt, wd):
    utils.create_dir(output)
    # Paper uses AdaDelta
    if opt == 'Adadelta':
        optim = torch.optim.Adadelta(model.parameters(), rho=0.95, eps=1e-6, weight_decay=wd)
    elif opt == 'RMSprop':
        optim = torch.optim.RMSprop(model.parameters(), lr=0.01, alpha=0.99, eps=1e-08, weight_decay=wd, momentum=0, centered=False)
    elif opt == 'Adam':
        optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=wd)
    else:
        optim = torch.optim.Adamax(model.parameters(), weight_decay=wd)
    logger = utils.Logger(os.path.join(output, 'log.txt'))
    best_eval_score = 0

    for epoch in range(num_epochs):
        total_loss = 0
        train_score = 0
        t = time.time()
        correct = 0

        for i, (v, b, q, a) in enumerate(train_loader):
            v = Variable(v).cuda()
            b = Variable(b).cuda() # boxes not used
            q = Variable(q).cuda()
            a = Variable(a).cuda() # true labels

            pred = model(v, b, q, a)
            loss = instance_bce_with_logits(pred, a)
            loss.backward()
            nn.utils.clip_grad_norm(model.parameters(), 0.25)
            optim.step()
            optim.zero_grad()

            batch_score = compute_score_with_logits(pred, a.data).sum()
            total_loss += loss.data[0] * v.size(0)
            train_score += batch_score

        total_loss /= len(train_loader.dataset)
        train_score = 100 * train_score / len(train_loader.dataset)

        model.train(False)
        eval_score, bound, V_loss = evaluate(model, eval_loader)
        model.train(True)

        logger.write('epoch %d, time: %.2f' % (epoch, time.time()-t))
        logger.write('\ttrain_loss: %.3f, score: %.3f' % (total_loss, train_score))
        logger.write('\teval loss: %.3f, score: %.3f (%.3f)' % (V_loss, 100 * eval_score, 100 * bound))

        if eval_score > best_eval_score:
            model_path = os.path.join(output, 'model.pth')
            torch.save(model.state_dict(), model_path)
            best_eval_score = eval_score 
Developer ID: SinghJasdeep, Project: Attention-on-Attention-for-VQA, Lines of code: 54, Source file: train.py

Example 9: main

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_' + args.dataset + '.txt'))

    if use_gpu:
        print("Currently using GPU: {}".format(args.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")

    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(
        name=args.dataset, batch_size=args.batch_size, use_gpu=use_gpu,
        num_workers=args.workers,
    )

    trainloader, testloader = dataset.trainloader, dataset.testloader

    print("Creating model: {}".format(args.model))
    model = models.create(name=args.model, num_classes=dataset.num_classes)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    criterion_xent = nn.CrossEntropyLoss()
    criterion_cent = CenterLoss(num_classes=dataset.num_classes, feat_dim=2, use_gpu=use_gpu)
    optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=5e-04, momentum=0.9)
    optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)

    start_time = time.time()

    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
        train(model, criterion_xent, criterion_cent,
              optimizer_model, optimizer_centloss,
              trainloader, use_gpu, dataset.num_classes, epoch)

        if args.stepsize > 0: scheduler.step()

        if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            acc, err = test(model, testloader, use_gpu, dataset.num_classes, epoch)
            print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed)) 
Developer ID: KaiyangZhou, Project: pytorch-center-loss, Lines of code: 57, Source file: main.py
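In this example the Logger replaces sys.stdout, so everything printed during training is duplicated into a log file while still appearing on the console, a common "tee" pattern. The sketch below shows one way to write such a class, assuming only write and flush are needed; the actual implementation in pytorch-center-loss may handle directory creation and cleanup differently.

import os
import sys

class Logger(object):
    """Duplicates everything written to stdout into a log file (illustrative sketch)."""
    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            os.makedirs(os.path.dirname(fpath) or '.', exist_ok=True)
            self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()

# Usage as in Example 9: sys.stdout = Logger(osp.join(args.save_dir, 'log_' + args.dataset + '.txt'))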

Example 10: train_main

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def train_main(config, resume):
    train_logger = Logger()

    data_config = config['data']

    t_transforms = _get_transform(config, 'train')
    v_transforms = _get_transform(config, 'val')
    print(t_transforms)

    data_manager = getattr(data_module, config['data']['type'])(config['data'])
    classes = data_manager.classes

    t_loader = data_manager.get_loader('train', t_transforms)
    v_loader = data_manager.get_loader('val', v_transforms)

    m_name = config['model']['type']
    model = getattr(net_module, m_name)(classes, config=config)
    num_classes = len(classes)


    loss = getattr(net_module, config['train']['loss'])
    metrics = getattr(net_module, config['metrics'])(num_classes)

    trainable_params = filter(lambda p: p.requires_grad, model.parameters())

    opt_name = config['optimizer']['type']
    opt_args = config['optimizer']['args']
    optimizer = getattr(torch.optim, opt_name)(trainable_params, **opt_args)


    lr_name = config['lr_scheduler']['type']
    lr_args = config['lr_scheduler']['args']
    if lr_name == 'None':
        lr_scheduler = None
    else:
        lr_scheduler = getattr(torch.optim.lr_scheduler, lr_name)(optimizer, **lr_args)


    trainer = Trainer(model, loss, metrics, optimizer, 
                      resume=resume,
                      config=config,
                      data_loader=t_loader,
                      valid_data_loader=v_loader,
                      lr_scheduler=lr_scheduler,
                      train_logger=train_logger)

    trainer.train()
    return trainer
    #duration = 1; freq = 440
    #os.system('play --no-show-progress --null --channels 1 synth %s sine %f'%(duration, freq)) 
Developer ID: ksanjeevan, Project: crnn-audio-classification, Lines of code: 52, Source file: run.py

Example 11: train

# Required import: import utils [as alias]
# Or: from utils import Logger [as alias]
def train(model, train_loader, eval_loader, num_epochs, output):
    utils.create_dir(output)
    optim = torch.optim.Adamax(model.parameters())
    logger = utils.Logger(os.path.join(output, 'log.txt'))
    best_eval_score = 0

    for epoch in range(num_epochs):
        total_loss = 0
        train_score = 0
        t = time.time()

        for i, (v, b, q, a) in enumerate(train_loader):
            v = Variable(v).cuda()
            b = Variable(b).cuda()
            q = Variable(q).cuda()
            a = Variable(a).cuda()

            pred = model(v, b, q, a)
            loss = instance_bce_with_logits(pred, a)
            loss.backward()
            nn.utils.clip_grad_norm(model.parameters(), 0.25)
            optim.step()
            optim.zero_grad()

            batch_score = compute_score_with_logits(pred, a.data).sum()
            total_loss += loss.data[0] * v.size(0)
            train_score += batch_score

        total_loss /= len(train_loader.dataset)
        train_score = 100 * train_score / len(train_loader.dataset)
        model.train(False)
        eval_score, bound = evaluate(model, eval_loader)
        model.train(True)

        logger.write('epoch %d, time: %.2f' % (epoch, time.time()-t))
        logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
        logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))

        if eval_score > best_eval_score:
            model_path = os.path.join(output, 'model.pth')
            torch.save(model.state_dict(), model_path)
            best_eval_score = eval_score 
Developer ID: hengyuan-hu, Project: bottom-up-attention-vqa, Lines of code: 44, Source file: train.py


Note: The utils.Logger examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.