

Python nn.L1Loss Method Code Examples

This article collects typical usage examples of the torch.nn.L1Loss method in Python. If you are wondering how nn.L1Loss works, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the torch.nn module.


The 15 code examples of nn.L1Loss shown below are sorted by popularity by default.
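
Before the project-level examples, here is a minimal standalone illustration of nn.L1Loss itself: it computes the mean absolute error between its inputs, and the reduction argument ('mean' by default, or 'sum' / 'none') controls how the per-element errors are aggregated.

import torch
from torch import nn

pred = torch.tensor([1.0, 2.0, 4.0])
target = torch.tensor([1.0, 3.0, 2.0])

l1_mean = nn.L1Loss()                     # default: reduction='mean'
l1_sum = nn.L1Loss(reduction='sum')
l1_none = nn.L1Loss(reduction='none')

print(l1_mean(pred, target))   # tensor(1.)  -> (0 + 1 + 2) / 3
print(l1_sum(pred, target))    # tensor(3.)
print(l1_none(pred, target))   # tensor([0., 1., 2.])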

Example 1: define_loss

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def define_loss(self):
        G_lossfn_type = self.opt_train['G_lossfn_type']
        if G_lossfn_type == 'l1':
            self.G_lossfn = nn.L1Loss().to(self.device)
        elif G_lossfn_type == 'l2':
            self.G_lossfn = nn.MSELoss().to(self.device)
        elif G_lossfn_type == 'l2sum':
            self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
        elif G_lossfn_type == 'ssim':
            self.G_lossfn = SSIMLoss().to(self.device)
        else:
            raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
        self.G_lossfn_weight = self.opt_train['G_lossfn_weight']

    # ----------------------------------------
    # define optimizer
    # ---------------------------------------- 
Author: cszn, Project: KAIR, Lines: 19, Source: model_plain2.py
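
For quick experimentation, the same string-to-loss dispatch can be written as a self-contained helper. This is only a sketch: build_loss is a hypothetical name, and the project-specific SSIMLoss branch is omitted.

import torch
from torch import nn

def build_loss(lossfn_type, device='cpu'):
    # Minimal standalone version of the dispatch in define_loss above.
    losses = {
        'l1': nn.L1Loss(),
        'l2': nn.MSELoss(),
        'l2sum': nn.MSELoss(reduction='sum'),
    }
    if lossfn_type not in losses:
        raise NotImplementedError('Loss type [{:s}] is not found.'.format(lossfn_type))
    return losses[lossfn_type].to(device)

G_lossfn = build_loss('l1')
print(G_lossfn(torch.zeros(2, 3), torch.ones(2, 3)))   # tensor(1.)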

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self, gen, dis, dataloader_train, dataloader_val, gpu_id, log_freq, save_dir, n_step):
        if torch.cuda.is_available():
            self.device = torch.device('cuda:' + str(gpu_id))
        else:
            self.device = torch.device('cpu')
        self.gen = gen.to(self.device)
        self.dis = dis.to(self.device)

        self.dataloader_train = dataloader_train
        self.dataloader_val = dataloader_val
        self.optim_g = torch.optim.Adam(gen.parameters(), lr=1e-4, betas=(0.5, 0.999))
        self.optim_d = torch.optim.Adam(dis.parameters(), lr=1e-4, betas=(0.5, 0.999))
        self.criterionL1 = nn.L1Loss()
        self.criterionVGG = VGGLoss()
        self.criterionAdv = torch.nn.BCELoss()
        self.log_freq = log_freq
        self.save_dir = save_dir
        self.n_step = n_step
        self.step = 0
        print('Generator Parameters:', sum([p.nelement() for p in self.gen.parameters()]))
        print('Discriminator Parameters:', sum([p.nelement() for p in self.dis.parameters()])) 
Author: shionhonda, Project: viton-gan, Lines: 23, Source: train_tom.py

Example 3: test_combination_invariant_loss_tf

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def test_combination_invariant_loss_tf():
    n_batch = 40
    n_time = 400
    n_freq = 129
    n_sources = 2

    sources = torch.randn(n_batch, n_time, n_freq, n_sources)

    LossCPIT = ml.train.loss.CombinationInvariantLoss(
        loss_function=nn.L1Loss())
    LossL1 = nn.L1Loss()

    _loss_a = LossL1(sources, sources).item()

    for shift in range(n_sources):
        sources_a = sources[:, :, :, shift:]
        sources_b = sources[:, :, :, :shift]
        sources_c = torch.randn(n_batch, n_time, n_freq, n_sources)
        shifted_sources = torch.cat(
            [sources_a, sources_b, sources_c], dim=-1)
        _loss_b = LossCPIT(shifted_sources, sources).item()
        assert _loss_a == _loss_b 
Author: nussl, Project: nussl, Lines: 24, Source: test_loss.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(
        self, 
        matching_type='features',
        matching_loss='L1',
        average_loss=True):
        super(Matcher, self).__init__()

        # Matched statistics
        if matching_type == 'features':
            self.get_stats = self.gram_matrix
        elif matching_type == 'features':  # note: this condition duplicates the one above, so the identity branch is unreachable as written
            self.get_stats = lambda x: x

        # Loss function
        matching_loss = matching_loss.lower()
        if matching_loss == 'mse':
            self.criterion = nn.MSELoss()
        elif matching_loss == 'smoothl1':
            self.criterion = nn.SmoothL1Loss()
        elif matching_loss == 'l1':
            self.criterion = nn.L1Loss()
        self.average_loss = average_loss 
Author: egorzakharov, Project: PerceptualGAN, Lines: 24, Source: perceptual_loss.py
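
The gram_matrix method referenced above (self.get_stats = self.gram_matrix) is not shown in this snippet. A standard normalized Gram-matrix implementation, as commonly used for style and texture matching, looks roughly like the following sketch (not the project's exact code):

import torch

def gram_matrix(features):
    # features: B x C x H x W activation maps
    b, c, h, w = features.size()
    flat = features.view(b, c, h * w)              # B x C x (H*W)
    gram = torch.bmm(flat, flat.transpose(1, 2))   # B x C x C channel correlations
    return gram / (c * h * w)                      # normalize by the number of elements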

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self, recursions=1, stride=1, kernel_size=5, use_perceptual_loss=True, wgan=False, w_col=1,
                 w_tex=0.001, w_per=0.1, gaussian=False, lpips_rot_flip=False, **kwargs):
        super(GeneratorLoss, self).__init__()
        self.pixel_loss = nn.L1Loss()
        self.color_filter = FilterLow(recursions=recursions, stride=stride, kernel_size=kernel_size, padding=False,
                                      gaussian=gaussian)
        if torch.cuda.is_available():
            self.pixel_loss = self.pixel_loss.cuda()
            self.color_filter = self.color_filter.cuda()
        self.perceptual_loss = PerceptualLoss(rotations=lpips_rot_flip, flips=lpips_rot_flip)
        self.use_perceptual_loss = use_perceptual_loss
        self.wasserstein = wgan
        self.w_col = w_col
        self.w_tex = w_tex
        self.w_per = w_per
        self.last_tex_loss = 0
        self.last_per_loss = 0
        self.last_col_loss = 0
        self.last_mean_loss = 0 
Author: ManuelFritsche, Project: real-world-sr, Lines: 21, Source: loss.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self, pretrained=True, device='cuda'):
		super(VGG_perceptual_loss, self).__init__()
		self.device=device
		self.loss_function=nn.L1Loss()
		self.vgg_features = vgg.make_layers(vgg.cfg['D'])
		if pretrained:
			self.vgg_features.load_state_dict(torch.load('utils/vgg16_pretrained_features.pth'))
		self.vgg_features.to(device)
		# freeze parameter update
		for params in self.vgg_features.parameters():
			params.requires_grad = False
		self.layer_name_mapping = {
			'3': "relu1_2",
			'8': "relu2_2",
			'15': "relu3_3",
			'22': "relu4_3"
		} 
Author: Lotayou, Project: everybody_dance_now_pytorch, Lines: 19, Source: perceptual_loss.py
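
The forward pass of such a VGG perceptual loss is not shown above. It typically walks the frozen feature extractor layer by layer, grabs the activations named in layer_name_mapping for both images, and sums an L1 term per selected layer. A sketch under that assumption (not the project's exact forward method):

import torch
from torch import nn

def vgg_l1_perceptual(vgg_features, layer_name_mapping, loss_function, x, y):
    # Run both images through the frozen VGG feature stack and compare the
    # activations at the selected relu layers with L1.
    loss = 0.0
    for name, module in vgg_features.named_children():
        x = module(x)
        y = module(y)
        if name in layer_name_mapping:
            loss = loss + loss_function(x, y)
    return loss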

Example 7: get_loss_func

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def get_loss_func(supervised_method):
    """Determines the supervised loss to be used, given the supervised method."""
    if supervised_method.endswith('l1'):
        return nn.L1Loss()
    elif supervised_method.endswith('mse'):
        return nn.MSELoss()
    elif supervised_method.endswith('berhu'):
        return BerHuLoss()
    elif supervised_method.endswith('silog'):
        return SilogLoss()
    elif supervised_method.endswith('abs_rel'):
        return lambda x, y: torch.mean(torch.abs(x - y) / x)
    else:
        raise ValueError('Unknown supervised loss {}'.format(supervised_method))

######################################################################################################################## 
Author: TRI-ML, Project: packnet-sfm, Lines: 18, Source: supervised_loss.py
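
Because the dispatch only checks the suffix of the supervised-method string, any name ending in 'l1' maps to nn.L1Loss. A usage sketch of the function above (the method name here is an assumption; only its suffix matters):

import torch

loss_fn = get_loss_func('supervised-l1')   # hypothetical method name ending in 'l1'
pred = torch.rand(4, 1, 64, 64)
gt = torch.rand(4, 1, 64, 64)
print(loss_fn(pred, gt))                   # scalar mean absolute error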

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self, pretrained=False, device='cuda'):
        super(VGG_perceptual_loss, self).__init__()
        self.device = device
        self.loss_function = nn.L1Loss()
        self.vgg_features = vgg.make_layers(vgg.cfg['D'])
        if pretrained:
            self.vgg_features.load_state_dict(
                torch.load('vgg16_pretrained_features.pth'))
        self.vgg_features.to(device)
        # freeze parameter update
        for params in self.vgg_features.parameters():
            params.requires_grad = False
        self.layer_name_mapping = {
            '3': "relu1_2",
            '8': "relu2_2",
            '15': "relu3_3",
            '22': "relu4_3"
        } 
Author: CUHKSZ-TQL, Project: EverybodyDanceNow_reproduce_pytorch, Lines: 20, Source: perceptual_loss.py

Example 9: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self, encoder):
        super(Net, self).__init__()
        enc_layers = list(encoder.children())
        self.enc_1 = nn.Sequential(*enc_layers[:4])  # input -> relu1_1
        self.enc_2 = nn.Sequential(*enc_layers[4:11])  # relu1_1 -> relu2_1
        self.enc_3 = nn.Sequential(*enc_layers[11:18])  # relu2_1 -> relu3_1
        self.enc_4 = nn.Sequential(*enc_layers[18:31])  # relu3_1 -> relu4_1
        self.mse_loss = nn.MSELoss()
        self.l1_loss = nn.L1Loss()

        # fix the encoder
        for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4']:
            for param in getattr(self, name).parameters():
                param.requires_grad = False

    # extract relu1_1, relu2_1, relu3_1, relu4_1 from input image 
Author: fei-hdu, Project: ca-gan, Lines: 18, Source: net.py

Example 10: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self):

        super(DetailEnhance, self).__init__()

        self.feature_level = 3

        self.extract_features = model.ValidationFeatures()
        self.extract_aligned_features = model.ExtractAlignedFeatures(n_res=5) # 4  5
        self.pcd_align = model.PCD_Align(groups=8) # 4  8
        self.tsa_fusion = model.TSA_Fusion(nframes=3, center=1)

        self.reconstruct = model.Reconstruct(n_res=20) # 5  40

        # Loss calculate
        self.L1_lossFn = nn.L1Loss()
        self.sL1_lossFn = nn.SmoothL1Loss()
        self.cL1_lossFn = loss.CharbonnierLoss()
        self.MSE_LossFn = nn.MSELoss() 
Author: CM-BF, Project: FeatureFlow, Lines: 20, Source: layers.py

Example 11: compute_loss

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def compute_loss(pred, target):
    """
    Computes loss value

    Args:
        pred (Tensor): B x T, predicted wavs
        target (Tensor): B x T, target wavs
    """
    stft_pred, _, _ = compute_stft(pred, n_fft=2048, win_length=1024, hop_length=256)
    stft_target, _, _ = compute_stft(target, n_fft=2048, win_length=1024, hop_length=256)
    l1_loss = nn.L1Loss()

    log_stft_pred = torch.log(stft_pred + 1e-8)
    log_stft_target = torch.log(stft_target + 1e-8)
    l1 = l1_loss(log_stft_pred, log_stft_target)
    l2 = l1_loss(log_stft_pred[:, :, :500], log_stft_target[:, :, :500])
    l3 = l1_loss(stft_pred[:, :, :500], stft_target[:, :, :500])
    loss = l1 + l2 + l3
    return loss, l1, l2, l3 
Author: tuan3w, Project: cnn_vocoder, Lines: 21, Source: loss.py
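
compute_stft is a project helper that is not shown here. A hypothetical stand-in built on torch.stft, returning a B x frames x freq magnitude plus the real and imaginary parts so the three-value unpacking above works, could look like this (the shapes and the extra return values are assumptions):

import torch

def compute_stft(wav, n_fft=2048, win_length=1024, hop_length=256):
    # wav: B x T waveform batch
    window = torch.hann_window(win_length, device=wav.device)
    spec = torch.stft(wav, n_fft=n_fft, hop_length=hop_length,
                      win_length=win_length, window=window,
                      return_complex=True)      # B x (n_fft//2 + 1) x frames, complex
    mag = spec.abs().transpose(1, 2)            # B x frames x freq, matching the slicing above
    return mag, spec.real, spec.imag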

Example 12: setup

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def setup(model, opt):

    if opt.criterion == "l1":
        criterion = nn.L1Loss().cuda()
    elif opt.criterion == "mse":
        criterion = nn.MSELoss().cuda()
    elif opt.criterion == "crossentropy":
        criterion = nn.CrossEntropyLoss().cuda()
    elif opt.criterion == "hingeEmbedding":
        criterion = nn.HingeEmbeddingLoss().cuda()
    elif opt.criterion == "tripletmargin":
        criterion = nn.TripletMarginLoss(margin = opt.margin, swap = opt.anchorswap).cuda()

    parameters = filter(lambda p: p.requires_grad, model.parameters())

    if opt.optimType == 'sgd':
        optimizer = optim.SGD(parameters, lr = opt.lr, momentum = opt.momentum, nesterov = opt.nesterov, weight_decay = opt.weightDecay)
    elif opt.optimType == 'adam':
        optimizer = optim.Adam(parameters, lr = opt.maxlr, weight_decay = opt.weightDecay)

    if opt.weight_init:
        utils.weights_init(model, opt)

    return model, criterion, optimizer 
Author: drimpossible, Project: Deep-Expander-Networks, Lines: 26, Source: __init__.py

Example 13: val_inner_epoch

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def val_inner_epoch(dataloader, model):
    sum_loss = 0
    model.eval()
    criterion = nn.L1Loss()
    with torch.no_grad():
        for X_batch, y_batch in dataloader:
            X_batch = X_batch.cuda()
            y_batch = y_batch.cuda()
            mask = model.predict(X_batch)
            X_batch = spec_utils.crop_center(mask, X_batch, False)
            y_batch = spec_utils.crop_center(mask, y_batch, False)

            loss = criterion(X_batch * mask, y_batch)
            sum_loss += float(loss.detach().cpu().numpy()) * len(X_batch)

    return sum_loss / len(dataloader.dataset) 
Author: tsurumeso, Project: vocal-remover, Lines: 18, Source: train.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self, hparams):
        super(ParrotLoss, self).__init__()
        self.hidden_dim = hparams.encoder_embedding_dim
        self.ce_loss = hparams.ce_loss

        self.L1Loss = nn.L1Loss(reduction='none')
        self.MSELoss = nn.MSELoss(reduction='none')
        self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='none')
        self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none')
        self.n_frames_per_step = hparams.n_frames_per_step_decoder
        self.eos = hparams.n_symbols
        self.predict_spectrogram = hparams.predict_spectrogram

        self.contr_w = hparams.contrastive_loss_w
        self.spenc_w = hparams.speaker_encoder_loss_w
        self.texcl_w = hparams.text_classifier_loss_w
        self.spadv_w = hparams.speaker_adversial_loss_w
        self.spcla_w = hparams.speaker_classifier_loss_w 
Author: jxzhanggg, Project: nonparaSeq2seqVC_code, Lines: 20, Source: loss.py
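
Both ParrotLoss variants build nn.L1Loss(reduction='none'), which returns the per-element absolute errors instead of a scalar; this is the usual way to apply a padding mask before averaging in sequence models. A minimal illustration of that pattern (the tensor shapes and the mask are assumptions):

import torch
from torch import nn

l1 = nn.L1Loss(reduction='none')

pred = torch.rand(2, 5, 80)        # B x T x n_mel, e.g. predicted mel frames
target = torch.rand(2, 5, 80)
mask = torch.ones(2, 5, 1)         # 1 for valid frames, 0 for padded frames
mask[1, 3:] = 0                    # pretend the second utterance is shorter

per_element = l1(pred, target)     # B x T x n_mel, unreduced
masked_mean = (per_element * mask).sum() / (mask.sum() * pred.size(-1))
print(masked_mean)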

Example 15: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import L1Loss [as alias]
def __init__(self, hparams):
        super(ParrotLoss, self).__init__()
        
        self.L1Loss = nn.L1Loss(reduction='none')
        self.MSELoss = nn.MSELoss(reduction='none')
        self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='none')
        self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none')
        self.n_frames_per_step = hparams.n_frames_per_step_decoder
        self.eos = hparams.n_symbols
        self.predict_spectrogram = hparams.predict_spectrogram

        self.contr_w = hparams.contrastive_loss_w
        self.spenc_w = hparams.speaker_encoder_loss_w
        self.texcl_w = hparams.text_classifier_loss_w
        self.spadv_w = hparams.speaker_adversial_loss_w
        self.spcla_w = hparams.speaker_classifier_loss_w

        self.speaker_A = hparams.speaker_A
        self.speaker_B = hparams.speaker_B 
Author: jxzhanggg, Project: nonparaSeq2seqVC_code, Lines: 21, Source: loss.py


Note: The torch.nn.L1Loss examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code. Do not republish this article without permission.