

Python functional.instance_norm Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.instance_norm. If you are wondering how functional.instance_norm works or how to use it in practice, the curated examples below should help. You can also explore the broader set of usage examples for torch.nn.functional.


Nine code examples of the functional.instance_norm method are shown below, sorted by popularity by default. You can upvote the examples you find useful; this feedback helps surface better Python code examples.
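Before diving into the examples, here is a minimal usage sketch of F.instance_norm itself (tensor shapes are chosen purely for illustration):

import torch
import torch.nn.functional as F

x = torch.randn(4, 3, 16, 16)  # (batch, channels, height, width)
# Each (sample, channel) slice is normalized over its own spatial dimensions
out = F.instance_norm(x, use_input_stats=True, momentum=0.1, eps=1e-5)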

Example 1: forward

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def forward(self, input, ConInfor):
    self._check_input_dim(input)
    b, c = input.size(0), input.size(1)
    if self.num_con > 0:
        tarBias = self.ConBias(ConInfor).view(b, c, 1, 1)
    else:
        tarBias = 0
    out = F.instance_norm(
        input, self.running_mean, self.running_var, None, None,
        self.training or not self.track_running_stats, self.momentum, self.eps)

    if self.affine:
        bias = self.bias.repeat(b).view(b, c, 1, 1)
        weight = self.weight.repeat(b).view(b, c, 1, 1)
        return (out.view(b, c, *input.size()[2:]) + tarBias) * weight + bias
    else:
        return out.view(b, c, *input.size()[2:]) + tarBias
Author: Xiaoming-Yu, Project: DMIT, Lines of code: 19, Source file: cbin.py
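The forward above depends on attributes initialized elsewhere in the DMIT module (ConBias, num_con, the affine parameters, running statistics). For orientation only, a self-contained sketch of a conditional-bias instance norm in the same spirit — class and attribute names below are illustrative, not taken from the repo — might look like:

import torch
import torch.nn as nn
import torch.nn.functional as F

class ConditionalBiasInstanceNorm2d(nn.Module):
    # Illustrative re-creation, not the DMIT implementation
    def __init__(self, num_features, num_con):
        super().__init__()
        self.num_con = num_con
        # Maps the condition vector to one bias per channel
        self.con_bias = nn.Linear(num_con, num_features)

    def forward(self, x, con):
        b, c = x.size(0), x.size(1)
        out = F.instance_norm(x)
        if self.num_con > 0:
            out = out + self.con_bias(con).view(b, c, 1, 1)
        return out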

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def forward(self, x, y):
    # Calculate class-conditional gains and biases
    gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1)
    bias = self.bias(y).view(y.size(0), -1, 1, 1)
    # If using my batchnorm
    if self.mybn or self.cross_replica:
      return self.bn(x, gain=gain, bias=bias)
    else:
      if self.norm_style == 'bn':
        out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None,
                           self.training, 0.1, self.eps)
      elif self.norm_style == 'in':
        out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None,
                              self.training, 0.1, self.eps)
      elif self.norm_style == 'gn':
        out = groupnorm(x, self.norm_style)
      elif self.norm_style == 'nonorm':
        out = x
      return out * gain + bias 
Author: ajbrock, Project: BigGAN-PyTorch, Lines of code: 22, Source file: layers.py

Example 3: forward

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def forward(self, input, ConInfor):
    self._check_input_dim(input)
    b, c = input.size(0), input.size(1)
    out = F.instance_norm(
        input, self.running_mean, self.running_var, None, None,
        self.training or not self.track_running_stats, self.momentum, self.eps)

    if self.num_con > 0:
        weight = self.ConAlpha(ConInfor).view(b, c, 1, 1)
        bias = self.ConBeta(ConInfor).view(b, c, 1, 1)
    else:
        weight = 1
        bias = 0
    return out.view(b, c, *input.size()[2:]) * weight + bias
Author: Xiaoming-Yu, Project: DMIT, Lines of code: 16, Source file: adain.py

Example 4: _instance_norm

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def _instance_norm(raw, input, running_mean=None, running_var=None, weight=None,
                   bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    # TODO: handle the view operations needed when batch size != 1
    print("WARNING: Instance normalization is converted to Caffe BatchNorm, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: support affine=True and track_running_stats=True
        raise NotImplementedError("InstanceNorm with affine=True or track_running_stats=True is not implemented")
    x = torch.batch_norm(
        input, weight, bias, running_mean, running_var,
        use_input_stats, momentum, eps, torch.backends.cudnn.enabled)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # global stats are not used; normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
        running_mean = torch.zeros(input.size()[1])
        running_var = torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x


# upsample layer
Author: xxradon, Project: PytorchToCaffe, Lines of code: 39, Source file: pytorch_to_caffe.py
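The converter emulates instance norm with torch.batch_norm, which is only valid for batch size 1 (hence the printed warning). A quick numerical check of that equivalence, assuming the default eps and no affine parameters:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)  # the equivalence only holds for batch size 1
a = F.instance_norm(x)
b = F.batch_norm(x, None, None, training=True)
print(torch.allclose(a, b, atol=1e-6))  # True: per-channel batch stats coincide with per-instance stats when N == 1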

Example 5: forward

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def forward(self, inputs):
		'''
		Forward pass, return log probabilities over correspondences.

		inputs -- 4D data tensor (BxCxNx1)
		B -> batch size (multiple image pairs)
		C -> 5 values (2D coordinate + 2D coordinate + 1D side information)
		N -> number of correspondences
		1 -> dummy dimension
		
		'''
		batch_size = inputs.size(0)
		data_size = inputs.size(2) # number of correspondences

		x = inputs
		x = F.relu(self.p_in(x))
		
		for r in self.res_blocks:
			res = x
			x = F.relu(r[1](F.instance_norm(r[0](x)))) 
			x = F.relu(r[3](F.instance_norm(r[2](x))))
			x = x + res

		log_probs = F.logsigmoid(self.p_out(x))

		# normalization in log space such that probabilities sum to 1
		log_probs = log_probs.view(batch_size, -1)
		normalizer = torch.logsumexp(log_probs, dim=1)
		normalizer = normalizer.unsqueeze(1).expand(-1, data_size)
		log_probs = log_probs - normalizer
		log_probs = log_probs.view(batch_size, 1, data_size, 1)

		return log_probs 
Author: vislearn, Project: ngransac, Lines of code: 35, Source file: network.py
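The final normalization step runs entirely in log space via logsumexp; the same pattern in isolation (random scores and shapes are illustrative):

import torch

log_probs = torch.randn(2, 5)  # unnormalized log scores
log_probs = log_probs - torch.logsumexp(log_probs, dim=1, keepdim=True)
print(log_probs.exp().sum(dim=1))  # each row now sums to 1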

Example 6: forward

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def forward(self, input, ConInfor):
    self._check_input_dim(input)
    b, c = input.size(0), input.size(1)
    tarBias = self.ConBias(ConInfor).view(b, c, 1, 1)
    out = F.instance_norm(
        input, self.running_mean, self.running_var, None, None,
        self.training or not self.track_running_stats, self.momentum, self.eps)

    if self.affine:
        bias = self.bias.repeat(b).view(b, c, 1, 1)
        weight = self.weight.repeat(b).view(b, c, 1, 1)
        return (out.view(b, c, *input.size()[2:]) + tarBias) * weight + bias
    else:
        return out.view(b, c, *input.size()[2:]) + tarBias
Author: Xiaoming-Yu, Project: SingleGAN, Lines of code: 16, Source file: cbin.py

Example 7: _instance_norm

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def _instance_norm(raw, input, running_mean=None, running_var=None, weight=None,
                   bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    # TODO: handle the view operations needed when batch size != 1
    print("WARNING: Instance normalization is converted to Caffe BatchNorm, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: support affine=True and track_running_stats=True
        raise NotImplementedError("InstanceNorm with affine=True or track_running_stats=True is not implemented")
    x = torch.batch_norm(
        input, weight, bias, running_mean, running_var,
        use_input_stats, momentum, eps, torch.backends.cudnn.enabled)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # global stats are not used; normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
        running_mean = torch.zeros(input.size()[1])
        running_var = torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x


# upsample layer 
Author: JDAI-CV, Project: fast-reid, Lines of code: 39, Source file: pytorch_to_caffe.py

Example 8: test_instance_norm

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def test_instance_norm(self):
    inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
    running_mean = torch.randn(3, device='cuda', dtype=self.dtype)
    running_var = torch.randn(3, device='cuda', dtype=self.dtype)
    output = F.instance_norm(inp, running_mean=running_mean, running_var=running_var,
                             weight=None, bias=None, use_input_stats=True,
                             momentum=0.1, eps=1e-05)
Author: NVIDIA, Project: apex, Lines of code: 7, Source file: test_pyprof_nvtx.py

Example 9: compute_vgg19_loss

# Required import: from torch.nn import functional [as alias]
# Or alternatively: from torch.nn.functional import instance_norm [as alias]
def compute_vgg19_loss(self, vgg, img, target, vgg_type='vgg19'):
    img_feature = self.vgg((img + 1) / 2)
    target_feature = self.vgg((target + 1) / 2).detach()
    if vgg_type == 'vgg19':
        return F.l1_loss(img_feature, target_feature)
    elif vgg_type == 'vgg19_sp':
        sp = SpatialNorm(affine=False)
        return F.l1_loss(sp(img_feature)[0], sp(target_feature)[0])
    elif vgg_type == 'vgg19_sp_mean':
        m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
        return F.l1_loss(m1, m2)
    elif vgg_type == 'vgg19_sp_mean_mix':
        m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
        return 0.5 * F.l1_loss(img_feature, target_feature) + 0.5 * F.l1_loss(m1, m2)
    elif vgg_type == 'vgg19_sp_meanstd':
        m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
        std1, std2 = img_feature.std(dim=1), target_feature.std(dim=1)
        return 0.5 * F.l1_loss(m1, m2) + 0.5 * F.l1_loss(std1, std2)
    elif vgg_type == 'vgg19_sp_meanstd_mix':
        m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
        std1, std2 = img_feature.std(dim=1), target_feature.std(dim=1)
        return 0.5 * F.l1_loss(img_feature, target_feature) + 0.25 * F.l1_loss(m1, m2) + 0.25 * F.l1_loss(std1, std2)
    elif vgg_type == 'vgg19_in':
        return F.l1_loss(F.instance_norm(img_feature), F.instance_norm(target_feature))
    elif vgg_type == 'vgg19_in_mean':
        img_feature = img_feature.view(*img_feature.shape[:2], -1)
        target_feature = target_feature.view(*target_feature.shape[:2], -1)
        m1, m2 = img_feature.mean(dim=2), target_feature.mean(dim=2)
        return F.l1_loss(m1, m2)
    elif vgg_type == 'vgg19_in_meanstd':
        img_feature = img_feature.view(*img_feature.shape[:2], -1)
        target_feature = target_feature.view(*target_feature.shape[:2], -1)
        m1, m2 = img_feature.mean(dim=2), target_feature.mean(dim=2)
        std1, std2 = img_feature.std(dim=2), target_feature.std(dim=2)
        return F.l1_loss(m1, m2) + F.l1_loss(std1, std2)
    else:
        raise ValueError('vgg_type = {}'.format(vgg_type))
Author: Boyiliee, Project: PONO, Lines of code: 39, Source file: trainer_pono.py
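The 'vgg19_in' variant compares instance-normalized features, which discards per-channel mean and scale and keeps only spatial structure. A small demonstration of the effect (values are illustrative):

import torch
import torch.nn.functional as F

f = torch.randn(2, 8, 16, 16)
g = f * 3.0 + 1.5  # same spatial structure, different first- and second-order statistics
# After instance norm the two feature maps are numerically almost identical,
# so the L1 loss measures structural differences rather than raw statistics.
print(F.l1_loss(F.instance_norm(f), F.instance_norm(g)).item())  # ~0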


Note: The torch.nn.functional.instance_norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this article without permission.