This article collects typical usage examples of the Python attribute torch.__version__. If you have been wondering what torch.__version__ is, how to use it, or what real code that reads it looks like, the curated examples below should help. You can also explore further usage examples from the torch module it belongs to.
The following 15 code examples of torch.__version__ are sorted by popularity by default. Upvote the examples you find useful; your feedback helps the system recommend better Python samples.
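Most of the snippets below gate their behavior on the installed PyTorch release. As a point of reference before the examples, here is a minimal sketch (written for this article, not taken from any of the snippets) of the comparison idiom using packaging.version, which correctly orders multi-digit components such as '1.10' that plain string comparison gets wrong:

from packaging import version
import torch

if version.parse(torch.__version__) >= version.parse('1.3'):
    pass  # modern code path
else:
    pass  # fallback for older releases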
Example 1: forward
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def forward(self, x):
if x.numel() == 0 and torch.__version__ <= '1.4':  # NOTE: lexicographic string compare; fine for '1.x' single-digit minors
out_shape = [x.shape[0], self.out_channels]
for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
self.padding, self.stride, self.dilation):
o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
out_shape.append(o)
empty = NewEmptyTensorOp.apply(x, out_shape)
if self.training:
# produce dummy gradient to avoid DDP warning.
dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
return empty + dummy
else:
return empty
return super().forward(x)
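The loop above evaluates the standard convolution output-size formula per spatial dimension, which written out is

o = \left\lfloor \frac{i + 2p - d(k - 1) - 1}{s} \right\rfloor + 1

where i is the input size, p the padding, d the dilation, k the kernel size, and s the stride.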
Example 2: patch_norm_fp32
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def patch_norm_fp32(module):
"""Recursively convert normalization layers from FP16 to FP32.
Args:
module (nn.Module): The FP16 module whose normalization layers should be converted.
Returns:
nn.Module: The converted module, with its normalization layers cast to FP32.
"""
if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
module.float()
if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
module.forward = patch_forward_method(module.forward, torch.half,
torch.float)
for child in module.children():
patch_norm_fp32(child)
return module
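A hedged usage sketch for patch_norm_fp32, assuming a toy FP16 model (the model below is illustrative and not part of the original example):

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).half()
model = patch_norm_fp32(model)  # the BatchNorm layer now runs in FP32 again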
Example 3: print_cuda_statistics
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def print_cuda_statistics():
logger = logging.getLogger("Cuda Statistics")
import sys
from subprocess import call
import torch
logger.info('__Python VERSION: {}'.format(sys.version))
logger.info('__pyTorch VERSION: {}'.format(torch.__version__))
logger.info('__CUDA VERSION')
call(["nvcc", "--version"])
logger.info('__CUDNN VERSION: {}'.format(torch.backends.cudnn.version()))
logger.info('__Number CUDA Devices: {}'.format(torch.cuda.device_count()))
logger.info('__Devices')
call(["nvidia-smi", "--format=csv",
"--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
logger.info('Active CUDA Device: GPU {}'.format(torch.cuda.current_device()))
logger.info('Available devices {}'.format(torch.cuda.device_count()))
logger.info('Current cuda device {}'.format(torch.cuda.current_device()))
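Note that both subprocess calls assume nvcc and nvidia-smi are available on the PATH; if either binary is missing, call raises FileNotFoundError instead of logging gracefully.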
Example 4: __init__
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def __init__(self, base_lr, max_lr, monitor='val_loss', step_size_up=2000, step_size_down=None, mode='triangular',
gamma=1., scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9,
last_epoch=-1, step_on_batch=False):
from distutils.version import LooseVersion
version = torch.__version__ if isinstance(torch.__version__, str) else "0.4.0"  # fall back when __version__ is not a string (the original `is` identity check breaks on str subclasses)
if LooseVersion(version) > LooseVersion("1.0.0"): # CyclicLR is implemented
super(CyclicLR, self).__init__(lambda opt:
torch.optim.lr_scheduler.CyclicLR(
opt, base_lr, max_lr, step_size_up=step_size_up,
step_size_down=step_size_down, mode=mode, gamma=gamma,
scale_fn=scale_fn, scale_mode=scale_mode,
cycle_momentum=cycle_momentum, base_momentum=base_momentum,
max_momentum=max_momentum, last_epoch=last_epoch),
monitor=monitor, step_on_batch=step_on_batch)
else:
raise NotImplementedError('CyclicLR scheduler was not implemented in PyTorch versions less than 1.1.0. '
'Update PyTorch or use the CyclicLR callback from an older Torchbearer version.')
Example 5: test_lambda_lr
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def test_lambda_lr(self):
from distutils.version import LooseVersion
import torch
version = torch.__version__ if isinstance(torch.__version__, str) else "0.4.0"  # same fallback as in Example 4
if LooseVersion(version) > LooseVersion("1.0.0"): # CyclicLR is implemented
with patch('torch.optim.lr_scheduler.CyclicLR') as lr_mock:
state = {torchbearer.OPTIMIZER: 'optimizer'}
scheduler = CyclicLR(0.01, 0.1, monitor='test', step_size_up=200, step_size_down=None, mode='triangular',
gamma=2., scale_fn=None, scale_mode='cycle', cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
last_epoch=-1, step_on_batch='batch')
scheduler.on_start(state)
lr_mock.assert_called_once_with('optimizer', 0.01, 0.1, step_size_up=200, step_size_down=None, mode='triangular',
gamma=2., scale_fn=None, scale_mode='cycle', cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
last_epoch=-1)
self.assertTrue(scheduler._step_on_batch == 'batch')
self.assertTrue(scheduler._monitor == 'test')
else:
self.assertRaises(NotImplementedError, lambda: CyclicLR(0.01, 0.1, monitor='test', step_size_up=200, step_size_down=None, mode='triangular',
gamma=2., scale_fn=None, scale_mode='cycle', cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
last_epoch=-1, step_on_batch='batch'))
Example 6: cross_entropy2d
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def cross_entropy2d(input, target, weight=None, size_average=True):
# input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# log_p: (n, c, h, w)
if LooseVersion(torch.__version__) < LooseVersion('0.3'):
# ==0.2.X
log_p = F.log_softmax(input).cuda()
else:
# >=0.3
log_p = F.log_softmax(input, dim=1).cuda()
# log_p: (n*h*w, c)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
log_p = log_p.view(-1, c)
# target: (n*h*w,)
# mask = (target != 255)
# target = target[mask]
loss = F.nll_loss(log_p, target.view(-1), weight=weight, size_average=False, ignore_index=255).cuda()  # flatten target to (n*h*w,) to match log_p
if size_average:
loss /= (n*h*w)
return loss
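A hedged call sketch, assuming a CUDA device is available (the function moves tensors to the GPU internally). Shapes follow the comments in the example: the input holds (n, c, h, w) logits and the target holds (n, h, w) integer labels, with 255 marking ignored pixels:

import torch

logits = torch.randn(2, 21, 32, 32).cuda()         # n=2 images, c=21 classes
labels = torch.randint(0, 21, (2, 32, 32)).cuda()  # per-pixel class indices
loss = cross_entropy2d(logits, labels)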
Example 7: __init__
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def __init__(self, unique_draws: List, stack_size: int, steps: int,
canvas_shape: List):
"""
Parses the complete output of the network, which is produced jointly.
This class can be used to generate the final canvas and
expressions.
:param unique_draws: Unique draw/op operations in the current dataset
:param stack_size: Stack size
:param steps: Number of steps in the program
:param canvas_shape: Shape of the canvases
"""
self.canvas_shape = canvas_shape
self.stack_size = stack_size
self.steps = steps
self.Parser = Parser()
self.sim = SimulateStack(self.stack_size, self.canvas_shape)
self.unique_draws = unique_draws
self.pytorch_version = torch.__version__[2]  # minor-version character, e.g. '4' in '0.4.0'; fragile for multi-digit minors
Example 8: initial_setup
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def initial_setup(self, args):
############
logging.info(f"run pid: {os.getpid()} parent: {os.getppid()}")
logging.info("#########")
logging.info(args.__dict__)
logging.info(f"Rank: {args.rank} World_size: {args.world_size}, Run {args.run_name}")
args.cuda = torch.cuda.is_available()
logging.info(f"Pytorch version: {torch.__version__}")
logging.info("Using CUDA: {} CUDA AVAIL: {} #DEVICES: {} VERSION: {}".format(
args.cuda, torch.cuda.is_available(), torch.cuda.device_count(),
torch.version.cuda))
if not args.cuda:
self.device = 'cpu'
else:
self.device = 'cuda'
cudnn.benchmark = True
cudnn.enabled = True
random.seed(args.seed) # The seed needs to be constant between processes.
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
Example 9: update
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def update(self, image):
im_croped, im_pad, real_shape, im_scale = self.im_preprocess(image)
self.im_scale = im_scale
self.ori_image_shape = image.shape
im_data = torch.from_numpy(im_croped).permute(2, 0, 1)
im_data = im_data.unsqueeze(0)
# forward
if LooseVersion(torch.__version__) > LooseVersion('0.3.1'):
with torch.no_grad():
im_var = Variable(im_data).cuda(self.gpu)
self.score_map = self.model(im_var)
else:
im_var = Variable(im_data, volatile=True).cuda(self.gpu)
self.score_map = self.model(im_var)
return real_shape, im_scale
Example 10: extract_reid_features
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def extract_reid_features(reid_model, image, tlbrs):
if len(tlbrs) == 0:
return torch.FloatTensor()
patches = extract_image_patches(image, tlbrs)
patches = np.asarray([im_preprocess(cv2.resize(p, reid_model.inp_size)) for p in patches], dtype=np.float32)
gpu = net_utils.get_device(reid_model)
if LooseVersion(torch.__version__) > LooseVersion('0.3.1'):
with torch.no_grad():
im_var = Variable(torch.from_numpy(patches))
if gpu is not None:
im_var = im_var.cuda(gpu)
features = reid_model(im_var).data
else:
im_var = Variable(torch.from_numpy(patches), volatile=True)
if gpu is not None:
im_var = im_var.cuda(gpu)
features = reid_model(im_var).data
return features
Example 11: _get_default_conda_env
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def _get_default_conda_env() -> Optional[Dict[str, Any]]:
"""Get default Conda environment.
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
import torch
import fonduer
return _mlflow_conda_env(
additional_conda_deps=[
"pytorch={}".format(torch.__version__), # type: ignore
"psycopg2",
"pip",
],
additional_pip_deps=["fonduer=={}".format(fonduer.__version__)],
additional_conda_channels=["pytorch"],
)
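The returned dictionary follows MLflow's conda-environment schema, so it can be passed wherever MLflow accepts a conda_env argument; a hedged sketch (the save call is illustrative, not from the source):

conda_env = _get_default_conda_env()
# e.g. mlflow.pyfunc.save_model('model_dir', python_model=model, conda_env=conda_env)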
Example 12: _build_lookup
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def _build_lookup(self) -> None:
r'''
Builds lookup-tables necessary to map flattened data to correct locations for reshaping into a matrix.
Also handles missing data, i.e. elements in the matrix which do not exist in the flattened data.
'''
shp = (self.n_v,self.n_fpv) if self.row_wise else (self.n_fpv,self.n_v)
lookup = torch.zeros(shp, dtype=torch.long)
missing = torch.zeros(shp, dtype=torch.bool if LooseVersion(torch.__version__) >= LooseVersion("1.2") else torch.uint8)
if self.row_wise:
for i, v in enumerate(self.vecs):
for j, c in enumerate(self.fpv):
f = f'{v}_{c}'
if f in self.cont_feats: lookup[i,j] = self.cont_feats.index(f)
else: missing[i,j] = 1
else:
for j, v in enumerate(self.vecs):
for i, c in enumerate(self.fpv):
f = f'{v}_{c}'
if f in self.cont_feats: lookup[i,j] = self.cont_feats.index(f)
else: missing[i,j] = 1
self.missing,self.lookup = to_device(missing.flatten()),to_device(lookup.flatten())
Example 13: forward
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def forward(self, input, label=None):
if self.training:
# --------------------------- cos(theta) & phi(theta) ---------------------------
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
# --------------------------- convert label to one-hot ---------------------------
# one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
one_hot = torch.zeros(cosine.size(), device='cuda')
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
# ------------- torch.where: out_i = x_i if condition_i else y_i -------------
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # torch.where works here too on torch >= 0.4
output *= self.s
else:
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
output = self.s * cosine
return output
Example 14: forward
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def forward(self, input, label):
# --------------------------- cos(theta) & phi(theta) ---------------------------
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
# --------------------------- convert label to one-hot ---------------------------
# one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')
one_hot = torch.zeros(cosine.size(), device='cuda')
one_hot.scatter_(1, label.view(-1, 1).long(), 1)
# ------------- torch.where: out_i = x_i if condition_i else y_i -------------
# torch.where works here too on torch >= 0.4
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output *= self.s
# print(output)
return output
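Both margin examples compute phi from the angle-addition identity, with m the additive angular margin:

\cos(\theta + m) = \cos\theta\cos m - \sin\theta\sin m, \qquad \sin\theta = \sqrt{1 - \cos^2\theta}

which is exactly the phi = cosine * self.cos_m - sine * self.sin_m line, since cosine holds \cos\theta between the normalized feature and weight vectors.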
Example 15: assert_and_infer_cfg
# Required module: import torch [as alias]
# Or: from torch import __version__ [as alias]
def assert_and_infer_cfg(make_immutable=True):
"""Call this function in your script after you have finished setting all cfg
values that are necessary (e.g., merging a config from a file, merging
command line config options, etc.). By default, this function will also
mark the global cfg as immutable to prevent changing the global cfg settings
during script execution (which can lead to hard-to-debug errors or code
that's harder to understand than is necessary).
"""
if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
assert __C.VGG.IMAGENET_PRETRAINED_WEIGHTS, \
"Path to the weight file must not be empty to load imagenet pertrained resnets."
if version.parse(torch.__version__) < version.parse('0.4.0'):
__C.PYTORCH_VERSION_LESS_THAN_040 = True
# create alias for PyTorch version less than 0.4.0
init.uniform_ = init.uniform
init.normal_ = init.normal
init.constant_ = init.constant
nn.GroupNorm = mynn.GroupNorm
if make_immutable:
cfg.immutable(True)