本文整理汇总了Python中torch.int方法的典型用法代码示例。如果您正苦于以下问题:Python torch.int方法的具体用法?Python torch.int怎么用?Python torch.int使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch的用法示例。
在下文中一共展示了torch.int方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: forward
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def forward(ctx, features, rois, out_size, spatial_scale):
    """Run RoI max pooling over CUDA feature maps.

    Stashes on ``ctx`` everything backward needs: the rois (via
    ``save_for_backward``), the argmax index map, the spatial scale and
    the input feature size.
    """
    assert features.is_cuda
    pooled_h, pooled_w = _pair(out_size)
    assert isinstance(pooled_h, int) and isinstance(pooled_w, int)
    ctx.save_for_backward(rois)

    n_rois, n_channels = rois.size(0), features.size(1)
    pooled_shape = (n_rois, n_channels, pooled_h, pooled_w)
    output = features.new_zeros(pooled_shape)
    # argmax records, per output cell, which input location won the max;
    # the backward kernel routes gradients through it.
    argmax = features.new_zeros(pooled_shape, dtype=torch.int)

    roi_pool_ext.forward(features, rois, pooled_h, pooled_w, spatial_scale,
                         output, argmax)

    ctx.spatial_scale = spatial_scale
    ctx.feature_size = features.size()
    ctx.argmax = argmax
    return output
示例2: __init__
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def __init__(self, backbone: BackboneBase, num_classes: int, pooler_mode: Pooler.Mode,
             anchor_ratios: List[Tuple[int, int]], anchor_sizes: List[int],
             rpn_pre_nms_top_n: int, rpn_post_nms_top_n: int,
             anchor_smooth_l1_loss_beta: Optional[float] = None, proposal_smooth_l1_loss_beta: Optional[float] = None):
    """Assemble the detector from a backbone, an RPN and a detection head."""
    super().__init__()

    self.features, hidden, num_features_out, num_hidden_out = backbone.features()

    # Gather every BatchNorm2d from both the feature extractor and the
    # hidden head so they can be frozen below.
    def _batch_norms(module):
        return [m for m in module.modules() if isinstance(m, nn.BatchNorm2d)]

    self._bn_modules = nn.ModuleList(_batch_norms(self.features) + _batch_norms(hidden))

    # NOTE: It's crucial to freeze batch normalization modules for few batches training, which can be done by following processes
    #   (1) Change mode to `eval`
    #   (2) Disable gradient (we move this process into `forward`)
    for bn in self._bn_modules:
        for weight in bn.parameters():
            weight.requires_grad = False

    self.rpn = RegionProposalNetwork(num_features_out, anchor_ratios, anchor_sizes,
                                     rpn_pre_nms_top_n, rpn_post_nms_top_n,
                                     anchor_smooth_l1_loss_beta)
    self.detection = Model.Detection(pooler_mode, hidden, num_hidden_out,
                                     num_classes, proposal_smooth_l1_loss_beta)
示例3: preprocess
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def preprocess(self, x, avoid_precomp=False):
    """Load precomputed detection features for one image.

    Args:
        x: image file name; the numeric image id is parsed from the last
           underscore-separated token before the extension
           (e.g. ``..._000000123.jpg`` -> 123).
        avoid_precomp: unused here; kept for interface compatibility.

    Returns:
        float32 ndarray of shape ``(self.max_detections, feat_dim)``:
        zero-padded when fewer detections exist, truncated when more do.
    """
    image_id = int(x.split('_')[-1].split('.')[0])
    try:
        # FIX: the original opened the HDF5 file and never closed it,
        # leaking one handle per call; a context manager closes it
        # deterministically. The data is fully materialized by `[()]`,
        # so nothing depends on the file staying open.
        with h5py.File(self.detections_path, 'r') as f:
            precomp_data = f['%d_features' % image_id][()]
            if self.sort_by_prob:
                # Reorder boxes by descending max class probability.
                precomp_data = precomp_data[
                    np.argsort(np.max(f['%d_cls_prob' % image_id][()], -1))[::-1]]
    except KeyError:
        # Missing detections: fall back to random features (best effort,
        # matching the original behavior).
        warnings.warn('Could not find detections for %d' % image_id)
        precomp_data = np.random.rand(10, 2048)

    delta = self.max_detections - precomp_data.shape[0]
    if delta > 0:
        precomp_data = np.concatenate(
            [precomp_data, np.zeros((delta, precomp_data.shape[1]))], axis=0)
    elif delta < 0:
        precomp_data = precomp_data[:self.max_detections]
    return precomp_data.astype(np.float32)
示例4: decode
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def decode(self, word_idxs, join_words=True):
if isinstance(word_idxs, list) and len(word_idxs) == 0:
return self.decode([word_idxs, ], join_words)[0]
if isinstance(word_idxs, list) and isinstance(word_idxs[0], int):
return self.decode([word_idxs, ], join_words)[0]
elif isinstance(word_idxs, np.ndarray) and word_idxs.ndim == 1:
return self.decode(word_idxs.reshape((1, -1)), join_words)[0]
elif isinstance(word_idxs, torch.Tensor) and word_idxs.ndimension() == 1:
return self.decode(word_idxs.unsqueeze(0), join_words)[0]
captions = []
for wis in word_idxs:
caption = []
for wi in wis:
word = self.vocab.itos[int(wi)]
if word == self.eos_token:
break
caption.append(word)
if join_words:
caption = ' '.join(caption)
captions.append(caption)
return captions
示例5: _graph_fn_call
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def _graph_fn_call(self, inputs):
    """Cast `inputs` to this component's target dtype under the active backend.

    Returns the cast array/tensor; under tf, returns the input unchanged
    when its dtype already matches.
    """
    if self.backend == "python" or get_backend() == "python":
        if isinstance(inputs, list):
            inputs = np.asarray(inputs)
        return inputs.astype(dtype=util.convert_dtype(self.to_dtype, to="np"))
    elif get_backend() == "pytorch":
        torch_dtype = util.convert_dtype(self.to_dtype, to="pytorch")
        # BUG FIX: the original wrote `torch_dtype == torch.float or torch.float32`,
        # which is always truthy (`or torch.float32`), so every dtype took the
        # float branch and the int/uint8 branches were unreachable. Test
        # membership explicitly instead.
        if torch_dtype in (torch.float, torch.float32):
            return inputs.float()
        elif torch_dtype in (torch.int, torch.int32):
            return inputs.int()
        elif torch_dtype == torch.uint8:
            return inputs.byte()
        # NOTE(review): any other torch dtype falls through and returns None,
        # matching the original structure — confirm whether that is intended.
    elif get_backend() == "tf":
        in_space = get_space_from_op(inputs)
        to_dtype = util.convert_dtype(self.to_dtype, to="tf")
        if inputs.dtype != to_dtype:
            ret = tf.cast(x=inputs, dtype=to_dtype)
            # Preserve batch/time rank bookkeeping on the casted op.
            if in_space.has_batch_rank is True:
                ret._batch_rank = 0 if in_space.time_major is False else 1
            if in_space.has_time_rank is True:
                ret._time_rank = 0 if in_space.time_major is True else 1
            return ret
        else:
            return inputs
示例6: add_args
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def add_args(parser):
    """Register the ASG-loss command line options on `parser`."""
    group = parser.add_argument_group("ASG Loss")
    group.add_argument(
        "--asg-transitions-init",
        type=float,
        default=0.0,
        help="initial diagonal value of transition matrix",
    )
    group.add_argument(
        "--max-replabel", type=int, default=2, help="maximum # of replabels"
    )
    group.add_argument(
        "--linseg-updates",
        type=int,
        default=0,
        help="# of training updates to use LinSeg initialization",
    )
    group.add_argument(
        "--hide-linseg-messages",
        action="store_true",
        help="hide messages about LinSeg initialization",
    )
示例7: torch_dtype_to_np_dtype
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def torch_dtype_to_np_dtype(dtype):
    """Map a torch dtype to the equivalent numpy dtype.

    Raises:
        KeyError: if `dtype` has no numpy equivalent registered here.
    """
    # torch aliases (short/int/long/half/float/double) are the same objects
    # as their explicit counterparts, so the duplicate keys collapse.
    dtype_dict = {
        # FIX: `np.bool` (plain-Python alias) was deprecated in NumPy 1.20
        # and removed in 1.24; use the scalar type `np.bool_` instead.
        torch.bool: np.dtype(np.bool_),
        torch.uint8: np.dtype(np.uint8),
        torch.int8: np.dtype(np.int8),
        torch.int16: np.dtype(np.int16),
        torch.short: np.dtype(np.int16),
        torch.int32: np.dtype(np.int32),
        torch.int: np.dtype(np.int32),
        torch.int64: np.dtype(np.int64),
        torch.long: np.dtype(np.int64),
        torch.float16: np.dtype(np.float16),
        torch.half: np.dtype(np.float16),
        torch.float32: np.dtype(np.float32),
        torch.float: np.dtype(np.float32),
        torch.float64: np.dtype(np.float64),
        torch.double: np.dtype(np.float64),
    }
    return dtype_dict[dtype]
# ---------------------- InferenceEngine internal types ------------------------
示例8: for_train_steps
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def for_train_steps(self, steps):
    """Run this trial for `steps` training iterations per epoch.

    The generator yields (None, None) if it was never set, which is useful
    for differentiable programming. If `steps` exceeds the dataset size the
    loader is refreshed as if a new epoch started; if it is -1 the loader
    is refreshed until stopped by the STOP_TRAINING flag or similar.
    Example: ::
        # Simple trial that runs for 100 training iterations, in this case optimising nothing
        >>> from torchbearer import Trial
        >>> trial = Trial(None).for_train_steps(100)
    Args:
        steps (int): The number of training steps per epoch to run.
    Returns:
        Trial: self
    """
    if not isinstance(steps, int):
        # Coerce non-int step counts (e.g. floats) but tell the user.
        warnings.warn("Number of training steps is not an int, casting to int")
        steps = int(steps)
    self.state[torchbearer.TRAIN_STEPS] = steps
    self.state[torchbearer.TRAIN_DATA] = (
        self.state[torchbearer.TRAIN_GENERATOR],
        self.state[torchbearer.TRAIN_STEPS],
    )
    return self
示例9: with_train_generator
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def with_train_generator(self, generator, steps=None):
    """Use this trial with the given train generator; returns self for chaining.
    Example: ::
        # Simple trial that runs for 100 training iterations on the MNIST dataset
        >>> from torchbearer import Trial
        >>> from torchvision.datasets import MNIST
        >>> from torch.utils.data import DataLoader
        >>> dataloader = DataLoader(MNIST('./data/', train=True))
        >>> trial = Trial(None).with_train_generator(dataloader).for_steps(100).run(1)
    Args:
        generator: The train data generator to use during calls to :meth:`.run`
        steps (int): The number of steps per epoch to take when using this generator.
    Returns:
        Trial: self
    """
    self.state[torchbearer.TRAIN_GENERATOR] = generator
    # Resolution order for the step count: explicit arg, previously-set
    # value, then the generator's own length.
    if steps is None:
        steps = self.state[torchbearer.TRAIN_STEPS]
    if steps is None:
        steps = len(generator)
    self.for_train_steps(steps)
    return self
示例10: with_test_generator
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def with_test_generator(self, generator, steps=None):
    """Use this trial with the given test generator; returns self for chaining.
    Example: ::
        # Simple trial that runs for 10 test iterations on no data
        >>> from torchbearer import Trial
        >>> data = torch.rand(10, 1)
        >>> trial = Trial(None).with_test_data(data).for_test_steps(10).run(1)
    Args:
        generator: The test data generator to use during calls to :meth:`.predict`
        steps (int): The number of steps per epoch to take when using this generator
    Returns:
        Trial: self
    """
    self.state[torchbearer.TEST_GENERATOR] = generator
    # Resolution order for the step count: explicit arg, previously-set
    # value, then the generator's own length.
    if steps is None:
        steps = self.state[torchbearer.TEST_STEPS]
    if steps is None:
        steps = len(generator)
    self.for_test_steps(steps)
    return self
示例11: with_test_data
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def with_test_data(self, x, batch_size=1, num_workers=1, steps=None):
    """Use this trial with the given test data; returns self for chaining.
    Example: ::
        # Simple trial that runs for 10 test iterations on some random data
        >>> from torchbearer import Trial
        >>> data = torch.rand(10, 1)
        >>> trial = Trial(None).with_test_data(data).for_test_steps(10).run(1)
    Args:
        x (torch.Tensor): The test x data to use during calls to :meth:`.predict`
        batch_size (int): The size of each batch to sample from the data
        num_workers (int): Number of worker threads to use in the data loader
        steps (int): The number of steps per epoch to take when using this data
    Returns:
        Trial: self
    """
    # Wrap the raw tensor in a loader, then delegate to the generator path.
    loader = DataLoader(TensorDataset(x), batch_size, num_workers=num_workers)
    self.with_test_generator(loader, steps=steps)
    return self
示例12: cuda
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def cuda(self, device=None):
    """ Moves all model parameters and buffers to the GPU.
    Example: ::
        >>> from torchbearer import Trial
        >>> t = Trial(None).cuda()
    Args:
        device (int): if specified, all parameters will be copied to that device
    Returns:
        Trial: self
    """
    # Default to whichever CUDA device is currently active.
    target = torch.cuda.current_device() if device is None else device
    self.to('cuda:' + str(target))
    return self
示例13: process_string
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def process_string(self, sequence, size, remove_repetitions=False):
    """Decode the first `size` entries of an index sequence to text.

    Blank tokens are dropped; the space label maps to ' '. When
    ``remove_repetitions`` is True, a character equal to the previous
    raw character is skipped (standard CTC collapsing).

    Returns:
        (string, offsets): the decoded text and an int tensor with the
        sequence positions that contributed a character.
    """
    pieces = []
    offsets = []
    blank_char = self.int_to_char[self.blank_index]
    space_char = self.labels[self.space_index]
    for pos in range(size):
        ch = self.int_to_char[sequence[pos].item()]
        if ch == blank_char:
            continue
        # CTC repetition collapse: compare against the previous raw token.
        if remove_repetitions and pos != 0 and ch == self.int_to_char[sequence[pos - 1].item()]:
            continue
        pieces.append(' ' if ch == space_char else ch)
        offsets.append(pos)
    return ''.join(pieces), torch.tensor(offsets, dtype=torch.int)
示例14: test_r2l_scorer_prepare_inputs
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def test_r2l_scorer_prepare_inputs(self):
    """prepare_inputs must repeat each source row once per matching hypo."""
    eos = self.task.tgt_dict.eos()
    src_tokens = torch.tensor([[6, 7, 8], [1, 2, 3]], dtype=torch.int)
    # Two hypotheses per source sentence.
    hypos = [
        {"tokens": torch.tensor([12, 13, 14, eos], dtype=torch.int)},
        {"tokens": torch.tensor([22, 23, eos], dtype=torch.int)},
        {"tokens": torch.tensor([12, 13, 14, eos], dtype=torch.int)},
        {"tokens": torch.tensor([22, 23, eos], dtype=torch.int)},
    ]
    # Stub out ensemble loading so no checkpoint is read from disk.
    ensemble_stub = ([self.model], self.args, self.task)
    with patch(
        "pytorch_translate.utils.load_diverse_ensemble_for_inference",
        return_value=ensemble_stub,
    ):
        scorer = R2LModelScorer(self.args, "/tmp/model_path.txt", None, self.task)
        encoder_inputs, tgt_tokens = scorer.prepare_inputs(src_tokens, hypos)
        # Each source row should appear once per hypothesis, in order.
        expected_src = torch.tensor(
            [[6, 7, 8], [6, 7, 8], [1, 2, 3], [1, 2, 3]], dtype=torch.int
        )
        assert torch.equal(
            encoder_inputs[0], expected_src
        ), "Encoder inputs are not as expected"
示例15: __init__
# 需要导入模块: import torch [as 别名]
# 或者: from torch import int [as 别名]
def __init__(self,
             means: Tensor,
             covs: Tensor,
             last_measured: Optional[Tensor] = None):
    """
    :param means: The means (2D tensor)
    :param covs: The covariances (3D tensor).
    :param last_measured: 1D tensor indicating number of timesteps since mean/cov were updated with measurements;
    defaults to 0s.
    """
    # Unpacking also asserts `means` is 2-D; the state size itself is unused.
    self.num_groups, _state_size = means.shape
    self.means = means
    self.covs = covs
    # Lazily-populated caches (built elsewhere), start empty.
    self._H = None
    self._R = None
    if last_measured is not None:
        self.last_measured = last_measured
    else:
        # Default: every group was measured "0 timesteps ago".
        self.last_measured = torch.zeros(self.num_groups, dtype=torch.int)
    self._validate()