This page collects typical usage examples of torch.jit.ScriptModule in Python. If you are wondering what jit.ScriptModule does, how to use it, or where to find working examples of it, the curated code samples below may help. You can also explore the torch.jit module that ScriptModule belongs to.
The following presents 5 code examples of jit.ScriptModule, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: script_lstm
# Required import: from torch import jit [as alias]
# Or: from torch.jit import ScriptModule [as alias]
def script_lstm(input_size, hidden_size, num_layers, bias=True,
                batch_first=False, dropout=False, bidirectional=False):
    '''Returns a ScriptModule that mimics a PyTorch native LSTM.'''
    # The following are not implemented.
    assert bias
    assert not batch_first

    if bidirectional:
        stack_type = StackedLSTM2
        layer_type = BidirLSTMLayer
        dirs = 2
    elif dropout:
        stack_type = StackedLSTMWithDropout
        layer_type = LSTMLayer
        dirs = 1
    else:
        stack_type = StackedLSTM
        layer_type = LSTMLayer
        dirs = 1
    return stack_type(num_layers, layer_type,
                      first_layer_args=[LSTMCell, input_size, hidden_size],
                      other_layer_args=[LSTMCell, hidden_size * dirs,
                                        hidden_size])
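A minimal usage sketch (not part of the original example). It assumes the companion classes this function references (LSTMCell, StackedLSTM, LSTMState, and friends) from PyTorch's custom-LSTM benchmark file are defined in the same module:

from collections import namedtuple
import torch

LSTMState = namedtuple('LSTMState', ['hx', 'cx'])  # as in the benchmark file

rnn = script_lstm(input_size=10, hidden_size=20, num_layers=2)
inp = torch.randn(5, 3, 10)  # (seq_len, batch, input_size), since batch_first=False
# One (h, c) state pair per layer; each tensor is (batch, hidden_size).
states = [LSTMState(torch.zeros(3, 20), torch.zeros(3, 20)) for _ in range(2)]
out, out_states = rnn(inp, states)  # out: (5, 3, 20)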
Example 2: script_lnlstm
# Required import: from torch import jit [as alias]
# Or: from torch.jit import ScriptModule [as alias]
def script_lnlstm(input_size, hidden_size, num_layers, bias=True,
                  batch_first=False, dropout=False, bidirectional=False,
                  decompose_layernorm=False):
    '''Returns a ScriptModule that mimics a PyTorch native LSTM.'''
    # The following are not implemented.
    assert bias
    assert not batch_first
    assert not dropout

    if bidirectional:
        stack_type = StackedLSTM2
        layer_type = BidirLSTMLayer
        dirs = 2
    else:
        stack_type = StackedLSTM
        layer_type = LSTMLayer
        dirs = 1
    return stack_type(num_layers, layer_type,
                      first_layer_args=[LayerNormLSTMCell, input_size, hidden_size,
                                        decompose_layernorm],
                      other_layer_args=[LayerNormLSTMCell, hidden_size * dirs,
                                        hidden_size, decompose_layernorm])
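The decompose_layernorm flag controls whether the cell uses nn.LayerNorm or a layer norm spelled out in primitive ops, so the JIT fuser sees each op individually. A rough sketch of such a decomposed layer norm written as a jit.ScriptModule; the class name and exact structure here are illustrative, not the benchmark's verbatim code:

import torch
from torch import Tensor, jit, nn

class DecomposedLayerNorm(jit.ScriptModule):
    # Illustrative sketch; the benchmark calls its version LayerNorm.
    def __init__(self, normalized_shape: int):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))

    @jit.script_method
    def forward(self, input: Tensor) -> Tensor:
        # Layer norm in primitive ops rather than one aten::layer_norm call.
        mu = input.mean(-1, keepdim=True)
        sigma = input.std(-1, keepdim=True, unbiased=False)
        return (input - mu) / (sigma + 1e-5) * self.weight + self.bias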
Example 3: load_traced_model
# Required import: from torch import jit [as alias]
# Or: from torch.jit import ScriptModule [as alias]
def load_traced_model(
    model_path: Union[str, Path],
    device: Device = "cpu",
    opt_level: str = None,
) -> jit.ScriptModule:
    """Loads a traced model.

    Args:
        model_path: Path to the traced model
        device (str): Torch device
        opt_level (str): Apex FP16 init level, optional

    Returns:
        ScriptModule: Traced model
    """
    # jit.load doesn't accept pathlib.Path, so coerce to str
    model_path = str(model_path)

    # Apex AMP only runs on GPU, so force CUDA when an opt_level is given
    if opt_level is not None:
        device = "cuda"

    model = jit.load(model_path, map_location=device)

    if opt_level is not None:
        assert_fp16_available()
        from apex import amp
        model = amp.initialize(model, optimizers=None, opt_level=opt_level)

    return model
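A hypothetical call, assuming a model was traced and saved earlier; the file path and input shape below are made up for illustration:

import torch

model = load_traced_model("logs/trace/traced-forward.pth", device="cpu")
with torch.no_grad():
    output = model(torch.randn(1, 3, 224, 224))  # shape depends on the traced model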
Example 4: trace_model
# Required import: from torch import jit [as alias]
# Or: from torch.jit import ScriptModule [as alias]
def trace_model(
    model: Model,
    predict_fn: Callable,
    batch=None,
    method_name: str = "forward",
    mode: str = "eval",
    requires_grad: bool = False,
    opt_level: str = None,
    device: Device = "cpu",
    predict_params: dict = None,
) -> jit.ScriptModule:
    """Traces a model using a runner and a batch.

    Args:
        model: Model to trace
        predict_fn: Function that runs prediction with the model provided;
            takes ``model`` and ``inputs`` parameters
        batch: Batch to trace the model with
        method_name (str): Model method name to use
            as the entrypoint during tracing
        mode (str): Mode in which to trace the model (``train`` or ``eval``)
        requires_grad (bool): Flag to trace with gradients enabled
        opt_level (str): Apex FP16 init level, optional
        device (str): Torch device
        predict_params (dict): Additional parameters for the model forward

    Returns:
        jit.ScriptModule: Traced model

    Raises:
        ValueError: if batch or predict_fn is not specified,
            or if mode is not 'train' or 'eval'
    """
    if batch is None or predict_fn is None:
        raise ValueError("Both batch and predict_fn must be specified.")

    if mode not in ["train", "eval"]:
        raise ValueError(f"Unknown mode '{mode}'. Must be 'eval' or 'train'")

    predict_params = predict_params or {}

    tracer = _TracingModelWrapper(model, method_name)
    if opt_level is not None:
        assert_fp16_available()
        # If tracing under AMP, the model must be initialized before
        # calling the JIT:
        # https://github.com/NVIDIA/apex/issues/303#issuecomment-493142950
        from apex import amp
        model = model.to(device)
        model = amp.initialize(model, optimizers=None, opt_level=opt_level)

    getattr(model, mode)()  # model.train() or model.eval()
    set_requires_grad(model, requires_grad=requires_grad)

    predict_fn(tracer, batch, **predict_params)

    return tracer.tracing_result
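trace_model delegates the actual tracing to _TracingModelWrapper, which is defined elsewhere in the same library. A minimal sketch of the idea, assuming the simplest possible shape for the wrapper; the real Catalyst class differs in its details:

import torch
from torch import jit, nn

class TracingWrapperSketch(nn.Module):
    """Traces the wrapped model's chosen method when called with a batch,
    and stores the resulting ScriptModule in `tracing_result`."""

    def __init__(self, model: nn.Module, method_name: str = "forward"):
        super().__init__()
        self.model = model
        self.method_name = method_name
        self.tracing_result = None

    def __call__(self, *args):
        # jit.trace_module traces a named method rather than just forward()
        self.tracing_result = jit.trace_module(
            self.model, {self.method_name: args}
        )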
Example 5: save_traced_model
# Required import: from torch import jit [as alias]
# Or: from torch.jit import ScriptModule [as alias]
def save_traced_model(
    model: jit.ScriptModule,
    logdir: Union[str, Path] = None,
    method_name: str = "forward",
    mode: str = "eval",
    requires_grad: bool = False,
    opt_level: str = None,
    out_dir: Union[str, Path] = None,
    out_model: Union[str, Path] = None,
    checkpoint_name: str = None,
) -> None:
    """Saves a traced model.

    Args:
        model (ScriptModule): Traced model
        logdir (Union[str, Path]): Path to the experiment
        method_name (str): Name of the method that was traced
        mode (str): Model mode - `train` or `eval`
        requires_grad (bool): Whether the model was traced with requires_grad
        opt_level (str): Apex FP16 init level used during tracing
        out_dir (Union[str, Path]): Directory to save the model to
            (overrides logdir)
        out_model (Union[str, Path]): Path to save the model to
            (overrides logdir and out_dir)
        checkpoint_name (str): Checkpoint name used to restore the model

    Raises:
        ValueError: if none of `logdir`, `out_dir` or `out_model`
            is specified
    """
    if out_model is None:
        file_name = get_trace_name(
            method_name=method_name,
            mode=mode,
            requires_grad=requires_grad,
            opt_level=opt_level,
            additional_string=checkpoint_name,
        )

        # coerce to Path so that .mkdir below works for str inputs too
        output: Path = Path(out_dir) if out_dir is not None else None
        if output is None:
            if logdir is None:
                raise ValueError(
                    "One of `logdir`, `out_dir` or `out_model` "
                    "should be specified"
                )
            output = Path(logdir) / "trace"

        output.mkdir(exist_ok=True, parents=True)
        out_model = str(output / file_name)
    else:
        out_model = str(out_model)

    jit.save(model, out_model)
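A hypothetical end-to-end use of examples 4 and 5 together; MyModel and the input shape are placeholders invented for illustration:

import torch
from torch import nn

class MyModel(nn.Module):  # placeholder model
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

traced = trace_model(
    model=MyModel(),
    predict_fn=lambda model, batch: model(batch),
    batch=torch.randn(4, 8),
)
save_traced_model(traced, logdir="./logs")
# writes ./logs/trace/<file name produced by get_trace_name(...)>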