This article collects typical usage examples of the torch.nn.ModuleDict class in Python. If you are unsure what nn.ModuleDict is for or how to use it, the curated code examples below may help. You can also explore further usage examples from its parent package, torch.nn.
The following shows 15 code examples of nn.ModuleDict, sorted by popularity by default.
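Before diving into the examples, here is a minimal, self-contained sketch of what nn.ModuleDict provides: unlike a plain Python dict, submodules stored in it are registered on the parent module, so their parameters are visible to .parameters(), state_dict(), and .to(device).

import torch
from torch import nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        # Submodules stored in an nn.ModuleDict are properly registered:
        # their parameters appear in .parameters() and move with .to(device).
        self.heads = nn.ModuleDict({
            'cls': nn.Linear(8, 3),
            'reg': nn.Linear(8, 1),
        })

    def forward(self, x, head):
        # Lookup by string key, just like a plain dict.
        return self.heads[head](x)

m = Toy()
out = m(torch.randn(2, 8), head='cls')  # shape (2, 3)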
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(self, cfg):
    super(AnyNet, self).__init__()
    self.cfg = cfg.copy()
    self.max_disp = cfg.model.max_disp
    self.stage = cfg.model.stage

    self.backbone = build_backbone(cfg)
    self.cost_processor = build_cost_processor(cfg)

    # disparity predictor
    self.disp_predictor = nn.ModuleDict()
    for st in self.stage:
        self.disp_predictor[st] = FasterSoftArgmin(
            max_disp=cfg.model.disp_predictor.max_disp[st],
            start_disp=cfg.model.disp_predictor.start_disp[st],
            dilation=cfg.model.disp_predictor.dilation[st],
            alpha=cfg.model.disp_predictor.alpha,
            normalize=cfg.model.disp_predictor.normalize,
        )
    self.disp_refinement = build_disp_refinement(cfg)

    # make general stereo matching loss evaluator
    self.loss_evaluator = make_gsm_loss_evaluator(cfg)
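At inference time the per-stage predictors above can be looked up by stage name. A hypothetical sketch (the real AnyNet.forward differs; costs is an assumed mapping from stage name to cost volume):

# Hypothetical sketch, not the actual AnyNet.forward:
def predict_disparities(self, costs):
    # 'costs' is assumed to map each stage name in self.stage to a cost volume.
    return {st: self.disp_predictor[st](costs[st]) for st in self.stage}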
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(self, cfg):
    super(AnyNetProcessor, self).__init__()
    self.cfg = cfg.copy()
    self.batch_norm = cfg.model.batch_norm
    self.stage = self.cfg.model.stage

    # cost computation parameters, dict
    self.max_disp = self.cfg.model.cost_processor.cost_computation.max_disp
    self.start_disp = self.cfg.model.cost_processor.cost_computation.start_disp
    self.dilation = self.cfg.model.cost_processor.cost_computation.dilation

    # cost aggregation
    self.aggregator_type = self.cfg.model.cost_processor.cost_aggregator.type
    self.aggregator = nn.ModuleDict()
    for st in self.stage:
        self.aggregator[st] = AnyNetAggregator(
            in_planes=self.cfg.model.cost_processor.cost_aggregator.in_planes[st],
            agg_planes=self.cfg.model.cost_processor.cost_aggregator.agg_planes[st],
            num=self.cfg.model.cost_processor.cost_aggregator.num,
            batch_norm=self.batch_norm,
        )
Example 3: create_embedding_matrix
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def create_embedding_matrix(feature_columns, init_std=0.0001, linear=False, sparse=False, device='cpu'):
    # Return nn.ModuleDict: for sparse features, {embedding_name: nn.Embedding};
    # for varlen sparse features, {embedding_name: nn.EmbeddingBag}
    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []
    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if len(feature_columns) else []

    embedding_dict = nn.ModuleDict(
        {feat.embedding_name: nn.Embedding(feat.vocabulary_size, feat.embedding_dim if not linear else 1, sparse=sparse)
         for feat in sparse_feature_columns + varlen_sparse_feature_columns}
    )

    # for feat in varlen_sparse_feature_columns:
    #     embedding_dict[feat.embedding_name] = nn.EmbeddingBag(
    #         feat.dimension, embedding_size, sparse=sparse, mode=feat.combiner)

    for tensor in embedding_dict.values():
        nn.init.normal_(tensor.weight, mean=0, std=init_std)

    return embedding_dict.to(device)
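Using the returned ModuleDict is then a plain key lookup. A hedged sketch, where 'user_id' is a hypothetical SparseFeat name rather than anything defined above:

# Hedged sketch: 'user_id' is a hypothetical feature name; real DeepCTR-Torch
# models slice the input tensor by feature positions before the lookup.
embedding_dict = create_embedding_matrix(feature_columns, device='cpu')
user_idx = torch.LongTensor([[1], [4]])          # assumed batch of 2 indices
user_emb = embedding_dict['user_id'](user_idx)   # -> (2, 1, embedding_dim)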
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(
    self,
    name: str,
    module_pool: ModuleDict,
    task_flow: List[
        Dict[str, Union[str, List[Tuple[str, str]], List[Tuple[str, int]]]]
    ],
    loss_func: Callable,
    output_func: Callable,
    scorer: Scorer,
    weight: Union[float, int] = 1.0,
) -> None:
    """Initialize EmmentalTask."""
    self.name = name
    assert isinstance(module_pool, nn.ModuleDict)
    self.module_pool = module_pool
    self.task_flow = task_flow
    self.loss_func = loss_func
    self.output_func = output_func
    self.scorer = scorer
    self.weight = weight

    if Meta.config["meta_config"]["verbose"]:
        logger.info(f"Created task: {self.name}")
Example 5: param_dict
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def param_dict(self) -> ModuleDict:
    p = ModuleDict()
    for process_name, process in self.processes.items():
        p[f"process:{process_name}"] = process.param_dict()

    p['measure_cov'] = self.measure_covariance.param_dict()
    p['measure_var_nn'] = self._measure_var_nn

    p['init_state'] = ParameterDict([('mean', self.init_mean_params)])
    p['init_state'].update(self.init_covariance.param_dict().items())

    p['process_cov'] = self.process_covariance.param_dict()
    p['process_var_nn'] = self._process_var_nn
    return p
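Because param_dict() nests ModuleDict and ParameterDict containers, a single traversal reaches every registered parameter. A brief sketch, assuming design is an instance of the class above:

# Sketch: one call to .parameters() on the nested containers reaches every
# process, covariance, and initial-state parameter for the optimizer.
params = design.param_dict()
optimizer = torch.optim.Adam(params.parameters(), lr=1e-2)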
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(
    self,
    name: str,
    module_pool: nn.ModuleDict,
    op_sequence: Sequence[Operation],
    scorer: Scorer = Scorer(metrics=["accuracy"]),
    loss_func: Optional[Callable[..., torch.Tensor]] = None,
    output_func: Optional[Callable[..., torch.Tensor]] = None,
) -> None:
    self.name = name
    self.module_pool = module_pool
    self.op_sequence = op_sequence
    self.loss_func = loss_func or F.cross_entropy
    self.output_func = output_func or partial(F.softmax, dim=1)
    self.scorer = scorer

    logging.info(f"Created task: {self.name}")
Example 7: create_task
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def create_task(task_name: str, module_suffixes: List[str]) -> Task:
    module1_name = f"linear1{module_suffixes[0]}"
    module2_name = f"linear2{module_suffixes[1]}"

    module_pool = nn.ModuleDict(
        {
            module1_name: nn.Sequential(nn.Linear(2, 20), nn.ReLU()),
            module2_name: nn.Linear(20, 2),
        }
    )

    op1 = Operation(module_name=module1_name, inputs=[("_input_", "coordinates")])
    op2 = Operation(module_name=module2_name, inputs=[op1.name])
    op_sequence = [op1, op2]

    task = Task(
        name=task_name,
        module_pool=module_pool,
        op_sequence=op_sequence,
        scorer=Scorer(metrics=["f1", "accuracy"]),
    )
    return task
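For reference, op1 applies linear1 to the input's 'coordinates' field and op2 feeds its output to linear2. A hedged sketch of the equivalent plain-PyTorch dataflow (the framework executes the op_sequence itself; X_dict is an assumed input mapping):

# Sketch of the dataflow the op_sequence describes, written out by hand:
hidden = module_pool[module1_name](X_dict["coordinates"])
logits = module_pool[module2_name](hidden)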
Example 8: create_task
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def create_task(task_name, module_suffixes=("", "")):
    module1_name = f"linear1{module_suffixes[0]}"
    module2_name = f"linear2{module_suffixes[1]}"

    linear1 = nn.Linear(2, 2)
    linear1.weight.data.copy_(torch.eye(2))
    linear1.bias.data.copy_(torch.zeros((2,)))

    linear2 = nn.Linear(2, 2)
    linear2.weight.data.copy_(torch.eye(2))
    linear2.bias.data.copy_(torch.zeros((2,)))

    module_pool = nn.ModuleDict(
        {module1_name: nn.Sequential(linear1, nn.ReLU()), module2_name: linear2}
    )

    op0 = Operation(module_name=module1_name, inputs=[("_input_", "data")], name="op0")
    op1 = Operation(module_name=module2_name, inputs=[op0.name], name="op1")
    op_sequence = [op0, op1]

    task = Task(name=task_name, module_pool=module_pool, op_sequence=op_sequence)
    return task
Example 9: create_task
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def create_task(task_name: str, module_suffixes: List[str]) -> Task:
    module1_name = f"linear1{module_suffixes[0]}"
    module2_name = f"linear2{module_suffixes[1]}"

    module_pool = nn.ModuleDict(
        {
            module1_name: nn.Sequential(nn.Linear(2, 20), nn.ReLU()),
            module2_name: nn.Linear(20, 2),
        }
    )

    op1 = Operation(module_name=module1_name, inputs=[("_input_", "coordinates")])
    op2 = Operation(module_name=module2_name, inputs=[op1.name])
    op_sequence = [op1, op2]

    task = Task(
        name=task_name,
        module_pool=module_pool,
        op_sequence=op_sequence,
        scorer=Scorer(metrics=["accuracy"]),
    )
    return task
Example 10: create_task
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def create_task(task_name, module_suffixes=("", "")):
    module1_name = f"linear1{module_suffixes[0]}"
    module2_name = f"linear2{module_suffixes[1]}"

    module_pool = nn.ModuleDict(
        {
            module1_name: nn.Sequential(nn.Linear(2, 10), nn.ReLU()),
            module2_name: nn.Linear(10, 2),
        }
    )

    op1 = Operation(module_name=module1_name, inputs=[("_input_", "data")])
    op2 = Operation(module_name=module2_name, inputs=[op1.name])
    op_sequence = [op1, op2]

    task = Task(name=task_name, module_pool=module_pool, op_sequence=op_sequence)
    return task
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(self, control_settings):
    super().__init__()
    self.control_settings = control_settings  # see ControllableSeq2seqAgent

    # int-to-string mapping giving the canonical ordering of the controls
    self.idx2ctrl = {
        d['idx']: control for control, d in self.control_settings.items()
    }

    # Initialize control embeddings; maps from string (ctrl name) to nn.Embedding
    self.control_embeddings = nn.ModuleDict(
        {
            c: nn.Embedding(d['num_buckets'], d['embsize'], sparse=False)
            for c, d in control_settings.items()
        }
    )
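At run time, each control's bucket index can be embedded via a string lookup and the results concatenated. A hedged sketch, where the layout of ctrl_vec is an assumption:

# Sketch: embed each control's bucket index and concatenate, assuming
# ctrl_vec[:, d['idx']] holds the bucket id for that control (illustrative).
embedded = [
    self.control_embeddings[c](ctrl_vec[:, d['idx']])
    for c, d in self.control_settings.items()
]
ctrl_embedding = torch.cat(embedded, dim=1)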
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(self, net_enc, net_dec, labeldata, loss_scale=None):
    super(SegmentationModule, self).__init__()
    self.encoder = net_enc
    self.decoder = net_dec
    self.crit_dict = nn.ModuleDict()
    if loss_scale is None:
        self.loss_scale = {"object": 1, "part": 0.5, "scene": 0.25, "material": 1}
    else:
        self.loss_scale = loss_scale

    # criterion
    self.crit_dict["object"] = nn.NLLLoss(ignore_index=0)    # ignore background 0
    self.crit_dict["material"] = nn.NLLLoss(ignore_index=0)  # ignore background 0
    self.crit_dict["scene"] = nn.NLLLoss(ignore_index=-1)    # ignore unlabelled -1

    # Label data - read from json
    self.labeldata = labeldata
    object_to_num = {k: v for v, k in enumerate(labeldata['object'])}
    part_to_num = {k: v for v, k in enumerate(labeldata['part'])}
    self.object_part = {object_to_num[k]: [part_to_num[p] for p in v]
                        for k, v in labeldata['object_part'].items()}
    self.object_with_part = sorted(self.object_part.keys())
    self.decoder.object_part = self.object_part
    self.decoder.object_with_part = self.object_with_part
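The per-task criteria can then be combined with loss_scale in the training step. A hedged sketch, assuming pred and gt are dicts keyed like self.crit_dict:

# Sketch: weighted multi-task loss over the registered criteria, assuming
# pred and gt use the same keys ('object', 'material', 'scene').
loss = sum(
    self.loss_scale[k] * crit(pred[k], gt[k])
    for k, crit in self.crit_dict.items()
)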
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(self, C_in, C_out, stride, max_nodes, op_names, affine=False, track_running_stats=True):
    super(NAS201SearchCell, self).__init__()

    self.op_names = deepcopy(op_names)
    self.edges = nn.ModuleDict()
    self.max_nodes = max_nodes
    self.in_dim = C_in
    self.out_dim = C_out
    for i in range(1, max_nodes):
        for j in range(i):
            node_str = '{:}<-{:}'.format(i, j)
            if j == 0:
                xlists = [OPS[op_name](C_in, C_out, stride, affine, track_running_stats) for op_name in op_names]
            else:
                xlists = [OPS[op_name](C_in, C_out, 1, affine, track_running_stats) for op_name in op_names]
            self.edges[node_str] = nn.ModuleList(xlists)
    self.edge_keys = sorted(list(self.edges.keys()))
    self.edge2index = {key: i for i, key in enumerate(self.edge_keys)}
    self.num_edges = len(self.edges)
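In the search cell's forward pass, each edge's candidate operations are mixed with architecture weights looked up through edge2index. A sketch in the spirit of NAS-Bench-201's DARTS-style forward (the weightss layout of [num_edges, num_ops] is an assumption):

# Sketch of a DARTS-style forward over the edge dict; details differ
# across NAS-Bench-201 search variants.
def forward(self, inputs, weightss):
    nodes = [inputs]
    for i in range(1, self.max_nodes):
        inter_nodes = []
        for j in range(i):
            node_str = '{:}<-{:}'.format(i, j)
            weights = weightss[self.edge2index[node_str]]
            inter_nodes.append(
                sum(w * op(nodes[j]) for w, op in zip(weights, self.edges[node_str]))
            )
        nodes.append(sum(inter_nodes))
    return nodes[-1]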
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev, affine, track_running_stats):
    super(NASNetInferCell, self).__init__()
    self.reduction = reduction
    if reduction_prev:
        self.preprocess0 = OPS['skip_connect'](C_prev_prev, C, 2, affine, track_running_stats)
    else:
        self.preprocess0 = OPS['nor_conv_1x1'](C_prev_prev, C, 1, affine, track_running_stats)
    self.preprocess1 = OPS['nor_conv_1x1'](C_prev, C, 1, affine, track_running_stats)

    if not reduction:
        nodes, concats = genotype['normal'], genotype['normal_concat']
    else:
        nodes, concats = genotype['reduce'], genotype['reduce_concat']
    self._multiplier = len(concats)
    self._concats = concats
    self._steps = len(nodes)
    self._nodes = nodes
    self.edges = nn.ModuleDict()
    for i, node in enumerate(nodes):
        for in_node in node:
            name, j = in_node[0], in_node[1]
            stride = 2 if reduction and j < 2 else 1
            node_str = '{:}<-{:}'.format(i + 2, j)
            self.edges[node_str] = OPS[name](C, C, stride, affine, track_running_stats)

# [TODO] to support drop_prob in this function..
Example 15: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ModuleDict [as alias]
def __init__(self, model, return_layers):
    if not set(return_layers).issubset([name for name, _ in model.named_children()]):
        raise ValueError("return_layers are not present in model")

    super(IntermediateLayerGetter, self).__init__()

    orig_return_layers = return_layers
    return_layers = {k: v for k, v in return_layers.items()}
    layers = OrderedDict()
    for name, module in model.named_children():
        layers[name] = module
        if name in return_layers:
            del return_layers[name]
        if not return_layers:
            break

    self.layers = nn.ModuleDict(layers)
    self.return_layers = orig_return_layers
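The companion forward pass walks self.layers in order and captures the requested intermediate outputs. A sketch mirroring torchvision's IntermediateLayerGetter behavior:

# Sketch of the matching forward pass: run each child in registration order
# and record the outputs of the layers named in return_layers.
def forward(self, x):
    out = OrderedDict()
    for name, module in self.layers.items():
        x = module(x)
        if name in self.return_layers:
            out_name = self.return_layers[name]
            out[out_name] = x
    return out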