本文整理汇总了Python中torch.LongTensor.new_tensor方法的典型用法代码示例。如果您正苦于以下问题:Python LongTensor.new_tensor方法的具体用法?Python LongTensor.new_tensor怎么用?Python LongTensor.new_tensor使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.LongTensor
的用法示例。
在下文中一共展示了LongTensor.new_tensor方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_checklist_info
# Required import: from torch import LongTensor [as alias]
# Or: from torch.LongTensor import new_tensor [as alias]
def _get_checklist_info(agenda: torch.LongTensor,
all_actions: List[ProductionRule],
terminal_productions: Set[str],
max_num_terminals: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Takes an agenda, a list of all actions, a set of terminal productions in the corresponding
world, and a length to pad the checklist vectors to, and returns a target checklist against
which the checklist at each state will be compared to compute a loss, indices of
``terminal_actions``, and a ``checklist_mask`` that indicates which of the terminal actions
are relevant for checklist loss computation.
Parameters
----------
``agenda`` : ``torch.LongTensor``
Agenda of one instance of size ``(agenda_size, 1)``.
``all_actions`` : ``List[ProductionRule]``
All actions for one instance.
``terminal_productions`` : ``Set[str]``
String representations of terminal productions in the corresponding world.
``max_num_terminals`` : ``int``
Length to which the checklist vectors will be padded till. This is the max number of
terminal productions in all the worlds in the batch.
"""
terminal_indices = []
target_checklist_list = []
agenda_indices_set = set([int(x) for x in agenda.squeeze(0).detach().cpu().numpy()])
# We want to return checklist target and terminal actions that are column vectors to make
# computing softmax over the difference between checklist and target easier.
for index, action in enumerate(all_actions):
# Each action is a ProductionRule, a tuple where the first item is the production
# rule string.
if action[0] in terminal_productions:
terminal_indices.append([index])
if index in agenda_indices_set:
target_checklist_list.append([1])
else:
target_checklist_list.append([0])
while len(target_checklist_list) < max_num_terminals:
target_checklist_list.append([0])
terminal_indices.append([-1])
# (max_num_terminals, 1)
terminal_actions = agenda.new_tensor(terminal_indices)
# (max_num_terminals, 1)
target_checklist = agenda.new_tensor(target_checklist_list, dtype=torch.float)
checklist_mask = (target_checklist != 0).float()
return target_checklist, terminal_actions, checklist_mask
示例2: _action_history_match
# Required import: from torch import LongTensor [as alias]
# Or: from torch.LongTensor import new_tensor [as alias]
def _action_history_match(predicted: List[int], targets: torch.LongTensor) -> int:
# TODO(mattg): this could probably be moved into a FullSequenceMatch metric, or something.
# Check if target is big enough to cover prediction (including start/end symbols)
if len(predicted) > targets.size(1):
return 0
predicted_tensor = targets.new_tensor(predicted)
targets_trimmed = targets[:, :len(predicted)]
# Return 1 if the predicted sequence is anywhere in the list of targets.
return torch.max(torch.min(targets_trimmed.eq(predicted_tensor), dim=1)[0]).item()
示例3: _get_checklist_info
# Required import: from torch import LongTensor [as alias]
# Or: from torch.LongTensor import new_tensor [as alias]
def _get_checklist_info(self,
agenda: torch.LongTensor,
all_actions: List[ProductionRuleArray]) -> Tuple[torch.Tensor,
torch.Tensor,
torch.Tensor]:
"""
Takes an agenda and a list of all actions and returns a target checklist against which the
checklist at each state will be compared to compute a loss, indices of ``terminal_actions``,
and a ``checklist_mask`` that indicates which of the terminal actions are relevant for
checklist loss computation. If ``self.penalize_non_agenda_actions`` is set to``True``,
``checklist_mask`` will be all 1s (i.e., all terminal actions are relevant). If it is set to
``False``, indices of all terminals that are not in the agenda will be masked.
Parameters
----------
``agenda`` : ``torch.LongTensor``
Agenda of one instance of size ``(agenda_size, 1)``.
``all_actions`` : ``List[ProductionRuleArray]``
All actions for one instance.
"""
terminal_indices = []
target_checklist_list = []
agenda_indices_set = set([int(x) for x in agenda.squeeze(0).detach().cpu().numpy()])
for index, action in enumerate(all_actions):
# Each action is a ProductionRuleArray, a tuple where the first item is the production
# rule string.
if action[0] in self._terminal_productions:
terminal_indices.append([index])
if index in agenda_indices_set:
target_checklist_list.append([1])
else:
target_checklist_list.append([0])
# We want to return checklist target and terminal actions that are column vectors to make
# computing softmax over the difference between checklist and target easier.
# (num_terminals, 1)
terminal_actions = agenda.new_tensor(terminal_indices)
# (num_terminals, 1)
target_checklist = agenda.new_tensor(target_checklist_list, dtype=torch.float)
if self._penalize_non_agenda_actions:
# All terminal actions are relevant
checklist_mask = torch.ones_like(target_checklist)
else:
checklist_mask = (target_checklist != 0).float()
return target_checklist, terminal_actions, checklist_mask