This article collects typical usage examples of torch.nn.modules.Dropout in Python. If you have been wondering what modules.Dropout does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also browse further usage examples from torch.nn.modules, the package in which the Dropout class is defined.
Seven code examples of modules.Dropout are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
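Before walking through the examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of what the Dropout module does: in training mode it zeroes each input element with probability p and scales the surviving elements by 1/(1 - p); in eval mode it passes the input through unchanged.

import torch
from torch.nn.modules import Dropout

dropout = Dropout(p=0.5)
x = torch.ones(2, 4)

dropout.train()    # training mode: each element is zeroed with probability 0.5
print(dropout(x))  # surviving entries are rescaled to 2.0

dropout.eval()     # eval mode: dropout is the identity
print(dropout(x))  # all entries remain 1.0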
Example 1: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             # binary_feature_dim: int,
             embedding_dropout: float = 0.0,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(LstmSwag, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    # For the span based evaluation, we don't want to consider labels
    # for verb, because the verb index is provided to the model.
    self.encoder = encoder
    self.embedding_dropout = Dropout(p=embedding_dropout)
    self.output_prediction = Linear(self.encoder.get_output_dim(), 1, bias=False)
    check_dimensions_match(text_field_embedder.get_output_dim(),
                           encoder.get_input_dim(),
                           "text embedding dim", "eq encoder input dim")
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
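As a rough illustration only (a hedged sketch, not the project's actual forward method; the names tokens and mask are assumed), the embedding_dropout defined above is typically applied to the embedded tokens before they reach the encoder:

# Hypothetical usage of the fields defined in Example 1's __init__.
embedded = self.text_field_embedder(tokens)          # (batch, seq_len, embedding_dim)
embedded = self.embedding_dropout(embedded)          # randomly zero embedding entries during training
encoded = self.encoder(embedded, mask)               # (batch, seq_len, encoder_output_dim)
logits = self.output_prediction(encoded[:, -1, :])   # score from the last encoder position (illustrative)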
Example 2: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(
    self,
    options_file: str,
    weight_file: str,
    num_output_representations: int,
    requires_grad: bool = False,
    do_layer_norm: bool = False,
    dropout: float = 0.5,
    vocab_to_cache: List[str] = None,
    keep_sentence_boundaries: bool = False,
    scalar_mix_parameters: List[float] = None,
    module: torch.nn.Module = None,
) -> None:
    super().__init__()
    logger.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError("Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(
            options_file,
            weight_file,
            requires_grad=requires_grad,
            vocab_to_cache=vocab_to_cache,
        )
    self._has_cached_vocab = vocab_to_cache is not None
    self._keep_sentence_boundaries = keep_sentence_boundaries
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes: Any = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(
            self._elmo_lstm.num_layers,
            do_layer_norm=do_layer_norm,
            initial_scalar_parameters=scalar_mix_parameters,
            trainable=scalar_mix_parameters is None,
        )
        self.add_module("scalar_mix_{}".format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
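For context, constructing and calling this Elmo module usually looks like the following sketch; the two file paths are placeholders, and batch_to_ids is AllenNLP's helper for converting tokenized sentences into character ids:

# Illustrative usage sketch; file paths are placeholders, not real files.
from allennlp.modules.elmo import Elmo, batch_to_ids

options_file = "elmo_options.json"
weight_file = "elmo_weights.hdf5"
elmo = Elmo(options_file, weight_file, num_output_representations=2, dropout=0.5)

sentences = [["First", "sentence", "."], ["Another", "one"]]
character_ids = batch_to_ids(sentences)            # (batch, seq_len, 50) character ids
outputs = elmo(character_ids)
representations = outputs["elmo_representations"]  # list of 2 tensors, each with dropout applied
mask = outputs["mask"]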
Example 3: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self,
             options_file,
             weight_file,
             num_output_representations,
             requires_grad=False,
             do_layer_norm=False,
             dropout=0.5,
             vocab_to_cache=None,
             module=None):
    super(Elmo, self).__init__()
    logging.info(u"Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError(
                u"Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(options_file,
                                    weight_file,
                                    requires_grad=requires_grad,
                                    vocab_to_cache=vocab_to_cache)
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
        self.add_module(u'scalar_mix_{}'.format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
Example 4: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self, vocab,
             text_field_embedder,
             encoder,
             binary_feature_dim,
             embedding_dropout=0.0,
             initializer=InitializerApplicator(),
             regularizer=None,
             label_smoothing=None):
    super(SemanticRoleLabeler, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size(u"labels")
    # For the span based evaluation, we don't want to consider labels
    # for verb, because the verb index is provided to the model.
    self.span_metric = SpanBasedF1Measure(vocab, tag_namespace=u"labels", ignore_classes=[u"V"])
    self.encoder = encoder
    # There are exactly 2 binary features for the verb predicate embedding.
    self.binary_feature_embedding = Embedding(2, binary_feature_dim)
    self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                       self.num_classes))
    self.embedding_dropout = Dropout(p=embedding_dropout)
    self._label_smoothing = label_smoothing
    check_dimensions_match(text_field_embedder.get_output_dim() + binary_feature_dim,
                           encoder.get_input_dim(),
                           u"text embedding dim + verb indicator embedding dim",
                           u"encoder input dim")
    initializer(self)
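A condensed, hedged sketch of how these pieces typically combine in the forward pass (variable names such as tokens and verb_indicator are illustrative; see AllenNLP's SemanticRoleLabeler for the full method):

# Abbreviated, hypothetical forward sketch built only from the layers above.
embedded_text = self.embedding_dropout(self.text_field_embedder(tokens))
mask = get_text_field_mask(tokens)
embedded_verb = self.binary_feature_embedding(verb_indicator.long())  # 0/1 indicator -> embedding
encoder_input = torch.cat([embedded_text, embedded_verb], dim=-1)     # matches the dimension check above
encoded_text = self.encoder(encoder_input, mask)
logits = self.tag_projection_layer(encoded_text)                      # per-token tag scores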
Example 5: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(
    self,
    options_file: str,
    weight_file: str,
    num_output_representations: int,
    requires_grad: bool = False,
    do_layer_norm: bool = False,
    dropout: float = 0.5,
    vocab_to_cache: List[str] = None,
    module: torch.nn.Module = None,
) -> None:
    super(Elmo, self).__init__()
    logging.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ValueError("Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(
            options_file,
            weight_file,
            requires_grad=requires_grad,
            vocab_to_cache=vocab_to_cache,
        )
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes: Any = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
        self.add_module("scalar_mix_{}".format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
Example 6: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self,
             options_file: str,
             weight_file: str,
             num_output_representations: int,
             requires_grad: bool = False,
             do_layer_norm: bool = False,
             dropout: float = 0.5,
             vocab_to_cache: List[str] = None,
             module: torch.nn.Module = None) -> None:
    super(Elmo, self).__init__()
    logging.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError(
                "Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(options_file,
                                    weight_file,
                                    requires_grad=requires_grad,
                                    vocab_to_cache=vocab_to_cache)
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self.num_output_representations = num_output_representations
    if num_output_representations != -1:
        self._scalar_mixes: Any = []
        for k in range(num_output_representations):
            scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
            self.add_module('scalar_mix_{}'.format(k), scalar_mix)
            self._scalar_mixes.append(scalar_mix)
Example 7: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self,
             vocab: Vocabulary,
             source_embedder: TextFieldEmbedder,
             sentence_encoder: Seq2VecEncoder,
             sentence_accumulator: Seq2SeqEncoder,
             use_salience: bool,
             use_pos_embedding: bool,
             use_output_bias: bool,
             use_novelty: bool,
             dropout: float = 0.3,
             pos_embedding_num: int = 50,
             pos_embedding_size: int = 128) -> None:
    super(SummaRuNNer, self).__init__(vocab)
    self._source_embedder = source_embedder
    self._sentence_encoder = sentence_encoder
    self._se_output_dim = self._sentence_encoder.get_output_dim()
    self._sentence_accumulator = sentence_accumulator
    self._h_sentence_dim = self._sentence_accumulator.get_output_dim()
    self._dropout_layer = Dropout(dropout)
    self._content_projection_layer = Linear(self._h_sentence_dim, 1)
    self._use_salience = use_salience
    if use_salience:
        self._document_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=True)
        self._salience_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=False)
    self._use_pos_embedding = use_pos_embedding
    if use_pos_embedding:
        self._pos_embedding_num = pos_embedding_num
        self._pos_embedding_size = pos_embedding_size
        self._pos_embedding_layer = Embedding(pos_embedding_num, pos_embedding_size)
        self._pos_projection_layer = Linear(pos_embedding_size, 1)
    self._use_output_bias = use_output_bias
    if use_output_bias:
        self._output_bias = Parameter(torch.zeros(1).uniform_(-0.1, 0.1), requires_grad=True)
    self._use_novelty = use_novelty
    if use_novelty:
        self._novelty_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=False)
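To show where the Dropout layer sits in this model, here is a rough, hypothetical sketch (tensor names and mask handling are assumptions, and the salience, novelty, position, and bias terms are omitted) of scoring sentences with the modules defined above:

# Hypothetical, heavily abbreviated sketch of sentence scoring.
sentence_embeddings = ...  # (batch, n_sents, se_output_dim), one vector per sentence from self._sentence_encoder
sentence_states = self._sentence_accumulator(sentence_embeddings, sentences_mask)
sentence_states = self._dropout_layer(sentence_states)                         # dropout on sentence representations
content_scores = self._content_projection_layer(sentence_states).squeeze(-1)  # one content score per sentence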