This article collects typical usage examples of torch.nn.modules.Dropout in Python. If you are wondering what modules.Dropout does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the package it belongs to, torch.nn.modules.
The following shows 7 code examples of modules.Dropout, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
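Before the examples, here is a minimal standalone sketch of how Dropout is constructed and applied; the tensor shapes and variable names below are illustrative and not taken from the examples that follow.

import torch
from torch.nn.modules import Dropout

# Dropout(p) zeroes each input element with probability p during training
# and rescales the surviving elements by 1/(1-p); in eval mode it is a no-op.
dropout = Dropout(p=0.5)

x = torch.randn(4, 10)
dropout.train()
y_train = dropout(x)  # roughly half the elements are zeroed, the rest scaled by 2.0
dropout.eval()
y_eval = dropout(x)   # identical to x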
Example 1: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             # binary_feature_dim: int,
             embedding_dropout: float = 0.0,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(LstmSwag, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    # For the span based evaluation, we don't want to consider labels
    # for verb, because the verb index is provided to the model.
    self.encoder = encoder
    self.embedding_dropout = Dropout(p=embedding_dropout)
    self.output_prediction = Linear(self.encoder.get_output_dim(), 1, bias=False)
    check_dimensions_match(text_field_embedder.get_output_dim(),
                           encoder.get_input_dim(),
                           "text embedding dim", "eq encoder input dim")
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
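Example 1 only builds the layers; the stored Dropout is typically applied to the embedder output before it reaches the encoder. The listing does not show the LstmSwag forward method, so the following plain-PyTorch sketch of that ordering uses stand-in modules and shapes chosen for illustration only.

import torch
from torch.nn.modules import Dropout, Linear

embedding = torch.nn.Embedding(1000, 64)            # stand-in for text_field_embedder
encoder = torch.nn.LSTM(64, 32, batch_first=True)   # stand-in for the Seq2SeqEncoder
embedding_dropout = Dropout(p=0.1)
output_prediction = Linear(32, 1, bias=False)

tokens = torch.randint(0, 1000, (2, 7))             # (batch, seq_len)
embedded = embedding_dropout(embedding(tokens))     # dropout applied to the embeddings
encoded, _ = encoder(embedded)                      # (batch, seq_len, hidden)
logits = output_prediction(encoded[:, -1, :])       # one score per sequence, shape (2, 1)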
Example 2: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(
    self,
    options_file: str,
    weight_file: str,
    num_output_representations: int,
    requires_grad: bool = False,
    do_layer_norm: bool = False,
    dropout: float = 0.5,
    vocab_to_cache: List[str] = None,
    keep_sentence_boundaries: bool = False,
    scalar_mix_parameters: List[float] = None,
    module: torch.nn.Module = None,
) -> None:
    super().__init__()
    logger.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError("Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(
            options_file,
            weight_file,
            requires_grad=requires_grad,
            vocab_to_cache=vocab_to_cache,
        )
    self._has_cached_vocab = vocab_to_cache is not None
    self._keep_sentence_boundaries = keep_sentence_boundaries
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes: Any = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(
            self._elmo_lstm.num_layers,
            do_layer_norm=do_layer_norm,
            initial_scalar_parameters=scalar_mix_parameters,
            trainable=scalar_mix_parameters is None,
        )
        self.add_module("scalar_mix_{}".format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
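A detail worth noting in the ELMo constructors above and below: each ScalarMix is registered with add_module, so its parameters are tracked by the parent module, while a plain Python list keeps an indexable reference for later lookup. A small self-contained sketch of the same pattern, with made-up module names:

import torch
from torch.nn.modules import Dropout, Linear

class TinyMixer(torch.nn.Module):
    # Illustrative only: register submodules created in a loop so that
    # .parameters() and .to(device) see them, while also keeping a list.
    def __init__(self, num_mixes: int = 3, dim: int = 8) -> None:
        super().__init__()
        self._dropout = Dropout(p=0.5)
        self._mixes = []
        for k in range(num_mixes):
            mix = Linear(dim, dim)
            self.add_module("mix_{}".format(k), mix)
            self._mixes.append(mix)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.stack([self._dropout(m(x)) for m in self._mixes]).mean(dim=0)

print(len(list(TinyMixer().parameters())))  # 6: weight and bias for each of the 3 Linear layers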
Example 3: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self,
             options_file,
             weight_file,
             num_output_representations,
             requires_grad=False,
             do_layer_norm=False,
             dropout=0.5,
             vocab_to_cache=None,
             module=None):
    super(Elmo, self).__init__()
    logging.info(u"Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError(
                u"Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(options_file,
                                    weight_file,
                                    requires_grad=requires_grad,
                                    vocab_to_cache=vocab_to_cache)
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
        self.add_module(u'scalar_mix_{}'.format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
Example 4: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self, vocab,
             text_field_embedder,
             encoder,
             binary_feature_dim,
             embedding_dropout=0.0,
             initializer=InitializerApplicator(),
             regularizer=None,
             label_smoothing=None):
    super(SemanticRoleLabeler, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size(u"labels")
    # For the span based evaluation, we don't want to consider labels
    # for verb, because the verb index is provided to the model.
    self.span_metric = SpanBasedF1Measure(vocab, tag_namespace=u"labels", ignore_classes=[u"V"])
    self.encoder = encoder
    # There are exactly 2 binary features for the verb predicate embedding.
    self.binary_feature_embedding = Embedding(2, binary_feature_dim)
    self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                       self.num_classes))
    self.embedding_dropout = Dropout(p=embedding_dropout)
    self._label_smoothing = label_smoothing
    check_dimensions_match(text_field_embedder.get_output_dim() + binary_feature_dim,
                           encoder.get_input_dim(),
                           u"text embedding dim + verb indicator embedding dim",
                           u"encoder input dim")
    initializer(self)
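The dimension check at the end of Example 4 reflects that the text embedding and the 2-way verb-indicator embedding are concatenated before they enter the encoder, so the encoder input dim must equal their sum. The SemanticRoleLabeler forward pass itself is not shown in the listing, so the following sketch of that concatenation uses illustrative shapes only.

import torch
from torch.nn.modules import Dropout

embedding_dropout = Dropout(p=0.1)
binary_feature_embedding = torch.nn.Embedding(2, 16)      # exactly 2 rows: verb / not verb

embedded_text = torch.randn(2, 7, 100)                    # (batch, seq_len, text embedding dim)
verb_indicator = torch.randint(0, 2, (2, 7))              # 1 at the predicate position
embedded_text = embedding_dropout(embedded_text)
embedded_verb = binary_feature_embedding(verb_indicator)  # (2, 7, 16)

encoder_input = torch.cat([embedded_text, embedded_verb], dim=-1)  # (2, 7, 116) == 100 + 16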
Example 5: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(
    self,
    options_file: str,
    weight_file: str,
    num_output_representations: int,
    requires_grad: bool = False,
    do_layer_norm: bool = False,
    dropout: float = 0.5,
    vocab_to_cache: List[str] = None,
    module: torch.nn.Module = None,
) -> None:
    super(Elmo, self).__init__()
    logging.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ValueError("Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(
            options_file,
            weight_file,
            requires_grad=requires_grad,
            vocab_to_cache=vocab_to_cache,
        )
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes: Any = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
        self.add_module("scalar_mix_{}".format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
Example 6: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self,
             options_file: str,
             weight_file: str,
             num_output_representations: int,
             requires_grad: bool = False,
             do_layer_norm: bool = False,
             dropout: float = 0.5,
             vocab_to_cache: List[str] = None,
             module: torch.nn.Module = None) -> None:
    super(Elmo, self).__init__()
    logging.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError(
                "Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(options_file,
                                    weight_file,
                                    requires_grad=requires_grad,
                                    vocab_to_cache=vocab_to_cache)
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self.num_output_representations = num_output_representations
    if num_output_representations != -1:
        self._scalar_mixes: Any = []
        for k in range(num_output_representations):
            scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
            self.add_module('scalar_mix_{}'.format(k), scalar_mix)
            self._scalar_mixes.append(scalar_mix)
Example 7: __init__
# Required import: from torch.nn import modules [as alias]
# Or: from torch.nn.modules import Dropout [as alias]
def __init__(self,
             vocab: Vocabulary,
             source_embedder: TextFieldEmbedder,
             sentence_encoder: Seq2VecEncoder,
             sentence_accumulator: Seq2SeqEncoder,
             use_salience: bool,
             use_pos_embedding: bool,
             use_output_bias: bool,
             use_novelty: bool,
             dropout: float = 0.3,
             pos_embedding_num: int = 50,
             pos_embedding_size: int = 128) -> None:
    super(SummaRuNNer, self).__init__(vocab)
    self._source_embedder = source_embedder
    self._sentence_encoder = sentence_encoder
    self._se_output_dim = self._sentence_encoder.get_output_dim()
    self._sentence_accumulator = sentence_accumulator
    self._h_sentence_dim = self._sentence_accumulator.get_output_dim()
    self._dropout_layer = Dropout(dropout)
    self._content_projection_layer = Linear(self._h_sentence_dim, 1)
    self._use_salience = use_salience
    if use_salience:
        self._document_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=True)
        self._salience_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=False)
    self._use_pos_embedding = use_pos_embedding
    if use_pos_embedding:
        self._pos_embedding_num = pos_embedding_num
        self._pos_embedding_size = pos_embedding_size
        self._pos_embedding_layer = Embedding(pos_embedding_num, pos_embedding_size)
        self._pos_projection_layer = Linear(pos_embedding_size, 1)
    self._use_output_bias = use_output_bias
    if use_output_bias:
        self._output_bias = Parameter(torch.zeros(1).uniform_(-0.1, 0.1), requires_grad=True)
    self._use_novelty = use_novelty
    if use_novelty:
        self._novelty_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=False)