This article collects typical usage examples of the Python method dynet.ParameterCollection. If you are wondering how dynet.ParameterCollection is used in practice, the curated code examples below may help. You can also explore further usage examples from the dynet module, where this method is defined.
Nine code examples of dynet.ParameterCollection are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
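Before the examples, here is a minimal standalone sketch of what dy.ParameterCollection does: it owns a model's trainable parameters, hands them to a trainer, and can persist them to disk. All dimensions and the file path below are invented for illustration.

import dynet as dy

pc = dy.ParameterCollection()               # container that owns a model's trainable parameters
W = pc.add_parameters((8, 16))              # dense 8x16 weight matrix
E = pc.add_lookup_parameters((100, 16))     # embedding table: 100 rows of dimension 16
trainer = dy.AdamTrainer(pc)                # the trainer updates everything registered in pc

dy.renew_cg()                               # start a fresh computation graph
x = dy.inputVector([0.0] * 16)
# With DyNet >= 2.1, Parameter objects can be used directly as expressions.
loss = dy.squared_norm(W * x) + dy.squared_norm(E[3])   # toy loss built from the parameters
loss.value()                                # run forward
loss.backward()                             # run backward
trainer.update()                            # apply gradients to every parameter in pc

pc.save("model.params")                     # hypothetical path; persists every parameter in pc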
Example 1: add_params
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def add_params(model, size, name=""):
    """ Adds parameters to the model.

    Inputs:
        model (dy.ParameterCollection): The parameter collection for the model.
        size (tuple of int): The size to create.
        name (str, optional): The name of the parameters.
    """
    if len(size) == 1:
        print("vector " + name + ": " +
              str(size[0]) + "; uniform in [-0.1, 0.1]")
    else:
        print("matrix " + name + ": " +
              str(size[0]) + " x " + str(size[1]) +
              "; uniform in [-0.1, 0.1]")
    return model.add_parameters(size,
                                init=dy.UniformInitializer(0.1),
                                name=name)
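A hypothetical call to the helper above might look like this (the sizes and names are invented for illustration); it prints the shape that is being allocated and returns the parameter object registered in the collection.

import dynet as dy

model = dy.ParameterCollection()
# Prints: matrix output-projection: 50 x 100; uniform in [-0.1, 0.1]
W_out = add_params(model, (50, 100), name="output-projection")
# Prints the "vector ..." branch instead
b_out = add_params(model, (50,), name="output-bias")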
Example 2: __init__
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def __init__(self, vocab, options):
    import dynet as dy
    from uuparser.feature_extractor import FeatureExtractor
    global dy

    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic,
                        'relu': dy.rectify,
                        'tanh3': (lambda x: dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
    self.activation = self.activations[options.activation]
    self.costaugFlag = options.costaugFlag
    self.feature_extractor = FeatureExtractor(self.model, options, vocab)
    self.labelsFlag = options.labelsFlag

    mlp_in_dims = options.lstm_output_size * 2
    self.unlabeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                               options.mlp_hidden2_dims, 1, self.activation)
    if self.labelsFlag:
        self.labeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                                 options.mlp_hidden2_dims, len(self.feature_extractor.irels),
                                 self.activation)
    self.proj = options.proj
Example 3: __init__
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def __init__(self):
    self.params = dy.ParameterCollection()
Example 4: renew_collection
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def renew_collection(self):
    """Renew every time a new architecture is sampled, to clear out old parameters."""
    self.params = dy.ParameterCollection()
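Examples 3 and 4 together illustrate a pattern: rebinding the attribute to a fresh dy.ParameterCollection discards every parameter registered in the old one (its memory is freed once nothing references it), and any trainer built on the old collection must be recreated as well. A minimal standalone sketch of this pattern, with invented dimensions:

import dynet as dy

class ArchitectureSampler:
    def __init__(self):
        self.params = dy.ParameterCollection()

    def renew_collection(self):
        # The old collection, and everything added to it, is dropped;
        # subsequent add_parameters calls start from a clean slate.
        self.params = dy.ParameterCollection()

sampler = ArchitectureSampler()
sampler.params.add_parameters((32, 32))   # parameters for the first sampled architecture
sampler.renew_collection()                # forget them before sampling the next architecture
sampler.params.add_parameters((64, 16))   # fresh parameters for the new architecture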
Example 5: init_params
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def init_params(self):
    self.pc = dy.ParameterCollection()
Example 6: create_multilayer_lstm_params
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def create_multilayer_lstm_params(num_layers,
                                  in_size,
                                  state_size,
                                  model,
                                  name=""):
    """ Adds a multilayer LSTM to the model parameters.

    Inputs:
        num_layers (int): Number of layers to create.
        in_size (int): The input size to the first layer.
        state_size (int): The size of the states.
        model (dy.ParameterCollection): The parameter collection for the model.
        name (str, optional): The name of the multilayer LSTM.
    """
    params = []
    for i in range(num_layers):
        layer_name = name + "-" + str(i)
        print("LSTM " + layer_name + ": " +
              str(in_size) + " x " + str(state_size) +
              "; default Dynet initialization of hidden weights")
        params.append(dy.VanillaLSTMBuilder(1,
                                            in_size,
                                            state_size,
                                            model))
        in_size = state_size
    return params
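A hypothetical way to use the helper above: build a two-layer stack and run a short sequence through it, feeding each layer's outputs to the next. All sizes are invented for illustration.

import dynet as dy

model = dy.ParameterCollection()
layers = create_multilayer_lstm_params(num_layers=2, in_size=64, state_size=128,
                                        model=model, name="encoder")

dy.renew_cg()
sequence = [dy.inputVector([0.0] * 64) for _ in range(5)]   # dummy 5-step input
for builder in layers:
    state = builder.initial_state()
    outputs = []
    for x in sequence:
        state = state.add_input(x)
        outputs.append(state.output())
    sequence = outputs          # this layer's hidden states feed the next layer
# `sequence` now holds the top layer's hidden state at every time step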
Example 7: construct_token_predictor
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def construct_token_predictor(parameter_collection,
                              params,
                              vocabulary,
                              attention_key_size,
                              snippet_size,
                              anonymizer=None):
    """ Constructs a token predictor given the parameters.

    Inputs:
        parameter_collection (dy.ParameterCollection): Contains the parameters.
        params (dictionary): Contains the command line parameters/hyperparameters.
        vocabulary (Vocabulary): Vocabulary object for output generation.
        attention_key_size (int): The size of the attention keys.
        snippet_size (int): The size of the snippet encodings.
        anonymizer (Anonymizer, optional): An anonymization object.
    """
    if params.use_snippets and anonymizer and not params.previous_decoder_snippet_encoding:
        return SnippetAnonymizationTokenPredictor(parameter_collection,
                                                  params,
                                                  vocabulary,
                                                  attention_key_size,
                                                  snippet_size,
                                                  anonymizer)
    elif params.use_snippets and not params.previous_decoder_snippet_encoding:
        return SnippetTokenPredictor(parameter_collection,
                                     params,
                                     vocabulary,
                                     attention_key_size,
                                     snippet_size)
    elif anonymizer:
        return AnonymizationTokenPredictor(parameter_collection,
                                           params,
                                           vocabulary,
                                           attention_key_size,
                                           anonymizer)
    else:
        return TokenPredictor(parameter_collection,
                              params,
                              vocabulary,
                              attention_key_size)
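To see which branch fires for which flag combination, the dispatch above can be mirrored with a small stand-in function; SimpleNamespace stands in for the real params object, and only strings are returned instead of the actual predictor classes.

from types import SimpleNamespace

def predictor_kind(params, anonymizer):
    """Mirror of the branch structure above, naming the predictor that would be built."""
    if params.use_snippets and anonymizer and not params.previous_decoder_snippet_encoding:
        return "SnippetAnonymizationTokenPredictor"
    elif params.use_snippets and not params.previous_decoder_snippet_encoding:
        return "SnippetTokenPredictor"
    elif anonymizer:
        return "AnonymizationTokenPredictor"
    return "TokenPredictor"

params = SimpleNamespace(use_snippets=True, previous_decoder_snippet_encoding=False)
print(predictor_kind(params, anonymizer=None))        # SnippetTokenPredictor
print(predictor_kind(params, anonymizer=object()))    # SnippetAnonymizationTokenPredictor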
Example 8: __init__
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def __init__(self, vocab, options):
    # import here so we don't load Dynet if just running parser.py --help for example
    from uuparser.multilayer_perceptron import MLP
    from uuparser.feature_extractor import FeatureExtractor
    import dynet as dy
    global dy
    global LEFT_ARC, RIGHT_ARC, SHIFT, SWAP
    LEFT_ARC, RIGHT_ARC, SHIFT, SWAP = 0, 1, 2, 3

    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic,
                        'relu': dy.rectify,
                        'tanh3': (lambda x: dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
    self.activation = self.activations[options.activation]
    self.oracle = options.oracle
    self.headFlag = options.headFlag
    self.rlMostFlag = options.rlMostFlag
    self.rlFlag = options.rlFlag
    self.k = options.k

    # dimensions depending on extended features
    self.nnvecs = (1 if self.headFlag else 0) + (2 if self.rlFlag or self.rlMostFlag else 0)
    self.feature_extractor = FeatureExtractor(self.model, options, vocab, self.nnvecs)
    self.irels = self.feature_extractor.irels

    if options.no_bilstms > 0:
        mlp_in_dims = options.lstm_output_size * 2 * self.nnvecs * (self.k + 1)
    else:
        mlp_in_dims = self.feature_extractor.lstm_input_size * self.nnvecs * (self.k + 1)

    self.unlabeled_MLP = MLP(self.model, 'unlabeled', mlp_in_dims, options.mlp_hidden_dims,
                             options.mlp_hidden2_dims, 4, self.activation)
    self.labeled_MLP = MLP(self.model, 'labeled', mlp_in_dims, options.mlp_hidden_dims,
                           options.mlp_hidden2_dims, 2 * len(self.irels) + 2, self.activation)
Example 9: __init__
# Required import: import dynet [as alias]
# Or: from dynet import ParameterCollection [as alias]
def __init__(self):
    super().__init__()
    self.model = dy.ParameterCollection()
    input_size = FEATURES

    # Create word embeddings and initialise
    self.id_to_token = []
    self.token_to_id = {}
    pretrained = []
    if args.word_vectors:
        for line in open(args.word_vectors):
            parts = line.strip().split()
            word = parts[0].lower()
            vector = [float(v) for v in parts[1:]]
            self.token_to_id[word] = len(self.id_to_token)
            self.id_to_token.append(word)
            pretrained.append(vector)
        NWORDS = len(self.id_to_token)
        DIM_WORDS = len(pretrained[0])
        self.pEmbedding = self.model.add_lookup_parameters((NWORDS, DIM_WORDS))
        self.pEmbedding.init_from_array(np.array(pretrained))
        input_size += 4 * DIM_WORDS

    self.hidden = []
    self.bias = []
    self.hidden.append(self.model.add_parameters((HIDDEN, input_size)))
    self.bias.append(self.model.add_parameters((HIDDEN,)))
    for i in range(args.layers - 1):
        self.hidden.append(self.model.add_parameters((HIDDEN, HIDDEN)))
        self.bias.append(self.model.add_parameters((HIDDEN,)))
    self.final_sum = self.model.add_parameters((HIDDEN, 1))
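The embedding-loading part of the example above can be distilled into a standalone sketch: read whitespace-separated vector lines, build the vocabulary map, copy the matrix into a lookup parameter, and look a word up at prediction time. The helper name, file name, and dimensions are invented for illustration.

import numpy as np
import dynet as dy

def load_pretrained_embeddings(model, path):
    """Read `word v1 v2 ...` lines and register them as a lookup parameter in `model`."""
    token_to_id, vectors = {}, []
    with open(path) as f:
        for line in f:
            parts = line.strip().split()
            token_to_id[parts[0].lower()] = len(vectors)
            vectors.append([float(v) for v in parts[1:]])
    embeddings = model.add_lookup_parameters((len(vectors), len(vectors[0])))
    embeddings.init_from_array(np.array(vectors))    # overwrite random init with pretrained values
    return token_to_id, embeddings

model = dy.ParameterCollection()
token_to_id, embeddings = load_pretrained_embeddings(model, "glove.6B.100d.txt")  # hypothetical file
dy.renew_cg()
vec = embeddings[token_to_id.get("the", 0)]   # an Expression holding the word's embedding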