This article collects typical usage examples of the Python method dynet.rectify. If you have been wondering what dynet.rectify does, how to call it, or where it is used, the curated examples below should help. You can also explore further usage examples from the dynet module that this method belongs to.
The following shows 7 code examples of dynet.rectify, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
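Before the excerpts, here is a minimal standalone sketch of what dynet.rectify computes: the elementwise ReLU, max(0, x). This snippet is illustrative and not part of the collected examples:
import dynet as dy

dy.renew_cg()                         # start a fresh computation graph
x = dy.inputVector([-1.5, 0.0, 2.0])
y = dy.rectify(x)                     # elementwise ReLU: max(0, x)
print(y.npvalue())                    # [0. 0. 2.]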
Example 1: nonlinearity
# Required module: import dynet [as an alias]
# Or: from dynet import rectify [as an alias]
def nonlinearity(h_nonlin_name):
    def compile_fn(di, dh):
        def fn(di):
            nonlin_name = dh['nonlin_name']
            if nonlin_name == 'relu':
                Out = dy.rectify(di['in'])
            elif nonlin_name == 'elu':
                Out = dy.elu(di['in'])
            elif nonlin_name == 'tanh':
                Out = dy.tanh(di['in'])
            else:
                raise ValueError
            return {'out': Out}
        return fn
    return siso_dynet_module('Nonlinearity', compile_fn,
                             {'nonlin_name': h_nonlin_name})
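Example 1 comes from a deep_architect-style search space; siso_dynet_module and the di/dh dictionaries belong to that framework. A standalone sketch of the same name-to-activation dispatch, assuming only DyNet (the helper name here is illustrative):
import dynet as dy

def apply_nonlinearity(nonlin_name, expr):
    # mirror of the relu/elu/tanh dispatch in Example 1
    if nonlin_name == 'relu':
        return dy.rectify(expr)
    elif nonlin_name == 'elu':
        return dy.elu(expr)
    elif nonlin_name == 'tanh':
        return dy.tanh(expr)
    raise ValueError('unknown nonlinearity: %s' % nonlin_name)

dy.renew_cg()
out = apply_nonlinearity('relu', dy.inputVector([-1.0, 0.5]))
print(out.npvalue())  # [0.  0.5]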
Example 2: __init__
# Required module: import dynet [as an alias]
# Or: from dynet import rectify [as an alias]
def __init__(self, vocab, options):
    import dynet as dy
    from uuparser.feature_extractor import FeatureExtractor
    global dy
    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic,
                        'relu': dy.rectify,
                        'tanh3': (lambda x: dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
    self.activation = self.activations[options.activation]
    self.costaugFlag = options.costaugFlag
    self.feature_extractor = FeatureExtractor(self.model, options, vocab)
    self.labelsFlag = options.labelsFlag
    mlp_in_dims = options.lstm_output_size * 2
    self.unlabeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                               options.mlp_hidden2_dims, 1, self.activation)
    if self.labelsFlag:
        self.labeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                                 options.mlp_hidden2_dims,
                                 len(self.feature_extractor.irels), self.activation)
    self.proj = options.proj
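The activations table in Examples 2 and 3 maps the activation option string to a DyNet callable; 'tanh3' is tanh of the elementwise cube. A standalone sketch of the same table, using dy.cmult (DyNet's componentwise multiply) in place of the cwise_multiply name used in the uuparser source:
import dynet as dy

activations = {
    'tanh': dy.tanh,
    'sigmoid': dy.logistic,
    'relu': dy.rectify,
    'tanh3': lambda x: dy.tanh(dy.cmult(dy.cmult(x, x), x)),  # tanh(x**3)
}

dy.renew_cg()
x = dy.inputVector([-2.0, 0.0, 2.0])
print(activations['relu'](x).npvalue())   # [0. 0. 2.]
print(activations['tanh3'](x).npvalue())  # ~[-1.  0.  1.] (tanh of [-8, 0, 8])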
Example 3: __init__
# Required module: import dynet [as an alias]
# Or: from dynet import rectify [as an alias]
def __init__(self, vocab, options):
    # import here so we don't load DyNet when just running parser.py --help, for example
    from uuparser.multilayer_perceptron import MLP
    from uuparser.feature_extractor import FeatureExtractor
    import dynet as dy
    global dy
    global LEFT_ARC, RIGHT_ARC, SHIFT, SWAP
    LEFT_ARC, RIGHT_ARC, SHIFT, SWAP = 0, 1, 2, 3
    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic,
                        'relu': dy.rectify,
                        'tanh3': (lambda x: dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
    self.activation = self.activations[options.activation]
    self.oracle = options.oracle
    self.headFlag = options.headFlag
    self.rlMostFlag = options.rlMostFlag
    self.rlFlag = options.rlFlag
    self.k = options.k
    # dimensions depending on extended features
    self.nnvecs = (1 if self.headFlag else 0) + (2 if self.rlFlag or self.rlMostFlag else 0)
    self.feature_extractor = FeatureExtractor(self.model, options, vocab, self.nnvecs)
    self.irels = self.feature_extractor.irels
    if options.no_bilstms > 0:
        mlp_in_dims = options.lstm_output_size * 2 * self.nnvecs * (self.k + 1)
    else:
        mlp_in_dims = self.feature_extractor.lstm_input_size * self.nnvecs * (self.k + 1)
    self.unlabeled_MLP = MLP(self.model, 'unlabeled', mlp_in_dims, options.mlp_hidden_dims,
                             options.mlp_hidden2_dims, 4, self.activation)
    self.labeled_MLP = MLP(self.model, 'labeled', mlp_in_dims, options.mlp_hidden_dims,
                           options.mlp_hidden2_dims, 2 * len(self.irels) + 2, self.activation)
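The MLP input width in Example 3 is plain arithmetic over the feature layout: two BiLSTM directions times nnvecs feature vectors per item times k+1 stack/buffer items. A worked example with illustrative values (these numbers are assumptions, not necessarily uuparser defaults):
lstm_output_size = 125  # per-direction BiLSTM output size (illustrative)
k = 3                   # number of stack/buffer items fed to the MLP (illustrative)
nnvecs = 1 + 2          # headFlag plus the rl/rlMost features, as computed in Example 3
mlp_in_dims = lstm_output_size * 2 * nnvecs * (k + 1)
print(mlp_in_dims)      # 3000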
Example 4: __init__
# Required module: import dynet [as an alias]
# Or: from dynet import rectify [as an alias]
def __init__(self, n_in, n_hid, n_out, n_layers, model):
    self.mlp = MultiLayerPerceptron(
        [2 * n_in] + [n_hid] * n_layers + [n_out], activation=dy.rectify,
        model=model)
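Example 4 only assembles the layer-size list and passes dy.rectify as the activation between layers; MultiLayerPerceptron appears to be the surrounding project's helper class, not DyNet API. The list it builds, with illustrative sizes:
n_in, n_hid, n_out, n_layers = 100, 64, 32, 2
dims = [2 * n_in] + [n_hid] * n_layers + [n_out]
print(dims)  # [200, 64, 64, 32] -> ReLU applied between consecutive layers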
Example 5: __call__
# Required module: import dynet [as an alias]
# Or: from dynet import rectify [as an alias]
def __call__(self, src, trg):
    return self.bilinear(
        dy.rectify(self.src_mlp(src)),  # HOTFIX rectify here?
        dy.rectify(self.trg_mlp(trg)))
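Example 5 rectifies both MLP outputs before scoring them with a bilinear form; the HOTFIX comment is the original author's own doubt about rectifying at that point. A minimal sketch of one rectified-MLP half, with assumed sizes (self.src_mlp in the source is presumably something similar):
import dynet as dy

m = dy.ParameterCollection()
pW = m.add_parameters((50, 100))  # illustrative sizes
pb = m.add_parameters(50)

def src_mlp(x):
    # a single affine layer standing in for Example 5's self.src_mlp
    return dy.parameter(pW) * x + dy.parameter(pb)

dy.renew_cg()
src = dy.inputVector([0.1] * 100)
h = dy.rectify(src_mlp(src))      # 50-dim and elementwise non-negative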
Example 6: predict_emb
# Required module: import dynet [as an alias]
# Or: from dynet import rectify [as an alias]
def predict_emb(self, chars):
    dy.renew_cg()
    conv_param = dy.parameter(self.conv)
    conv_param_bias = dy.parameter(self.conv_bias)
    H = dy.parameter(self.cnn_to_rep_params)
    Hb = dy.parameter(self.cnn_to_rep_bias)
    O = dy.parameter(self.mlp_out)
    Ob = dy.parameter(self.mlp_out_bias)
    # padding
    pad_char = self.c2i[PADDING_CHAR]
    padding_size = self.window_width // 2  # TODO also consider w_stride?
    char_ids = ([pad_char] * padding_size) + chars + ([pad_char] * padding_size)
    if len(chars) < self.pooling_maxk:
        # pad further so the k-max pooling output matches the affine layer's input size
        char_ids.extend([pad_char] * (self.pooling_maxk - len(chars)))
    embeddings = dy.concatenate_cols([self.char_lookup[cid] for cid in char_ids])
    reshaped_embeddings = dy.reshape(dy.transpose(embeddings), (1, len(char_ids), self.char_dim))
    # not using is_valid=False because of the extra padding added for k-max pooling
    conv_out = dy.conv2d_bias(reshaped_embeddings, conv_param, conv_param_bias, self.stride, is_valid=True)
    relu_out = dy.rectify(conv_out)
    ### alternative pooling, only usable when max_k is 1 (other differences unclear):
    # poolingk = [1, len(chars)]
    # pooling_out = dy.maxpooling2d(relu_out, poolingk, self.stride, is_valid=True)
    # pooling_out_flat = dy.reshape(pooling_out, (self.hidden_dim,))
    ### another option would be simply dy.max_dim(relu_out, d=1)
    pooling_out = dy.kmax_pooling(relu_out, self.pooling_maxk, d=1)  # d = dimension to max over
    pooling_out_flat = dy.reshape(pooling_out, (self.hidden_dim * self.pooling_maxk,))
    return O * dy.tanh(H * pooling_out_flat + Hb) + Ob
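Example 6 expresses a 1-D character convolution as conv2d over a (1, length, char_dim) tensor, rectifies, then keeps the pooling_maxk largest activations per filter. A shape-only sketch of the same pipeline; all names and sizes here are illustrative assumptions:
import dynet as dy
import numpy as np

char_dim, hidden_dim, window, maxk, length = 20, 30, 3, 2, 7
m = dy.ParameterCollection()
conv = m.add_parameters((1, window, char_dim, hidden_dim))  # filter bank
conv_b = m.add_parameters(hidden_dim)

dy.renew_cg()
emb = dy.inputTensor(np.zeros((1, length, char_dim)))
out = dy.conv2d_bias(emb, dy.parameter(conv), dy.parameter(conv_b),
                     [1, 1], is_valid=True)   # -> (1, length - window + 1, hidden_dim)
relu = dy.rectify(out)
pooled = dy.kmax_pooling(relu, maxk, d=1)     # keep the k best positions per filter
flat = dy.reshape(pooled, (hidden_dim * maxk,))
print(flat.dim())                             # ((60,), 1)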
Example 7: __call__
# Required module: import dynet [as an alias]
# Or: from dynet import rectify [as an alias]
def __call__(self, query, options, gold, lengths, query_no):
    if len(options) == 1:
        return None, 0
    final = []
    if args.word_vectors:
        qvecs = [dy.lookup(self.pEmbedding, w) for w in query]
        qvec_max = dy.emax(qvecs)
        qvec_mean = dy.average(qvecs)
    for otext, features in options:
        inputs = dy.inputTensor(features)
        if args.word_vectors:
            ovecs = [dy.lookup(self.pEmbedding, w) for w in otext]
            ovec_max = dy.emax(ovecs)
            ovec_mean = dy.average(ovecs)
            inputs = dy.concatenate([inputs, qvec_max, qvec_mean, ovec_max, ovec_mean])
        if args.drop > 0:
            inputs = dy.dropout(inputs, args.drop)
        h = inputs
        for pH, pB in zip(self.hidden, self.bias):
            h = dy.affine_transform([pB, pH, h])
            if args.nonlin == "linear":
                pass
            elif args.nonlin == "tanh":
                h = dy.tanh(h)
            elif args.nonlin == "cube":
                h = dy.cube(h)
            elif args.nonlin == "logistic":
                h = dy.logistic(h)
            elif args.nonlin == "relu":
                h = dy.rectify(h)
            elif args.nonlin == "elu":
                h = dy.elu(h)
            elif args.nonlin == "selu":
                h = dy.selu(h)
            elif args.nonlin == "softsign":
                h = dy.softsign(h)
            elif args.nonlin == "swish":
                h = dy.cmult(h, dy.logistic(h))
        final.append(dy.sum_dim(h, [0]))
    final = dy.concatenate(final)
    nll = -dy.log_softmax(final)
    dense_gold = []
    for i in range(len(options)):
        dense_gold.append(1.0 / len(gold) if i in gold else 0.0)
    answer = dy.inputTensor(dense_gold)
    loss = dy.transpose(answer) * nll
    predicted_link = np.argmax(final.npvalue())
    return loss, predicted_link
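Example 7's last branch composes swish as x * sigmoid(x), since DyNet exposes no built-in swish; the same trick works for other missing activations. A minimal check, assuming nothing beyond DyNet itself:
import dynet as dy

def swish(x):
    # x * sigmoid(x), matching the args.nonlin == "swish" branch above
    return dy.cmult(x, dy.logistic(x))

dy.renew_cg()
x = dy.inputVector([-1.0, 0.0, 1.0])
print(swish(x).npvalue())       # ~[-0.269  0.     0.731]
print(dy.rectify(x).npvalue())  # [0. 0. 1.]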