本文整理匯總了Python中dynet.tanh方法的典型用法代碼示例。如果您正苦於以下問題:Python dynet.tanh方法的具體用法?Python dynet.tanh怎麽用?Python dynet.tanh使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊dynet的用法示例。
在下文中一共展示了dynet.tanh方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: nonlinearity
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def nonlinearity(h_nonlin_name):
    """Searchable nonlinearity module.

    Args:
        h_nonlin_name: hyperparameter whose resolved value is one of
            'relu', 'elu', or 'tanh'.

    Returns:
        A single-input single-output dynet module applying the chosen
        activation to the input named 'in' and producing output 'out'.
    """
    def compile_fn(di, dh):
        # dh carries the resolved hyperparameter values at compile time.
        def fn(di):
            nonlin_name = dh['nonlin_name']
            if nonlin_name == 'relu':
                Out = dy.rectify(di['in'])
            elif nonlin_name == 'elu':
                Out = dy.elu(di['in'])
            elif nonlin_name == 'tanh':
                Out = dy.tanh(di['in'])
            else:
                # Fail loudly with the offending value instead of a bare ValueError.
                raise ValueError("unsupported nonlinearity: %r" % (nonlin_name,))
            return {'out': Out}
        return fn
    return siso_dynet_module('Nonlinearity', compile_fn,
                             {'nonlin_name': h_nonlin_name})
示例2: _upsample_old
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def _upsample_old(self, mgc, start, stop):
    """Upsample MGC frames into per-sample tanh activations over [start, stop).

    Each output sample applies the upsample-step-specific tanh projection
    to the current MGC frame vector, advancing to the next frame every
    ``len(self.upsample_w_t)`` steps.

    Bug fix: ``start / len(...)`` produces a float under Python 3 and is
    then used as a list index — use integer division ``//`` instead.
    """
    mgc_index = start // len(self.upsample_w_t)
    ups_index = start % len(self.upsample_w_t)
    upsampled = []
    mgc_vect = dy.inputVector(mgc[mgc_index])
    for x in range(stop - start):
        # sigm = dy.logistic(self.upsample_w_s[ups_index].expr(update=True) * mgc_vect + self.upsample_b_s[ups_index].expr(update=True))
        tnh = dy.tanh(self.upsample_w_t[ups_index].expr(update=True) * mgc_vect +
                      self.upsample_b_t[ups_index].expr(update=True))
        # r = dy.cmult(sigm, tnh)
        upsampled.append(tnh)
        ups_index += 1
        if ups_index == len(self.upsample_w_t):
            ups_index = 0
            mgc_index += 1
            if mgc_index == len(mgc):
                # Last frame is sometimes not processed; reuse the final
                # frame, which should have similar parameters.
                mgc_index -= 1
            else:
                mgc_vect = dy.inputVector(mgc[mgc_index])
    return upsampled
示例3: _upsample
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def _upsample(self, mgc, start, stop):
    """Upsample MGC frames into gated (sigmoid * tanh) samples over [start, stop).

    Like ``_upsample_old`` but gates the tanh projection with a logistic
    gate before emitting each sample.

    Bug fix: ``start / len(...)`` produces a float under Python 3 and is
    then used as a list index — use integer division ``//`` instead.
    """
    mgc_index = start // len(self.upsample_w_s)
    ups_index = start % len(self.upsample_w_s)
    upsampled = []
    mgc_vect = dy.inputVector(mgc[mgc_index])
    for x in range(stop - start):
        sigm = dy.logistic(
            self.upsample_w_s[ups_index].expr(update=True) * mgc_vect +
            self.upsample_b_s[ups_index].expr(update=True))
        tnh = dy.tanh(
            self.upsample_w_t[ups_index].expr(update=True) * mgc_vect +
            self.upsample_b_t[ups_index].expr(update=True))
        r = dy.cmult(sigm, tnh)
        upsampled.append(r)
        ups_index += 1
        if ups_index == len(self.upsample_w_s):
            ups_index = 0
            mgc_index += 1
            if mgc_index == len(mgc):
                # Last frame is sometimes not processed; reuse the final
                # frame, which should have similar parameters.
                mgc_index -= 1
            else:
                mgc_vect = dy.inputVector(mgc[mgc_index])
    return upsampled
示例4: __init__
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def __init__(self, vocab, options):
    """Build the parser model: trainer, activation table, feature extractor, MLPs.

    Args:
        vocab: vocabulary passed through to the FeatureExtractor.
        options: parsed options (learning_rate, activation, MLP sizes, flags).
    """
    # Fix: `global dy` must come BEFORE `import dynet as dy` — in Python 3,
    # declaring a name global after it has been bound in the same scope is
    # a SyntaxError ("name 'dy' is assigned to before global declaration").
    global dy
    import dynet as dy
    from uuparser.feature_extractor import FeatureExtractor
    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    # NOTE(review): `dy.cwise_multiply` is an old DyNet name (current
    # releases expose elementwise product as `dy.cmult`) — confirm against
    # the pinned dynet version before changing.
    self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic,
                        'relu': dy.rectify,
                        'tanh3': (lambda x: dy.tanh(
                            dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
    self.activation = self.activations[options.activation]
    self.costaugFlag = options.costaugFlag
    self.feature_extractor = FeatureExtractor(self.model, options, vocab)
    self.labelsFlag = options.labelsFlag
    # Concatenation of the two directions of the BiLSTM.
    mlp_in_dims = options.lstm_output_size * 2
    self.unlabeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                               options.mlp_hidden2_dims, 1, self.activation)
    if self.labelsFlag:
        self.labeled_MLP = biMLP(self.model, mlp_in_dims,
                                 options.mlp_hidden_dims,
                                 options.mlp_hidden2_dims,
                                 len(self.feature_extractor.irels),
                                 self.activation)
    self.proj = options.proj
示例5: Init
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def Init(self, options):
    """Precompute the padding vector and the empty-stack representation."""
    # Collect whichever embedding paddings the options enable; disabled
    # components are None and dropped below.
    candidates = [
        self.word_lookup[1] if options.word_emb_size > 0 else None,
        dy.zeros(self.elmo.emb_dim) if self.elmo else None,
        self.pos_lookup[1] if options.pos_emb_size > 0 else None,
        self.charPadding.expr() if options.char_emb_size > 0 else None,
        self.treebank_lookup[0] if options.tbank_emb_size > 0 else None,
    ]
    present = [vec for vec in candidates if vec is not None]
    self.paddingVec = dy.tanh(
        self.word2lstm.expr() * dy.concatenate(present)
        + self.word2lstmbias.expr())
    if self.nnvecs == 1:
        self.empty = self.paddingVec
    else:
        self.empty = dy.concatenate([self.paddingVec] * self.nnvecs)
示例6: word_repr
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def word_repr(self, char_seq, cembs):
    """Compose a word representation from its character embeddings.

    Uses length-specific reset-gate and composition parameters, cached per
    word length in self.param_exprs. Known words are averaged with their
    dedicated lookup embedding.
    """
    wlen = len(char_seq)
    rg_w = 'rgW%d' % wlen
    rg_b = 'rgb%d' % wlen
    c_w = 'cW%d' % wlen
    c_b = 'cb%d' % wlen
    if rg_w not in self.param_exprs:
        # Instantiate and cache the expressions for this word length once.
        self.param_exprs[rg_w] = dy.parameter(self.params['reset_gate_W'][wlen - 1])
        self.param_exprs[rg_b] = dy.parameter(self.params['reset_gate_b'][wlen - 1])
        self.param_exprs[c_w] = dy.parameter(self.params['com_W'][wlen - 1])
        self.param_exprs[c_b] = dy.parameter(self.params['com_b'][wlen - 1])
    chars = dy.concatenate(cembs)
    reset_gate = dy.logistic(self.param_exprs[rg_w] * chars + self.param_exprs[rg_b])
    word = dy.tanh(self.param_exprs[c_w] * dy.cmult(reset_gate, chars)
                   + self.param_exprs[c_b])
    key = tuple(char_seq)
    if self.known_words is not None and key in self.known_words:
        # Average the compositional vector with the word's lookup embedding.
        known = dy.lookup(self.params['word_embed'], self.known_words[key])
        return (word + known) / 2.
    return word
示例7: __init__
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def __init__(self, lstm):
    """Initial state of an LSTM: expose cell parameters as graph expressions."""
    self.lstm = lstm
    self.outputs = []
    # Initial cell state and its squashed hidden state.
    self.c = dynet.parameter(lstm.c0)
    self.h = dynet.tanh(self.c)
    # Gate parameters: input (i), forget (f), candidate (c), output (o).
    for gate in ('i', 'f', 'c', 'o'):
        setattr(self, 'W_' + gate, dynet.parameter(getattr(lstm, 'W_' + gate)))
        setattr(self, 'b_' + gate, dynet.parameter(getattr(lstm, 'b_' + gate)))
示例8: add_input
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def add_input(self, input_vec):
    """Advance this State by one LSTM step.

    Note that this function updates the existing State object in place and
    returns self for chaining.
    """
    xh = dynet.concatenate([input_vec, self.h])
    in_gate = dynet.logistic(self.W_i * xh + self.b_i)
    forget_gate = dynet.logistic(self.W_f * xh + self.b_f)
    candidate = dynet.tanh(self.W_c * xh + self.b_c)
    out_gate = dynet.logistic(self.W_o * xh + self.b_o)
    # Standard LSTM cell update: forget old memory, admit new candidate.
    new_c = dynet.cmult(forget_gate, self.c) + dynet.cmult(in_gate, candidate)
    new_h = dynet.cmult(out_gate, dynet.tanh(new_c))
    self.c, self.h = new_c, new_h
    self.outputs.append(new_h)
    return self
示例9: predict_emb
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def predict_emb(self, chars):
    """Predict an embedding for a character-id sequence via a char biLSTM + MLP."""
    dy.renew_cg()
    fwd_state = self.char_fwd_lstm.initial_state()
    bwd_state = self.char_bwd_lstm.initial_state()
    hidden_w = dy.parameter(self.lstm_to_rep_params)
    hidden_b = dy.parameter(self.lstm_to_rep_bias)
    out_w = dy.parameter(self.mlp_out)
    out_b = dy.parameter(self.mlp_out_bias)
    # Pad the sequence with the padding character on both sides.
    pad = self.c2i[PADDING_CHAR]
    embeddings = [self.char_lookup[cid] for cid in [pad] + chars + [pad]]
    fwd_out = fwd_state.transduce(embeddings)
    bwd_out = bwd_state.transduce(list(reversed(embeddings)))
    # Concatenate the final states of both directions.
    rep = dy.concatenate([fwd_out[-1], bwd_out[-1]])
    return out_w * dy.tanh(hidden_w * rep + hidden_b) + out_b
示例10: build_tagging_graph
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def build_tagging_graph(self, sentence):
    """Score every attribute's tags for each token of `sentence`.

    Returns a dict mapping attribute name to the per-token score expressions.
    """
    dy.renew_cg()
    embeddings = [self.word_rep(w) for w in sentence]
    lstm_out = self.word_bi_lstm.transduce(embeddings)
    scores = {}
    for att in self.attributes:
        # Per-attribute MLP parameters.
        hidden_w = dy.parameter(self.lstm_to_tags_params[att])
        hidden_b = dy.parameter(self.lstm_to_tags_bias[att])
        out_w = dy.parameter(self.mlp_out[att])
        out_b = dy.parameter(self.mlp_out_bias[att])
        scores[att] = [
            out_w * dy.tanh(hidden_w * rep + hidden_b) + out_b
            for rep in lstm_out
        ]
    return scores
示例11: dnn_net_simple
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def dnn_net_simple(num_classes):
    """Simple searchable DNN: repeated dense -> nonlinearity -> optional dropout."""
    # Hyperparameters of the search space.
    h_nonlin_name = D(['relu', 'tanh', 'elu'])   # nonlinearity to choose from
    h_opt_drop = D([0, 1])                       # 0 excludes dropout, 1 includes it
    h_drop_keep_prob = D([0.25, 0.5, 0.75])      # dropout keep probability
    h_num_hidden = D([64, 128, 256, 512, 1024])  # affine layer width
    h_num_repeats = D([1, 2])                    # number of cell repetitions

    def make_cell():
        # One hidden cell of the network.
        return mo.siso_sequential([
            dense(h_num_hidden),
            nonlinearity(h_nonlin_name),
            mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
        ])

    return mo.siso_sequential([
        flatten(),
        mo.siso_repeat(make_cell, h_num_repeats),
        dense(D([num_classes])),
    ])
示例12: dnn_net
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def dnn_net(num_classes):
    """Searchable DNN built from repeated dnn_cell modules plus a classifier layer."""
    h_nonlin_name = D(['relu', 'tanh', 'elu'])
    h_opt_drop = D([0, 1])

    def make_cell():
        return dnn_cell(D([64, 128, 256, 512, 1024]), h_nonlin_name,
                        h_opt_drop, D([0.25, 0.5, 0.75]))

    return mo.siso_sequential([
        flatten(),
        mo.siso_repeat(make_cell, D([1, 2])),
        dense(D([num_classes])),
    ])
# Main/Searcher
# Getting and reading mnist data adapted from here:
# https://github.com/clab/dynet/blob/master/examples/mnist/mnist-autobatch.py
示例13: build_tagging_graph
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def build_tagging_graph(self, sentence):
    """Score the tag set for each token, optionally mixing in bigram embeddings.

    NOTE(review): `options` resolves to a module-level name here, not an
    attribute of self — verify that is intentional.
    """
    dy.renew_cg()
    embeddings = [self.word_rep(w) for w in sentence]
    lstm_out = self.bi_lstm.transduce(embeddings)
    hidden_w = dy.parameter(self.lstm_to_tags_params)
    hidden_b = dy.parameter(self.lstm_to_tags_bias)
    out_w = dy.parameter(self.mlp_out)
    out_b = dy.parameter(self.mlp_out_bias)
    scores = []
    if options.bigram:
        for rep, word in zip(lstm_out, sentence):
            left = dy.lookup(self.bigram_lookup, word[0], update=self.we_update)
            right = dy.lookup(self.bigram_lookup, word[1], update=self.we_update)
            if self.dropout is not None:
                left = dy.dropout(left, self.dropout)
                right = dy.dropout(right, self.dropout)
            features = dy.concatenate([left, rep, right])
            scores.append(out_w * dy.tanh(hidden_w * features + hidden_b) + out_b)
    else:
        scores = [out_w * dy.tanh(hidden_w * rep + hidden_b) + out_b
                  for rep in lstm_out]
    return scores
示例14: _get_intermediate_state
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def _get_intermediate_state(self, state, dropout_amount=0.):
    """Project `state` through a tanh linear layer, then apply dropout."""
    projected = du.linear_layer(state, self.state_transform_weights)
    return dy.dropout(dy.tanh(projected), dropout_amount)
示例15: _predict_one
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import tanh [as 別名]
def _mlp_hidden(self, layers, x):
    """Run `x` through a stack of tanh hidden layers.

    `layers` is a (weights, biases) pair of parallel parameter lists, as
    stored in self.mlp_excitation / self.mlp_filter / self.mlp_vuv.
    """
    hidden_w, hidden_b = layers
    for w, b in zip(hidden_w, hidden_b):
        x = dy.tanh(w.expr(update=True) * x + b.expr(update=True))
    return x

def _predict_one(self, mgc, noise):
    """Predict one upsampled window from a single MGC frame.

    Three MLP heads share the MGC input (the identical forward loops of the
    original are factored into `_mlp_hidden`):
      - excitation: logistic output signal,
      - filter: tanh FIR filter taps,
      - vuv: logistic voiced/unvoiced flag.

    Returns (outputs, excitation, filter, vuv).
    """
    mgc = dy.inputVector(mgc)
    # Currently unused — noise mixing is disabled below — but kept so the
    # call contract and graph inputs match the original implementation.
    noise_vec = dy.inputVector(noise[0:self.UPSAMPLE_COUNT])

    excitation = dy.logistic(
        self.excitation_w.expr(update=True)
        * self._mlp_hidden(self.mlp_excitation, mgc)
        + self.excitation_b.expr(update=True))
    filter_taps = dy.tanh(
        self.filter_w.expr(update=True)
        * self._mlp_hidden(self.mlp_filter, mgc)
        + self.filter_b.expr(update=True))
    vuv = dy.logistic(
        self.vuv_w.expr(update=True)
        * self._mlp_hidden(self.mlp_vuv, mgc)
        + self.vuv_b.expr(update=True))

    # Noise mixing disabled: mixed = excitation * vuv + noise_vec * (1.0 - vuv)
    mixed = excitation
    outputs = []
    # Sliding dot product of the predicted filter over the excitation signal.
    for ii in range(self.UPSAMPLE_COUNT):
        window = dy.pickrange(mixed, ii, ii + self.FILTER_SIZE)
        outputs.append(dy.sum_elems(dy.cmult(filter_taps, window)))
    outputs = dy.concatenate(outputs)
    return outputs, excitation, filter_taps, vuv