This article collects typical usage examples of the Python method dynet.logistic. If you are unsure exactly how to use dynet.logistic or what it is for, the hand-picked code examples below may help. You can also further explore usage examples of the dynet module, where this method lives.
The following shows 8 code examples of the dynet.logistic method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
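Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what dy.logistic computes: the element-wise sigmoid 1 / (1 + exp(-x)) of a DyNet expression, which is why it almost always shows up in gate computations.

import dynet as dy
import numpy as np

# Build a tiny computation graph and apply the element-wise sigmoid.
dy.renew_cg()
x = dy.inputVector([-2.0, 0.0, 2.0])
y = dy.logistic(x)   # element-wise 1 / (1 + exp(-x))
print(y.npvalue())   # approximately [0.119, 0.5, 0.881]
np.testing.assert_allclose(
    y.npvalue(), 1.0 / (1.0 + np.exp(-np.array([-2.0, 0.0, 2.0]))), rtol=1e-5)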
Example 1: _upsample_old
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def _upsample_old(self, mgc, start, stop):
    mgc_index = start // len(self.upsample_w_t)  # integer division: the result is used as a list index
    ups_index = start % len(self.upsample_w_t)
    upsampled = []
    mgc_vect = dy.inputVector(mgc[mgc_index])
    for x in range(stop - start):
        # sigm = dy.logistic(self.upsample_w_s[ups_index].expr(update=True) * mgc_vect + self.upsample_b_s[ups_index].expr(update=True))
        tnh = dy.tanh(self.upsample_w_t[ups_index].expr(update=True) * mgc_vect +
                      self.upsample_b_t[ups_index].expr(update=True))
        # r = dy.cmult(sigm, tnh)
        upsampled.append(tnh)
        ups_index += 1
        if ups_index == len(self.upsample_w_t):
            ups_index = 0
            mgc_index += 1
            if mgc_index == len(mgc):
                # the last frame is sometimes not processed, but it should have similar parameters
                mgc_index -= 1
            else:
                mgc_vect = dy.inputVector(mgc[mgc_index])
    return upsampled
Example 2: _upsample
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def _upsample(self, mgc, start, stop):
    mgc_index = start // len(self.upsample_w_s)  # integer division: the result is used as a list index
    ups_index = start % len(self.upsample_w_s)
    upsampled = []
    mgc_vect = dy.inputVector(mgc[mgc_index])
    for x in range(stop - start):
        sigm = dy.logistic(self.upsample_w_s[ups_index].expr(update=True) * mgc_vect +
                           self.upsample_b_s[ups_index].expr(update=True))
        tnh = dy.tanh(self.upsample_w_t[ups_index].expr(update=True) * mgc_vect +
                      self.upsample_b_t[ups_index].expr(update=True))
        r = dy.cmult(sigm, tnh)  # gated activation: sigmoid gate applied to the tanh output
        upsampled.append(r)
        ups_index += 1
        if ups_index == len(self.upsample_w_s):
            ups_index = 0
            mgc_index += 1
            if mgc_index == len(mgc):
                # the last frame is sometimes not processed, but it should have similar parameters
                mgc_index -= 1
            else:
                mgc_vect = dy.inputVector(mgc[mgc_index])
    return upsampled
Example 3: __init__
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def __init__(self, vocab, options):
    import dynet as dy
    from uuparser.feature_extractor import FeatureExtractor
    global dy

    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic, 'relu': dy.rectify,
                        'tanh3': (lambda x: dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
    self.activation = self.activations[options.activation]
    self.costaugFlag = options.costaugFlag
    self.feature_extractor = FeatureExtractor(self.model, options, vocab)
    self.labelsFlag = options.labelsFlag
    mlp_in_dims = options.lstm_output_size * 2
    # biMLP is defined elsewhere in uuparser and is not shown in this excerpt
    self.unlabeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                               options.mlp_hidden2_dims, 1, self.activation)
    if self.labelsFlag:
        self.labeled_MLP = biMLP(self.model, mlp_in_dims, options.mlp_hidden_dims,
                                 options.mlp_hidden2_dims, len(self.feature_extractor.irels), self.activation)
    self.proj = options.proj
Example 4: word_repr
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def word_repr(self, char_seq, cembs):
    # obtain the word representation when given its character sequence
    wlen = len(char_seq)
    if 'rgW%d' % wlen not in self.param_exprs:
        self.param_exprs['rgW%d' % wlen] = dy.parameter(self.params['reset_gate_W'][wlen - 1])
        self.param_exprs['rgb%d' % wlen] = dy.parameter(self.params['reset_gate_b'][wlen - 1])
        self.param_exprs['cW%d' % wlen] = dy.parameter(self.params['com_W'][wlen - 1])
        self.param_exprs['cb%d' % wlen] = dy.parameter(self.params['com_b'][wlen - 1])

    chars = dy.concatenate(cembs)
    reset_gate = dy.logistic(self.param_exprs['rgW%d' % wlen] * chars + self.param_exprs['rgb%d' % wlen])
    word = dy.tanh(self.param_exprs['cW%d' % wlen] * dy.cmult(reset_gate, chars) + self.param_exprs['cb%d' % wlen])
    if self.known_words is not None and tuple(char_seq) in self.known_words:
        # average the composed representation with the embedding of the known word
        return (word + dy.lookup(self.params['word_embed'], self.known_words[tuple(char_seq)])) / 2.
    return word
Example 5: add_input
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def add_input(self, input_vec):
    """
    Note that this function updates the existing State object!
    """
    x = dynet.concatenate([input_vec, self.h])
    i = dynet.logistic(self.W_i * x + self.b_i)  # input gate
    f = dynet.logistic(self.W_f * x + self.b_f)  # forget gate
    g = dynet.tanh(self.W_c * x + self.b_c)      # candidate cell state
    o = dynet.logistic(self.W_o * x + self.b_o)  # output gate
    c = dynet.cmult(f, self.c) + dynet.cmult(i, g)
    h = dynet.cmult(o, dynet.tanh(c))
    self.c = c
    self.h = h
    self.outputs.append(h)
    return self
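Example 5 assumes that the gate parameters (W_i, W_f, W_c, W_o and their biases) and the running state (self.h, self.c) are created elsewhere in that project. The following is a rough, self-contained sketch of how such a hand-rolled LSTM-style cell built on dy.logistic gates is typically set up and stepped over a sequence; the parameter names and dimensions here are illustrative, not taken from the project above.

import dynet as dy

INPUT_DIM, HIDDEN_DIM = 4, 8
pc = dy.ParameterCollection()
# one weight matrix and one bias per gate; each sees [input; previous hidden]
W = {g: pc.add_parameters((HIDDEN_DIM, INPUT_DIM + HIDDEN_DIM)) for g in ('i', 'f', 'c', 'o')}
b = {g: pc.add_parameters((HIDDEN_DIM,)) for g in ('i', 'f', 'c', 'o')}

def lstm_step(x, h, c):
    z = dy.concatenate([x, h])
    i = dy.logistic(W['i'].expr() * z + b['i'].expr())  # input gate
    f = dy.logistic(W['f'].expr() * z + b['f'].expr())  # forget gate
    o = dy.logistic(W['o'].expr() * z + b['o'].expr())  # output gate
    g = dy.tanh(W['c'].expr() * z + b['c'].expr())      # candidate cell state
    c = dy.cmult(f, c) + dy.cmult(i, g)
    h = dy.cmult(o, dy.tanh(c))
    return h, c

dy.renew_cg()
h = dy.inputVector([0.0] * HIDDEN_DIM)
c = dy.inputVector([0.0] * HIDDEN_DIM)
for frame in ([0.1] * INPUT_DIM, [0.5] * INPUT_DIM):
    h, c = lstm_step(dy.inputVector(frame), h, c)
print(h.npvalue().shape)  # (8,)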
Example 6: _predict_one
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def _predict_one(self, mgc, noise):
    mgc = dy.inputVector(mgc)
    outputs = []
    noise_vec = dy.inputVector(noise[0:self.UPSAMPLE_COUNT])

    [hidden_w, hidden_b] = self.mlp_excitation
    hidden_input = mgc  # dy.concatenate([mgc, noise_vec])
    for w, b in zip(hidden_w, hidden_b):
        hidden_input = dy.tanh(w.expr(update=True) * hidden_input + b.expr(update=True))
    excitation = dy.logistic(
        self.excitation_w.expr(update=True) * hidden_input + self.excitation_b.expr(update=True))

    [hidden_w, hidden_b] = self.mlp_filter
    hidden_input = mgc  # dy.concatenate([mgc, noise_vec])
    for w, b in zip(hidden_w, hidden_b):
        hidden_input = dy.tanh(w.expr(update=True) * hidden_input + b.expr(update=True))
    filter = dy.tanh(self.filter_w.expr(update=True) * hidden_input + self.filter_b.expr(update=True))

    [hidden_w, hidden_b] = self.mlp_vuv
    hidden_input = mgc  # dy.concatenate([mgc, noise_vec])
    for w, b in zip(hidden_w, hidden_b):
        hidden_input = dy.tanh(w.expr(update=True) * hidden_input + b.expr(update=True))
    vuv = dy.logistic(self.vuv_w.expr(update=True) * hidden_input + self.vuv_b.expr(update=True))

    # sample_vec = dy.inputVector(noise[self.UPSAMPLE_COUNT:self.UPSAMPLE_COUNT * 2])
    # noise_vec = dy.inputVector(noise[0:self.UPSAMPLE_COUNT + self.FILTER_SIZE - 1])
    mixed = excitation  # * vuv + noise_vec * (1.0 - vuv)
    for ii in range(self.UPSAMPLE_COUNT):
        tmp = dy.cmult(filter, dy.pickrange(mixed, ii, ii + self.FILTER_SIZE))
        outputs.append(dy.sum_elems(tmp))
    outputs = dy.concatenate(outputs)
    # from ipdb import set_trace
    # set_trace()
    # mixed = dy.reshape(mixed, (self.UPSAMPLE_COUNT + self.FILTER_SIZE - 1, 1, 1))
    # filter = dy.reshape(filter, (self.FILTER_SIZE, 1, 1, 1))
    # outputs = dy.conv2d(mixed, filter, stride=(1, 1), is_valid=True)
    # outputs = dy.reshape(outputs, (self.UPSAMPLE_COUNT,))
    # outputs = outputs + noise_vec * vuv
    return outputs, excitation, filter, vuv
Example 7: __init__
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def __init__(self, vocab, options):
    # import here so we don't load Dynet if just running parser.py --help for example
    from uuparser.multilayer_perceptron import MLP
    from uuparser.feature_extractor import FeatureExtractor
    import dynet as dy
    global dy

    global LEFT_ARC, RIGHT_ARC, SHIFT, SWAP
    LEFT_ARC, RIGHT_ARC, SHIFT, SWAP = 0, 1, 2, 3

    self.model = dy.ParameterCollection()
    self.trainer = dy.AdamTrainer(self.model, alpha=options.learning_rate)
    self.activations = {'tanh': dy.tanh, 'sigmoid': dy.logistic, 'relu': dy.rectify,
                        'tanh3': (lambda x: dy.tanh(dy.cwise_multiply(dy.cwise_multiply(x, x), x)))}
    self.activation = self.activations[options.activation]
    self.oracle = options.oracle
    self.headFlag = options.headFlag
    self.rlMostFlag = options.rlMostFlag
    self.rlFlag = options.rlFlag
    self.k = options.k

    # dimensions depending on extended features
    self.nnvecs = (1 if self.headFlag else 0) + (2 if self.rlFlag or self.rlMostFlag else 0)

    self.feature_extractor = FeatureExtractor(self.model, options, vocab, self.nnvecs)
    self.irels = self.feature_extractor.irels

    if options.no_bilstms > 0:
        mlp_in_dims = options.lstm_output_size * 2 * self.nnvecs * (self.k + 1)
    else:
        mlp_in_dims = self.feature_extractor.lstm_input_size * self.nnvecs * (self.k + 1)

    self.unlabeled_MLP = MLP(self.model, 'unlabeled', mlp_in_dims, options.mlp_hidden_dims,
                             options.mlp_hidden2_dims, 4, self.activation)
    self.labeled_MLP = MLP(self.model, 'labeled', mlp_in_dims, options.mlp_hidden_dims,
                           options.mlp_hidden2_dims, 2 * len(self.irels) + 2, self.activation)
Example 8: __call__
# Required module: import dynet [as alias]
# Or: from dynet import logistic [as alias]
def __call__(self, query, options, gold, lengths, query_no):
    if len(options) == 1:
        return None, 0

    final = []
    if args.word_vectors:
        qvecs = [dy.lookup(self.pEmbedding, w) for w in query]
        qvec_max = dy.emax(qvecs)
        qvec_mean = dy.average(qvecs)
    for otext, features in options:
        inputs = dy.inputTensor(features)
        if args.word_vectors:
            ovecs = [dy.lookup(self.pEmbedding, w) for w in otext]
            ovec_max = dy.emax(ovecs)
            ovec_mean = dy.average(ovecs)
            inputs = dy.concatenate([inputs, qvec_max, qvec_mean, ovec_max, ovec_mean])
        if args.drop > 0:
            inputs = dy.dropout(inputs, args.drop)
        h = inputs
        for pH, pB in zip(self.hidden, self.bias):
            h = dy.affine_transform([pB, pH, h])
            if args.nonlin == "linear":
                pass
            elif args.nonlin == "tanh":
                h = dy.tanh(h)
            elif args.nonlin == "cube":
                h = dy.cube(h)
            elif args.nonlin == "logistic":
                h = dy.logistic(h)
            elif args.nonlin == "relu":
                h = dy.rectify(h)
            elif args.nonlin == "elu":
                h = dy.elu(h)
            elif args.nonlin == "selu":
                h = dy.selu(h)
            elif args.nonlin == "softsign":
                h = dy.softsign(h)
            elif args.nonlin == "swish":
                h = dy.cmult(h, dy.logistic(h))
        final.append(dy.sum_dim(h, [0]))
    final = dy.concatenate(final)
    nll = -dy.log_softmax(final)

    dense_gold = []
    for i in range(len(options)):
        dense_gold.append(1.0 / len(gold) if i in gold else 0.0)
    answer = dy.inputTensor(dense_gold)
    loss = dy.transpose(answer) * nll
    predicted_link = np.argmax(final.npvalue())
    return loss, predicted_link