This article collects typical usage examples of the Python method dynet.inputVector. If you are wondering how dynet.inputVector is used, or what it is for, the curated code examples below may help. You can also explore the dynet module in which the method is defined.
The following lists 15 code examples of dynet.inputVector, sorted roughly by popularity.
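Before the examples, here is a minimal standalone sketch of what dy.inputVector does; the vector values and sizes are arbitrary and chosen only for illustration:

import dynet as dy
import numpy as np

dy.renew_cg()  # start a fresh computation graph
# inputVector wraps a Python list (or 1-D NumPy array) as a constant vector expression
v = dy.inputVector([1.0, 2.0, 3.0])
w = dy.inputVector(np.zeros(3))
print((v + w).npvalue())  # -> [1. 2. 3.]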
Example 1: forward
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def forward(self, observations):
    def log_sum_exp(scores):
        npval = scores.npvalue()
        argmax_score = np.argmax(npval)
        max_score_expr = dy.pick(scores, argmax_score)
        max_score_expr_broadcast = dy.concatenate([max_score_expr] * self.tagset_size)
        return max_score_expr + dy.log(dy.sum_dim(dy.transpose(dy.exp(scores - max_score_expr_broadcast)), [1]))

    init_alphas = [-1e10] * self.tagset_size
    init_alphas[t2i[START_TAG]] = 0
    for_expr = dy.inputVector(init_alphas)
    for obs in observations:
        alphas_t = []
        for next_tag in range(self.tagset_size):
            obs_broadcast = dy.concatenate([dy.pick(obs, next_tag)] * self.tagset_size)
            next_tag_expr = for_expr + self.transitions[next_tag] + obs_broadcast
            alphas_t.append(log_sum_exp(next_tag_expr))
        for_expr = dy.concatenate(alphas_t)
    terminal_expr = for_expr + self.transitions[t2i["<STOP>"]]
    alpha = log_sum_exp(terminal_expr)
    return alpha
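The inner log_sum_exp above uses the standard max-shift trick for numerical stability. A small NumPy-only sketch of the same idea, for illustration only (not part of the original code):

import numpy as np

def log_sum_exp_np(scores):
    # subtracting the maximum before exponentiating keeps exp() from overflowing
    m = np.max(scores)
    return m + np.log(np.sum(np.exp(scores - m)))

print(log_sum_exp_np(np.array([1000.0, 999.0, 998.0])))  # ~1000.41; the naive form overflows to inf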
Example 2: forward
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def forward(self, observations):
    def log_sum_exp(scores):
        npval = scores.npvalue()
        argmax_score = np.argmax(npval)
        max_score_expr = dy.pick(scores, argmax_score)
        max_score_expr_broadcast = dy.concatenate([max_score_expr] * self.dim_output)
        return max_score_expr + dy.log(dy.sum_elems(dy.transpose(dy.exp(scores - max_score_expr_broadcast))))

    init_alphas = [-1e10] * self.dim_output
    init_alphas[self.sp_s] = 0
    for_expr = dy.inputVector(init_alphas)
    for obs in observations:
        alphas_t = []
        for next_tag in range(self.dim_output):
            obs_broadcast = dy.concatenate([dy.pick(obs, next_tag)] * self.dim_output)
            next_tag_expr = for_expr + self.trans[next_tag] + obs_broadcast
            alphas_t.append(log_sum_exp(next_tag_expr))
        for_expr = dy.concatenate(alphas_t)
    terminal_expr = for_expr + self.trans[self.sp_e]
    alpha = log_sum_exp(terminal_expr)
    return alpha
Example 3: _upsample_old
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def _upsample_old(self, mgc, start, stop):
    # integer division: each mgc frame covers len(self.upsample_w_t) output samples
    mgc_index = start // len(self.upsample_w_t)
    ups_index = start % len(self.upsample_w_t)
    upsampled = []
    mgc_vect = dy.inputVector(mgc[mgc_index])
    for x in range(stop - start):
        # sigm = dy.logistic(self.upsample_w_s[ups_index].expr(update=True) * mgc_vect + self.upsample_b_s[ups_index].expr(update=True))
        tnh = dy.tanh(self.upsample_w_t[ups_index].expr(update=True) * mgc_vect +
                      self.upsample_b_t[ups_index].expr(update=True))
        # r = dy.cmult(sigm, tnh)
        upsampled.append(tnh)
        ups_index += 1
        if ups_index == len(self.upsample_w_t):
            ups_index = 0
            mgc_index += 1
            if mgc_index == len(mgc):
                # last frame is sometimes not processed, but it should have similar parameters
                mgc_index -= 1
            else:
                mgc_vect = dy.inputVector(mgc[mgc_index])
    return upsampled
Example 4: _upsample
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def _upsample(self, mgc, start, stop):
    # integer division: each mgc frame covers len(self.upsample_w_s) output samples
    mgc_index = start // len(self.upsample_w_s)
    ups_index = start % len(self.upsample_w_s)
    upsampled = []
    mgc_vect = dy.inputVector(mgc[mgc_index])
    for x in range(stop - start):
        sigm = dy.logistic(
            self.upsample_w_s[ups_index].expr(update=True) * mgc_vect +
            self.upsample_b_s[ups_index].expr(update=True))
        tnh = dy.tanh(
            self.upsample_w_t[ups_index].expr(update=True) * mgc_vect +
            self.upsample_b_t[ups_index].expr(update=True))
        r = dy.cmult(sigm, tnh)  # gated activation: sigmoid gate applied elementwise to the tanh output
        upsampled.append(r)
        ups_index += 1
        if ups_index == len(self.upsample_w_s):
            ups_index = 0
            mgc_index += 1
            if mgc_index == len(mgc):
                # last frame is sometimes not processed, but it should have similar parameters
                mgc_index -= 1
            else:
                mgc_vect = dy.inputVector(mgc[mgc_index])
    return upsampled
Example 5: test_multilinear_forward
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def test_multilinear_forward():
    model = dy.Model()
    a, b, c = np.random.RandomState(0).randn(3, 100)
    ml = MultilinearFactored(n_features=100, n_inputs=3, n_components=5,
                             model=model)
    dy_fwd = ml(dy.inputVector(a),
                dy.inputVector(b),
                dy.inputVector(c)).value()
    U = [dy.parameter(u).value() for u in ml.get_components()]
    expected = np.dot(U[0], a)
    expected *= np.dot(U[1], b)
    expected *= np.dot(U[2], c)
    expected = np.sum(expected)
    assert (expected - dy_fwd) ** 2 < 1e-4
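The test compares the DyNet forward pass against a NumPy reference. Writing U^{(i)} for the component matrices returned by get_components() and x_1, x_2, x_3 for the three input vectors, the quantity computed in `expected` is the factored multilinear score

    s = \sum_{k=1}^{K} \prod_{i=1}^{3} \bigl(U^{(i)} x_i\bigr)_k

i.e. the elementwise product of the three projections followed by a sum over the K components (K = n_components = 5 here).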
Example 6: loss
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def loss(self, sentence, word_chars, tags_set):
    '''
    For use in training phase.
    Tag sentence (all attributes) and compute loss based on probability of expected tags.
    '''
    observations_set = self.build_tagging_graph(sentence, word_chars)
    errors = {}
    for att, tags in tags_set.items():
        err = []
        for obs, tag in zip(observations_set[att], tags):
            err_t = dy.pickneglogsoftmax(obs, tag)
            err.append(err_t)
        errors[att] = dy.esum(err)
    if self.att_props is not None:
        for att, err in errors.items():
            prop_vec = dy.inputVector([self.att_props[att]] * err.dim()[0])
            # store the re-weighted loss back into the dict (the original assigned to a local only)
            errors[att] = dy.cmult(err, prop_vec)
    return errors
Example 7: compute_accuracy
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def compute_accuracy(self, inputs, outputs):
    correct = 0
    for (label, img) in self.val_dataset:
        dy.renew_cg()
        x = dy.inputVector(img)
        co.forward({inputs['in']: x})
        logits = outputs['out'].val
        pred = np.argmax(logits.npvalue())
        if label == pred:
            correct += 1
    return 1.0 * correct / len(self.val_dataset)
Example 8: evaluate
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def evaluate(self, inputs, outputs):
    params = M.get_collection()
    optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)
    num_batches = int(len(self.train_dataset) / self.batch_size)
    for epoch in range(self.max_num_training_epochs):
        random.shuffle(self.train_dataset)
        i = 0
        total_loss = 0
        while i < len(self.train_dataset):
            dy.renew_cg()
            mbsize = min(self.batch_size, len(self.train_dataset) - i)
            minibatch = self.train_dataset[i:i + mbsize]
            losses = []
            for (label, img) in minibatch:
                x = dy.inputVector(img)
                co.forward({inputs['in']: x})
                logits = outputs['out'].val
                loss = dy.pickneglogsoftmax(logits, label)
                losses.append(loss)
            mbloss = dy.esum(losses) / mbsize
            mbloss.backward()
            optimizer.update()
            total_loss += mbloss.scalar_value()
            i += mbsize
        val_acc = self.compute_accuracy(inputs, outputs)
        if self.log_output_to_terminal and epoch % self.display_step == 0:
            print("epoch:", '%d' % (epoch + 1), "loss:",
                  "{:.9f}".format(total_loss / num_batches),
                  "validation_accuracy:", "%.5f" % val_acc)
    val_acc = self.compute_accuracy(inputs, outputs)
    return {'val_acc': val_acc}
Example 9: viterbi_decoding
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def viterbi_decoding(self, observations, gold_tags, use_margins):
    backpointers = []
    init_vvars = [-1e10] * self.tagset_size
    init_vvars[t2i[START_TAG]] = 0  # <Start> has all the probability
    for_expr = dy.inputVector(init_vvars)
    trans_exprs = [self.transitions[idx] for idx in range(self.tagset_size)]
    for gold, obs in zip(gold_tags, observations):
        bptrs_t = []
        vvars_t = []
        for next_tag in range(self.tagset_size):
            next_tag_expr = for_expr + trans_exprs[next_tag]
            next_tag_arr = next_tag_expr.npvalue()
            best_tag_id = np.argmax(next_tag_arr)
            bptrs_t.append(best_tag_id)
            vvars_t.append(dy.pick(next_tag_expr, best_tag_id))
        for_expr = dy.concatenate(vvars_t) + obs
        # optional margin adaptation
        if use_margins and self.margins != 0:
            adjust = [self.margins] * self.tagset_size
            adjust[gold] = 0
            for_expr = for_expr + dy.inputVector(adjust)
        backpointers.append(bptrs_t)
    # Perform final transition to terminal
    terminal_expr = for_expr + trans_exprs[t2i[END_TAG]]
    terminal_arr = terminal_expr.npvalue()
    best_tag_id = np.argmax(terminal_arr)
    path_score = dy.pick(terminal_expr, best_tag_id)
    # Reverse over the backpointers to get the best path
    best_path = [best_tag_id]  # Start with the tag that was best for terminal
    for bptrs_t in reversed(backpointers):
        best_tag_id = bptrs_t[best_tag_id]
        best_path.append(best_tag_id)
    start = best_path.pop()  # Remove the start symbol
    best_path.reverse()
    assert start == t2i[START_TAG]
    # Return best path and best path's score
    return best_path, path_score
Example 10: viterbi_decoding
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def viterbi_decoding(self, observations):
    backpointers = []
    init_vvars = [-1e10] * self.dim_output
    init_vvars[self.sp_s] = 0
    for_expr = dy.inputVector(init_vvars)
    trans_exprs = [self.trans[idx] for idx in range(self.dim_output)]
    for obs in observations:
        bptrs_t = []
        vvars_t = []
        for next_tag in range(self.dim_output):
            next_tag_expr = for_expr + trans_exprs[next_tag]
            next_tag_arr = next_tag_expr.npvalue()
            best_tag_id = np.argmax(next_tag_arr)
            bptrs_t.append(best_tag_id)
            vvars_t.append(dy.pick(next_tag_expr, best_tag_id))
        for_expr = dy.concatenate(vvars_t) + obs
        backpointers.append(bptrs_t)
    terminal_expr = for_expr + trans_exprs[self.sp_e]
    terminal_arr = terminal_expr.npvalue()
    best_tag_id = np.argmax(terminal_arr)
    path_score = dy.pick(terminal_expr, best_tag_id)
    best_path = [best_tag_id]
    for bptrs_t in reversed(backpointers):
        best_tag_id = bptrs_t[best_tag_id]
        best_path.append(best_tag_id)
    start = best_path.pop()
    best_path.reverse()
    if start != self.sp_s:
        raise AssertionError("start != self.sp_s")
    return best_path, path_score
Example 11: encode_pt
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def encode_pt(self, X, train=False):
    dy.renew_cg()
    # Remove dy.parameter(...) for DyNet v.2.1
    # w_pos = dy.parameter(self.w_pos)
    # b_pos = dy.parameter(self.b_pos)
    w_pos = self.w_pos
    b_pos = self.b_pos
    ipts = []
    length = len(X[0])
    for i in range(length):
        cids = X[0][i]
        wid = X[1][i]
        tids = X[2][i]
        vec_char = self.char_seq_model.transduce([self.UNI[cid] for cid in cids])[-1]
        vec_tags = []
        for tid in tids:
            if tid == 0:
                zero = dy.inputVector(np.zeros(self.dim_tag_emb))
                vec_tags.append(zero)
            else:
                vec_tags.append(self.POS[tid])
        vec_tag = dy.esum(vec_tags)
        if wid == 0:
            vec_word = dy.inputVector(np.zeros(self.dim_word))
        else:
            vec_word = self.WORD[wid]
        vec_at_i = dy.concatenate([vec_word, vec_char, vec_tag])
        if train is True:
            vec_at_i = dy.dropout(vec_at_i, self.dropout_rate)
        ipts.append(vec_at_i)
    hiddens = self.pos_model.transduce(ipts)
    probs = [dy.softmax(w_pos * h + b_pos) for h in hiddens]
    return probs
Example 12: _predict_one
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def _predict_one(self, mgc, noise):
    mgc = dy.inputVector(mgc)
    outputs = []
    noise_vec = dy.inputVector(noise[0:self.UPSAMPLE_COUNT])
    [hidden_w, hidden_b] = self.mlp_excitation
    hidden_input = mgc  # dy.concatenate([mgc, noise_vec])
    for w, b in zip(hidden_w, hidden_b):
        hidden_input = dy.tanh(w.expr(update=True) * hidden_input + b.expr(update=True))
    excitation = dy.logistic(
        self.excitation_w.expr(update=True) * hidden_input + self.excitation_b.expr(update=True))
    [hidden_w, hidden_b] = self.mlp_filter
    hidden_input = mgc  # dy.concatenate([mgc, noise_vec])
    for w, b in zip(hidden_w, hidden_b):
        hidden_input = dy.tanh(w.expr(update=True) * hidden_input + b.expr(update=True))
    filter = dy.tanh(self.filter_w.expr(update=True) * hidden_input + self.filter_b.expr(update=True))
    [hidden_w, hidden_b] = self.mlp_vuv
    hidden_input = mgc  # dy.concatenate([mgc, noise_vec])
    for w, b in zip(hidden_w, hidden_b):
        hidden_input = dy.tanh(w.expr(update=True) * hidden_input + b.expr(update=True))
    vuv = dy.logistic(self.vuv_w.expr(update=True) * hidden_input + self.vuv_b.expr(update=True))
    # sample_vec = dy.inputVector(noise[self.UPSAMPLE_COUNT:self.UPSAMPLE_COUNT * 2])
    # noise_vec = dy.inputVector(noise[0:self.UPSAMPLE_COUNT + self.FILTER_SIZE - 1])
    mixed = excitation  # * vuv + noise_vec * (1.0 - vuv)
    for ii in range(self.UPSAMPLE_COUNT):
        tmp = dy.cmult(filter, dy.pickrange(mixed, ii, ii + self.FILTER_SIZE))
        outputs.append(dy.sum_elems(tmp))
    outputs = dy.concatenate(outputs)
    # from ipdb import set_trace
    # set_trace()
    # mixed = dy.reshape(mixed, (self.UPSAMPLE_COUNT + self.FILTER_SIZE - 1, 1, 1))
    # filter = dy.reshape(filter, (self.FILTER_SIZE, 1, 1, 1))
    # outputs = dy.conv2d(mixed, filter, stride=(1, 1), is_valid=True)
    # outputs = dy.reshape(outputs, (self.UPSAMPLE_COUNT,))
    # outputs = outputs + noise_vec * vuv
    return outputs, excitation, filter, vuv
Example 13: _compute_guided_attention
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def _compute_guided_attention(self, att_vect, decoder_step, num_characters, num_mgcs):
    target_probs = []
    t1 = float(decoder_step) / num_mgcs
    for encoder_step in range(num_characters):
        target_probs.append(1.0 - np.exp(-((float(encoder_step) / num_characters - t1) ** 2) / 0.1))
    target_probs = dy.inputVector(target_probs)
    return dy.transpose(target_probs) * att_vect
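As implemented above, the guided-attention penalty weight for encoder position n (out of N = num_characters) at decoder step t (out of T = num_mgcs) is

    w_{n,t} = 1 - \exp\left(-\frac{(n/N - t/T)^{2}}{0.1}\right)

and the returned value is the dot product of this weight vector with the attention vector, which penalizes attention mass far from the diagonal alignment.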
Example 14: compute_gold_style_probs
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def compute_gold_style_probs(self, target_mgc):
    gold_mgc = [dy.inputVector(mgc) for mgc in target_mgc]
    hidden = gold_mgc
    for fw, bw in zip(self.style_encoder_fw, self.style_encoder_bw):
        fw_out = fw.initial_state().transduce(hidden)
        bw_out = list(reversed(bw.initial_state().transduce(reversed(hidden))))
        hidden = [dy.concatenate([x_fw, x_bw]) for x_fw, x_bw in zip(fw_out, bw_out)]
    summary = dy.concatenate([fw_out[-1], bw_out[0]])
    _, style_probs = self._attend_classic([self.style_lookup[i] for i in range(self.NUM_STYLE_TOKENS)], summary,
                                          self.att_style_w1.expr(update=True), self.att_style_w2.expr(update=True),
                                          self.att_style_v.expr(update=True))
    return style_probs
Example 15: learn
# Module to import: import dynet [as alias]
# Or: from dynet import inputVector [as alias]
def learn(self, characters, target_mgc, guided_att=True):
    num_mgc = target_mgc.shape[0]
    # print num_mgc
    dy.renew_cg()
    for pi in characters:
        if pi.char not in self.encodings.char2int:
            print("Unknown input: '" + pi.char + "' - skipping file")
            return 0
    style_probs = self.compute_gold_style_probs(target_mgc)
    output_mgc, output_stop, output_attention = self._predict(characters, target_mgc, style_probs=style_probs)
    losses = []
    index = 0
    for mgc, real_mgc in zip(output_mgc, target_mgc):
        t_mgc = dy.inputVector(real_mgc)
        # losses.append(self._compute_binary_divergence(mgc, t_mgc))
        losses.append(dy.l1_distance(mgc, t_mgc))
        if index % 3 == 0:
            # attention loss
            if guided_att:
                att = output_attention[index // 3]
                losses.append(self._compute_guided_attention(att, index // 3, len(characters) + 2, num_mgc // 3))
            # EOS loss
            stop = output_stop[index // 3]
            if index >= num_mgc - 6:
                losses.append(dy.l1_distance(stop, dy.scalarInput(-0.8)))
            else:
                losses.append(dy.l1_distance(stop, dy.scalarInput(0.8)))
        index += 1
    loss = dy.esum(losses)
    loss_val = loss.value() / num_mgc
    loss.backward()
    self.trainer.update()
    return loss_val