本文整理匯總了Python中dynet.renew_cg方法的典型用法代碼示例。如果您正苦於以下問題:Python dynet.renew_cg方法的具體用法?Python dynet.renew_cg怎麽用?Python dynet.renew_cg使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類dynet
的用法示例。
在下文中一共展示了dynet.renew_cg方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: synthesize
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def synthesize(self, mgc, batch_size, sample=True, temperature=1.0, path=None, return_residual=False):
    """Synthesize a 16-bit PCM waveform from a sequence of MGC frames.

    Runs the vocoder frame by frame (a fresh DyNet graph per frame), printing
    progress percentages to stdout in 5% increments.

    Args:
        mgc: sequence of MGC feature frames.
        batch_size, sample, temperature, path, return_residual: accepted for
            interface compatibility; not used in this code path.

    Returns:
        np.ndarray of dtype int16 with len(mgc) * self.UPSAMPLE_COUNT samples.
    """
    last_proc = 0
    synth = []
    # One extra UPSAMPLE_COUNT of noise so the final frame can still read a
    # 2 * UPSAMPLE_COUNT window.
    noise = np.random.normal(0, 1.0, (len(mgc) * self.UPSAMPLE_COUNT + self.UPSAMPLE_COUNT))
    for mgc_index in range(len(mgc)):
        dy.renew_cg()
        curr_proc = int((mgc_index + 1) * 100 / len(mgc))
        if curr_proc % 5 == 0 and curr_proc != last_proc:
            while last_proc < curr_proc:
                last_proc += 5
                sys.stdout.write(' ' + str(last_proc))
                sys.stdout.flush()
        # Renamed third output to avoid shadowing the builtin `filter`;
        # excitation/filter/vuv outputs are unused in synthesis.
        output, _excitation, _filter, _vuv = self._predict_one(
            mgc[mgc_index],
            noise[self.UPSAMPLE_COUNT * mgc_index:self.UPSAMPLE_COUNT * mgc_index + 2 * self.UPSAMPLE_COUNT])
        synth.extend(output.value())
    synth = np.array(synth, dtype=np.float32)
    # Scale [-1, 1] floats to the int16 range and clamp before the cast.
    synth = np.clip(synth * 32768, -32767, 32767)
    synth = np.array(synth, dtype=np.int16)
    return synth
示例2: predict_emb
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def predict_emb(self, chars):
    """Embed a character-id sequence with a bi-directional LSTM + MLP head."""
    dy.renew_cg()
    fwd_init = self.char_fwd_lstm.initial_state()
    bwd_init = self.char_bwd_lstm.initial_state()
    hidden_w = dy.parameter(self.lstm_to_rep_params)
    hidden_b = dy.parameter(self.lstm_to_rep_bias)
    out_w = dy.parameter(self.mlp_out)
    out_b = dy.parameter(self.mlp_out_bias)
    # Pad on both ends so the LSTMs see explicit sequence boundaries.
    pad = self.c2i[PADDING_CHAR]
    embeddings = [self.char_lookup[cid] for cid in [pad] + chars + [pad]]
    fwd_states = fwd_init.transduce(embeddings)
    bwd_states = bwd_init.transduce(reversed(embeddings))
    # Final state of each direction forms the sequence representation.
    rep = dy.concatenate([fwd_states[-1], bwd_states[-1]])
    return out_w * dy.tanh(hidden_w * rep + hidden_b) + out_b
示例3: build_tagging_graph
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def build_tagging_graph(self, sentence):
    """Score every token of `sentence` for each attribute's tag set.

    Returns a dict mapping attribute -> list of per-token score expressions.
    """
    dy.renew_cg()
    word_vecs = [self.word_rep(w) for w in sentence]
    lstm_states = self.word_bi_lstm.transduce(word_vecs)
    scores = {}
    for att in self.attributes:
        hidden_w = dy.parameter(self.lstm_to_tags_params[att])
        hidden_b = dy.parameter(self.lstm_to_tags_bias[att])
        out_w = dy.parameter(self.mlp_out[att])
        out_b = dy.parameter(self.mlp_out_bias[att])
        scores[att] = [out_w * dy.tanh(hidden_w * state + hidden_b) + out_b
                       for state in lstm_states]
    return scores
示例4: compute_accuracy
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def compute_accuracy(self, inputs, outputs):
    """Return the fraction of self.val_dataset classified correctly."""
    num_correct = 0
    for label, img in self.val_dataset:
        dy.renew_cg()
        co.forward({inputs['in']: dy.inputVector(img)})
        prediction = np.argmax(outputs['out'].val.npvalue())
        if prediction == label:
            num_correct += 1
    return 1.0 * num_correct / len(self.val_dataset)
示例5: evaluate
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def evaluate(self, inputs, outputs):
    """Train the network on self.train_dataset with plain SGD.

    Shuffles and minibatches the training data each epoch, optionally logging
    loss and validation accuracy every `display_step` epochs.

    Returns:
        dict with 'val_acc': validation accuracy after the final epoch.
    """
    params = M.get_collection()
    optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)
    num_batches = int(len(self.train_dataset) / self.batch_size)
    for epoch in range(self.max_num_training_epochs):
        random.shuffle(self.train_dataset)
        i = 0
        total_loss = 0
        while i < len(self.train_dataset):
            dy.renew_cg()
            mbsize = min(self.batch_size, len(self.train_dataset) - i)
            minibatch = self.train_dataset[i:i + mbsize]
            losses = []
            for label, img in minibatch:
                x = dy.inputVector(img)
                co.forward({inputs['in']: x})
                logits = outputs['out'].val
                losses.append(dy.pickneglogsoftmax(logits, label))
            mbloss = dy.esum(losses) / mbsize
            mbloss.backward()
            optimizer.update()
            total_loss += mbloss.scalar_value()
            i += mbsize
        # Only pay for a full validation pass on epochs that are actually
        # logged (the original computed it every epoch and discarded it).
        if self.log_output_to_terminal and epoch % self.display_step == 0:
            val_acc = self.compute_accuracy(inputs, outputs)
            print("epoch:", '%d' % (epoch + 1), "loss:",
                  "{:.9f}".format(total_loss / num_batches),
                  "validation_accuracy:", "%.5f" % val_acc)
    val_acc = self.compute_accuracy(inputs, outputs)
    return {'val_acc': val_acc}
示例6: build_tagging_graph
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def build_tagging_graph(self, sentence):
    """Return a list of per-token tag score expressions for `sentence`.

    When the global `options.bigram` flag is set, each token's bi-LSTM state
    is concatenated with its two bigram embeddings before scoring.
    """
    dy.renew_cg()
    word_vecs = [self.word_rep(w) for w in sentence]
    lstm_states = self.bi_lstm.transduce(word_vecs)
    hidden_w = dy.parameter(self.lstm_to_tags_params)
    hidden_b = dy.parameter(self.lstm_to_tags_bias)
    out_w = dy.parameter(self.mlp_out)
    out_b = dy.parameter(self.mlp_out_bias)
    if options.bigram:
        scores = []
        for state, word in zip(lstm_states, sentence):
            left = dy.lookup(self.bigram_lookup, word[0], update=self.we_update)
            right = dy.lookup(self.bigram_lookup, word[1], update=self.we_update)
            if self.dropout is not None:
                left = dy.dropout(left, self.dropout)
                right = dy.dropout(right, self.dropout)
            combined = dy.concatenate([left, state, right])
            scores.append(out_w * dy.tanh(hidden_w * combined + hidden_b) + out_b)
    else:
        scores = [out_w * dy.tanh(hidden_w * state + hidden_b) + out_b
                  for state in lstm_states]
    return scores
示例7: calc_errors
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def calc_errors(self, batch: List[Tuple]):
    """Return the mean forward error of each (input, output) pair as an array."""
    dy.renew_cg()
    per_example = [dy.average(list(self.model.forward(src, tgt))) for src, tgt in batch]
    values = dy.concatenate(per_example).value()
    # For a single element, .value() yields a scalar rather than a list.
    if len(batch) == 1:
        values = [values]
    return np.array(values)
示例8: train_epoch
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def train_epoch(self, batch_size):
    """Train for one epoch; return the mean per-batch loss of this epoch."""
    batches = list(self.batch(self.train_data, batch_size))
    for minibatch in tqdm(batches, unit="batch-" + str(batch_size)):
        dy.renew_cg()
        total = dy.esum([dy.average(list(self.model.forward(src, tgt)))
                         for src, tgt in minibatch])
        self.losses.append(float(total.value()) / len(minibatch))
        total.backward()
        self.trainer.update()
        time.sleep(0.01)  # brief pause between batches, as in the original
    recent = self.losses[-len(batches):]
    return sum(recent) / len(batches)
示例9: predict
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def predict(self, inputs, greedy=True):
    """Yield the model's forward results for each input, one graph per example."""
    for example in inputs:
        dy.renew_cg()
        yield list(self.model.forward(example, greedy=greedy))
示例10: train_step
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def train_step(model, batch, lr_coeff, dropout):
    """Run one gradient step over `batch`; return the scalar batch loss.

    Loss is summed over items and normalized by the total number of gold
    query tokens in the batch. The trainer's learning rate is set to
    `lr_coeff` before updating.
    """
    dy.renew_cg()
    assert not model.prev_decoder_snippet_rep
    losses = []
    token_count = 0
    for item in batch.items:
        item_loss = model.prepare_and_predict(item,
                                              use_gold=True,
                                              training=True,
                                              dropout=dropout)
        token_count += len(item.gold_query())
        losses.append(item_loss)
    batch_loss = dy.esum(losses) / token_count
    batch_loss.forward()
    batch_loss.backward()
    model.trainer.learning_rate = lr_coeff
    model.trainer.update()
    return batch_loss.npvalue()[0]
# eval_step
# Runs an evaluation on the example.
#
# Inputs:
# example: an Utterance.
# use_gold: whether or not to pass gold tokens into the decoder.
#
# Outputs:
# information provided by prepare and predict
示例11: eval_step
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def eval_step(model,
              example,
              use_gold=False,
              dropout_amount=0.,
              beam_size=1):
    """Evaluate the model on one example (no training) and return its result."""
    dy.renew_cg()
    assert not model.prev_decoder_snippet_rep
    result = model.prepare_and_predict(example,
                                       use_gold=use_gold,
                                       training=False,
                                       dropout=dropout_amount,
                                       beam_size=beam_size)
    return result
示例12: encode_ws
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def encode_ws(self, X, train=False):
    """Encode segmentation features; return per-position observation scores.

    X is a tuple of parallel per-position feature lists:
    unigrams, bigrams, character types, word-start ids, word-end ids.
    """
    dy.renew_cg()
    # dy.parameter(...) wrappers were removed for DyNet v.2.1; the
    # parameters are used directly as expressions.
    weight = self.w_ws
    bias = self.b_ws
    features = []
    for i in range(len(X[0])):
        vec_uni = dy.concatenate([self.UNI[uid] for uid in X[0][i]])
        vec_bi = dy.concatenate([self.BI[bid] for bid in X[1][i]])
        vec_ctype = dy.concatenate([self.CTYPE[cid] for cid in X[2][i]])
        # Start/end word features are summed rather than concatenated.
        vec_start = dy.esum([self.WORD[sid] for sid in X[3][i]])
        vec_end = dy.esum([self.WORD[eid] for eid in X[4][i]])
        feature = dy.concatenate([vec_uni, vec_bi, vec_ctype, vec_start, vec_end])
        if train is True:
            feature = dy.dropout(feature, self.dropout_rate)
        features.append(feature)
    hidden = self.ws_model.transduce(features)
    return [weight * h + bias for h in hidden]
示例13: encode_pt
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def encode_pt(self, X, train=False):
    """Encode POS-tagging features; return per-position softmax distributions.

    X is a tuple of parallel per-position lists: character ids, word id,
    candidate tag ids.
    """
    dy.renew_cg()
    # dy.parameter(...) wrappers were removed for DyNet v.2.1; the
    # parameters are used directly as expressions.
    weight = self.w_pos
    bias = self.b_pos
    features = []
    for i in range(len(X[0])):
        cids, wid, tids = X[0][i], X[1][i], X[2][i]
        # Last hidden state of the char-level LSTM summarizes the characters.
        vec_char = self.char_seq_model.transduce([self.UNI[cid] for cid in cids])[-1]
        # id 0 gets a zero vector instead of an embedding lookup.
        vec_tag = dy.esum([self.POS[tid] if tid != 0
                           else dy.inputVector(np.zeros(self.dim_tag_emb))
                           for tid in tids])
        vec_word = (self.WORD[wid] if wid != 0
                    else dy.inputVector(np.zeros(self.dim_word)))
        feature = dy.concatenate([vec_word, vec_char, vec_tag])
        if train is True:
            feature = dy.dropout(feature, self.dropout_rate)
        features.append(feature)
    hidden = self.pos_model.transduce(features)
    return [dy.softmax(weight * h + bias) for h in hidden]
示例14: start_batch
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def start_batch(self):
    """Begin a new batch: clear accumulated losses and renew the graph."""
    dy.renew_cg()
    self.losses = []
示例15: transcribe
# 需要導入模塊: import dynet [as 別名]
# 或者: from dynet import renew_cg [as 別名]
def transcribe(self, word):
    """Return the phoneme transcription of `word`.

    Uses the lexicon when the lowercased word is present; otherwise runs the
    model and picks the argmax phoneme at every output step.
    """
    key = word.lower()
    if key in self.lexicon:
        return self.lexicon[key]
    dy.renew_cg()
    output, _ = self._predict(word)
    return [self.encodings.phoneme_list[np.argmax(step.npvalue())]
            for step in output]