本文整理汇总了Python中parameters.Parameters.normalize方法的典型用法代码示例。如果您正苦于以下问题:Python Parameters.normalize方法的具体用法?Python Parameters.normalize怎么用?Python Parameters.normalize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类parameters.Parameters的用法示例。
在下文中一共展示了Parameters.normalize方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from parameters import Parameters [as 别名]
# 或者: from parameters.Parameters import normalize [as 别名]
#.........这里部分代码省略.........
correct_repr = correct_sequence[-1:]
assert noise_repr != correct_repr
assert noise_sequence[:-1] == correct_sequence[:-1]
sequence = correct_sequence[:-1]
# r = graph.train(self.embed(sequence), self.embed([correct_repr])[0], self.embed([noise_repr])[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr])
r = graph.train(self.embed(sequence), self.embed(correct_repr)[0], self.embed(noise_repr)[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr], learning_rate * weight)
assert len(noise_repr) == 1
assert len(correct_repr) == 1
noise_repr = noise_repr[0]
correct_repr = correct_repr[0]
(loss, predictrepr, correct_score, noise_score, dsequence, dcorrect_repr, dnoise_repr, dcorrect_scorebias, dnoise_scorebias) = r
# print
# print "loss = ", loss
# print "predictrepr = ", predictrepr
# print "correct_repr = ", correct_repr, self.embed(correct_repr)[0]
# print "noise_repr = ", noise_repr, self.embed(noise_repr)[0]
# print "correct_score = ", correct_score
# print "noise_score = ", noise_score
else:
r = graph.train(self.embeds(correct_sequences), self.embeds(noise_sequences), learning_rate * weights[0])
if HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] != 0:
(dcorrect_inputss, dnoise_inputss, losss, unpenalized_losss, l1penaltys, correct_scores, noise_scores) = r
else:
(losss, unpenalized_losss, l1penaltys, correct_scores, noise_scores) = r
# print [d.shape for d in dcorrect_inputss]
# print [d.shape for d in dnoise_inputss]
# print "losss", losss.shape, losss
# print "unpenalized_losss", unpenalized_losss.shape, unpenalized_losss
# print "l1penaltys", l1penaltys.shape, l1penaltys
# print "correct_scores", correct_scores.shape, correct_scores
# print "noise_scores", noise_scores.shape, noise_scores
import sets
to_normalize = sets.Set()
for ecnt in range(len(correct_sequences)):
(loss, unpenalized_loss, correct_score, noise_score) = \
(losss[ecnt], unpenalized_losss[ecnt], correct_scores[ecnt], noise_scores[ecnt])
if l1penaltys.shape == ():
assert l1penaltys == 0
l1penalty = 0
else:
l1penalty = l1penaltys[ecnt]
correct_sequence = correct_sequences[ecnt]
noise_sequence = noise_sequences[ecnt]
if HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] != 0:
dcorrect_inputs = [d[ecnt] for d in dcorrect_inputss]
dnoise_inputs = [d[ecnt] for d in dnoise_inputss]
# print [d.shape for d in dcorrect_inputs]
# print [d.shape for d in dnoise_inputs]
# print "loss", loss.shape, loss
# print "unpenalized_loss", unpenalized_loss.shape, unpenalized_loss
# print "l1penalty", l1penalty.shape, l1penalty
# print "correct_score", correct_score.shape, correct_score
# print "noise_score", noise_score.shape, noise_score
self.train_loss.add(loss)
self.train_err.add(correct_score <= noise_score)
self.train_lossnonzero.add(loss > 0)
squashloss = 1./(1.+math.exp(-loss))
self.train_squashloss.add(squashloss)
if not LBL:
self.train_unpenalized_loss.add(unpenalized_loss)
self.train_l1penalty.add(l1penalty)
示例2: __init__
# 需要导入模块: from parameters import Parameters [as 别名]
# 或者: from parameters.Parameters import normalize [as 别名]
#.........这里部分代码省略.........
sequence = correct_sequence[:-1]
# r = graph.train(self.embed(sequence), self.embed([correct_repr])[0], self.embed([noise_repr])[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr])
r = graph.train(self.embed(sequence), self.embed(correct_repr)[0], self.embed(noise_repr)[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr], learning_rate * weight)
assert len(noise_repr) == 1
assert len(correct_repr) == 1
noise_repr = noise_repr[0]
correct_repr = correct_repr[0]
(loss, predictrepr, correct_score, noise_score, dsequence, dcorrect_repr, dnoise_repr, dcorrect_scorebias, dnoise_scorebias) = r
# print
# print "loss = ", loss
# print "predictrepr = ", predictrepr
# print "correct_repr = ", correct_repr, self.embed(correct_repr)[0]
# print "noise_repr = ", noise_repr, self.embed(noise_repr)[0]
# print "correct_score = ", correct_score
# print "noise_score = ", noise_score
else:
noise_sequences, weights = self.corrupt_examples(correct_sequences)
# All weights must be the same, if we first multiply by the learning rate
for w in weights: assert w == weights[0]
#print self.embeds(correct_sequences)
#print self.embeds(noise_sequences)
#print learning_rate * weights[0]
r = graph.train(self.embeds(correct_sequences), self.embeds(noise_sequences), learning_rate * weights[0])
(dcorrect_inputss, dnoise_inputss, losss, unpenalized_losss, l1penaltys, correct_scores, noise_scores) = r
# print [d.shape for d in dcorrect_inputss]
# print [d.shape for d in dnoise_inputss]
# print "losss", losss.shape, losss
# print "unpenalized_losss", unpenalized_losss.shape, unpenalized_losss
# print "l1penaltys", l1penaltys.shape, l1penaltys
# print "correct_scores", correct_scores.shape, correct_scores
# print "noise_scores", noise_scores.shape, noise_scores
import sets
to_normalize = sets.Set()
for ecnt in range(len(correct_sequences)):
(loss, unpenalized_loss, correct_score, noise_score) = \
(losss[ecnt], unpenalized_losss[ecnt], correct_scores[ecnt], noise_scores[ecnt])
if l1penaltys.shape == ():
assert l1penaltys == 0
l1penalty = 0
else:
l1penalty = l1penaltys[ecnt]
correct_sequence = correct_sequences[ecnt]
noise_sequence = noise_sequences[ecnt]
dcorrect_inputs = [d[ecnt] for d in dcorrect_inputss]
dnoise_inputs = [d[ecnt] for d in dnoise_inputss]
# print [d.shape for d in dcorrect_inputs]
# print [d.shape for d in dnoise_inputs]
# print "loss", loss.shape, loss
# print "unpenalized_loss", unpenalized_loss.shape, unpenalized_loss
# print "l1penalty", l1penalty.shape, l1penalty
# print "correct_score", correct_score.shape, correct_score
# print "noise_score", noise_score.shape, noise_score
self.train_loss.add(loss)
self.train_err.add(correct_score <= noise_score)
self.train_lossnonzero.add(loss > 0)
squashloss = 1./(1.+math.exp(-loss))
self.train_squashloss.add(squashloss)
if not LBL:
self.train_unpenalized_loss.add(unpenalized_loss)
self.train_l1penalty.add(l1penalty)
self.train_unpenalized_lossnonzero.add(unpenalized_loss > 0)
示例3: __init__
# 需要导入模块: from parameters import Parameters [as 别名]
# 或者: from parameters.Parameters import normalize [as 别名]
#.........这里部分代码省略.........
# Backoff to 0gram smoothing if we fail 10 times to get noise.
if cnt > 10: e[-1] = random.randint(0, self.parameters.vocab_size-1)
weight = 1./pr
return e, weight
def corrupt_examples(self, correct_sequences):
    """Corrupt a batch of correct sequences into noise sequences.

    Delegates to ``corrupt_example`` once per input sequence and collects
    the results. (Indentation reconstructed: the scraped original had its
    leading whitespace stripped and was not syntactically valid.)

    Args:
        correct_sequences: iterable of training sequences (each a sequence
            of token ids, per ``corrupt_example``).

    Returns:
        A pair ``(noise_sequences, weights)`` of parallel lists:
        ``weights[i]`` is the importance weight returned by
        ``corrupt_example`` for ``noise_sequences[i]``.
    """
    noise_sequences = []
    weights = []
    for e in correct_sequences:
        noise_sequence, weight = self.corrupt_example(e)
        noise_sequences.append(noise_sequence)
        weights.append(weight)
    return noise_sequences, weights
# NOTE(review): this function was scraped from a web page that stripped all
# leading indentation, and the body appears truncated at the end of this
# excerpt (`to_normalize` is filled but never consumed here — presumably the
# elided tail calls Parameters.normalize on it; verify against the full
# source). Code is preserved verbatim; only comments are added.
# Purpose: run one training step on a minibatch — corrupt each correct
# sequence into a noise sequence, do a gradient update via the module-level
# `graph`, then accumulate per-example training statistics.
def train(self, correct_sequences):
from hyperparameters import HYPERPARAMETERS
learning_rate = HYPERPARAMETERS["LEARNING_RATE"]
# Build one noise sequence (and importance weight) per correct sequence.
noise_sequences, weights = self.corrupt_examples(correct_sequences)
# All weights must be the same, if we first multiply by the learning rate
for w in weights: assert w == weights[0]
# `graph` is a module-level name (not visible in this excerpt); it performs
# the forward/backward pass and returns gradients plus per-example scores.
r = graph.train(self.embeds(correct_sequences), self.embeds(noise_sequences), learning_rate * weights[0])
(dcorrect_inputss, dnoise_inputss, losss, unpenalized_losss, l1penaltys, correct_scores, noise_scores) = r
# print [d.shape for d in dcorrect_inputss]
# print [d.shape for d in dnoise_inputss]
# print "losss", losss.shape, losss
# print "unpenalized_losss", unpenalized_losss.shape, unpenalized_losss
# print "l1penaltys", l1penaltys.shape, l1penaltys
# print "correct_scores", correct_scores.shape, correct_scores
# print "noise_scores", noise_scores.shape, noise_scores
# NOTE(review): `sets` is a Python 2 module deprecated since 2.6 — the
# built-in `set` would be the modern replacement.
import sets
to_normalize = sets.Set()
# Unpack the batched results one example at a time and record statistics.
for ecnt in range(len(correct_sequences)):
(loss, unpenalized_loss, correct_score, noise_score) = \
(losss[ecnt], unpenalized_losss[ecnt], correct_scores[ecnt], noise_scores[ecnt])
# A scalar (0-d) l1penaltys means no per-example penalty was computed;
# it must then be exactly zero.
if l1penaltys.shape == ():
assert l1penaltys == 0
l1penalty = 0
else:
l1penalty = l1penaltys[ecnt]
correct_sequence = correct_sequences[ecnt]
noise_sequence = noise_sequences[ecnt]
# Per-example embedding gradients for the correct and noise inputs.
dcorrect_inputs = [d[ecnt] for d in dcorrect_inputss]
dnoise_inputs = [d[ecnt] for d in dnoise_inputss]
# print [d.shape for d in dcorrect_inputs]
# print [d.shape for d in dnoise_inputs]
# print "loss", loss.shape, loss
# print "unpenalized_loss", unpenalized_loss.shape, unpenalized_loss
# print "l1penalty", l1penalty.shape, l1penalty
# print "correct_score", correct_score.shape, correct_score
# print "noise_score", noise_score.shape, noise_score
# Accumulate running training statistics (these are presumably
# moving-average/accumulator objects set up in __init__ — not visible here).
self.train_loss.add(loss)
# An error: the correct example did not outscore the noise example.
self.train_err.add(correct_score <= noise_score)
self.train_lossnonzero.add(loss > 0)
# Logistic squash of the loss, for a bounded summary statistic.
squashloss = 1./(1.+math.exp(-loss))
self.train_squashloss.add(squashloss)
self.train_unpenalized_loss.add(unpenalized_loss)
self.train_l1penalty.add(l1penalty)
self.train_unpenalized_lossnonzero.add(unpenalized_loss > 0)