本文整理汇总了Python中pylearn2.training_algorithms.sgd.SGD.continue_learning方法的典型用法代码示例。如果您正苦于以下问题:Python SGD.continue_learning方法的具体用法?Python SGD.continue_learning怎么用?Python SGD.continue_learning使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pylearn2.training_algorithms.sgd.SGD
的用法示例。
在下文中一共展示了SGD.continue_learning方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: DenseDesignMatrix
# 需要导入模块: from pylearn2.training_algorithms.sgd import SGD [as 别名]
# 或者: from pylearn2.training_algorithms.sgd.SGD import continue_learning [as 别名]
# Example 1: fit a tiny MLP (sigmoid hidden layer -> linear output) to
# noisy synthetic regression data y = x0 * x1 + noise, driving the SGD
# epoch loop manually via continue_learning().
import theano
import numpy as np

n_samples = 200
n_features = 2

# Synthetic dataset: the target is the product of the two input features.
X = np.random.normal(0, 1, (n_samples, n_features))
y = X[:, 0] * X[:, 1] + np.random.normal(0, .1, n_samples)
y.shape = (n_samples, 1)
ds = DenseDesignMatrix(X=X, y=y)

# One sigmoid hidden layer of 10 units feeding a single linear output.
net = MLP([Sigmoid(layer_name='hidden', dim=10, irange=.1, init_bias=1.),
           Linear(dim=1, layer_name='y', irange=.1)],
          nvis=2)

# EpochCounter(200) makes continue_learning() return False after 200 epochs.
sgd = SGD(learning_rate=.05, batch_size=10,
          termination_criterion=EpochCounter(200))
sgd.setup(net, ds)

keep_training = True
while keep_training:
    sgd.train(dataset=ds)
    net.monitor.report_epoch()
    net.monitor()
    keep_training = sgd.continue_learning(net)

# Forward-pass the training inputs through the fitted network.
y_est = net.fprop(theano.shared(X, name='inputs')).eval()
print(y_est.shape)
示例2: SequenceTaggerNetwork
# 需要导入模块: from pylearn2.training_algorithms.sgd import SGD [as 别名]
# 或者: from pylearn2.training_algorithms.sgd.SGD import continue_learning [as 别名]
#.........这里部分代码省略.........
if self.reg_factors:
rf = self.reg_factors
lhdims = len(self.tagger.hdims)
l_inputlayer = len(self.tagger.layers[0].layers)
coeffs = ([[rf] * l_inputlayer] + ([rf] * lhdims) + [rf], rf)
cost = SeqTaggerCost(coeffs, self.dropout)
self.cost = cost
self.mbsb = MonitorBasedSaveBest(channel_name='valid_objective',
save_path=save_best_path)
mon_dataset = dict(self.dataset)
if not self.monitor_train:
del mon_dataset['train']
_learning_rule = (self.momentum_rule if self.use_momentum else None)
self.algorithm = SGD(batch_size=1, learning_rate=self.lr,
termination_criterion=term,
monitoring_dataset=mon_dataset,
cost=cost,
learning_rule=_learning_rule,
)
self.algorithm.setup(self, self.dataset['train'])
if self.plot_monitor:
cn = ["valid_objective", "test_objective"]
if self.monitor_train:
cn.append("train_objective")
plots = Plots(channel_names=cn, save_path=self.plot_monitor)
self.pm = PlotManager([plots], freq=1)
self.pm.setup(self, None, self.algorithm)
def train(self):
    """Run the SGD main loop until the termination criterion fires.

    Each iteration trains one pass over the training set, refreshes the
    monitor channels, then notifies every registered extension
    (best-model saver, optional momentum / learning-rate adjustors,
    optional plot manager) via its ``on_monitor`` callback.
    """
    while self.algorithm.continue_learning(self):
        self.algorithm.train(dataset=self.dataset['train'])
        self.monitor.report_epoch()
        self.monitor()
        # All extensions receive the same (model, validation set, algorithm)
        # arguments, so build the tuple once per epoch.
        monitor_args = (self, self.dataset['valid'], self.algorithm)
        self.mbsb.on_monitor(*monitor_args)
        if self.use_momentum:
            self.momentum_adjustor.on_monitor(*monitor_args)
        # These two extensions are optional attributes on the network.
        if hasattr(self, 'learning_rate_adjustor'):
            self.learning_rate_adjustor.on_monitor(*monitor_args)
        if hasattr(self, 'pm'):
            self.pm.on_monitor(*monitor_args)
def prepare_tagging(self):
    """Compile the forward function and cache the transition parameters.

    Builds ``self.f``, a compiled theano function mapping one
    (words, features) batch to the network output, and splits the
    learned matrix ``A`` into the pieces the Viterbi decoder consumes.
    """
    # Symbolic single-example batch; fprop yields the tag scores.
    batch = self.get_input_space().make_theano_batch(batch_size=1)
    scores = self.fprop(batch)
    self.f = theano.function([batch[0], batch[1]], scores)
    # Rows of A: [0] start scores, [1] end scores, [2:] tag-to-tag
    # transition table. Separate get_value() calls keep each piece an
    # independent copy, exactly as the original code did.
    self.start = self.A.get_value()[0]
    self.end = self.A.get_value()[1]
    self.A_value = self.A.get_value()[2:]
def process_input(self, words, feats):
    """Run the compiled network function on one (words, feats) sentence.

    ``self.f`` must already exist — it is built by ``prepare_tagging``.
    """
    forward = self.f
    return forward(words, feats)
def tag_sen(self, words, feats, debug=False, return_probs=False):
if not hasattr(self, 'f'):
self.prepare_tagging()
y = self.process_input(words, feats)
tagger_out = y[2 + self.n_classes:]
res = viterbi(self.start, self.A_value, self.end, tagger_out,
示例3: MLP
# 需要导入模块: from pylearn2.training_algorithms.sgd import SGD [as 别名]
# 或者: from pylearn2.training_algorithms.sgd.SGD import continue_learning [as 别名]
# Example 3: train an MLP for 2 epochs with SGD, then zero-pad the test
# matrix so it can be fprop'd in fixed-size batches.
# NOTE(review): `layers`, `ishape`, `train_set_*`, `test_set_*` and
# `DataPylearn2` are defined in code omitted from this excerpt.
#layers = [layer0, layer2, layer3]
ann = MLP(layers, input_space=ishape)
# batches_per_iter=1: each train() call processes a single 100-example
# batch; EpochCounter(2) ends the loop below after 2 epochs.
t_algo = SGD(learning_rate = 1e-1,
        batch_size = 100,
        batches_per_iter = 1,
        termination_criterion=EpochCounter(2)
        )
# [48,48,1] presumably the image shape and 7 the class count (emotions
# dataset) — confirm against DataPylearn2's constructor.
ds = DataPylearn2([train_set_x,train_set_y],[48,48,1],7)
t_algo.setup(ann, ds)
while True:
    t_algo.train(dataset=ds)
    ann.monitor.report_epoch()
    ann.monitor()
    if not t_algo.continue_learning(ann):
        break

# test: https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/icml_2013_wrepl/emotions/make_submission.py
ds2 = DataPylearn2([test_set_x,test_set_y],[48,48,1],-1)
m = ds2.X.shape[0]
batch_size = 100
# Pad with zero rows so the row count divides evenly by batch_size;
# (… - m % batch_size) % batch_size is 0 when m already divides evenly.
extra = (batch_size - m % batch_size) % batch_size
assert (m + extra) % batch_size == 0
if extra > 0:
    ds2.X = np.concatenate((ds2.X, np.zeros((extra, ds2.X.shape[1]),
                                            dtype=ds2.X.dtype)), axis=0)
assert ds2.X.shape[0] % batch_size == 0
# Symbolic batch input and the network's prediction graph.
X = ann.get_input_space().make_batch_theano()
Y = ann.fprop(X)