This page collects typical usage examples of the Python method nolearn.lasagne.TrainSplit. If you have been wondering what exactly lasagne.TrainSplit does, how to call it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore the containing module, nolearn.lasagne, for more usage examples.
Six code examples of the lasagne.TrainSplit method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
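To make the pattern concrete before the examples, here is a minimal sketch of how TrainSplit is typically handed to a nolearn NeuralNet. Everything in it (layer sizes, data shapes, hyperparameters) is made up for illustration and is not taken from the examples below; it assumes the nolearn 0.6-era API:

import numpy as np
from lasagne import layers
from lasagne.nonlinearities import softmax
from nolearn.lasagne import NeuralNet, TrainSplit

net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 10),                 # 10 input features (made up)
    output_num_units=2,                     # 2 classes (made up)
    output_nonlinearity=softmax,
    update_learning_rate=0.01,
    train_split=TrainSplit(eval_size=0.2),  # hold out 20% of the data for validation
    max_epochs=5,
    verbose=1,
)

X = np.random.rand(100, 10).astype(np.float32)
y = np.random.randint(0, 2, size=100).astype(np.int32)
net.fit(X, y)  # with verbose=1, train and validation loss are printed per epoch

TrainSplit(eval_size=...) tells NeuralNet.fit what fraction of the training data to set aside as an internal validation set (stratified by label for classification problems); passing eval_size=0.0 is the usual way to disable the split and train on all of the data.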
Example 1: model_initial
# Required import: from nolearn import lasagne [as alias]
# Or: from nolearn.lasagne import TrainSplit [as alias]
# Also assumed by this snippet (not shown on this page): import numpy as np; import theano;
# from nolearn.lasagne import NeuralNet, BatchIterator; from lasagne.updates import adam;
# create_network and SaveTrainHistory are defined elsewhere in the project.
def model_initial(X_train, y_train, max_iter=5):
    global params, val_acc
    params = []
    val_acc = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print 'Initializing weights (%d/%d) ...' % (iteration + 1, max_iter)
        network_init = create_network()
        net_init = NeuralNet(
            network_init,
            max_epochs=3,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),  # hold out 10% for validation
            batch_iterator_train=BatchIterator(batch_size=32),
            batch_iterator_test=BatchIterator(batch_size=64),
            on_training_finished=[SaveTrainHistory(iteration=iteration)],
            verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)
# model training
Example 2: model_train
# Required import: from nolearn import lasagne [as alias]
# Or: from nolearn.lasagne import TrainSplit [as alias]
# Also assumed: the params list and val_acc array produced by model_initial above,
# plus a user-defined EarlyStopping handler.
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),  # hold out 10% for validation
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        #on_training_started=[LoadBestParam(iteration=val_acc.argmax())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print 'Loading pre-training weights...'
    net.load_params_from(params[val_acc.argmax()])  # warm-start from the best initialization
    print 'Continue to train...'
    net.fit(X_train, y_train)
    print 'Model training finished.'
    return net
# model testing
Example 3: model_train
# Required import: from nolearn import lasagne [as alias]
# Or: from nolearn.lasagne import TrainSplit [as alias]
# Regression variant of Example 2; also assumes squared_error from lasagne.objectives
# and the params list and val_loss array produced by the regression model_initial below.
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),  # hold out 10% for validation
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        regression=True,
        objective_loss_function=squared_error,
        #on_training_started=[LoadBestParam(iteration=val_loss.argmin())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print 'Loading pre-training weights...'
    net.load_params_from(params[val_loss.argmin()])  # warm-start from the best initialization
    print 'Continue to train...'
    net.fit(X_train, y_train)
    print 'Training finished.'
    return net
# model testing
Example 4: model_initial
# Required import: from nolearn import lasagne [as alias]
# Or: from nolearn.lasagne import TrainSplit [as alias]
# Regression variant of Example 1.
def model_initial(X_train, y_train, max_iter=5):
    global params, val_loss
    params = []
    val_loss = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print 'Initializing weights (%d/%d) ...' % (iteration + 1, max_iter)
        print iteration
        network_init = create_network()
        net_init = NeuralNet(
            network_init,
            max_epochs=3,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),  # hold out 10% for validation
            batch_iterator_train=BatchIterator(batch_size=32),
            batch_iterator_test=BatchIterator(batch_size=64),
            regression=True,
            objective_loss_function=squared_error,
            on_training_finished=[SaveTrainHistory(iteration=iteration)],
            verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)
# model training
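The SaveTrainHistory, LoadBestParam, and EarlyStopping handlers used in Examples 1-4 are user-defined callbacks, not part of nolearn itself. For reference, a common way to implement such an early-stopping handler (a sketch of the widely used pattern; the actual class in these examples may differ) is:

class EarlyStopping(object):
    """Stop training when validation loss has not improved for `patience` epochs."""
    def __init__(self, patience=5):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            # New best validation loss: remember it and snapshot the weights.
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            # No improvement for `patience` epochs: restore best weights and stop.
            print 'Early stopping: best valid loss %.6f at epoch %d.' % (
                self.best_valid, self.best_valid_epoch)
            nn.load_params_from(self.best_weights)
            raise StopIteration()

nolearn calls each on_epoch_finished handler with the net and its train_history after every epoch; raising StopIteration ends training cleanly.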
Example 5: __init__
# Required import: from nolearn import lasagne [as alias]
# Or: from nolearn.lasagne import TrainSplit [as alias]
def __init__(self, isTrain, isNN):
    super(RegressionNN, self).__init__(isTrain, isNN)
    # data preprocessing
    #self.dataPreprocessing()
    self.net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            #('hidden2', layers.DenseLayer),
            #('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 13),    # input dimension is 13
        hidden_num_units=6,        # number of units in the hidden layer
        #hidden2_num_units=8,      # number of units in hidden layer 2
        #hidden3_num_units=4,      # number of units in hidden layer 3
        output_nonlinearity=None,  # identity (linear) output, as needed for regression
        output_num_units=1,        # output dimension is 1
        # objective function
        objective_loss_function=lasagne.objectives.squared_error,
        # optimization method:
        update=lasagne.updates.nesterov_momentum,
        update_learning_rate=0.002,
        update_momentum=0.4,
        # use 20% of the data as the validation set
        train_split=TrainSplit(eval_size=0.2),
        regression=True,           # flag to indicate a regression problem
        max_epochs=100,            # train for this many epochs
        verbose=0,
    )
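For context, a net configured like self.net1 above is trained and queried through the usual nolearn interface. The data shapes below are dictated by input_shape=(None, 13) and regression=True; the variable names and random data are hypothetical:

import numpy as np
X = np.random.rand(200, 13).astype(np.float32)  # 13 feature columns, as in input_shape
y = np.random.rand(200, 1).astype(np.float32)   # float32 target, since regression=True
net1.fit(X, y)                 # TrainSplit(eval_size=0.2) holds out 20% for validation
predictions = net1.predict(X)  # shape (200, 1)

Here net1 stands in for the self.net1 attribute defined above.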
Example 6: __init__
# Required import: from nolearn import lasagne [as alias]
# Or: from nolearn.lasagne import TrainSplit [as alias]
def __init__(self, isTrain):
    super(RegressionUniformBlending, self).__init__(isTrain)
    # data preprocessing
    #self.dataPreprocessing()
    self.net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            #('hidden2', layers.DenseLayer),
            #('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 13),    # input dimension is 13
        hidden_num_units=6,        # number of units in the hidden layer
        #hidden2_num_units=8,      # number of units in hidden layer 2
        #hidden3_num_units=4,      # number of units in hidden layer 3
        output_nonlinearity=None,  # identity (linear) output, as needed for regression
        output_num_units=1,        # output dimension is 1
        # objective function
        objective_loss_function=lasagne.objectives.squared_error,
        # optimization method:
        update=lasagne.updates.nesterov_momentum,
        update_learning_rate=0.002,
        update_momentum=0.4,
        # use 20% of the data as the validation set
        train_split=TrainSplit(eval_size=0.2),
        regression=True,           # flag to indicate a regression problem
        max_epochs=100,            # train for this many epochs
        verbose=0,
    )
    # Create linear regression object
    self.linRegr = linear_model.LinearRegression()
    # Create KNN regression object
    self.knn = neighbors.KNeighborsRegressor(86, weights='distance')
    # Create decision tree regression object
    self.decisionTree = DecisionTreeRegressor(max_depth=7, max_features=None)
    # Create AdaBoost regression object
    decisionReg = DecisionTreeRegressor(max_depth=10)
    rng = np.random.RandomState(1)
    self.adaReg = AdaBoostRegressor(decisionReg,
                                    n_estimators=400,
                                    random_state=rng)
    # Create random forest regression object
    self.model = RandomForestRegressor(max_features='sqrt', n_estimators=32, max_depth=39)
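The class name suggests uniform blending: the individual regressors are trained separately and their predictions are averaged with equal weight. A sketch of what the blending step typically looks like (the method name is hypothetical and not taken from the source; it assumes all base models have already been fit):

def predict_blend(self, X):
    # Uniform blending (sketch): average the predictions of all base models.
    preds = [
        self.net1.predict(X).ravel(),  # nolearn returns shape (n, 1) for regression
        self.linRegr.predict(X),
        self.knn.predict(X),
        self.decisionTree.predict(X),
        self.adaReg.predict(X),
        self.model.predict(X),
    ]
    return np.mean(preds, axis=0)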