This article collects typical usage examples of the Python method nolearn.lasagne.NeuralNet. If you are wondering what lasagne.NeuralNet does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples from the enclosing module, nolearn.lasagne.
The following shows 12 code examples of the lasagne.NeuralNet method, sorted by popularity by default.
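Before the examples, here is a minimal, self-contained sketch of the typical NeuralNet workflow. It is not taken from the examples below; the layer sizes and the random data are placeholders chosen for illustration.

import numpy as np
from lasagne import layers, nonlinearities, updates
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    # each entry pairs a layer name with a Lasagne layer class;
    # per-layer parameters are passed as <name>_<param> keywords
    layers=[
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 10),      # batch size unspecified, 10 features
    hidden_num_units=32,
    output_num_units=2,          # two classes
    output_nonlinearity=nonlinearities.softmax,
    update=updates.nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    max_epochs=10,
    verbose=1,
)

# placeholder data: 100 samples, 10 features, binary labels
X = np.random.rand(100, 10).astype(np.float32)
y = np.random.randint(0, 2, 100).astype(np.int32)
net.fit(X, y)
print(net.predict(X[:5]))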
Example 1: model_initial
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def model_initial(X_train, y_train, max_iter=5):
    global params, val_acc
    params = []
    val_acc = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print('Initializing weights (%d/%d) ...' % (iteration + 1, max_iter))
        network_init = create_network()
        net_init = NeuralNet(
            network_init,
            max_epochs=3,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            batch_iterator_train=BatchIterator(batch_size=32),
            batch_iterator_test=BatchIterator(batch_size=64),
            on_training_finished=[SaveTrainHistory(iteration=iteration)],
            verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)
# model training
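The SaveTrainHistory handler used above is a user-defined callback that is not shown in this collection. What follows is a hypothetical sketch of what it might look like, assuming the global params/val_acc set up in model_initial and nolearn's callback(nn, train_history) calling convention.

class SaveTrainHistory(object):
    """Hypothetical callback: records the final validation accuracy and
    a snapshot of the weights into the globals used by model_initial."""

    def __init__(self, iteration):
        self.iteration = iteration

    def __call__(self, nn, train_history):
        # nolearn appends one dict per epoch to train_history
        val_acc[self.iteration] = train_history[-1]['valid_accuracy']
        params.append(nn.get_all_params_values())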
Example 2: model_train
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        # on_training_started=[LoadBestParam(iteration=val_acc.argmax())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print('Loading pre-training weights...')
    net.load_params_from(params[val_acc.argmax()])
    print('Continuing training...')
    net.fit(X_train, y_train)
    print('Model training finished.')
    return net
# model testing
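EarlyStopping is likewise not defined in this collection. A common implementation, following the widely circulated nolearn tutorial pattern, stops training once the validation loss has not improved for `patience` epochs and restores the best weights seen so far:

import numpy as np

class EarlyStopping(object):
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            # new best: remember the epoch and snapshot the weights
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            # no improvement for `patience` epochs: restore best and stop
            nn.load_params_from(self.best_weights)
            raise StopIteration()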
Example 3: model_train
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        regression=True,
        objective_loss_function=squared_error,
        # on_training_started=[LoadBestParam(iteration=val_loss.argmin())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print('Loading pre-training weights...')
    net.load_params_from(params[val_loss.argmin()])
    print('Continuing training...')
    net.fit(X_train, y_train)
    print('Training finished.')
    return net
# model testing
Example 4: model_initial
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def model_initial(X_train, y_train, max_iter=5):
    global params, val_loss
    params = []
    val_loss = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print('Initializing weights (%d/%d) ...' % (iteration + 1, max_iter))
        network_init = create_network()
        net_init = NeuralNet(
            network_init,
            max_epochs=3,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            batch_iterator_train=BatchIterator(batch_size=32),
            batch_iterator_test=BatchIterator(batch_size=64),
            regression=True,
            objective_loss_function=squared_error,
            on_training_finished=[SaveTrainHistory(iteration=iteration)],
            verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)
# model training
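Examples 1 through 4 are meant to be used together: model_initial pretrains a few random initializations and records their validation scores, and model_train reloads the best snapshot and fine-tunes it. A hypothetical end-to-end call for the regression pair, assuming X_train and y_train are already prepared:

model_initial(X_train, y_train, max_iter=5)   # fills the params and val_loss globals
net = model_train(X_train, y_train, learning_rate=1e-4, epochs=50)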
Example 5: get_nn_model
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def get_nn_model(shape):
    np.random.seed(9)
    model = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, shape[1]),
        hidden1_num_units=16,  # number of units in the first hidden layer
        hidden1_nonlinearity=sigmoid,
        hidden2_num_units=8,  # number of units in the second hidden layer
        hidden2_nonlinearity=sigmoid,
        output_nonlinearity=softmax,
        output_num_units=2,  # two target classes
        # optimization method:
        update=adagrad,
        update_learning_rate=theano.shared(np.float32(0.1)),
        on_epoch_finished=[],
        use_label_encoder=False,
        batch_iterator_train=BatchIterator(batch_size=500),
        regression=False,  # classification, not regression
        max_epochs=900,  # train for this many epochs
        verbose=1,
        eval_size=0.0,
    )
    return model
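A hypothetical usage of get_nn_model; the feature matrix and labels below are placeholders, and only shape[1] (the feature count) is actually read from the shape argument:

import numpy as np

X = np.random.rand(1000, 20).astype(np.float32)
y = np.random.randint(0, 2, 1000).astype(np.int32)

model = get_nn_model(X.shape)    # input_shape becomes (None, 20)
model.fit(X, y)
proba = model.predict_proba(X)   # (1000, 2) softmax probabilities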
Example 6: CNN
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def CNN(n_epochs):
    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),  # convolutional layer; params defined below
            ('pool1', layers.MaxPool2DLayer),  # max-pooling downsamples, speeding up execution
            ('conv2', layers.Conv2DLayer),
            ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, 6, 5),
        conv1_num_filters=8,
        conv1_filter_size=(3, 3),
        conv1_nonlinearity=lasagne.nonlinearities.rectify,
        pool1_pool_size=(2, 2),
        conv2_num_filters=12,
        conv2_filter_size=(1, 1),
        conv2_nonlinearity=lasagne.nonlinearities.rectify,
        hidden3_num_units=1000,
        output_num_units=2,
        output_nonlinearity=lasagne.nonlinearities.softmax,
        update_learning_rate=0.0001,
        update_momentum=0.9,
        max_epochs=n_epochs,
        verbose=0,
    )
    return net1
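A hypothetical usage of CNN; the network expects single-channel 6x5 inputs, so X must be shaped (n_samples, 1, 6, 5):

import numpy as np

X = np.random.rand(500, 1, 6, 5).astype(np.float32)
y = np.random.randint(0, 2, 500).astype(np.int32)

net = CNN(n_epochs=20)
net.fit(X, y)
predictions = net.predict(X)     # hard class labels, shape (500,)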
Example 7: __init__
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def __init__(self, isTrain, isNN):
    super(RegressionNN, self).__init__(isTrain, isNN)
    # data preprocessing
    # self.dataPreprocessing()
    self.net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            # ('hidden2', layers.DenseLayer),
            # ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 13),  # input dimension is 13
        hidden_num_units=6,  # number of units in hidden layer
        # hidden2_num_units=8,
        # hidden3_num_units=4,
        output_nonlinearity=None,  # linear (identity) output
        output_num_units=1,  # output dimension is 1
        # objective function
        objective_loss_function=lasagne.objectives.squared_error,
        # optimization method:
        update=lasagne.updates.nesterov_momentum,
        update_learning_rate=0.002,
        update_momentum=0.4,
        # use 20% of the data for validation
        train_split=TrainSplit(eval_size=0.2),
        regression=True,  # this is a regression problem
        max_epochs=100,  # train for this many epochs
        verbose=0,
    )
Example 8: __init__
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def __init__(self, isTrain, isOutlierRemoval, isNN=1):
    super(ClassificationNN, self).__init__(isTrain, isOutlierRemoval, isNN=1)
    # data preprocessing
    self.dataPreprocessing()
    self.net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            # ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 12),  # input dimension is 12
        hidden_num_units=6,  # number of units in hidden layer
        # hidden2_num_units=3,
        output_nonlinearity=lasagne.nonlinearities.sigmoid,  # sigmoid output layer
        output_num_units=1,  # output dimension is 1
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.002,
        update_momentum=0.9,
        regression=True,  # train the single sigmoid output in regression mode
        max_epochs=25,  # train for this many epochs
        verbose=0,
    )
Example 9: get_net
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def get_net():
    return NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('dropout2', layers.DropoutLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('dropout3', layers.DropoutLayer),
            ('hidden4', layers.DenseLayer),
            ('dropout4', layers.DropoutLayer),
            ('hidden5', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, 1, 96, 96),
        conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
        dropout1_p=0.1,
        conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
        dropout2_p=0.2,
        conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
        dropout3_p=0.3,
        hidden4_num_units=1000,
        dropout4_p=0.5,
        hidden5_num_units=1000,
        output_num_units=30, output_nonlinearity=None,
        update_learning_rate=theano.shared(float32(0.03)),
        update_momentum=theano.shared(float32(0.9)),
        regression=True,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.999),
            EarlyStopping(patience=200),
        ],
        max_epochs=3000,
        verbose=1,
    )
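Three names in this example come from outside the snippet: float32, AdjustVariable, and FlipBatchIterator. The first two follow a well-known pattern (a cast helper and a callback that linearly anneals a Theano shared variable over training); FlipBatchIterator is a custom BatchIterator subclass that augments batches by mirroring images, and is not reproduced here. A sketch of the first two, assuming nolearn's callback(nn, train_history) convention:

import numpy as np

def float32(k):
    # cast a Python number to the float32 dtype Theano shared variables expect
    return np.cast['float32'](k)

class AdjustVariable(object):
    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None

    def __call__(self, nn, train_history):
        if self.ls is None:
            # precompute one value per epoch, from `start` down to `stop`
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        new_value = float32(self.ls[epoch - 1])
        getattr(nn, self.name).set_value(new_value)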
Example 10: __init__
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def __init__(self, isTrain):
    super(RegressionUniformBlending, self).__init__(isTrain)
    # data preprocessing
    # self.dataPreprocessing()
    self.net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            # ('hidden2', layers.DenseLayer),
            # ('hidden3', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 13),  # input dimension is 13
        hidden_num_units=6,  # number of units in hidden layer
        # hidden2_num_units=8,
        # hidden3_num_units=4,
        output_nonlinearity=None,  # linear (identity) output
        output_num_units=1,  # output dimension is 1
        # objective function
        objective_loss_function=lasagne.objectives.squared_error,
        # optimization method:
        update=lasagne.updates.nesterov_momentum,
        update_learning_rate=0.002,
        update_momentum=0.4,
        # use 20% of the data for validation
        train_split=TrainSplit(eval_size=0.2),
        regression=True,  # this is a regression problem
        max_epochs=100,  # train for this many epochs
        verbose=0,
    )
    # create linear regression object
    self.linRegr = linear_model.LinearRegression()
    # create KNN regression object
    self.knn = neighbors.KNeighborsRegressor(86, weights='distance')
    # create decision tree regression object
    self.decisionTree = DecisionTreeRegressor(max_depth=7, max_features=None)
    # create AdaBoost regression object
    decisionReg = DecisionTreeRegressor(max_depth=10)
    rng = np.random.RandomState(1)
    self.adaReg = AdaBoostRegressor(decisionReg,
                                    n_estimators=400,
                                    random_state=rng)
    # create random forest regression object
    self.model = RandomForestRegressor(max_features='sqrt', n_estimators=32, max_depth=39)
Example 11: __init__
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def __init__(self, isTrain, isOutlierRemoval=0):
    super(ClassificationUniformBlending, self).__init__(isTrain, isOutlierRemoval)
    # data preprocessing
    self.dataPreprocessing()
    # create logistic regression object
    self.logreg = linear_model.LogisticRegression(tol=1e-6, penalty='l1', C=0.0010985411419875584)
    # create AdaBoost object
    self.dt_stump = DecisionTreeClassifier(max_depth=10)
    self.ada = AdaBoostClassifier(
        base_estimator=self.dt_stump,
        learning_rate=1,
        n_estimators=5,
        algorithm="SAMME.R")
    # create KNN object
    self.knn = neighbors.KNeighborsClassifier(2, weights='uniform')
    # create decision tree object
    self.decisiontree = DecisionTreeClassifier(max_depth=45, max_features='log2')
    # create neural network object
    self.net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            # ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 12),  # input dimension is 12
        hidden_num_units=6,  # number of units in hidden layer
        # hidden2_num_units=3,
        output_nonlinearity=lasagne.nonlinearities.sigmoid,  # sigmoid output layer
        output_num_units=1,  # output dimension is 1
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.002,
        update_momentum=0.9,
        regression=True,  # train the single sigmoid output in regression mode
        max_epochs=25,  # train for this many epochs
        verbose=0,
    )
    # create PLA (perceptron) object
    self.pla = Perceptron()
    # create random forest object
    self.rf = RandomForestClassifier(max_features='log2', n_estimators=20, max_depth=30)
Example 12: __init__
# Required module: from nolearn import lasagne
# Alternatively: from nolearn.lasagne import NeuralNet
def __init__(self, isTrain, isOutlierRemoval=0):
    super(ClassificationLinearBlending, self).__init__(isTrain, isOutlierRemoval)
    # data preprocessing
    self.dataPreprocessing()
    # create logistic regression object
    self.logreg = linear_model.LogisticRegression(tol=1e-6, penalty='l1', C=0.0010985411419875584)
    # create AdaBoost object
    self.dt_stump = DecisionTreeClassifier(max_depth=10)
    self.ada = AdaBoostClassifier(
        base_estimator=self.dt_stump,
        learning_rate=1,
        n_estimators=5,
        algorithm="SAMME.R")
    # create KNN object
    self.knn = neighbors.KNeighborsClassifier(6, weights='uniform')
    # create decision tree object
    self.decisiontree = DecisionTreeClassifier(max_depth=50)
    # create neural network object
    self.net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            # ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 12),  # input dimension is 12
        hidden_num_units=6,  # number of units in hidden layer
        # hidden2_num_units=3,
        output_nonlinearity=lasagne.nonlinearities.sigmoid,  # sigmoid output layer
        output_num_units=1,  # output dimension is 1
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.002,
        update_momentum=0.9,
        regression=True,  # train the single sigmoid output in regression mode
        max_epochs=25,  # train for this many epochs
        verbose=0,
    )
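Because the nets in Examples 8, 11, and 12 combine regression=True with a single sigmoid output, they are binary classifiers trained with a squared-error-style objective: targets must be float32 column vectors of 0.0/1.0, and predictions come back as probabilities to be thresholded. A hypothetical fit/predict cycle, assuming clf is an instance of one of the classes above:

import numpy as np

X = np.random.rand(200, 12).astype(np.float32)             # 12 features
y = np.random.randint(0, 2, (200, 1)).astype(np.float32)   # float 0/1 targets

clf.net1.fit(X, y)
probs = clf.net1.predict(X)           # sigmoid outputs in (0, 1)
labels = (probs > 0.5).astype(int)    # threshold to hard 0/1 labels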