本文整理汇总了Python中nolearn.lasagne.NeuralNet.fit方法的典型用法代码示例。如果您正苦于以下问题:Python NeuralNet.fit方法的具体用法?Python NeuralNet.fit怎么用?Python NeuralNet.fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nolearn.lasagne.NeuralNet
的用法示例。
在下文中一共展示了NeuralNet.fit方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: trainNet
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def trainNet(X, Y, ln, loadFile = ""):
    """Build and train a two-hidden-layer regression MLP.

    Parameters
    ----------
    X, Y : training inputs / targets (numInputs features -> numOutputs values).
    ln : learning rate for the nesterov_momentum updates.
    loadFile : optional path to previously saved parameters. Currently
        unused -- loading is disabled (see note below).

    Returns the fitted NeuralNet.
    """
    net1 = NeuralNet(
        layers=[  # four layers: two hidden layers
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('hidden1', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters: best configuration found so far was 400/400
        input_shape=(None, numInputs),  # numInputs features per sample
        hidden_num_units=400,           # number of units in hidden layer
        hidden1_num_units=400,
        hidden_nonlinearity=lasagne.nonlinearities.sigmoid,
        hidden1_nonlinearity=lasagne.nonlinearities.sigmoid,
        output_nonlinearity=None,       # identity output for regression
        output_num_units=numOutputs,    # numOutputs regression targets
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=ln,
        update_momentum=0.9,
        regression=True,  # flag to indicate we're dealing with a regression problem
        # NOTE(review): originally 1500, but it was immediately overwritten
        # to 10 after construction; set it once here to keep behavior clear.
        max_epochs=10,
        verbose=1,
    )
    # NOTE(review): parameter loading from loadFile is intentionally
    # disabled; re-enable with net1.load_params_from(loadFile) when needed.
    net1.fit(X, Y)
    return net1
示例2: train
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def train():
weather = load_weather()
training = load_training()
X = assemble_X(training, weather)
print len(X[0])
mean, std = normalize(X)
y = assemble_y(training)
input_size = len(X[0])
learning_rate = theano.shared(np.float32(0.1))
net = NeuralNet(
layers=[
('input', InputLayer),
('hidden1', DenseLayer),
('dropout1', DropoutLayer),
('hidden2', DenseLayer),
('dropout2', DropoutLayer),
('output', DenseLayer),
],
# layer parameters:
input_shape=(None, input_size),
hidden1_num_units=325,
dropout1_p=0.4,
hidden2_num_units=325,
dropout2_p=0.4,
output_nonlinearity=sigmoid,
output_num_units=1,
# optimization method:
update=nesterov_momentum,
update_learning_rate=learning_rate,
update_momentum=0.9,
# Decay the learning rate
on_epoch_finished=[
AdjustVariable(learning_rate, target=0, half_life=1),
],
# This is silly, but we don't want a stratified K-Fold here
# To compensate we need to pass in the y_tensor_type and the loss.
regression=True,
y_tensor_type = T.imatrix,
objective_loss_function = binary_crossentropy,
max_epochs=85,
eval_size=0.1,
verbose=1,
)
X, y = shuffle(X, y, random_state=123)
net.fit(X, y)
_, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
probas = net.predict_proba(X_valid)[:,0]
print("ROC score", metrics.roc_auc_score(y_valid, probas))
return net, mean, std
示例3: fit_model
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def fit_model(train_x, y, test_x):
    """Feed-forward neural network for the kaggle digit recognizer competition.

    Network size and optimization time are intentionally limited (by
    choosing max_epochs = 10) to meet runtime restrictions.

    Returns the predicted labels for test_x.
    """
    # BUGFIX(review): the banner said "Convetional" and the docstring
    # claimed max_epochs = 15 while the code uses 10; both corrected.
    print("\n\nRunning Conventional Net. Optimization progress below\n\n")
    net1 = NeuralNet(
        layers=[  # list the layers here
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, train_x.shape[1]),
        hidden1_num_units=200, hidden1_nonlinearity=rectify,  # params of first layer
        output_nonlinearity=softmax,  # softmax for classification problems
        output_num_units=10,          # 10 target values (digits 0-9)
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.05,
        update_momentum=0.7,
        regression=False,
        max_epochs=10,  # intentionally limited for execution speed
        verbose=1,
    )
    net1.fit(train_x, y)
    predictions = net1.predict(test_x)
    return(predictions)
示例4: lasagne_oneLayer_classifier
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def lasagne_oneLayer_classifier(param, X, labels):
    """Fit a single-hidden-layer softmax classifier configured by `param`.

    `param` supplies num_features, dense0_num_units, dropout_p,
    num_classes, update_learning_rate, update_momentum and max_epochs.
    Returns the fitted NeuralNet.
    """
    # Architecture: input -> dense -> dropout -> softmax output.
    architecture = [
        ('input', InputLayer),
        ('dense0', DenseLayer),
        ('dropout', DropoutLayer),
        ('output', DenseLayer),
    ]
    classifier = NeuralNet(
        layers=architecture,
        input_shape=(None, param['num_features']),
        dense0_num_units=param['dense0_num_units'],
        dropout_p=param['dropout_p'],
        output_num_units=param['num_classes'],
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        update_learning_rate=param['update_learning_rate'],
        update_momentum=param['update_momentum'],
        eval_size=0.02,
        verbose=1,
        max_epochs=param['max_epochs'],
    )
    classifier.fit(X, labels)
    return classifier
示例5: train
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def train(self, X, y_train, X_test, ids_test, y_test, outfile, is_valid):
    """Train a two-hidden-layer dropout net and write a submission file.

    Labels are integer-encoded with LabelEncoder before fitting.
    NOTE: y_test, outfile and is_valid are accepted for interface
    compatibility but are not used in this implementation.
    """
    features = np.array(X)
    label_encoder = LabelEncoder()
    targets = label_encoder.fit_transform(y_train).astype(np.int32)
    n_classes = len(label_encoder.classes_)
    n_features = features.shape[1]
    architecture = [
        ('input', InputLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ]
    model = NeuralNet(
        layers=architecture,
        input_shape=(None, n_features),
        dense1_num_units=3500,
        dropout1_p=0.4,
        dense2_num_units=2300,
        dropout2_p=0.5,
        output_num_units=n_classes,
        output_nonlinearity=softmax,
        update=adagrad,  # adagrad chosen over nesterov momentum here
        update_learning_rate=0.01,
        objective_loss_function=categorical_crossentropy,
        eval_size=0.2,
        verbose=1,
        max_epochs=20,
    )
    model.fit(features, targets)
    self.make_submission(model, np.array(X_test), ids_test, label_encoder)
示例6: nn_example
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def nn_example(data):
    """Train a one-hidden-layer digit classifier on data['X_train'] /
    data['y_train'], then print a prediction for the first test sample.
    """
    network = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 28 * 28),  # flattened 28x28 images
        hidden_num_units=100,         # units in the 'hidden' layer
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=10,          # one unit per digit 0..9
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=10,
        verbose=1,
    )
    # Train the network.
    network.fit(data['X_train'], data['y_train'])
    # Sanity-check the trained model on a single unseen sample.
    sample = data['X_test'][0]
    print("Feature vector (100-110): %s" % sample[100:110])
    print("Label: %s" % str(data['y_test'][0]))
    print("Predicted: %s" % str(network.predict([sample])))
示例7: neural_network
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def neural_network(x_train, y_train):
    """Fit a two-hidden-layer dropout softmax classifier.

    Returns (net, scaler) so the caller can scale test data identically.
    """
    X, y, encoder, scaler = load_train_data(x_train, y_train)
    n_classes = len(encoder.classes_)
    n_features = X.shape[1]
    architecture = [
        ("input", InputLayer),
        ("dropoutf", DropoutLayer),  # dropout applied directly to the input
        ("dense0", DenseLayer),
        ("dropout", DropoutLayer),
        ("dense1", DenseLayer),
        ("dropout2", DropoutLayer),
        ("output", DenseLayer),
    ]
    model = NeuralNet(
        layers=architecture,
        input_shape=(None, n_features),
        dropoutf_p=0.15,
        dense0_num_units=1000,
        dropout_p=0.25,
        dense1_num_units=500,
        dropout2_p=0.25,
        output_num_units=n_classes,
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=0.005,
        eval_size=0.01,
        verbose=1,
        max_epochs=30,
    )
    model.fit(X, y)
    return (model, scaler)
示例8: fit
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def fit(xTrain, yTrain, dense0_num=800, dropout_p=0.5, dense1_num=500, update_learning_rate=0.01,
update_momentum=0.9, test_ratio=0.2, max_epochs=20):
#update_momentum=0.9, test_ratio=0.2, max_epochs=20, train_fname='train.csv'):
#xTrain, yTrain, encoder, scaler = load_train_data(train_fname)
#xTest, ids = load_test_data('test.csv', scaler)
num_features = len(xTrain[0,:])
num_classes = 9
print num_features
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('dropout', DropoutLayer),
('dense1', DenseLayer),
('output', DenseLayer)]
clf = NeuralNet(layers=layers0,
input_shape=(None, num_features),
dense0_num_units=dense0_num,
dropout_p=dropout_p,
dense1_num_units=dense1_num,
output_num_units=num_classes,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=update_learning_rate,
update_momentum=update_momentum,
eval_size=test_ratio,
verbose=1,
max_epochs=max_epochs)
clf.fit(xTrain, yTrain)
ll_train = metrics.log_loss(yTrain, clf.predict_proba(xTrain))
print ll_train
return clf
示例9: OptNN
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def OptNN(d1, h1, d2, h2, d3, start, stop, max_epochs):
    """Cross-validated objective for hyperparameter search.

    Builds a NeuralNet from the module-level `params` template with the
    given dropout rates and hidden-layer sizes, runs 5-fold stratified CV
    on the module-level X/Y, and returns the negated mean log-loss (so a
    maximizing optimizer minimizes log-loss).

    NOTE(review): `d4` and `h3` are read below but are NOT parameters of
    this function -- they must be module-level globals (like `params`,
    `X`, `Y`, `logger`), otherwise this raises NameError. Confirm against
    the rest of the file.
    """
    params2 = params.copy()
    # Anneal the learning rate from `start` to `stop` and the momentum
    # from .9 to .999 over the course of training.
    on_epoch = [AdjustVariable('update_learning_rate',
                               start = start, stop = stop),
                AdjustVariable('update_momentum', start = .9, stop = .999)]
    params2['dropout1_p'] = d1
    params2['dropout2_p'] = d2
    params2['dropout3_p'] = d3
    params2['dropout4_p'] = d4  # presumably a global -- see docstring note
    params2['hidden1_num_units'] = h1
    params2['hidden2_num_units'] = h2
    params2['hidden3_num_units'] = h3  # presumably a global -- see docstring note
    params2['max_epochs'] = max_epochs
    params2['on_epoch_finished'] = on_epoch
    kcv = StratifiedKFold(Y, 5, shuffle = True)
    # Out-of-fold probability matrix: one row per sample, one column per class.
    res = np.empty((len(Y), len(np.unique(Y)))); i = 1
    CVScores = []
    for train_idx, valid_idx in kcv:
        logger.info("Running fold %d...", i); i += 1
        net = NeuralNet(**params2)
        # Disable the net's internal holdout: train on the whole CV fold.
        net.set_params(eval_size = None)
        net.fit(X[train_idx], Y[train_idx])
        res[valid_idx, :] = net.predict_proba(X[valid_idx])
        CVScores.append(log_loss(Y[valid_idx], res[valid_idx]))
    return -np.mean(CVScores)
示例10: train
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def train(x_train, y_train):
    """Fit a two-hidden-layer softmax classifier on 2538-feature inputs.

    Returns the fitted NeuralNet.
    """
    model = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 2538),  # 2538 features per batch row
        hidden1_num_units=100,     # units in each hidden layer
        hidden2_num_units=100,
        output_nonlinearity=nonlinearities.softmax,  # class probabilities
        output_num_units=10,       # 10 target classes
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=50,
        verbose=1,
    )
    model.fit(x_train, y_train)
    return model
示例11: loadNet
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def loadNet(netName):
    """Return a trained net for the 96x96-pixel keypoint regression task.

    If a pickled net already exists at `netName` it is loaded; otherwise
    a fresh net is trained on load() data and pickled to `netName` for
    next time.
    """
    if os.path.exists(netName):
        # BUGFIX(review): use a context manager instead of
        # pickle.load(open(...)) so the file handle is not leaked.
        with open(netName, "rb") as f:
            net = pickle.load(f)
    else:
        net = NeuralNet(
            layers=[  # three layers: one hidden layer
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            # layer parameters:
            input_shape=(None, 9216),  # 96x96 input pixels per batch
            hidden_num_units=100,      # number of units in hidden layer
            output_nonlinearity=None,  # identity output for regression
            output_num_units=30,       # 30 target values
            # optimization method:
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            regression=True,  # flag to indicate we're dealing with a regression problem
            max_epochs=400,
            verbose=1,
        )
        X, y = load()
        net.fit(X, y)
        print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(X.shape, X.min(), X.max()))
        print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(y.shape, y.min(), y.max()))
        # Same leak fix for the write side; -1 selects the highest protocol.
        with open(netName, 'wb') as f:
            pickle.dump(net, f, -1)
    return net
示例12: fit
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def fit(self,tr,add_feat_tr):
## if trend exists, remove trend
if self.trend ==1:
trend = self.est_trend(tr)
tr = tr-np.asarray(trend)
layers0=[
## 2 layers with one hidden layer
(InputLayer, {'shape': (None,8,self.window_length)}),
(DenseLayer, {'num_units': 8*self.window_length}),
(DropoutLayer, {'p':0.3}),
(DenseLayer, {'num_units': 8*self.window_length/3}),
## the output layer
(DenseLayer, {'num_units': 1, 'nonlinearity': None}),
]
feats = build_feat(tr, add_feat_tr, window_length=self.window_length)
print feats.shape
feat_target = get_target(tr,window_length=self.window_length)
print feat_target.shape
net0 = NeuralNet(
layers=layers0,
max_epochs=400,
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
verbose=1,
regression=True,
)
net0.fit(feats[:-1],feat_target)
return net0,feats,feat_target
示例13: gridsearch_alpha
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def gridsearch_alpha(self,learning_rate,index,params=None):
hidden_unit = ((index+1)*2)/3
self.l_in = ls.layers.InputLayer(shape=(None,n_input),input_var=None)
self.l_hidden = ls.layers.DenseLayer(self.l_in,num_units=15,nonlinearity=ls.nonlinearities.rectify)
self.network = l_out = ls.layers.DenseLayer(self.l_hidden,num_units=1)
list_results = np.array([learning_rate.shape[0]],dtype=np.float64)
for item in learning_rate:
#Init Neural net
net1 = NeuralNet(
layers=self.network,
# optimization method:
update=nesterov_momentum,
update_learning_rate=item,
update_momentum=0.9,
regression=True, # flag to indicate we're dealing with regression problem
max_epochs=800, # we want to train this many epochs
# verbose=1,
eval_size = 0.4
)
net1.fit(self.X_training,self.y_training)
self.pred = net1.predict(self.n_sample2)
name_file = "Params/saveNeuralNetwork_%s_%s.tdn" %(item,index)
net1.save_params_to(name_file)
score_nn = net1.score(self.n_sample2,self.n_test2)
list_results[item] = score_nn
print "index=%f,item=%f,score=%f"%(index,item,score_nn)
return list_results
示例14: NN
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def NN(X,y):
    """Train the 96x96-pixel -> 30-keypoint regression net on (X, y).

    Returns the fitted NeuralNet. (Previously the trained net was
    discarded, leaving callers no way to use it; returning it is
    backward-compatible.)
    """
    net1 = NeuralNet(
        layers=[  # three layers: one hidden layer
            ('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, 9216),  # 96x96 input pixels per batch
        hidden_num_units=100,      # number of units in hidden layer
        output_nonlinearity=None,  # identity output for regression
        output_num_units=30,       # 30 target values
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=True,  # flag to indicate we're dealing with a regression problem
        max_epochs=400,
        verbose=1,
    )
    net1.fit(X, y)
    return net1
示例15: build_mlp
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import fit [as 别名]
def build_mlp(input_var=None):
net1 = NeuralNet(
layers=[ # three layers: one hidden layer
('input', layers.InputLayer),
('hidden1', layers.DenseLayer),
('hidden2', layers.DenseLayer),
('output', layers.DenseLayer),
],
# layer parameters:
input_shape=(None, 14, 2177), # 14 x 2177 input pixels per batch
hidden1_num_units=100, # number of units in hidden layer
hidden2_num_units=100,
output_nonlinearity=lasagne.nonlinearities.softmax, # output layer uses identity function
output_num_units=2, # 2 target values
# optimization method:
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
#regression=False, # flag to indicate we're dealing with regression problem
max_epochs=500, # we want to train this many epochs
verbose=1,
)
X, y = load_dataset()
y = np.asanyarray(y,np.int32)
print(X.shape)
print(y.shape)
net1.fit(X, y)