This article collects typical usage examples of the Python class nolearn.lasagne.NeuralNet. If you are wondering what the NeuralNet class does or how to use it, the curated examples below may help.
The following 15 NeuralNet code examples are shown, sorted by popularity by default.
Example 1: lasagne_oneLayer_classifier
def lasagne_oneLayer_classifier(param, X, labels):
## initialize the NN
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('dropout', DropoutLayer),
('output', DenseLayer)]
net0 = NeuralNet(layers=layers0,
input_shape=(None, param['num_features']),
dense0_num_units=param['dense0_num_units'],
dropout_p=param['dropout_p'],
output_num_units=param['num_classes'],
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=param['update_learning_rate'],
update_momentum=param['update_momentum'],
eval_size=0.02,
verbose=1,
max_epochs=param['max_epochs'])
## fit the model
net0.fit(X, labels)
return net0
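A hypothetical call to the function above, using synthetic data; the keys of the `param` dict are inferred from the function body, and nolearn expects float32 features and int32 labels:

import numpy as np

X = np.random.rand(500, 93).astype(np.float32)          # synthetic features
labels = np.random.randint(0, 9, 500).astype(np.int32)  # synthetic class labels

param = {
    'num_features': X.shape[1],
    'dense0_num_units': 512,
    'dropout_p': 0.5,
    'num_classes': 9,
    'update_learning_rate': 0.01,
    'update_momentum': 0.9,
    'max_epochs': 30,
}
net = lasagne_oneLayer_classifier(param, X, labels)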
Example 2: fit_nn_and_predict_probas
def fit_nn_and_predict_probas(features, dv, features_t):
bwh = BestWeightsHolder()
tvs = TrainValidSplitter(standardize=True,few=True)
layers = [('input', InputLayer),
('dense0', DenseLayer),
('dropout0', DropoutLayer),
('dense1', DenseLayer),
('dropout1', DropoutLayer),
('output', DenseLayer)]
net = NeuralNet(layers=layers,
input_shape=(None, features.shape[1]),
dense0_num_units=512,
dropout0_p=0.4,
dense1_num_units=256,
dropout1_p=0.4,
output_num_units=38,
output_nonlinearity=softmax,
update=adagrad,
update_learning_rate=0.02,
train_split=tvs,
verbose=1,
max_epochs=40,
on_epoch_finished=[bwh.hold_best_weights])
holder = net.fit(features, dv)
holder.load_params_from(bwh.best_weights)
    return holder.predict_proba(
        np.hstack((tvs.standa.transform(features_t[:, :23]), features_t[:, 23:])))
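BestWeightsHolder and TrainValidSplitter are custom helpers that are not shown in this snippet. A minimal sketch of the former, assuming it simply remembers the parameter values from the epoch with the lowest validation loss (nolearn calls each on_epoch_finished callback with the net and its training history):

import numpy as np

class BestWeightsHolder(object):
    def __init__(self):
        self.best_valid = np.inf
        self.best_weights = None

    def hold_best_weights(self, nn, train_history):
        # Remember the weights whenever the validation loss improves.
        if train_history[-1]['valid_loss'] < self.best_valid:
            self.best_valid = train_history[-1]['valid_loss']
            self.best_weights = nn.get_all_params_values()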
Example 3: fit_model
def fit_model(train_x, y, test_x):
"""Feed forward neural network for kaggle digit recognizer competition.
Intentionally limit network size and optimization time (by choosing max_epochs = 15) to meet runtime restrictions
"""
print("\n\nRunning Convetional Net. Optimization progress below\n\n")
net1 = NeuralNet(
layers=[ #list the layers here
('input', layers.InputLayer),
('hidden1', layers.DenseLayer),
('output', layers.DenseLayer),
],
# layer parameters:
input_shape=(None, train_x.shape[1]),
hidden1_num_units=200, hidden1_nonlinearity=rectify, #params of first layer
output_nonlinearity=softmax, # softmax for classification problems
output_num_units=10, # 10 target values
# optimization method:
update=nesterov_momentum,
update_learning_rate=0.05,
update_momentum=0.7,
regression=False,
max_epochs=10, # Intentionally limited for execution speed
verbose=1,
)
net1.fit(train_x, y)
predictions = net1.predict(test_x)
    return predictions
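nolearn/Lasagne expect float32 features and int32 class labels; a hypothetical call with synthetic data, just to illustrate the expected shapes and dtypes:

import numpy as np

rng = np.random.RandomState(0)
train_x = rng.rand(1000, 784).astype(np.float32)  # e.g. flattened 28x28 images
y = rng.randint(0, 10, 1000).astype(np.int32)     # digit labels 0-9
test_x = rng.rand(200, 784).astype(np.float32)
predictions = fit_model(train_x, y, test_x)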
Example 4: test_diamond
def test_diamond(self, NeuralNet):
input, hidden1, hidden2, concat, output = (
Mock(), Mock(), Mock(), Mock(), Mock())
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('concat', concat),
('output', output),
],
input_shape=(10, 10),
hidden2_incoming='input',
concat_incoming=['hidden1', 'hidden2'],
)
nn.initialize_layers(nn.layers)
input.assert_called_with(name='input', shape=(10, 10))
hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
concat.assert_called_with(
incoming=[hidden1.return_value, hidden2.return_value],
name='concat'
)
output.assert_called_with(incoming=concat.return_value, name='output')
Example 5: _create_nnet
def _create_nnet(self, input_dims, output_dims, learning_rate, num_hidden_units=15, batch_size=32, max_train_epochs=1,
hidden_nonlinearity=nonlinearities.rectify, output_nonlinearity=None, update_method=updates.sgd):
"""
A subclass may override this if a different sort
of network is desired.
"""
nnlayers = [('input', layers.InputLayer), ('hidden', layers.DenseLayer), ('output', layers.DenseLayer)]
nnet = NeuralNet(layers=nnlayers,
# layer parameters:
input_shape=(None, input_dims),
hidden_num_units=num_hidden_units,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
output_num_units=output_dims,
# optimization method:
update=update_method,
update_learning_rate=learning_rate,
regression=True, # flag to indicate we're dealing with regression problem
max_epochs=max_train_epochs,
batch_iterator_train=BatchIterator(batch_size=batch_size),
train_split=nolearn.lasagne.TrainSplit(eval_size=0),
verbose=0,
)
nnet.initialize()
return nnet
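A hypothetical use of the factory above, where `agent` stands in for an instance of the enclosing class: with regression=True, the targets must be a float32 matrix of shape (n_samples, output_dims):

import numpy as np

net = agent._create_nnet(input_dims=4, output_dims=2, learning_rate=0.001)
X = np.random.rand(64, 4).astype(np.float32)
y = np.random.rand(64, 2).astype(np.float32)
net.fit(X, y)  # a single pass over the data, since max_train_epochs=1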
Example 6: train
def train():
weather = load_weather()
training = load_training()
X = assemble_X(training, weather)
    print(len(X[0]))
mean, std = normalize(X)
y = assemble_y(training)
input_size = len(X[0])
learning_rate = theano.shared(np.float32(0.1))
net = NeuralNet(
layers=[
('input', InputLayer),
('hidden1', DenseLayer),
('dropout1', DropoutLayer),
('hidden2', DenseLayer),
('dropout2', DropoutLayer),
('output', DenseLayer),
],
# layer parameters:
input_shape=(None, input_size),
hidden1_num_units=325,
dropout1_p=0.4,
hidden2_num_units=325,
dropout2_p=0.4,
output_nonlinearity=sigmoid,
output_num_units=1,
# optimization method:
update=nesterov_momentum,
update_learning_rate=learning_rate,
update_momentum=0.9,
# Decay the learning rate
on_epoch_finished=[
AdjustVariable(learning_rate, target=0, half_life=1),
],
# This is silly, but we don't want a stratified K-Fold here
# To compensate we need to pass in the y_tensor_type and the loss.
regression=True,
        y_tensor_type=T.imatrix,
        objective_loss_function=binary_crossentropy,
max_epochs=85,
eval_size=0.1,
verbose=1,
)
X, y = shuffle(X, y, random_state=123)
net.fit(X, y)
_, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
probas = net.predict_proba(X_valid)[:,0]
print("ROC score", metrics.roc_auc_score(y_valid, probas))
return net, mean, std
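AdjustVariable is not defined in this snippet. A plausible sketch matching the call signature used here: once per epoch it decays the shared learning-rate variable exponentially toward `target`, halving the remaining distance every `half_life` epochs:

import numpy as np

class AdjustVariable(object):
    def __init__(self, variable, target, half_life=20):
        self.variable = variable
        self.target = target
        self.half_life = half_life

    def __call__(self, nn, train_history):
        # Exponential decay toward the target value, applied once per epoch.
        delta = self.variable.get_value() - self.target
        delta *= np.exp(-np.log(2) / self.half_life)
        self.variable.set_value(np.float32(self.target + delta))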
Example 7: train
def train(x_train, y_train):
clf_nn = NeuralNet(
        layers=[  # four layers: two hidden layers
('input', layers.InputLayer),
('hidden1', layers.DenseLayer),
('hidden2', layers.DenseLayer),
('output', layers.DenseLayer),
],
# layer parameters:
        input_shape=(None, 2538),  # 2538 input features per sample
hidden1_num_units=100, # number of units in hidden layer
hidden2_num_units=100,
        output_nonlinearity=nonlinearities.softmax,  # softmax for multi-class classification
output_num_units=10, # 10 target values
# optimization method:
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=50, # we want to train this many epochs
verbose=1,
)
clf_nn.fit(x_train, y_train)
return clf_nn
Example 8: CompileNetwork
def CompileNetwork(l_out, epochs, update, update_learning_rate, objective_l2,
earlystopping, patience, batch_size, verbose):
update_fn = getattr(updates, update)
earlystop = EarlyStopping(patience=patience, verbose=verbose)
net = NeuralNet(
l_out,
max_epochs=epochs,
update=update_fn,
objective_l2=objective_l2,
        batch_iterator_train=BatchIterator(batch_size=batch_size),
        batch_iterator_test=BatchIterator(batch_size=batch_size),
        verbose=verbose,
        on_training_finished=[earlystop.load_best_weights]
)
    if earlystopping:
net.on_epoch_finished.append(earlystop)
if update_learning_rate is not None:
net.update_learning_rate=update_learning_rate
return net
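EarlyStopping is defined elsewhere; a sketch consistent with how it is used here and in Example 11 below: an on_epoch_finished callback that raises StopIteration once the validation loss has not improved for `patience` epochs, plus a load_best_weights hook for on_training_finished:

import numpy as np

class EarlyStopping(object):
    def __init__(self, patience=100, verbose=0):
        self.patience = patience
        self.verbose = verbose
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            if self.verbose:
                print("Early stopping: best valid loss {:.6f} at epoch {}".format(
                    self.best_valid, self.best_valid_epoch))
            raise StopIteration()  # nolearn's train loop catches this

    def load_best_weights(self, nn, train_history):
        nn.load_params_from(self.best_weights)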
Example 9: test_initialization_with_tuples
def test_initialization_with_tuples(self, NeuralNet):
input = Mock(__name__="InputLayer", __bases__=(InputLayer,))
hidden1, hidden2, output = [Mock(__name__="MockLayer", __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(input, {"shape": (10, 10), "name": "input"}),
(hidden1, {"some": "param", "another": "param"}),
(hidden2, {}),
(output, {"name": "output"}),
],
input_shape=(10, 10),
mock1_some="iwin",
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(name="input", shape=(10, 10))
assert nn.layers_["input"] is input.return_value
hidden1.assert_called_with(incoming=input.return_value, name="mock1", some="iwin", another="param")
assert nn.layers_["mock1"] is hidden1.return_value
hidden2.assert_called_with(incoming=hidden1.return_value, name="mock2")
assert nn.layers_["mock2"] is hidden2.return_value
output.assert_called_with(incoming=hidden2.return_value, name="output")
assert out is nn.layers_["output"]
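A concrete (non-mock) version of the tuple style being tested, assuming lasagne is installed: each entry is a (layer_class, kwargs) pair, so per-layer parameters can be given inline instead of via name-prefixed keyword arguments:

from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import softmax
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    layers=[
        (InputLayer, {'shape': (None, 20), 'name': 'input'}),
        (DenseLayer, {'num_units': 64}),
        (DenseLayer, {'num_units': 3, 'nonlinearity': softmax, 'name': 'output'}),
    ],
    update_learning_rate=0.01,
)
net.initialize()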
Example 10: train_net
def train_net(X, y):
net2 = NeuralNet(
layers=[
('input', layers.InputLayer),
('ncaa', NCAALayer),
('dropout1', layers.DropoutLayer),
('hidden', layers.DenseLayer),
('dropout2', layers.DropoutLayer),
('output', layers.DenseLayer),
],
        input_shape=(None, num_features * 2),
        ncaa_num_units=128,
dropout1_p=0.2,
hidden_num_units=128,
dropout2_p=0.3,
output_nonlinearity=nonlinearities.sigmoid,
output_num_units=1,
update=nesterov_momentum,
update_learning_rate=theano.shared(float32(0.01)),
update_momentum=theano.shared(float32(0.9)),
regression=True, # flag to indicate we're dealing with regression problem
max_epochs=20, # we want to train this many epochs
verbose=1,
)
net2.fit(X, y)
return net2
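`float32` above is a small helper that is not shown; the usual definition just casts a Python number to the np.float32 dtype that Theano shared variables require:

import numpy as np

def float32(k):
    return np.float32(k)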
Example 11: train_network
def train_network():
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('dropout0', DropoutLayer),
('dense1', DenseLayer),
('dropout1', DropoutLayer),
('dense2', DenseLayer),
('output', DenseLayer)]
es = EarlyStopping(patience=200)
net0 = NeuralNet(layers=layers0,
input_shape=(None, num_features),
dense0_num_units=256,
dropout0_p=0.5,
dense1_num_units=128,
dropout1_p=0.5,
dense2_num_units=64,
output_num_units=num_classes,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=theano.shared(float32(0.01)),
update_momentum=theano.shared(float32(0.9)),
eval_size=0.2,
verbose=1,
max_epochs=1000,
on_epoch_finished=[
AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),
AdjustVariable('update_momentum', start=0.9, stop=0.999),
es
])
net0.fit(X, y)
return (es.best_valid, net0)
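This example uses a different AdjustVariable variant than Example 6: it addresses the parameter by attribute name (hence the theano.shared values above) and interpolates linearly from `start` to `stop` over max_epochs. A sketch following the widely used nolearn tutorial implementation:

import numpy as np

class AdjustVariable(object):
    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None

    def __call__(self, nn, train_history):
        # Lazily build the schedule, then set this epoch's value.
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        getattr(nn, self.name).set_value(np.float32(self.ls[epoch - 1]))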
Example 12: test_initialization_legacy
def test_initialization_legacy(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='hidden1', some='param')
assert nn.layers_['hidden1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='hidden2')
assert nn.layers_['hidden2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out[0] is nn.layers_['output']
Example 13: test_diamond
def test_diamond(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, concat, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('concat', concat),
('output', output),
],
input_shape=(10, 10),
hidden2_incoming='input',
concat_incomings=['hidden1', 'hidden2'],
)
nn.initialize_layers(nn.layers)
input.assert_called_with(name='input', shape=(10, 10))
hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
concat.assert_called_with(
incomings=[hidden1.return_value, hidden2.return_value],
name='concat'
)
output.assert_called_with(incoming=concat.return_value, name='output')
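For reference, a concrete (non-mock) diamond network along the same lines, assuming lasagne is installed; nolearn resolves the layer-name strings in `hidden2_incoming` and `concat_incomings` to the actual layer instances:

from lasagne.layers import InputLayer, DenseLayer, ConcatLayer
from lasagne.nonlinearities import softmax
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    layers=[
        ('input', InputLayer),
        ('hidden1', DenseLayer),
        ('hidden2', DenseLayer),
        ('concat', ConcatLayer),
        ('output', DenseLayer),
    ],
    input_shape=(None, 10),
    hidden1_num_units=32,
    hidden2_incoming='input',  # second branch taps the input directly
    hidden2_num_units=32,
    concat_incomings=['hidden1', 'hidden2'],
    output_num_units=2,
    output_nonlinearity=softmax,
    update_learning_rate=0.01,
)
net.initialize()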
Example 14: fit
def fit(xTrain, yTrain, dense0_num=800, dropout_p=0.5, dense1_num=500, update_learning_rate=0.01,
update_momentum=0.9, test_ratio=0.2, max_epochs=20):
#update_momentum=0.9, test_ratio=0.2, max_epochs=20, train_fname='train.csv'):
#xTrain, yTrain, encoder, scaler = load_train_data(train_fname)
#xTest, ids = load_test_data('test.csv', scaler)
num_features = len(xTrain[0,:])
num_classes = 9
    print(num_features)
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('dropout', DropoutLayer),
('dense1', DenseLayer),
('output', DenseLayer)]
clf = NeuralNet(layers=layers0,
input_shape=(None, num_features),
dense0_num_units=dense0_num,
dropout_p=dropout_p,
dense1_num_units=dense1_num,
output_num_units=num_classes,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=update_learning_rate,
update_momentum=update_momentum,
eval_size=test_ratio,
verbose=1,
max_epochs=max_epochs)
clf.fit(xTrain, yTrain)
ll_train = metrics.log_loss(yTrain, clf.predict_proba(xTrain))
    print(ll_train)
return clf
Example 15: neural_network
def neural_network(x_train, y_train):
X, y, encoder, scaler = load_train_data(x_train, y_train)
num_classes = len(encoder.classes_)
num_features = X.shape[1]
layers0 = [
("input", InputLayer),
("dropoutf", DropoutLayer),
("dense0", DenseLayer),
("dropout", DropoutLayer),
("dense1", DenseLayer),
("dropout2", DropoutLayer),
("output", DenseLayer),
]
net0 = NeuralNet(
layers=layers0,
input_shape=(None, num_features),
dropoutf_p=0.15,
dense0_num_units=1000,
dropout_p=0.25,
dense1_num_units=500,
dropout2_p=0.25,
output_num_units=num_classes,
output_nonlinearity=softmax,
update=adagrad,
update_learning_rate=0.005,
eval_size=0.01,
verbose=1,
max_epochs=30,
)
net0.fit(X, y)
return (net0, scaler)
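load_train_data is not shown in this snippet; a minimal sketch, assuming it label-encodes the targets and standardizes the features (the returned encoder and scaler can then be reused at prediction time):

import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler

def load_train_data(x_train, y_train):
    encoder = LabelEncoder()
    y = encoder.fit_transform(y_train).astype(np.int32)
    scaler = StandardScaler()
    X = scaler.fit_transform(x_train).astype(np.float32)
    return X, y, encoder, scaler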