本文整理汇总了Python中nolearn.lasagne.NeuralNet.predict方法的典型用法代码示例。如果您正苦于以下问题:Python NeuralNet.predict方法的具体用法?Python NeuralNet.predict怎么用?Python NeuralNet.predict使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nolearn.lasagne.NeuralNet
的用法示例。
在下文中一共展示了NeuralNet.predict方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fit_model
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def fit_model(train_x, y, test_x):
    """Train a feed-forward network for the kaggle digit recognizer competition.

    Network size and training time (max_epochs) are deliberately kept small so
    the whole run fits within the competition's runtime restrictions.

    Returns the predicted labels for ``test_x``.
    """
    print("\n\nRunning Convetional Net. Optimization progress below\n\n")

    # One hidden layer of 200 rectified units, softmax output over the 10 digits.
    model = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        # layer parameters:
        input_shape=(None, train_x.shape[1]),
        hidden1_num_units=200,
        hidden1_nonlinearity=rectify,
        output_num_units=10,          # one unit per digit class
        output_nonlinearity=softmax,  # classification -> softmax
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.05,
        update_momentum=0.7,
        regression=False,
        max_epochs=10,  # intentionally small for execution speed
        verbose=1,
    )

    model.fit(train_x, y)
    return model.predict(test_x)
示例2: nn_example
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def nn_example(data):
    """Train a small dense classifier on flattened 28x28 digit images and
    print the prediction for the first test sample."""
    # 784 inputs -> 100 hidden units -> softmax over the ten digit classes.
    classifier = NeuralNet(
        layers=[('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
                ],
        # layer parameters:
        input_shape=(None, 28*28),
        hidden_num_units=100,  # number of units in 'hidden' layer
        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=10,  # 10 target values for the digits 0, 1, 2, ..., 9
        # optimization method:
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=10,
        verbose=1,
    )

    # Train the network
    classifier.fit(data['X_train'], data['y_train'])

    # Try the network on new data
    sample = data['X_test'][0]
    print("Feature vector (100-110): %s" % sample[100:110])
    print("Label: %s" % str(data['y_test'][0]))
    print("Predicted: %s" % str(classifier.predict([sample])))
示例3: gridsearch_alpha
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def gridsearch_alpha(self, learning_rate, index, params=None):
    """Grid-search over an array of learning rates for a 1-output regression net.

    Builds a fixed input -> dense(15, rectify) -> dense(1) network, trains one
    NeuralNet per candidate learning rate, saves each net's parameters to disk
    and returns one score per rate.

    :param learning_rate: 1-D numpy array of learning rates to try.
    :param index: identifier used in the saved-parameter file names.
    :param params: unused; kept for interface compatibility.
    :return: numpy array of scores, aligned positionally with ``learning_rate``.
    """
    self.l_in = ls.layers.InputLayer(shape=(None, n_input), input_var=None)
    self.l_hidden = ls.layers.DenseLayer(self.l_in, num_units=15,
                                         nonlinearity=ls.nonlinearities.rectify)
    self.network = l_out = ls.layers.DenseLayer(self.l_hidden, num_units=1)

    # BUG FIX: the original allocated np.array([learning_rate.shape[0]]) -- a
    # ONE-element array containing the length -- and then indexed it with the
    # float learning-rate value itself. Allocate one slot per candidate rate
    # and index by position instead. (The unused `hidden_unit` local was also
    # dropped.)
    list_results = np.zeros(learning_rate.shape[0], dtype=np.float64)

    for pos, item in enumerate(learning_rate):
        # A fresh net per candidate so every rate trains from scratch.
        net1 = NeuralNet(
            layers=self.network,
            # optimization method:
            update=nesterov_momentum,
            update_learning_rate=item,
            update_momentum=0.9,
            regression=True,  # flag to indicate we're dealing with regression problem
            max_epochs=800,  # we want to train this many epochs
            # verbose=1,
            eval_size=0.4,
        )
        net1.fit(self.X_training, self.y_training)
        self.pred = net1.predict(self.n_sample2)

        name_file = "Params/saveNeuralNetwork_%s_%s.tdn" % (item, index)
        net1.save_params_to(name_file)

        score_nn = net1.score(self.n_sample2, self.n_test2)
        list_results[pos] = score_nn
        print("index=%f,item=%f,score=%f" % (index, item, score_nn))
    return list_results
示例4: NN
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
class NN():
    """nolearn NeuralNet wrapper with min-max scaling of inputs and targets.

    ``fit`` is fed one sample at a time and updates the scalers incrementally
    with ``partial_fit``.

    NOTE(review): each fit() call builds and trains a brand-new NeuralNet, so
    weights learned from earlier samples are discarded -- despite the
    "incremental online fitting" intent. Confirm whether reusing one net
    across calls was meant.
    """

    def __init__(self):
        self.nn = None  # built lazily on the first fit() call
        self.scaler = MinMaxScaler(feature_range=(-1, 1))    # input scaler
        self.y_scaler = MinMaxScaler(feature_range=(-1, 1))  # target scaler

    def fit(self, X, y):
        """incremental online fitting"""
        X = np.asarray(X).reshape(1, -1).astype(np.float32)
        y = np.asarray(y).reshape(-1, 1).astype(np.float32)
        self.scaler = self.scaler.partial_fit(X)
        self.y_scaler = self.y_scaler.partial_fit(y)
        self.nn = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            # layer parameters:
            input_shape=(None, len(X[0])),
            hidden_num_units=15,       # number of units in hidden layer
            output_nonlinearity=None,  # output layer uses identity function
            output_num_units=1,        # single regression target (comment fixed; was "2 target values")
            # optimization method:
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            regression=True,  # flag to indicate we're dealing with regression problem
            max_epochs=2,  # TRY 50 and 46 epochs!
            verbose=3,
            eval_size=0.0,
        )
        # Same output as the original py2 "print a, '|', b" statement, but
        # valid under both Python 2 and 3.
        print("%s | %s" % (self.scaler.transform(X), self.y_scaler.transform(y)))
        self.nn.fit(self.scaler.transform(X), self.y_scaler.transform(y))
        return self

    def predict(self, X):
        """Predict with the most recently fitted net.

        BUG FIX: the original called self.nn.predict(X) twice (once to print,
        once to return); compute it once and reuse the result.
        """
        preds = self.nn.predict(X)
        print(preds)
        return preds
示例5: NN
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
class NN(object):
    """Regression network with one or two hidden+dropout layer pairs.

    The tanh output lies in [-1, 1]; ``predict_classes`` thresholds it into
    the three classes {-1, 0, 1}.
    """

    def __init__(self, input_size, hidden_1_size, hidden_2_size=None):
        """
        :param input_size: number of input features.
        :param hidden_1_size: units in the first hidden layer.
        :param hidden_2_size: optional units in a second hidden layer.
        """
        n_layers = [
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('dropout1', layers.DropoutLayer)
        ]
        # FIX: collect the optional second layer's hyper-parameters up front so
        # they are passed through NeuralNet.__init__ instead of being poked
        # into model.__dict__ after construction (which bypassed nolearn's
        # parameter handling in the original).
        extra = {}
        if hidden_2_size is not None:
            n_layers.extend(
                [('hidden2', layers.DenseLayer), ('dropout2', layers.DropoutLayer)]
            )
            extra['hidden2_num_units'] = hidden_2_size
            extra['dropout2_p'] = 0.5
        n_layers.append(('output', layers.DenseLayer))

        self.model = NeuralNet(
            layers=n_layers,
            input_shape=(None, input_size),
            hidden1_num_units=hidden_1_size, dropout1_p=0.5,
            output_nonlinearity=tanh,
            output_num_units=1,
            regression=True,
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            eval_size=0.1,
            on_epoch_finished=[
                AdjustVariable('update_learning_rate', stop=0.0001, decrement=0.00001),
                AdjustVariable('update_momentum', stop=0.999, increment=0.0001),
                EarlyStopping(patience=100)
            ],
            max_epochs=5000,
            verbose=1,
            **extra
        )

    def train(self, X, Y):
        """Fit the network; inputs/targets are cast to float32 for lasagne."""
        self.model.fit(np.asarray(X, dtype=np.float32), np.asarray(Y, dtype=np.float32))

    def predict_continuous(self, X_test):
        """Return the raw tanh outputs in [-1, 1]."""
        return self.model.predict(np.asarray(X_test, dtype=np.float32))

    def predict_classes(self, X_test):
        """Threshold continuous outputs into classes -1 / 0 / 1.

        >= 0.33 -> 1, <= -0.33 -> -1, strictly in between -> 0.
        """
        Y_pred = self.predict_continuous(X_test)
        # threshold the continuous values to get the classes
        pos = Y_pred >= .33
        neg = Y_pred <= -0.33
        neu = np.logical_and(Y_pred < 0.33, Y_pred > -0.33)
        Y_pred[pos] = 1
        Y_pred[neg] = -1
        Y_pred[neu] = 0
        return Y_pred.reshape(-1)
示例6: predict
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def predict(self, X):
    """Return decoded class labels for the samples in X.

    Casts X to float32, takes the argmax over the network's per-class
    outputs, then maps the indices back through the label encoder.
    """
    features = np.array(X, dtype=np.float32)
    raw_scores = NeuralNet.predict(self, features)
    class_indices = np.argmax(raw_scores, axis=1)
    return self.label_encoder.inverse_transform(class_indices)
示例7: __init__
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
class network:
    """
    a base class for a neural network
    """

    name = 'baseclass'
    network = []
    # polled after every epoch; set to False to request a soft stop
    again = True

    def __init__(self):
        """
        set up a network
        """
        self.network = NeuralNet(layers=[])

    def fit(self, X, y):
        """
        use the training set to get a model
        """
        # If the net polls a checkAgain callback after each epoch, install our
        # SIGINT handler so Ctrl-C stops gracefully after the current epoch.
        callbacks = self.network.on_epoch_finished
        if any(isinstance(cb, checkAgain) for cb in callbacks):
            signal.signal(signal.SIGINT, self.handle_break)
        print('\nusing network {}\n'.format(self.name))
        return self.network.fit(X, y)

    def predict(self, X):
        """
        predict the targets after the network is fitted
        """
        return self.network.predict(X)

    def handle_break(self, signum, frame):
        """
        this function handles the siginterrupt by setting the variable 'again'
        to false
        """
        if not self.again:
            # second signal - break immediately
            print("\nsecond interrupt signal received. Goodbye")
            sys.exit(1)
        # first signal - soft stop
        print(
            "\ninterrupt signal received. Stopping after the current epoch")
        self.again = False
示例8: RegressionNN
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
class RegressionNN(RegressionBase.RegressionBase):
    """Feed-forward regression net (13 inputs -> 6 hidden -> 1 linear output)
    built on nolearn's NeuralNet, driven through the RegressionBase interface.
    """

    def __init__(self, isTrain, isNN):
        super(RegressionNN, self).__init__(isTrain, isNN)
        # data preprocessing
        #self.dataPreprocessing()
        self.net1 = NeuralNet(
            layers=[  # three layers: one hidden layer
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                #('hidden2', layers.DenseLayer),
                #('hidden3', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            # layer parameters:
            input_shape=(None, 13),  # input dimension is 13
            hidden_num_units=6,  # number of units in hidden layer
            #hidden2_num_units=8,  # number of units in hidden layer
            #hidden3_num_units=4,  # number of units in hidden layer
            output_nonlinearity=None,  # identity output (None), as usual for regression
            output_num_units=1,  # output dimension is 1
            # objective function
            objective_loss_function = lasagne.objectives.squared_error,
            # optimization method:
            update=lasagne.updates.nesterov_momentum,
            update_learning_rate=0.002,
            update_momentum=0.4,
            # hold out 20% of the training data as the validation split
            train_split=TrainSplit(eval_size=0.2),
            regression=True,  # flag to indicate we're dealing with regression problem
            max_epochs=100,  # we want to train this many epochs
            verbose=0,
        )

    def dataPreprocessing(self):
        # due to the observation, standization does not help the optimization.
        # So do not use it!
        #self.Standardization()
        pass

    def training(self):
        # train the NN model on the training split provided by the base class
        self.net1.fit(self.X_train, self.y_train)

    def predict(self):
        # predict the test data
        self.y_pred = self.net1.predict(self.X_test)
        # print MSE of the predictions against the held-out targets
        mse = mean_squared_error(self.y_pred, self.y_test)
        print "MSE: {}".format(mse)
示例9: test_lasagne_functional_regression
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def test_lasagne_functional_regression(boston):
    """End-to-end regression check on the Boston data: train on the first 300
    rows, then verify MAE on the rest and that score() agrees with r2_score."""
    from nolearn.lasagne import NeuralNet

    X, y = boston

    # 13 features -> 100 dense units -> 1 identity output.
    input_layer = InputLayer(shape=(128, 13))
    hidden = DenseLayer(input_layer, num_units=100)
    out = DenseLayer(hidden, num_units=1, nonlinearity=identity)

    net = NeuralNet(
        layers=out,
        update_learning_rate=0.01,
        update_momentum=0.1,
        regression=True,
        max_epochs=50,
    )
    net.fit(X[:300], y[:300])

    holdout_pred = net.predict(X[300:])
    assert mean_absolute_error(holdout_pred, y[300:]) < 3.0
    assert r2_score(holdout_pred, y[300:]) == net.score(X[300:], y[300:])
示例10: test_lasagne_functional_mnist
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def test_lasagne_functional_mnist(mnist):
    """Full MNIST run: train a 2x512 dropout net, stop after two epochs via the
    epoch callback, and sanity-check accuracy and the recorded history."""
    from nolearn.lasagne import NeuralNet

    X, y = mnist
    X_train, y_train = X[:60000], y[:60000]
    X_test, y_test = X[60000:], y[60000:]

    # The callback snapshots the history and aborts after the second epoch.
    epochs = []

    def on_epoch_finished(nn, train_history):
        epochs[:] = train_history
        if len(epochs) > 1:
            raise StopIteration()

    net = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('hidden1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('hidden2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('output', DenseLayer),
        ],
        input_shape=(None, 784),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params=dict(
            hidden1_num_units=512,
            hidden2_num_units=512,
        ),
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        max_epochs=5,
        on_epoch_finished=on_epoch_finished,
    )
    net.fit(X_train, y_train)

    # Exactly two epochs ran, both reasonably accurate and improving.
    assert len(epochs) == 2
    assert epochs[0]['valid_accuracy'] > 0.85
    assert epochs[1]['valid_accuracy'] > epochs[0]['valid_accuracy']
    assert sorted(epochs[0].keys()) == [
        'epoch', 'train_loss', 'valid_accuracy', 'valid_loss',
    ]

    predictions = net.predict(X_test)
    assert accuracy_score(predictions, y_test) > 0.85
示例11: nnet
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def nnet(pipe):
pipe.features = pipe.features.astype(np.float32)
pipe.labels = pipe.labels.astype(np.int32)
pipe.features = StandardScaler().fit_transform(pipe.features)
X_train, X_test, y_train, y_test = train_test_split(pipe.features, pipe.labels)
nnet = NeuralNet(
# Specify the layers
layers=[('input', layers.InputLayer),
('hidden1', layers.DenseLayer),
('hidden2', layers.DenseLayer),
('hidden3', layers.DenseLayer),
('output', layers.DenseLayer)],
# Input Layer
input_shape=(None, pipe.features.shape[1]),
# Hidden Layer 1
hidden1_num_units=512,
hidden1_nonlinearity=rectify,
# Hidden Layer 2
hidden2_num_units=512,
hidden2_nonlinearity=rectify,
# # Hidden Layer 3
hidden3_num_units=512,
hidden3_nonlinearity=rectify,
# Output Layer
output_num_units=2,
output_nonlinearity=softmax,
# Optimization
update=nesterov_momentum,
update_learning_rate=0.001,
update_momentum=0.3,
max_epochs=30,
# Others,
regression=False,
verbose=1,
)
nnet.fit(X_train, y_train)
y_predict = nnet.predict(X_test)
print "precision for nnet:", precision_score(y_test, y_predict)
print "recall for nnet:", recall_score(y_test, y_predict)
print "f1 for nnet:", f1_score(y_test, y_predict, average='weighted')
pickle.dump( nnet, open( "model.pkl", "wb" ), protocol = cPickle.HIGHEST_PROTOCOL)
示例12: network
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
class network(object):
    """Single-hidden-layer regression net that trains itself on construction.

    Hidden-layer width follows the ceil(2 * (n_features + 1) / 3) rule of
    thumb applied to the training matrix.
    """

    def __init__(self, X_train, Y_train):
        #self.__hidden=0
        self.__hidden = int(math.ceil((2 * (X_train.shape[1] + 1)) / 3))
        self.net = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer)
            ],
            input_shape=(None, X_train.shape[1]),
            hidden_num_units=self.__hidden,
            #hidden_nonlinearity=nonlinearities.tanh,
            output_nonlinearity=None,
            batch_iterator_train=BatchIterator(batch_size=256),
            output_num_units=1,
            on_epoch_finished=[EarlyStopping(patience=50)],
            update=momentum,
            update_learning_rate=theano.shared(np.float32(0.03)),
            update_momentum=theano.shared(np.float32(0.8)),
            regression=True,
            max_epochs=1000,
            verbose=1,
        )
        self.net.fit(X_train, Y_train)

    def predict(self, X):
        """Return the fitted net's predictions for X."""
        return self.net.predict(X)

    def showMetrics(self):
        """Plot training vs. validation loss per epoch on a log scale."""
        history = self.net.train_history_
        train_loss = np.array([epoch["train_loss"] for epoch in history])
        valid_loss = np.array([epoch["valid_loss"] for epoch in history])
        pyplot.plot(train_loss, linewidth=3, label="training")
        pyplot.plot(valid_loss, linewidth=3, label="validation")
        pyplot.grid()
        pyplot.legend()
        pyplot.xlabel("epoch")
        pyplot.ylabel("loss")
        # pyplot.ylim(1e-3, 1e-2)
        pyplot.yscale("log")
        pyplot.show()

    def saveNet(self, fname):
        """Persist the net's parameters to fname."""
        self.net.save_params_to(fname)

    def loadNet(self, fname):
        """Restore the net's parameters from fname."""
        self.net.load_params_from(fname)
示例13: main
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def main():
    """Train a convolutional autoencoder on 80x80 single-channel images and
    reconstruct the training set."""
    xtrain, ytrain, xval, yval, xtest, ytest = loaddata()

    # <codecell>
    conv_filters = 32
    deconv_filters = 32
    filter_sizes = 7
    epochs = 20
    encode_size = 40

    ae = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv', layers.Conv2DLayer),
            ('pool', layers.MaxPool2DLayer),
            ('flatten', ReshapeLayer),  # output_dense
            ('encode_layer', layers.DenseLayer),
            ('hidden', layers.DenseLayer),  # output_dense
            ('unflatten', ReshapeLayer),
            ('unpool', Unpool2DLayer),
            ('deconv', layers.Conv2DLayer),
            ('output_layer', ReshapeLayer),
        ],
        input_shape=(None, 1, 80, 80),
        conv_num_filters=conv_filters,
        conv_filter_size=(filter_sizes, filter_sizes),
        conv_nonlinearity=None,
        pool_pool_size=(2, 2),
        flatten_shape=(([0], -1)),  # not sure if necessary?
        encode_layer_num_units=encode_size,
        # FIX: use floor division so these layer sizes stay ints under
        # Python 3 as well (plain / would yield floats and break the shapes;
        # // is identical to py2's integer /).
        # NOTE(review): the 28s below look inherited from an MNIST example
        # even though input_shape is 80x80 -- confirm against loaddata().
        hidden_num_units=deconv_filters * (28 + filter_sizes - 1) ** 2 // 4,
        unflatten_shape=(([0], deconv_filters, (28 + filter_sizes - 1) // 2, (28 + filter_sizes - 1) // 2)),
        unpool_ds=(2, 2),
        deconv_num_filters=1,
        deconv_filter_size=(filter_sizes, filter_sizes),
        # deconv_border_mode="valid",
        deconv_nonlinearity=None,
        output_layer_shape=(([0], -1)),
        update_learning_rate=0.01,
        update_momentum=0.975,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        regression=True,
        max_epochs=epochs,
        verbose=1,
    )
    ae.fit(xtrain, ytrain)
    X_train_pred = ae.predict(xtrain).reshape(-1, 80, 80)
示例14: regressNN
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def regressNN(X, y):
    """3-fold cross-validated linear regression net; prints per-fold error
    statistics (errors scaled by 60, presumably hours -> minutes).

    :param X: 2-D feature array.
    :param y: 1-D target array aligned with the rows of X.
    """
    layers_all = [('input', InputLayer),
                  ('dense', DenseLayer),
                  ('output', DenseLayer)]

    # BUG FIX: the original called np.random.shuffle(X), reordering the
    # features in place WITHOUT reordering y, which destroys the
    # feature/target correspondence. Shuffle both with one shared permutation.
    perm = np.random.permutation(X.shape[0])
    X, y = X[perm], y[perm]
    print(X.shape, y.shape)
    #net.fit(X,y)

    folds = 3
    skf = KFold(X.shape[0], n_folds=folds)
    for train_index, test_index in skf:
        # A fresh net per fold so folds do not leak into each other.
        net = NeuralNet(layers=layers_all,
                        input_shape=(None, X.shape[1]),
                        dense_num_units=2,
                        dense_nonlinearity=None,
                        regression=True,
                        update_momentum=0.9,
                        update_learning_rate=0.001,
                        output_nonlinearity=None,
                        output_num_units=1,
                        max_epochs=100)
        Xtrain, Xtest = X[train_index], X[test_index]
        ytrain, ytest = y[train_index], y[test_index]
        Xtrain = np.array(Xtrain, dtype='float64')
        Xtest = np.array(Xtest, dtype='float64')
        #Xtrain[np.isinf(Xtrain)] = 0
        net.fit(Xtrain, ytrain)

        error = 0
        errorList = []
        predictions = []
        for i in range(0, Xtest.shape[0]):
            # Predict one row at a time, shaped as a 1 x n_features batch.
            a = np.transpose(Xtest[i, :].reshape(Xtest[i, :].shape[0], 1))
            pr = net.predict(a)
            temp_err = np.absolute(pr - ytest[i]) * 60  # scale by 60 (hours -> minutes?)
            errorList.append(temp_err)
            predictions.append(pr)
            error += temp_err
        print('Average error in minutes: {0}'.format(error / Xtest.shape[0]))
        print('Max/min/median error: {0} , {1} , {2}'.format(max(errorList), min(errorList), np.median(errorList)))
        del errorList[:]
        del predictions[:]
示例15: lasagne_model
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict [as 别名]
def lasagne_model(train, y_train, test):
    """Fit a 3-hidden-layer dropout regression net (tanh output, squared-error
    loss) and return flattened, non-negative predictions for the test rows."""
    layers = [('input', InputLayer),
              ('dense0', DenseLayer),
              ('dropout0', DropoutLayer),
              ('dense1', DenseLayer),
              ('dropout1', DropoutLayer),
              ('dense2', DenseLayer),
              ('dropout2', DropoutLayer),
              ('output', DenseLayer)]

    num_features = len(train[0])
    num_classes = 1

    model = NeuralNet(layers=layers,
                      input_shape=(None, num_features),
                      objective_loss_function=squared_error,
                      dense0_num_units=6,
                      dropout0_p=0.4,  #0.1,
                      dense1_num_units=4,
                      dropout1_p=0.4,  #0.1,
                      dense2_num_units=2,
                      dropout2_p=0.4,  #0.1,
                      output_num_units=num_classes,
                      output_nonlinearity=tanh,
                      regression=True,
                      update=nesterov_momentum,  #adagrad,
                      update_momentum=0.9,
                      update_learning_rate=0.004,
                      eval_size=0.2,
                      verbose=1,
                      max_epochs=5)  #15)

    x_train = np.array(train).astype(np.float32)
    x_test = np.array(test).astype(np.float32)
    model.fit(x_train, y_train)

    pred_val = model.predict(x_test)
    print(pred_val.shape)
    test_probs = np.array(pred_val).reshape(len(pred_val),)
    print(test_probs.shape)

    # Clamp negative predictions to zero before returning.
    negative = test_probs < 0
    test_probs[negative] = 0
    return test_probs