本文整理汇总了Python中nolearn.lasagne.NeuralNet.predict_proba方法的典型用法代码示例。如果您正苦于以下问题:Python NeuralNet.predict_proba方法的具体用法?Python NeuralNet.predict_proba怎么用?Python NeuralNet.predict_proba使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nolearn.lasagne.NeuralNet的用法示例。
在下文中一共展示了NeuralNet.predict_proba方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fit
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def fit(xTrain, yTrain, dense0_num=800, dropout_p=0.5, dense1_num=500, update_learning_rate=0.01,
update_momentum=0.9, test_ratio=0.2, max_epochs=20):
#update_momentum=0.9, test_ratio=0.2, max_epochs=20, train_fname='train.csv'):
#xTrain, yTrain, encoder, scaler = load_train_data(train_fname)
#xTest, ids = load_test_data('test.csv', scaler)
num_features = len(xTrain[0,:])
num_classes = 9
print num_features
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('dropout', DropoutLayer),
('dense1', DenseLayer),
('output', DenseLayer)]
clf = NeuralNet(layers=layers0,
input_shape=(None, num_features),
dense0_num_units=dense0_num,
dropout_p=dropout_p,
dense1_num_units=dense1_num,
output_num_units=num_classes,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=update_learning_rate,
update_momentum=update_momentum,
eval_size=test_ratio,
verbose=1,
max_epochs=max_epochs)
clf.fit(xTrain, yTrain)
ll_train = metrics.log_loss(yTrain, clf.predict_proba(xTrain))
print ll_train
return clf
示例2: train
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def train():
weather = load_weather()
training = load_training()
X = assemble_X(training, weather)
print len(X[0])
mean, std = normalize(X)
y = assemble_y(training)
input_size = len(X[0])
learning_rate = theano.shared(np.float32(0.1))
net = NeuralNet(
layers=[
('input', InputLayer),
('hidden1', DenseLayer),
('dropout1', DropoutLayer),
('hidden2', DenseLayer),
('dropout2', DropoutLayer),
('output', DenseLayer),
],
# layer parameters:
input_shape=(None, input_size),
hidden1_num_units=325,
dropout1_p=0.4,
hidden2_num_units=325,
dropout2_p=0.4,
output_nonlinearity=sigmoid,
output_num_units=1,
# optimization method:
update=nesterov_momentum,
update_learning_rate=learning_rate,
update_momentum=0.9,
# Decay the learning rate
on_epoch_finished=[
AdjustVariable(learning_rate, target=0, half_life=1),
],
# This is silly, but we don't want a stratified K-Fold here
# To compensate we need to pass in the y_tensor_type and the loss.
regression=True,
y_tensor_type = T.imatrix,
objective_loss_function = binary_crossentropy,
max_epochs=85,
eval_size=0.1,
verbose=1,
)
X, y = shuffle(X, y, random_state=123)
net.fit(X, y)
_, X_valid, _, y_valid = net.train_test_split(X, y, net.eval_size)
probas = net.predict_proba(X_valid)[:,0]
print("ROC score", metrics.roc_auc_score(y_valid, probas))
return net, mean, std
示例3: DeepCls
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
class DeepCls(object):
    """One-hidden-layer softmax classifier over HOG descriptors.

    Calling the instance with a single descriptor returns the predicted
    probability of class index ``k``.
    """

    def __init__(self, n_cats=2, input_size=1040, k=1):
        self.net1 = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('hidden', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],
            input_shape=(None, input_size),
            hidden_num_units=300,
            output_nonlinearity=softmax,
            output_num_units=n_cats,
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            regression=False,
            max_epochs=2500,
            verbose=1,
        )
        self.k = k  # index of the class whose probability is returned

    def __call__(self, hog_desc):
        # the net expects a batch axis, so promote the single sample
        batch = np.expand_dims(hog_desc, 0)
        probs = self.net1.predict_proba(batch).flatten()
        return probs[self.k]
示例4: OptNN
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def OptNN(d1, h1, d2, h2, d3, start, stop, max_epochs):
"""Return the negative mean 5-fold CV log-loss for one hyper-parameter
setting (negated so a maximising optimiser minimises log-loss).

Relies on module-level ``params``, ``X``, ``Y``, ``logger`` and
``AdjustVariable``.

NOTE(review): ``d4`` and ``h3`` are read below but are not parameters of
this function -- unless they exist in an enclosing/global scope this
raises NameError. They look like they were meant to be arguments;
confirm with the caller.
"""
params2 = params.copy()
# per-epoch schedules for learning rate and momentum
on_epoch = [AdjustVariable('update_learning_rate',
start = start, stop = stop),
AdjustVariable('update_momentum', start = .9, stop = .999)]
params2['dropout1_p'] = d1
params2['dropout2_p'] = d2
params2['dropout3_p'] = d3
params2['dropout4_p'] = d4
params2['hidden1_num_units'] = h1
params2['hidden2_num_units'] = h2
params2['hidden3_num_units'] = h3
params2['max_epochs'] = max_epochs
params2['on_epoch_finished'] = on_epoch
# 5-fold stratified CV; out-of-fold probabilities are gathered in `res`
kcv = StratifiedKFold(Y, 5, shuffle = True)
res = np.empty((len(Y), len(np.unique(Y)))); i = 1
CVScores = []
for train_idx, valid_idx in kcv:
logger.info("Running fold %d...", i); i += 1
net = NeuralNet(**params2)
net.set_params(eval_size = None)  # train on the whole fold, no inner split
net.fit(X[train_idx], Y[train_idx])
res[valid_idx, :] = net.predict_proba(X[valid_idx])
CVScores.append(log_loss(Y[valid_idx], res[valid_idx]))
return -np.mean(CVScores)
示例5: OptNN2
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def OptNN2(d0, d1,d2, d3, h1, h2, h3, me, ls, le):
"""Return the negative mean 5-fold CV log-loss for a 3-hidden-layer net.

d0..d3 are dropout rates, h1..h3 hidden-layer sizes, ``me`` the epoch
count and ``ls``/``le`` the start/end learning rates for the schedule.
Reads module-level ``X``, ``Y``, ``logger``, ``l_start``, ``m_start``
and ``m_stop``.
"""
# a Bayesian-style optimiser may pass floats; sizes/epochs must be ints
h1, h2, h3 = int(h1), int(h2), int(h3);
me = int(me)
params = dict(
layers = [
('input', layers.InputLayer),
('dropout1', layers.DropoutLayer),
('hidden1', layers.DenseLayer),
('dropout2', layers.DropoutLayer),
('hidden2', layers.DenseLayer),
('dropout3', layers.DropoutLayer),
('hidden3', layers.DenseLayer),
('dropout4', layers.DropoutLayer),
('output', layers.DenseLayer),
],
input_shape = (None, 93),  # 93 input features -- Otto-style data, presumably
dropout1_p = d0,
hidden1_num_units = h1,
dropout2_p = d1,
hidden2_num_units = h2,
dropout3_p = d2,
hidden3_num_units = h3,
dropout4_p = d3,
output_nonlinearity = softmax,
output_num_units = 9,
update = nesterov_momentum,
# shared variables so AdjustVariable can anneal them per epoch
update_learning_rate = theano.shared(float32(l_start)),
update_momentum = theano.shared(float32(m_start)),
regression = False,
on_epoch_finished = [
AdjustVariable('update_learning_rate', start = ls,
stop = le, is_log = True),
AdjustVariable('update_momentum', start = m_start,
stop = m_stop, is_log = False),
],
max_epochs = me,
verbose = 1,
)
CVScores = []
# out-of-fold probabilities, one row per training sample
res = np.empty((len(Y), len(np.unique(Y))))
kcv = StratifiedKFold(Y, 5, shuffle = True); i = 1
for train_idx, valid_idx in kcv:
logger.info("Running fold %d...", i); i += 1
net = NeuralNet(**params)
net.set_params(eval_size = None)  # train on the whole fold, no inner split
net.fit(X[train_idx], Y[train_idx])
res[valid_idx, :] = net.predict_proba(X[valid_idx])
CVScores.append(log_loss(Y[valid_idx], res[valid_idx]))
# negated so a maximising optimiser minimises log-loss
return -np.mean(CVScores)
示例6: calc_prob_bag
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def calc_prob_bag(i, best_max_epochs, NNargs, X_all, y_all, X_test):
"""Fit one bagging member and return its class probabilities for X_test.

When ``best_max_epochs == 0`` a first fit with early stopping is used to
derive the optimal epoch count, which is read back from the global
``GLOBrealnumepochs`` -- presumably written by the EarlyStopping
callback; confirm. The net is then refit on effectively all the data
for that many epochs.

NOTE(review): ``NNargs`` is mutated in place, so the caller's dict (and
its ``on_epoch_finished`` list) is changed by this call.
"""
np.random.seed(111*(i+1)) # different seed per bag member
print('\n - Bag: %i ' % (i+1))
if best_max_epochs == 0:
print(' First fit to get optimal num of epochs...')
NNargs["max_epochs"] = 1000
NNargs["eval_size"] = 0.05 # just a small test set to derive optimal numepochs
NNargs["on_epoch_finished"][-1] = EarlyStopping(patience=25) # more patience
clf_bag = NeuralNet(**NNargs)
clf_bag.fit(X_all, y_all)
global GLOBrealnumepochs
best_max_epochs = GLOBrealnumepochs
print(' we will refit now with max epochs = %i' % best_max_epochs)
NNargs["max_epochs"] = best_max_epochs
NNargs["eval_size"] = 0.0001
NNargs["on_epoch_finished"][-1] = EarlyStopping(patience=1000) # kind of a infinite patience to let max epochs rule
clf_bag = NeuralNet(**NNargs)
clf_bag.fit(X_all, y_all)
probs_bags = clf_bag.predict_proba(X_test)
return probs_bags
示例7: train_dnn
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def train_dnn(train, train_y, test):
    """Fit a three-dropout dense net on (train, train_y) and return the
    predicted class probabilities for ``test``."""
    num_features = train.shape[1]
    num_classes = len(set(train_y))
    # input -> dropout0 -> dense0 -> dropout1 -> dense1 -> dropout2 -> output
    architecture = [
        ('input', InputLayer),
        ('dropout0', DropoutLayer),
        ('dense0', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ]
    model = NeuralNet(
        layers=architecture,
        input_shape=(None, num_features),
        dropout0_p=0.1,
        dense0_num_units=5000,
        dropout1_p=0.3,
        dense1_num_units=10000,
        dropout2_p=0.5,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        # shared variables so AdjustVariable can anneal them per epoch
        update_learning_rate=theano.shared(float32(0.001)),
        update_momentum=theano.shared(float32(0.9)),
        objective_loss_function=categorical_crossentropy,
        train_split=TrainSplit(0.2),
        verbose=1,
        max_epochs=150,
        on_epoch_finished=[
            EarlyStopping(patience=20),
            AdjustVariable('update_learning_rate', start=0.001, stop=0.0001),
            AdjustVariable('update_momentum', start=0.9, stop=0.999),
        ],
    )
    model.fit(train, train_y)
    print('Prediction Complete')
    return model.predict_proba(test)
示例8: train
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def train(features, features_sub, label):
    """Fit a dense net on ``features``/``label`` and return the predicted
    class probabilities for ``features_sub``.

    Parameters are expected to be pandas objects (``.values`` is read);
    inputs are cast to float32 and labels to int32 as nolearn requires.

    Bug fix: the original computed ``results = net0.predict_proba(...)``
    and then fell off with a bare ``return``, so callers always received
    None. The probabilities are now returned.
    """
    layers0 = [('input', InputLayer),
               ('dropout0', DropoutLayer),
               ('dense0', DenseLayer),
               ('dropout1', DropoutLayer),
               ('dense1', DenseLayer),
               ('dropout2', DropoutLayer),
               ('output', DenseLayer)]
    net0 = NeuralNet(layers=layers0,
                     input_shape=(None, features.shape[1]),
                     dropout0_p=0.18,
                     dense0_num_units=2000,
                     dropout1_p=0.6,
                     dense1_num_units=4000,
                     dropout2_p=0.9,
                     output_num_units=len(set(label)),
                     output_nonlinearity=softmax,
                     update=nesterov_momentum,
                     update_momentum=0.95,
                     update_learning_rate=0.002,
                     train_split=TrainSplit(0.2),
                     verbose=1,
                     max_epochs=40)
    # nolearn/theano expect float32 inputs and int32 class labels
    features = np.array(features.values, dtype=np.float32)
    net0.fit(features, np.array(label, dtype=np.int32))
    print('Prediction Complete')
    results = net0.predict_proba(np.array(features_sub.values, dtype=np.float32))
    return results
示例9: train_autoencoder
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def train_autoencoder(train, train_y, test):
    """Fit a small autoencoder net, pickle it to ``net.pickle``, and
    return its predictions on ``test``."""
    num_features = train.shape[1]
    model = NeuralNet(
        layers=[
            ('input', InputLayer),
            ('auto', AutoEncoder),
            ('output', DenseLayer),
        ],
        input_shape=(None, 1, num_features),
        auto_num_units=1000,
        auto_n_hidden=10,
        output_num_units=1000,
        # shared variables so the rates could be annealed externally
        update_learning_rate=theano.shared(float32(0.03)),
        update_momentum=theano.shared(float32(0.9)),
        output_nonlinearity=nonlinearities.softmax,
        regression=True,
        max_epochs=3,
        verbose=1,
    )
    model.fit(train, train_y)
    # persist the fitted net with the highest pickle protocol
    with open('net.pickle', 'wb') as f:
        pickle.dump(model, f, -1)
    return model.predict_proba(test)
示例10: nn_level2
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def nn_level2(train_x, train_y, test_x):
    """Neural-net probability predictions for the level-2 stacker.

    Fits a two-hidden-layer adagrad net and returns float32 class
    probabilities for ``test_x``.
    """
    num_classes = len(np.unique(train_y))
    num_features = train_x.shape[1]
    architecture = [
        ('input', InputLayer),
        ('dropoutf', DropoutLayer),
        ('dense0', DenseLayer),
        ('dropout', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ]
    model = NeuralNet(
        layers=architecture,
        input_shape=(None, num_features),
        dropoutf_p=0.15,
        dense0_num_units=1000,
        dropout_p=0.25,
        dense1_num_units=500,
        dropout2_p=0.25,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=adagrad,
        # shared so it could be adjusted per epoch if re-enabled
        update_learning_rate=theano.shared(np.float32(0.01)),
        max_epochs=18,
        eval_size=0.2,
        verbose=1,
    )
    model.fit(train_x, train_y)
    return model.predict_proba(test_x).astype(np.float32)
示例11: classify
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
def classify(train_file, test_file, output_file):
    """Train on ``train_file``, predict outcome probabilities for
    ``test_file``, and write a 5-column submission CSV to ``output_file``.

    Uses the module-level ``features`` column list.
    """
    test = pd.read_csv(test_file)
    train = pd.read_csv(train_file)
    # Order matters: test ages are scaled by the raw TRAIN maximum first,
    # then train is scaled by that same (still raw) maximum.
    test['age'] = test['age'] / train['age'].max()
    train['age'] = train['age'] / train['age'].max()
    outcome_codes = {'Transfer': 0, 'Return_to_owner': 1, 'Euthanasia': 2,
                     'Adoption': 3, 'Died': 4}
    train.replace(to_replace=outcome_codes, inplace=True)
    test.replace(to_replace=outcome_codes, inplace=True)
    train_X = train.ix[:, features].as_matrix().astype('float32')
    train_Y = train.ix[:, 'outcome'].as_matrix().astype('int32')
    test_X = test.ix[:, features].as_matrix().astype('float32')
    model = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, len(features)),
        hidden1_num_units=100, hidden1_nonlinearity=sigmoid,
        hidden2_num_units=100, hidden2_nonlinearity=rectify,
        max_epochs=100,
        output_nonlinearity=softmax,
        output_num_units=5,
        update_learning_rate=0.01,
    ).fit(train_X, train_Y)
    all_proba = model.predict_proba(test_X).reshape(test_X.shape[0], 5)
    result = pd.DataFrame(
        data=all_proba,
        columns=['Transfer', 'Return_to_owner', 'Euthanasia', 'Adoption', 'Died'],
        index=test['id'],
    )
    result.to_csv(output_file, index_label="ID", float_format='%.5f')
示例12: AdjustVariable
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
AdjustVariable('update_momentum', start=0.9, stop=0.999),
],
regression=False, # flag to indicate we're dealing with regression problem
max_epochs=500, # we want to train this many epochs
verbose=1
)
# Fit the net built above, then either evaluate on the held-out split
# (first branch) or write a submission CSV (second branch).
clf.fit(x_train,y_train)
# if 1:
# y_pred = clf.predict_proba(x_test)
#
# filename = 'testdata_aug_d'
# savefile = open(filename+'.pkl', 'wb')
# cPickle.dump((x_test, y_pred, name1),savefile,-1)
# savefile.close()
if 1:
y_pred = clf.predict(x_test)
# NOTE(review): zero_one_loss is the error rate, not accuracy -- the
# "Accuracy:" label is misleading; confirm intent before relabelling.
print "Accuracy:", zero_one_loss(y_test, y_pred)
print "Classification report:"
print classification_report(y_test, y_pred)
print 'Confusion matrix:'
print confusion_matrix(y_test,y_pred)
else:
# submission path: cast to float32 for the net, then dump 9-class probas
x_test = np.asarray(x_test,dtype=np.float32)
ypred = clf.predict_proba(x_test)
y_str = ['Class_1','Class_2','Class_3','Class_4','Class_5','Class_6','Class_7','Class_8','Class_9']
kcsv.print_csv(ypred, name1, y_str,indexname='id')
示例13: sum
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
# Collect the optimiser's weight vector for this fold.
weights.append(res['x'])
# aucList.append(roc_auc_score(y.iloc[test_index],train_eval_probs))
print "Deep Learning - CNN"
# mean of the per-fold accuracies -- presumably 10 folds; confirm
print sum(netAccuracy)/10.
#%%
# Predict the test set with the level-1 models and build the submission.
# NOTE(review): `result` (xgboost) and `result_lb` are computed but not
# used below -- only the net's probabilities go into the CSV.
resultNet = net0.predict_proba(X_test)
result = gbm.predict(xgb.DMatrix(X_test))
result_lb = lb.predict_proba(X_test)
submit = pd.read_csv('/Users/weizhi/Desktop/kaggle walmart competetion/sample_submission.csv')
Id = submit['VisitNumber']
submit.iloc[:,1:] = resultNet
submit.iloc[:,0] = Id
submit.to_csv('/Users/weizhi/Desktop/kaggle walmart competetion/deeplearning.csv',index = False)
示例14: print
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
random_state=1)
print("\n\nTraining")
net = NeuralNet(
layers=[ # three layers: one hidden layer
('input', layers.InputLayer),
('hidden', layers.DenseLayer),
('output', layers.DenseLayer),
],
input_shape=(None, 6),
hidden_num_units=5,
output_nonlinearity=sigmoid,
output_num_units=2,
# optimization method:
update=sgd,
update_learning_rate=0.01,
#update_momentum=0.9,
regression=False,
max_epochs=50,
verbose=1,
)
net.fit(X, y)
pred = net.predict_proba(tX)[:,0]
print("\t", log_loss(ty, pred))
print("\t", roc_auc_score(ty, pred))
示例15: NeuralNet
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import predict_proba [as 别名]
("input", InputLayer),
("dense0", DenseLayer),
("dropout", DropoutLayer),
("dense1", DenseLayer),
("output", DenseLayer),
]
# Build and fit the dense0 -> dropout -> dense1 net over the layer list
# opened above, then report held-out log-loss.
net0 = NeuralNet(
layers=layers0,
input_shape=(None, num_features),
dense0_num_units=100,
dropout_p=0.5,
dense1_num_units=100,
output_num_units=num_classes,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=0.05,
update_momentum=0.9,
eval_size=0.2,  # 20% of the training data held out internally
verbose=1,
max_epochs=20,
)
net0.fit(train_X, train_y)
# evaluate on the separate check split
y_prob = net0.predict_proba(check_X)
print ("LogLoss {score}".format(score=log_loss(check_y, y_prob)))