本文整理汇总了Python中sknn.mlp.Classifier类的典型用法代码示例。如果您正苦于以下问题:Python Classifier类的具体用法?Python Classifier怎么用?Python Classifier使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Classifier类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: mlp
def mlp(number_layers, number_neurons_1, number_neurons_2, number_neurons_3, number_neurons_4, dropout_rate):
    """Build a sigmoid MLP with `number_layers` hidden layers and score it by
    repeated train/test splits.

    Args:
        number_layers: How many of the four hidden-layer sizes to use (1-4).
        number_neurons_1..4: Candidate unit counts for each hidden layer.
        dropout_rate: Dropout applied to every hidden layer.

    Returns:
        Negative median accuracy over the validation runs (negated so a
        minimizer can optimize it).

    NOTE(review): relies on module-level X, Y, n_validations and median —
    confirm they are defined in the enclosing script.
    """
    number_neurons = [number_neurons_1, number_neurons_2, number_neurons_3, number_neurons_4]
    layers = [Layer("Sigmoid", units=number_neurons[i], dropout=dropout_rate)
              for i in np.arange(number_layers)]
    layers.append(Layer("Softmax", units=2))
    scores = []
    for i in np.arange(n_validations):
        # BUG FIX: the original passed random_state=1 on every iteration, so
        # all n_validations splits were identical and repeating the loop added
        # no information. Seed per-iteration so each run sees a fresh split.
        X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(
            X, Y, test_size=0.3, random_state=i)
        predictor = Classifier(
            layers=layers,
            learning_rate=0.001,
            n_iter=25)
        predictor.fit(X_train, Y_train)
        scores.append(metrics.accuracy_score(Y_test, predictor.predict(X_test)))
    return -median(scores)
示例2: main
def main():
    # Trains an sknn MLP on a Kaggle training CSV, pickles it, reloads it,
    # predicts on a held-out split and prints the hit percentage.
    # NOTE(review): paths are hard-coded to one machine; totalTest/totalAns are
    # loaded but never used below — presumably intended for the submission set.
    vals, actions = matrixFromCSV("C:\\Users\\Chrisd\\Documents\\College\\Spring 2016\\379K\\Kaggle\\Kaggle\\train.csv")
    X_train, X_test, y_train, y_test = train_test_split(vals, actions, test_size=0.33, random_state=22)
    totalTest, totalAns = matrixFromCSV("C:\\Users\\Chrisd\\Documents\\College\\Spring 2016\\379K\\Kaggle\\Kaggle\\test.csv")
    # Three-layer network: two sized layers plus an output layer whose unit
    # count sknn infers from the labels.
    nn = Classifier(
        layers=[
            Layer("Softmax", units=10),
            Layer("Linear", units=10),
            Layer("Sigmoid")],
        learning_rate=0.001,
        n_iter=20)
    nn.fit(X_train,y_train)
    # Persist the trained network so later runs can skip training.
    pickle.dump(nn, open('nn.pkl', 'wb'))
    # The triple-quoted string below is commented-out hyperparameter-search
    # code, kept verbatim (it is a no-op bare string literal).
    '''rs = RandomizedSearchCV(nn, param_distributions={
'learning_rate': stats.uniform(0.001, 0.05),
'hidden0__units': stats.randint(4, 100),
'hidden1__units': stats.randint(4, 100),
'hidden1__type': ["Linear","Rectifier", "Sigmoid", "Tanh"]})
rs.fit(X_train, y_train)
pickle.dump(rs, open('rs.pkl', 'wb'))
rs = pickle.load(open('rs.pkl', 'rb'))'''
    #print(X_test.shape)
    #X_test.reshape(9,1)'''
    # Round-trip through pickle before predicting (verifies the saved model).
    nn = pickle.load(open('nn.pkl', 'rb'))
    answer = nn.predict(X_test)
    writeToCSV(answer)
    print(getPercent(answer,y_test))
示例3: train
def train(X, ty):
nn = Classifier(
layers=[Layer("Sigmoid", units=5000), Layer("Sigmoid", units=5)], learning_rate=0.001, n_iter=100, verbose=1
)
nn.fit(X, ty)
print "Train Done!"
return nn
示例4: autoEncoderOptimization
def autoEncoderOptimization(data):
rbm = ae.AutoEncoder(
layers=[
ae.Layer("Tanh", units=300),
ae.Layer("Sigmoid", units=200),
ae.Layer("Tanh", units=100)
],
learning_rate=0.002,
n_iter=10
)
rbm.fit(data["train"])
model = Classifier(
layers=[
Layer("Tanh", units=300),
Layer("Sigmoid", units=200),
Layer("Tanh", units=100),
Layer("Rectifier", units=100),
Layer("Rectifier", units=50),
Layer("Softmax")
],
)
rbm.transfer(model)
model.fit(data["train"], data["label"])
prediction = model.predict(data["train"])
print accuracy_score(data["label"], prediction)
示例5: train_neural_network
def train_neural_network(samples, nn=None, learning_rate=0.001, n_iter=25): #pylint:disable=invalid-name
    """Trains a neural network using the given sample data.

    Args:
        samples: Tuple containing (sample inputs, sample outputs).
        nn: Neural network that should be trained. If this is none, a new NN
            will be created.
        learning_rate: Neural network learning rate.
        n_iter: Number of training iterations to use.

    Returns:
        The trained neural network.
    """
    sample_inputs, sample_outputs = check_samples(samples)
    if nn is None:
        # No network supplied: build a fresh Maxout->Softmax classifier sized
        # to the number of input features.
        nn = Classifier(
            layers=[
                Layer("Maxout", units=len(sample_inputs[0]), pieces=2),
                Layer("Softmax"),
            ],
            learning_rate=learning_rate,
            n_iter=n_iter,
        )
    nn.fit(sample_inputs, sample_outputs)
    return nn
示例6: batch_train
def batch_train(train,val,model_path):
trainX,trainY = train
valX,valY = val
nn = Classifier(layers = [
Convolution('Rectifier',
channels=100,
kernel_shape=(5,WORD_DIM),
border_mode='valid'
#pool_shape=(3,1),
#pool_type='max'
),
Layer('Rectifier',units=900,dropout=0.5),
Layer('Softmax')],
batch_size = 50,
learning_rate = 0.02,
normalize='dropout',
verbose = True)
nn.n_iter = 100
print 'Net created...'
try:
nn.fit(trainX,trainY)
except KeyboardInterrupt:
pickle.dump(nn,open(model_path,'wb'))
pickle.dump(nn,open(model_path,'wb'))
print 'Done, final model saved'
print 'Testing'
#Accuracy on the validation set
print 'Validation accuracy:',batch_test(model_path,val)
示例7: test_VerboseClassifier
def test_VerboseClassifier(self):
    # One training epoch on a minimal Softmax net with verbose logging; the
    # output captured in self.buf must contain the progress table header,
    # the epoch number, and the N/A validation-error placeholder.
    net = MLPC(layers=[L("Softmax")], verbose=1, n_iter=1)
    x_in = numpy.zeros((8, 16))
    y_out = numpy.zeros((8, 1), dtype=numpy.int32)
    net.fit(x_in, y_out)
    logged = self.buf.getvalue()
    assert_in("Epoch Training Error Validation Error Time", logged)
    assert_in(" 1 ", logged)
    assert_in(" N/A ", logged)
示例8: trainMLP
def trainMLP(trainX, trainY, validationX, validationY, activation='Tanh', algorithm='adam',
             hidden_layer_size=2048, alpha=0.001):
    """Shuffle the data, then train a one-hidden-layer sknn classifier with
    L2 regularization and an explicit validation set; returns the fitted model.
    """
    print('Learning...')
    trainX, trainY = shuffle(trainX, trainY)
    validationX, validationY = shuffle(validationX, validationY)
    # Output layer has one unit per distinct class label.
    n_classes = len(np.unique(trainY))
    mlp = Classifier(
        layers=[
            Layer(activation, units=hidden_layer_size, dropout=0.1),
            Layer("Softmax", units=n_classes, dropout=0.2),
        ],
        learning_rule=algorithm,
        learning_rate=0.0005,
        learning_momentum=0.9,
        batch_size=256,
        n_stable=10,
        n_iter=200,
        regularize="L2",
        weight_decay=alpha,
        loss_type="mcc",  # ?
        valid_set=(validationX, validationY),
        verbose=True)
    print(mlp)
    mlp.fit(trainX, trainY)
    return mlp
示例9: wrapper_for_backprop_neural_network_code
def wrapper_for_backprop_neural_network_code(train_x, train_y, test_x, test_y):
    """Train a small Sigmoid->Softmax network and return its test accuracy.

    Args:
        train_x, train_y: Training inputs and labels.
        test_x, test_y: Held-out inputs and labels.

    Returns:
        Accuracy of the network's predictions on the test set.
    """
    nn = Classifier(
        layers=[Layer('Sigmoid', units=5),
                Layer('Softmax')],
        learning_rate=.001,
        n_iter=25)
    nn.fit(train_x, train_y)
    predicted = nn.predict(test_x)
    # FIX: dropped the dead `score = None` pre-assignment, and pass arguments
    # in sklearn's (y_true, y_pred) order — accuracy is symmetric, so the
    # returned value is unchanged.
    return accuracy_score(test_y, predicted)
示例10: fit_network
def fit_network():
    """Load the dataset, L2-normalize the features, label-encode the targets,
    and fit a two-layer Softmax network for a single iteration."""
    features, labels = datasplit.data()
    features_normalized = normalize(features, norm='l2')
    # Map raw labels to contiguous integer classes before fitting.
    encoder = LabelEncoder()
    encoder.fit(labels)
    encoded_labels = encoder.transform(labels)
    nn = Classifier(
        layers=[
            Layer("Softmax", units=1000),
            Layer("Softmax", units=62),
        ],
        learning_rate=0.02,
        n_iter=1)
    nn.fit(features_normalized, encoded_labels)
    return nn
示例11: _ann_n_iter
def _ann_n_iter(data, data_test, target, target_test, n_units):
nn = Classifier(
layers=[
Layer("Sigmoid", units=n_units),
Layer("Softmax")],
n_iter=4000)
nn.fit(data, target)
test_score = nn.score(data_test, target_test)
print n_units, test_score
示例12: _ann_n_iter
def _ann_n_iter(data, data_test, target, target_test, n_iter):
nn = Classifier(
layers=[
Layer("Sigmoid", units=100),
Layer("Softmax")],
n_iter=n_iter)
train_score = np.mean(cross_validation.cross_val_score(nn, data, target, cv=10))
nn.fit(data, target)
test_score = nn.score(data_test, target_test)
print n_iter, train_score, test_score
示例13: CNN
def CNN(X_train, y_train, X_test):
    """Train a small convolutional network, print its training score, and
    return the predictions for X_test as a list."""
    net = Classifier(
        layers=[
            Convolution("Rectifier", channels=20, kernel_shape=(5, 5), dropout=0.25),
            Layer("Tanh", units=300),
            Layer("Tanh", units=100),
            Layer("Softmax"),
        ],
        learning_rate=0.02,
        n_iter=10)
    net.fit(X_train, y_train)
    print('\nTRAIN SCORE', net.score(X_train, y_train))
    return list(net.predict(X_test))
示例14: train_model
def train_model(values, labels):
    """Fit a single-convolution-layer classifier on (values, labels) and
    return the trained model."""
    net = Classifier(
        layers=[
            Convolution("Rectifier", channels=8, kernel_shape=(3, 3)),
            Layer("Softmax"),
        ],
        learning_rate=0.02,
        n_iter=5)
    net.fit(values, labels)
    return net
示例15: covnetTrain
def covnetTrain(train_bmi , train_labels , ite =10 , kernel =3 ,learn_rate =0.02, channel = 8):
    """Train a one-convolution-layer Softmax classifier.

    Args:
        train_bmi: Training inputs.
        train_labels: Training labels.
        ite: Number of training iterations.
        kernel: Square kernel side length.
        learn_rate: Learning rate.
        channel: Number of convolution channels.

    Returns:
        The result of fit() on the classifier (the trained network).
    """
    net = Classifier(
        layers=[
            Convolution("Rectifier", channels=channel, kernel_shape=(kernel, kernel)),
            Layer("Softmax"),
        ],
        learning_rate=learn_rate,
        n_iter=ite)
    return net.fit(train_bmi, train_labels)