This article collects typical usage examples of the Regressor class from the Python module sknn.mlp. If you have been wondering what the Regressor class is for or how to use it, the curated examples below should help.
The following presents 15 code examples of the Regressor class, sorted by popularity by default.
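Before diving into the project examples, here is a minimal, self-contained sketch of the typical Regressor workflow (construct with a layer list, fit, predict). The layer size, learning rate, and toy data below are illustrative placeholders, not values taken from any of the examples.

import numpy as np
from sknn.mlp import Regressor, Layer

# Toy regression data: 100 samples, 8 features, one continuous target.
X = np.random.uniform(-1.0, 1.0, (100, 8))
y = X.sum(axis=1, keepdims=True)

# One hidden Rectifier layer feeding a Linear output layer.
nn = Regressor(
    layers=[
        Layer("Rectifier", units=16),
        Layer("Linear")],
    learning_rate=0.001,
    n_iter=25)
nn.fit(X, y)
predictions = nn.predict(X)  # numpy array with one row per sample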
Example 1: NeuralNet
def NeuralNet(train, test, features):
    eta = 0.025
    niter = 2000
    regressor = Regressor(
        layers=[
            Layer("Rectifier", units=100),
            Layer("Tanh", units=100),
            Layer("Sigmoid", units=100),
            Layer("Linear")],
        learning_rate=eta,
        learning_rule="momentum",
        learning_momentum=0.9,
        batch_size=100,
        valid_size=0.01,
        n_stable=100,
        n_iter=niter,
        verbose=True,
    )
    print regressor.__class__.__name__
    start = time.time()
    # `goal` and `myid` are module-level names in the original project.
    regressor.fit(np.array(train[list(features)]), train[goal])
    print " -> Training time:", time.time() - start
    if not os.path.exists("result/"):
        os.makedirs("result/")
    # TODO: clean up the prediction flattening below
    predictions = regressor.predict(np.array(test[features]))
    try:  # try to flatten a list that might be flattenable
        predictions = list(itertools.chain.from_iterable(predictions))
    except:
        pass
    csvfile = "result/dat-nnet-eta%s-niter%s.csv" % (str(eta), str(niter))
    with open(csvfile, "w") as output:
        writer = csv.writer(output, lineterminator="\n")
        writer.writerow([myid, goal])
        for i in range(0, len(predictions)):
            writer.writerow([i + 1, predictions[i]])
Example 2: gamma
def gamma():
    value_map = {'warm': 1.0, 'neutral': 0.5, 'cold': 0.0}
    X = data["x"][:, [0, 1, 2, 5, 6]]
    X = np.abs(X)
    maxX = np.amax(X, axis=0)
    minX = np.amin(X, axis=0)  # column-wise minimum for min-max scaling
    X = (X - minX) / maxX
    Y = data["y"][:, 1]
    Y = np.asarray([value_map[y] for y in Y])
    split_data = cross_validation.train_test_split(X, Y, test_size=0.2)
    X_train = split_data[0]
    X_test = split_data[1]
    Y_train = split_data[2]
    Y_test = split_data[3]
    nn = Regressor(
        layers=[
            Layer("Rectifier", units=3),
            Layer("Linear")],
        learning_rate=1e-3,
        n_iter=100)
    nn.fit(X_train, Y_train)
    print 'inosity accuracy'
    prediction = nn.predict(X_test)
    prediction = [closest(y[0]) for y in prediction]
    Y_test = [closest(y) for y in Y_test]
    print metrics.accuracy_score(prediction, Y_test)
Example 3: ClassificationTools
class ClassificationTools():
    def __init__(self, inputVector=[], outputVector=[], filepath=''):
        if filepath == '':
            self.inputVector = numpy.asarray(inputVector)
            self.outputVector = numpy.asarray(outputVector)
            self.model = None
        else:
            self.model = pickle.load(file(filepath, 'r'))

    def setVectors(self, inputVector, outputVector):
        self.inputVector = numpy.asarray(inputVector)
        self.outputVector = numpy.asarray(outputVector)

    def trainMultilayerPerceptron(self, hlunits=10000, learningRate=0.01, iters=1000):
        # Trains a simple MLP with a single hidden layer.
        self.model = Regressor(
            layers=[
                Layer("Rectifier", units=hlunits),
                Layer("Linear")],
            learning_rate=learningRate,
            n_iter=iters)
        self.model.fit(self.inputVector, self.outputVector)

    def predict(self, toPredict):
        prediction = self.model.predict(numpy.asarray(toPredict))
        return prediction  # a numpy array of predicted floats, one row per sample

    def trainDeepNetwork(self):
        # Trains a deep network based on a multi-layer autoencoder,
        # which is then fine-tuned using an MLP. Not implemented yet.
        pass

    def serializeModel(self, filepath):
        pickle.dump(self.model, file(filepath, 'w'))
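A brief usage sketch of the class above; the toy vectors, layer size, and file name are hypothetical, and the pickle/file handling assumes the same Python 2 environment the class itself was written for.

# Hypothetical usage of ClassificationTools with toy data.
inputs = [[0.1, 0.2], [0.4, 0.5], [0.9, 0.8]]
outputs = [[0.0], [0.5], [1.0]]

tools = ClassificationTools(inputVector=inputs, outputVector=outputs)
tools.trainMultilayerPerceptron(hlunits=50, learningRate=0.01, iters=200)
print(tools.predict([[0.3, 0.3]]))     # numpy array of predicted floats
tools.serializeModel('mlp_model.pkl')  # persist the trained model
restored = ClassificationTools(filepath='mlp_model.pkl')  # reload it later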
Example 4: test_VerboseRegressor
def test_VerboseRegressor(self):
    nn = MLPR(layers=[L("Linear")], verbose=1, n_iter=1)
    a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
    nn.fit(a_in, a_out)
    assert_in("Epoch Training Error Validation Error Time", self.buf.getvalue())
    assert_in(" 1 ", self.buf.getvalue())
    assert_in(" N/A ", self.buf.getvalue())
Example 5: neural_net
def neural_net(features, target, test_size_percent=0.2, cv_split=3, n_iter=100, learning_rate=0.01):
    '''features -> pandas DataFrame with attributes as columns
    target -> pandas DataFrame with the target column for prediction
    test_size_percent -> percentage of data points to be used for testing'''
    scale = preprocessing.MinMaxScaler()
    X_array = scale.fit_transform(features)
    y_array = scale.fit_transform(target)
    mlp = Regressor(layers=[Layer("Rectifier", units=5),   # hidden layer 1
                            Layer("Rectifier", units=3),   # hidden layer 2
                            Layer("Linear")],              # output layer
                    n_iter=n_iter, learning_rate=learning_rate)
    X_train, X_test, y_train, y_test = train_test_split(X_array, y_array.T.squeeze(),
                                                        test_size=test_size_percent, random_state=4)
    mlp.fit(X_train, y_train)
    test_prediction = mlp.predict(X_test)
    tscv = TimeSeriesSplit(cv_split)
    training_score = cross_val_score(mlp, X_train, y_train, cv=tscv.n_splits)
    testing_score = cross_val_score(mlp, X_test, y_test, cv=tscv.n_splits)
    print "Cross-val Training score:", training_score.mean()
    # print "Cross-val Testing score:", testing_score.mean()
    training_predictions = cross_val_predict(mlp, X_train, y_train, cv=tscv.n_splits)
    testing_predictions = cross_val_predict(mlp, X_test, y_test, cv=tscv.n_splits)
    training_accuracy = metrics.r2_score(y_train, training_predictions)
    # test_accuracy_model = metrics.r2_score(y_test, test_prediction_model)
    test_accuracy = metrics.r2_score(y_test, testing_predictions)
    # print "Cross-val predicted accuracy:", training_accuracy
    print "Test-predictions accuracy:", test_accuracy
    plot_model(target, y_train, y_test, training_predictions, testing_predictions)
    return mlp
Author: SOLIMAN68 · Project: Data-driven_Building_simulation_Polimi_EETBS · Source file: master_1_4_eachBuilding_allModels.py
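A hedged example of how neural_net might be called. The CSV file and column names are invented, and it is assumed that plot_model and the other imports used inside the function are available from the original module.

# Hypothetical call; 'building.csv' and its column names are placeholders.
import pandas as pd

df = pd.read_csv('building.csv')
features = df[['outdoor_temp', 'humidity', 'hour']]
target = df[['energy_consumption']]
model = neural_net(features, target, test_size_percent=0.2, cv_split=3, n_iter=200)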
Example 6: CreateNetwork
def CreateNetwork(data, predicates):
    # input dimensionality
    dim_in = len(predicates)
    # output dimensionality
    dim_out = len(data[0]) - 1
    # network configuration
    neural_network = Regressor(
        layers=[
            Layer("Rectifier", units=50),
            Layer("Linear")],
        learning_rate=0.001,
        n_iter=5000)
    # build the training set
    x_train = np.array([CalcPredicates(row[0], predicates) for row in data])
    y_train = np.array([apply(float, row[1:]) for row in data])
    # training
    logging.info('Start training')
    logging.info('\n' + str(x_train))
    logging.info('\n' + str(y_train))
    try:
        neural_network.fit(x_train, y_train)
    except KeyboardInterrupt:
        logging.info('User break')
    logging.info('Network created successfully')
    logging.info('score = ' + str(neural_network.score(x_train, y_train)))
    # save the trained network
    pickle.dump(neural_network, open(datetime.datetime.now().isoformat() + '.pkl', 'wb'))
    return neural_network
Example 7: NeuralRegLearner
class NeuralRegLearner(object):
    def __init__(self, verbose=False):
        self.name = "Neural net Regression Learner"
        self.network = Regressor(
            layers=[
                Layer("Rectifier", units=100),
                Layer("Linear")],
            learning_rate=0.02,
            n_iter=10)

    def addEvidence(self, dataX, dataY):
        """
        @summary: Add training data to learner
        @param dataX: X values of data to add
        @param dataY: the Y training values
        """
        dataX = np.array(dataX)
        dataY = np.array(dataY)
        self.network.fit(dataX, dataY)

    def query(self, points):
        """
        @summary: Estimate a set of test points given the model we built.
        @param points: should be a numpy array with each row corresponding to a specific query.
        @returns the estimated values according to the saved model.
        """
        return self.network.predict(points)
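A minimal usage sketch for the learner above; the synthetic data and shapes are arbitrary and only illustrate the addEvidence/query cycle.

import numpy as np

# Hypothetical training data: 200 samples, 5 features, a simple linear target.
trainX = np.random.uniform(0.0, 1.0, (200, 5))
trainY = trainX.dot(np.arange(1.0, 6.0))

learner = NeuralRegLearner()
learner.addEvidence(trainX, trainY)
print(learner.query(trainX[:3]))  # estimates for the first three rows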
Example 8: TestLinearNetwork
class TestLinearNetwork(unittest.TestCase):
    def setUp(self):
        self.nn = MLPR(layers=[L("Linear")], n_iter=1)

    def test_LifeCycle(self):
        del self.nn

    def test_PredictNoOutputUnitsAssertion(self):
        a_in = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, a_in)

    def test_AutoInitializeWithOutputUnits(self):
        self.nn.layers[-1].units = 4
        a_in = numpy.zeros((8, 16))
        self.nn.predict(a_in)

    def test_FitAutoInitialize(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        self.nn.fit(a_in, a_out)
        assert_true(self.nn.is_initialized)

    def test_FitWrongSize(self):
        a_in, a_out = numpy.zeros((7, 16)), numpy.zeros((9, 4))
        assert_raises(AssertionError, self.nn.fit, a_in, a_out)
Example 9: run_EqualityTest
def run_EqualityTest(self, copier, asserter):
    for activation in ["Rectifier", "Sigmoid", "Maxout", "Tanh"]:
        nn1 = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
        nn1._initialize(self.a_in, self.a_out)
        nn2 = copier(nn1, activation)
        asserter(numpy.all(nn1.predict(self.a_in) == nn2.predict(self.a_in)))
Example 10: TestSerializedNetwork
class TestSerializedNetwork(TestLinearNetwork):
    def setUp(self):
        self.original = MLPR(layers=[L("Linear")])
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        self.original._initialize(a_in, a_out)
        buf = io.BytesIO()
        pickle.dump(self.original, buf)
        buf.seek(0)
        self.nn = pickle.load(buf)

    def test_TypeOfWeightsArray(self):
        for w, b in self.nn._mlp_to_array():
            assert_equal(type(w), numpy.ndarray)
            assert_equal(type(b), numpy.ndarray)

    # Override base class tests: you currently can't re-train a network that
    # was serialized and deserialized.
    def test_FitAutoInitialize(self): pass
    def test_ResizeInputFrom4D(self): pass
    def test_ResizeInputFrom3D(self): pass

    def test_PredictNoOutputUnitsAssertion(self):
        # Override base class test: the deserialized network is already
        # initialized, so it should predict without raising an assertion.
        assert_true(self.nn.is_initialized)

    def test_PredictAlreadyInitialized(self):
        a_in = numpy.zeros((8, 16))
        self.nn.predict(a_in)
Example 11: TestDataAugmentation
class TestDataAugmentation(unittest.TestCase):
    def setUp(self):
        self.called = 0
        self.value = 1.0
        self.nn = MLPR(
            layers=[L("Linear")],
            n_iter=1,
            batch_size=2,
            mutator=self._mutate_fn)

    def _mutate_fn(self, sample):
        self.called += 1
        sample[sample == 0.0] = self.value

    def test_TestCalledOK(self):
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        self.nn._fit(a_in, a_out)
        assert_equals(a_in.shape[0], self.called)

    def test_DataIsUsed(self):
        self.value = float("nan")
        a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
        assert_raises(RuntimeError, self.nn._fit, a_in, a_out)
Example 12: make
def make(self, activation, seed=1234, train=False, **keywords):
    nn = MLPR(layers=[L(activation, units=16, **keywords), L("Linear", units=1)],
              random_state=seed, n_iter=1)
    if train:
        nn.fit(self.a_in, self.a_out)
    else:
        nn._initialize(self.a_in, self.a_out)
    return nn
Example 13: test_UnusedParameterWarning
def test_UnusedParameterWarning(self):
    nn = MLPR(layers=[L("Linear", pieces=2)], n_iter=1)
    a_in = numpy.zeros((8, 16))
    nn._initialize(a_in, a_in)
    assert_in('Parameter `pieces` is unused', self.buf.getvalue())
    self.buf = io.StringIO()  # clear the captured log output
Example 14: test_SetParametersConstructor
def test_SetParametersConstructor(self):
    weights = numpy.random.uniform(-1.0, +1.0, (16, 4))
    biases = numpy.random.uniform(-1.0, +1.0, (4,))
    nn = MLPR(layers=[L("Linear")], parameters=[(weights, biases)])
    a_in, a_out = numpy.zeros((8, 16)), numpy.zeros((8, 4))
    nn._initialize(a_in, a_out)
    assert_in('Reloading parameters for 1 layer weights and biases.', self.buf.getvalue())
Example 15: test_HorizontalKernel
def test_HorizontalKernel(self):
    nn = MLPR(layers=[
        C("Rectifier", channels=7, kernel_shape=(16, 1)),
        L("Linear", units=5)])
    a_in = numpy.zeros((8, 16, 16, 1))
    nn._create_specs(a_in)
    assert_equal(nn.unit_counts, [256, 16 * 7, 5])