This article collects typical usage examples of the Python class sknn.mlp.MultiLayerPerceptron. If you are unsure what MultiLayerPerceptron does, how to use it, or what working code looks like, the curated class examples below should help.
The following presents 15 code examples of the MultiLayerPerceptron class, sorted by popularity by default.
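All of the snippets below come from unit tests and use short aliases while calling the private _fit/_predict methods directly. They do not show their imports, so here is a minimal sketch of the imports and aliases they appear to assume; the exact aliases (MLP, L, C) and the SPARSE_TYPES list are inferred from how the snippets use them, not taken verbatim from the original source files:

    # Assumed imports and aliases for the examples below (illustrative sketch only).
    import os
    import random
    import shutil
    import tempfile
    import unittest

    import numpy
    import scipy.sparse
    import theano
    from nose.tools import assert_equal, assert_equals, assert_raises, assert_true

    import sknn
    from sknn.mlp import MultiLayerPerceptron as MLP
    from sknn.mlp import Layer as L, Convolution as C

    # Sparse formats exercised by the sparse-matrix tests below.
    SPARSE_TYPES = ['csr_matrix', 'csc_matrix', 'dok_matrix', 'lil_matrix']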
Example 1: TestConvolution
class TestConvolution(unittest.TestCase):

    def setUp(self):
        self.nn = MLP(
            layers=[
                C("Rectifier", kernel_shape=(3,3), channels=4),
                L("Linear")],
            n_iter=1)

    def test_FitError(self):
        # The sparse matrices can't store anything but 2D, but convolution needs 3D or more.
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X, y = sparse_matrix((8, 16)), sparse_matrix((8, 16))
            assert_raises((TypeError, NotImplementedError), self.nn._fit, X, y)

    def test_FitResizeSquare(self):
        # 36 flat features can be reshaped into a 6x6 square image for the convolution.
        X, y = numpy.zeros((8, 36)), numpy.zeros((8, 4))
        self.nn._fit(X, y)

    def test_FitResizeFails(self):
        # 35 flat features cannot be reshaped into a square image, so fitting must fail.
        X, y = numpy.zeros((8, 35)), numpy.zeros((8, 4))
        assert_raises(AssertionError, self.nn._fit, X, y)
Example 2: TestMemoryMap
class TestMemoryMap(unittest.TestCase):

    __types__ = ['float32', 'float64']

    def setUp(self):
        self.nn = MLP(layers=[L("Linear", units=3)], n_iter=1)
        self.directory = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.directory)

    def make(self, name, shape, dtype):
        filename = os.path.join(self.directory, name)
        return numpy.memmap(filename, dtype=dtype, mode='w+', shape=shape)

    def test_FitAllTypes(self):
        for t in self.__types__:
            theano.config.floatX = t
            X = self.make('X', (12, 3), dtype=t)
            y = self.make('y', (12, 3), dtype=t)
            self.nn._fit(X, y)

    def test_PredictAllTypes(self):
        for t in self.__types__:
            theano.config.floatX = t
            X = self.make('X', (12, 3), dtype=t)
            yp = self.nn._predict(X)
Example 3: TestTrainingProcedure
class TestTrainingProcedure(unittest.TestCase):

    def test_FitTerminateStable(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        activation = "Gaussian" if sknn.backend.name == "pylearn2" else "Linear"
        self.nn = MLP(
            layers=[L(activation)], learning_rate=0.001,
            n_iter=None, n_stable=1, f_stable=0.01,
            valid_set=(a_in, a_out))
        self.nn._fit(a_in, a_out)

    def test_FitAutomaticValidation(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        self.nn = MLP(
            layers=[L("Linear")], learning_rate=0.001,
            n_iter=10, n_stable=1, f_stable=0.1,
            valid_size=0.25)
        self.nn._fit(a_in, a_out)

    def test_TrainingInfinite(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        self.nn = MLP(layers=[L("Linear")])
        assert_raises(AssertionError, self.nn._fit, a_in, a_out)
Example 4: TestScipySparseMatrix
class TestScipySparseMatrix(unittest.TestCase):

    def setUp(self):
        self.nn = MLP(layers=[L("Linear", units=4)], n_iter=1)

    def test_FitFloat64(self):
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float64)
            y = sparse_matrix((8, 4), dtype=numpy.float64)
            self.nn._fit(X, y)

    def test_FitFloat32(self):
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float32)
            y = sparse_matrix((8, 4), dtype=numpy.float32)
            self.nn._fit(X, y)

    def test_FitHybrid(self):
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float32)
            y = numpy.zeros((8, 4), dtype=numpy.float32)
            self.nn._fit(X, y)

    def test_FitMutator(self):
        def mutate(x):
            self.count += 1
            return x - 0.5
        self.nn.mutator = mutate

        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float32)
            y = numpy.zeros((8, 4), dtype=numpy.float32)

            self.count = 0
            assert_equal(0, self.count)
            self.nn._fit(X, y)
            assert_equal(8, self.count)

    def test_Predict64(self):
        theano.config.floatX = 'float64'
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float64)
            yp = self.nn._predict(X)
            assert_equal(yp.dtype, numpy.float64)

    def test_Predict32(self):
        theano.config.floatX = 'float32'
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float32)
            yp = self.nn._predict(X)
            assert_equal(yp.dtype, numpy.float32)
Example 5: TestScipySparseMatrix
class TestScipySparseMatrix(unittest.TestCase):

    def setUp(self):
        self.nn = MLP(layers=[L("Gaussian", units=4)], n_iter=1)

    def test_FitFloat64(self):
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float64)
            y = sparse_matrix((8, 4), dtype=numpy.float64)
            self.nn._fit(X, y)

    def test_FitFloat32(self):
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float32)
            y = sparse_matrix((8, 4), dtype=numpy.float32)
            self.nn._fit(X, y)

    def test_Predict64(self):
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float64)
            self.nn._predict(X)

    def test_Predict32(self):
        for t in SPARSE_TYPES:
            sparse_matrix = getattr(scipy.sparse, t)
            X = sparse_matrix((8, 4), dtype=numpy.float32)
            self.nn._predict(X)
Example 6: setUp
def setUp(self):
    self.batch_count = 0
    self.batch_items = 0
    # on_batch_start is a method of the enclosing test case (see the full TestBatchSize class in Example 13).
    self.nn = MLP(
        layers=[L("Rectifier")],
        learning_rate=0.001, n_iter=1,
        callback={'on_batch_start': self.on_batch_start})
Example 7: test_FitTerminateStable
def test_FitTerminateStable(self):
    a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
    self.nn = MLP(
        layers=[L("Gaussian")], learning_rate=0.001,
        n_iter=None, n_stable=1, f_stable=0.1,
        valid_set=(a_in, a_out))
    self.nn._fit(a_in, a_out)
Example 8: test_FitAutomaticValidation
def test_FitAutomaticValidation(self):
    a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
    self.nn = MLP(
        layers=[L("Linear")], learning_rate=0.001,
        n_iter=10, n_stable=1, f_stable=0.1,
        valid_size=0.25)
    self.nn._fit(a_in, a_out)
Example 9: test_FitTerminateStable
def test_FitTerminateStable(self):
    a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
    activation = "Gaussian" if sknn.backend.name == "pylearn2" else "Linear"
    self.nn = MLP(
        layers=[L(activation)], learning_rate=0.001,
        n_iter=None, n_stable=1, f_stable=0.01,
        valid_set=(a_in, a_out))
    self.nn._fit(a_in, a_out)
Example 10: TestTrainingProcedure
class TestTrainingProcedure(unittest.TestCase):

    def test_FitTerminateStable(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        self.nn = MLP(
            layers=[L("Gaussian")], learning_rate=0.001,
            n_iter=None, n_stable=1, f_stable=0.1,
            valid_set=(a_in, a_out))
        self.nn._fit(a_in, a_out)

    def test_FitAutomaticValidation(self):
        a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
        self.nn = MLP(
            layers=[L("Gaussian")], learning_rate=0.001,
            n_iter=10, n_stable=1, f_stable=0.1,
            valid_size=0.25)
        self.nn._fit(a_in, a_out)
Example 11: test_TrainConstantOneEpoch
def test_TrainConstantOneEpoch(self):
    for t in ['csr_matrix', 'csc_matrix']:
        sparse_matrix = getattr(scipy.sparse, t)
        X_s, y_s = sparse_matrix((8, 16), dtype=numpy.float32), sparse_matrix((8, 16), dtype=numpy.float32)
        X, y = X_s.toarray(), y_s.toarray()

        nn1 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
        nn1._fit(X, y)
        nn2 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
        nn2._fit(X_s, y_s)
        # Training on the dense and sparse versions of the same data should give identical predictions.
        assert_true(numpy.all(nn1._predict(X_s) == nn2._predict(X_s)))
Example 12: test_TrainingUserDefined
def test_TrainingUserDefined(self):
    self.counter = 0
    def terminate(**_):
        self.counter += 1
        return False

    a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
    self.nn = MLP(layers=[L("Linear")], n_iter=100, n_stable=None, callback={'on_epoch_finish': terminate})
    self.nn._fit(a_in, a_out)
    assert_equals(self.counter, 1)
Example 13: TestBatchSize
class TestBatchSize(unittest.TestCase):

    def setUp(self):
        self.batch_count = 0
        self.nn = MLP(
            layers=[L("Rectifier")],
            learning_rate=0.001, n_iter=1,
            callback={'on_batch_start': self.on_batch_start})

    def on_batch_start(self, **args):
        self.batch_count += 1

    def test_BatchSizeLargerThanInput(self):
        self.nn.batch_size = 32
        a_in, a_out = numpy.zeros((8,16)), numpy.ones((8,4))
        self.nn._fit(a_in, a_out)
        assert_equals(1, self.batch_count)

    def test_BatchSizeSmallerThanInput(self):
        self.nn.batch_size = 4
        a_in, a_out = numpy.ones((8,16)), numpy.zeros((8,4))
        self.nn._fit(a_in, a_out)
        assert_equals(2, self.batch_count)

    def test_BatchSizeNonMultiple(self):
        self.nn.batch_size = 4
        a_in, a_out = numpy.zeros((9,16)), numpy.ones((9,4))
        self.nn._fit(a_in, a_out)
        assert_equals(3, self.batch_count)
Example 14: test_TrainRandomOneEpoch
def test_TrainRandomOneEpoch(self):
    for t in ['dok_matrix', 'lil_matrix']:
        sparse_matrix = getattr(scipy.sparse, t)
        X_s, y_s = sparse_matrix((8, 16), dtype=numpy.float32), sparse_matrix((8, 16), dtype=numpy.float32)
        for i in range(X_s.shape[0]):
            X_s[i,random.randint(0, X_s.shape[1]-1)] = 1.0
            y_s[i,random.randint(0, y_s.shape[1]-1)] = 1.0
        X, y = X_s.toarray(), y_s.toarray()

        nn1 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
        nn1._fit(X, y)
        nn2 = MLP(layers=[L("Linear")], n_iter=1, random_state=1234)
        nn2._fit(X_s, y_s)
        # Training on the dense and sparse versions of the same data should give identical predictions.
        assert_true(numpy.all(nn1._predict(X_s) == nn2._predict(X_s)))
Example 15: TestInputDataTypes
class TestInputDataTypes(unittest.TestCase):

    def setUp(self):
        self.nn = MLP(layers=[L("Gaussian")], n_iter=1)

    def test_FitSciPySparse(self):
        X, y = scipy.sparse.csr_matrix((8, 4)), scipy.sparse.csr_matrix((8, 4))
        self.nn._fit(X, y)

    def test_PredictSciPySparse(self):
        X, y = scipy.sparse.csr_matrix((8, 4)), scipy.sparse.csr_matrix((8, 4))
        self.nn._fit(X, y)
        self.nn._predict(X)
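Because the examples above are unit tests, they call the private _fit/_predict methods directly. In application code you would normally use the public estimators that scikit-neuralnetwork builds on MultiLayerPerceptron, such as sknn.mlp.Regressor or sknn.mlp.Classifier. The following is a minimal sketch, not code from the examples above; the toy data is made up, and only the documented public names Regressor and Layer are assumed:

    import numpy
    from sknn.mlp import Regressor, Layer

    # Toy regression data: 100 samples, 16 features, 4 targets.
    X = numpy.random.uniform(size=(100, 16))
    y = numpy.random.uniform(size=(100, 4))

    nn = Regressor(
        layers=[
            Layer("Rectifier", units=32),   # hidden layer
            Layer("Linear")],               # linear output layer
        learning_rate=0.001,
        n_iter=10)

    nn.fit(X, y)            # public API, used instead of the private _fit above
    y_pred = nn.predict(X)
    print(y_pred.shape)     # (100, 4)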