

Python mlp.MLP Class Code Examples

This article collects typical usage examples of the mlp.MLP class in Python. If you are looking for concrete answers to questions such as how the Python MLP class is used, how to call it, or what working examples look like, the selected class code examples below may help.


The following shows 15 code examples of the MLP class, sorted by popularity by default.

Example 1: create_brain

def create_brain():
	topology = [24,48,24,12,1]
	brain = MLP(topology)
	brain = load_training('data/train.csv', brain)
	brain.saveNetwork()

	return brain
Developer: diezcami, Project: CS129-18-face-detection, Lines of code: 7, Source: jann.py
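The topology list [24, 48, 24, 12, 1] specifies the layer widths: 24 inputs, hidden layers of 48, 24 and 12 units, and a single output. The load_training helper is not part of the excerpt; a minimal sketch of what it could look like, assuming (purely as an illustration) that this MLP exposes a train(inputs, targets) method for one sample at a time:

import csv

def load_training(path, brain):
    # Hypothetical helper: each CSV row holds 24 feature columns followed by
    # one target column; brain.train() is an assumed per-sample training call.
    with open(path) as f:
        for row in csv.reader(f):
            values = [float(v) for v in row]
            features, target = values[:24], values[24:]
            brain.train(features, target)
    return brain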

Example 2: train

    def train(self, X, Y, learning_rate=0.1, n_epochs=100, report_frequency=10, lambda_l2=0.0):

        self.report_frequency = report_frequency 

        # allocate symbolic variables for the data
        x = T.matrix('x')  
        y = T.matrix('y')  

        # put the data in shared memory
        self.shared_x = theano.shared(numpy.asarray(X, dtype=theano.config.floatX))
        self.shared_y = theano.shared(numpy.asarray(Y, dtype=theano.config.floatX))
        rng = numpy.random.RandomState(1234)

        # initialize the mlp
        mlp = MLP(rng=rng, input=x, n_in=self.n_in, n_out=self.n_out,
                  n_hidden=self.n_hidden, activation=self.activation)

        # define the cost function, possibly with regularizing term
        if lambda_l2>0.0:
            cost = mlp.cost(y) + lambda_l2*mlp.l2
        else:
            cost = mlp.cost(y) 

        # compute the gradient of cost with respect to theta (stored in params)
        # the resulting gradients will be stored in a list gparams
        gparams = [T.grad(cost, param) for param in mlp.params]

        updates = [(param, param - learning_rate * gparam)
            for param, gparam in zip(mlp.params, gparams) ]

        # compiling a Theano function `train_model` that returns the cost and,
        # at the same time, updates the parameters of the model according to
        # the rules defined in `updates`
        train_model = theano.function(
            inputs=[],
            outputs=cost,
            updates=updates,
            givens={
                x: self.shared_x,
                y: self.shared_y
            }
        )

        #define function that returns model prediction
        self.predict_model = theano.function(
            inputs=[mlp.input], outputs=mlp.y_pred)

        ###############
        # TRAIN MODEL #
        ###############

        epoch = 0

        while (epoch < n_epochs):
            epoch = epoch + 1
            epoch_cost = train_model()
            if epoch % self.report_frequency == 0:
                print("epoch: %d  cost: %f" % (epoch, epoch_cost))
Developer: TianqiJiang, Project: Machine-Learning-Class, Lines of code: 58, Source: function_approximator.py
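A usage sketch for this trainer on a toy regression problem. The enclosing class is not shown in the excerpt, so the constructor call below (here named FunctionApproximator, with keyword arguments matching the attributes the method reads) is an assumption:

import numpy
import theano
import theano.tensor as T

# Toy 1-D regression data: X has shape (n_samples, n_in), Y has shape (n_samples, n_out).
X = numpy.linspace(-1.0, 1.0, 200).reshape(-1, 1)
Y = numpy.sin(3.0 * X)

# Hypothetical wrapper class and constructor; only the train() call is from the source above.
approx = FunctionApproximator(n_in=1, n_out=1, n_hidden=20, activation=T.tanh)
approx.train(X, Y, learning_rate=0.05, n_epochs=500, report_frequency=100, lambda_l2=1e-4)

# After training, predict_model runs inputs through the compiled Theano graph.
Y_hat = approx.predict_model(numpy.asarray(X, dtype=theano.config.floatX))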

Example 3: fit_model

 def fit_model(self, X, Y, num_classes):
   if self.modeltype == "mlp":
     classifier = MLP(self.input_size, self.hidden_sizes, num_classes)
   else:
     classifier = RNN(self.input_size, self.hidden_size, num_classes)
   train_func = classifier.get_train_func(self.learning_rate)
   for num_iter in range(self.max_iter):
     for x, y in zip(X, Y):
       train_func(x, y)
   return classifier
Developer: BMKEG, Project: exp-parser, Lines of code: 10, Source: nn_classifier.py

Example 4: load_nn_dwl

def load_nn_dwl(paramFileName):

    paramList = numpy.load(open(paramFileName, 'rb'))  # parameter archive is binary, open in 'rb' mode
    W1, b1, W2, b2 = paramList['arr_0']
    n_input = len(W1)
    n_hidden = len(W2)
    n_out = len(W2[0])
    x = T.matrix('x')
    rng = numpy.random.RandomState(1234)

    classifier = MLP(rng=rng, input=x, n_in=n_input, n_hidden=n_hidden, n_out=n_out)
    classifier.load_model_params(paramList['arr_0'])

    return classifier
Developer: Sandy4321, Project: dwl, Lines of code: 14, Source: features.py
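Since numpy.savez stores positional arguments under the keys 'arr_0', 'arr_1', ..., the file read by load_nn_dwl was presumably produced by something like the following (a sketch of the assumed save side, not code from the dwl project):

import numpy

def save_nn_dwl(paramFileName, W1, b1, W2, b2):
    # Pack the four parameter arrays into a single object array so that they
    # all come back as paramList['arr_0'], matching the unpacking in load_nn_dwl.
    params = numpy.empty(4, dtype=object)
    params[:] = [W1, b1, W2, b2]
    numpy.savez(paramFileName, params)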

Example 5: __init__

    def __init__(self, n_ins, hidden_layers_sizes, n_outs,
                    numpy_rng=None, theano_rng=None):

        MLP.__init__(self, n_ins, hidden_layers_sizes, n_outs,
                    numpy_rng, theano_rng)

        # labels (used for minibatch sgd during RL)
        self.y = T.vector('y')
        # actions (for each label, there is a corresponding number here
        # representing the output node whose value it should be compared
        # to during SGD)
        self.a = T.ivector('a')

        # The training error
        self.training_cost = T.sum(T.sqr(self.outLayer.output[T.arange(self.a.shape[0]),self.a] - self.y))
Developer: rfeinman, Project: navigationQlearning, Lines of code: 15, Source: Q_net.py
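The training cost uses advanced indexing: for each sample it picks the output unit named by the action vector a and compares it with the target y. A small NumPy illustration of the same indexing pattern (the Theano expression behaves analogously):

import numpy as np

output = np.array([[0.1, 0.7, 0.2],   # network outputs, one row per sample
                   [0.5, 0.3, 0.9]])
a = np.array([1, 2])                  # action / output node chosen for each sample
y = np.array([1.0, 0.5])              # targets for those nodes

selected = output[np.arange(a.shape[0]), a]    # -> [0.7, 0.9]
training_cost = np.sum((selected - y) ** 2)    # (0.7-1.0)**2 + (0.9-0.5)**2 = 0.25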

Example 6: main

def main():
    dataset = [((0, 0), (0, 1)), ((0, 1), (1, 0)), ((1, 0), (1, 0)), ((1, 1), (0, 1))]

    #dtanh = lambda o: 1 - o ** 2
    dsigm = lambda o: o * (1 - o)

    activation_functions = (np.vectorize(sigmoid), np.vectorize(sigmoid))
    #activation_functions = (np.tanh, np.tanh)
    derivation_functions = (np.vectorize(dsigm), np.vectorize(dsigm))
    #derivation_functions = (np.vectorize(dtanh), np.vectorize(dtanh))

    m = MLP((2, 3, 2), activation_functions, derivation_functions)
    m.train(dataset, epsilon=0, alpha=0.9, eta=.25, epochs=2500)

    for i in range(len(dataset)):
        o = m.feedForward(dataset[i][0])
        print(i, dataset[i][0], encode(o.argmax(), len(o)), ' (expected ', dataset[i][1], ')')
Developer: martianboy, Project: mlp, Lines of code: 17, Source: xor.py
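The encode helper used in the final loop is not shown; from the call encode(o.argmax(), len(o)) and the one-hot targets in dataset, a plausible definition is a simple one-hot encoder:

def encode(index, length):
    # One-hot encoding: a tuple of zeros with a 1 at the winning index, so the
    # network's argmax can be compared against targets such as (0, 1) or (1, 0).
    return tuple(1 if i == index else 0 for i in range(length))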

Example 7: setUp

    def setUp(self):
        xor = MLP()
        xor.add_layer(Layer(2))
        xor.add_layer(Layer(2))
        xor.add_layer(Layer(1))

        xor.init_network()

        xor.patterns = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
        self.xor = xor
Developer: xieyanfu, Project: mlp-2, Lines of code: 10, Source: xor_test.py

Example 8: test_xor

    def test_xor(self):
        xor = MLP()
        xor.add_layer(Layer(2))
        xor.add_layer(Layer(2))
        xor.add_layer(Layer(1))

        xor.init_network()

        xor_patterns = [
            ([0, 0], [0]),
            ([0, 1], [1]),
            ([1, 0], [1]),
            ([1, 1], [0]),
        ]

        xor.train(xor_patterns)
        for inp, outp in xor_patterns:
            self.assertEqual(xor.run(inp), outp)
Developer: jozo-styrak, Project: mlp, Lines of code: 18, Source: xor_test.py

Example 9: __init__

class CWS:
    def __init__(self, s):
        self.mlp = MLP(s['ne'], s['de'], s['win'], s['nh'], 4, s['L2_reg'], np.random.RandomState(s['seed']))
        self.s = s

    def fit(self, lex, label):
        s = self.s
        n_sentences = len(lex)
        n_train = int(n_sentences * (1. - s['valid_size']))
        s['clr'] = s['lr']
        best_f = 0
        for e in xrange(s['n_epochs']):
            shuffle([lex, label], s['seed'])
            train_lex, valid_lex = lex[:n_train], lex[n_train:]
            train_label, valid_label = label[:n_train], label[n_train:]
            tic = time.time()
            cost = 0
            for i in xrange(n_train):
                if len(train_lex[i]) == 2: continue
                words = np.asarray(contextwin(train_lex[i], s['win']), dtype='int32')
                labels = [0] + train_label[i] + [0]
                y_pred = self.mlp.predict(words)
                cost += self.mlp.fit(words, [0]+y_pred, [0]+labels, s['clr'])
                self.mlp.normalize()
                if s['verbose']:
                    print '[learning] epoch %i >> %2.2f%%' % (e+1, (i+1)*100./n_train), 'completed in %s << \r' % time_format(time.time() - tic),
                    sys.stdout.flush()
            print '[learning] epoch %i >> cost = %f' % (e+1, cost / n_train), ', %s used' % time_format(time.time() - tic)
            pred_y = self.predict(valid_lex)
            p, r, f = evaluate(pred_y, valid_label)
            print '           P: %2.2f%% R: %2.2f%% F: %2.2f%%' % (p*100., r*100., f*100.)
            '''
            if f > best_f:
                best_f = f
                self.save()
            '''

    def predict(self, lex):
        s = self.s
        y = [self.mlp.predict(np.asarray(contextwin(x, s['win'])).astype('int32'))[1:-1] for x in lex]
        return y

    def save(self):
        if not os.path.exists('params'): os.mkdir('params')
        self.mlp.save()

    def load(self):
        self.mlp.load()
Developer: zbxzc35, Project: cws, Lines of code: 48, Source: cws.py
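contextwin is also not included in the excerpt. In segmentation and tagging code of this style it typically turns a sentence (a list of word indices) into one fixed-size window per position, padded with -1 at the sentence borders; a sketch under that assumption:

def contextwin(l, win):
    # win must be odd: each position gets a window of `win` indices centred on
    # it, with -1 used as the padding index beyond the sentence boundaries.
    assert win % 2 == 1
    l = list(l)
    padded = [-1] * (win // 2) + l + [-1] * (win // 2)
    return [padded[i:i + win] for i in range(len(l))]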

Example 10: main

def main():
  training, dev = get_data()
  window_size = 5
  n_input = window_size
  n_hidden = 100
  n_output = 1
  A = 1
  num_hidden_layers = 1
  mlp = MLP(n_input, num_hidden_layers, n_hidden, n_output)
  n_epochs = 50
  step = False
  l = loss(mlp, training, window_size, window_size/2)
  print "initial loss: " + str(l)
  for j in range(0, n_epochs):
    print "epoch " + str(j)
    random.shuffle(training)
    c = 0
    for xs, y in training:
      if c == 10:
        break
      c += 1
      if step:
        train(mlp, xs, y, window_size, window_size/2)
      else:
        train(mlp, xs, y, window_size, 1)
    if step:
      error(mlp, training, window_size, window_size/2)
    else:
      error(mlp, training, window_size, 1) 
    if step:
      l = loss(mlp, training, window_size, window_size/2)
    else:
      l = loss(mlp, training, window_size, 1)
    print "loss: " + str(l)
    eta = A / float(j/float(n_epochs) + 1)
    mlp.eta = eta
    print "lr:", mlp.eta

  print "Getting Dev Accuracy..." 
  if step:
    error(mlp, dev, window_size, window_size/2)
  else:
    error(mlp, dev, window_size, 1)
Developer: d-unknown-processor, Project: mlcd_proj, Lines of code: 43, Source: density_demo.py
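The schedule eta = A / (j/n_epochs + 1) decays the learning rate from A after the first epoch toward A/2 by the last; with A = 1 and n_epochs = 50 as above:

A, n_epochs = 1, 50
for j in (0, 24, 49):
    eta = A / float(j / float(n_epochs) + 1)
    print(j, round(eta, 3))   # 0 1.0, 24 0.676, 49 0.505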

Example 11: MLP_VAD

class MLP_VAD(object):
    def __init__(self, model_file):
        rng = np.random.RandomState(1234)

        self.x = T.matrix('x')

        self.classifier = MLP(
            rng=rng,
            input=self.x,
            n_in=200,
            n_hidden=180,
            n_out=2
        )

        self.classifier.load_model(model_file)

    def classify(self, fs, sig):
        if fs != SAMPLE_RATE:
            sig = downsample(fs, sig)

        num_samples = int(WINDOW_SIZE * SAMPLE_RATE)
        num_frames = len(sig)/num_samples
        sig = sig[0:num_frames*num_samples].reshape((num_frames, num_samples))
        sig = sig * np.hamming(num_samples)
        spec = np.abs(np.fft.fft(sig)) # spectrum of signal

        shared_x = theano.shared(np.asarray(spec, dtype=theano.config.floatX), borrow=True)

        index = T.lscalar()  # index to a [mini]batch

        predict_model = theano.function(
            inputs=[index],
            outputs=self.classifier.y_pred,
            givens={
                self.x: shared_x[index:index + 1],
            }
        )

        # classify each frame
        predicted_values = [predict_model(i)[0] for i in xrange(num_frames)]
        return np.asarray(predicted_values)
Developer: braindead, Project: nnvad, Lines of code: 41, Source: mlp_vad.py
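A usage sketch, assuming a previously trained model file and a WAV recording read with scipy (both file names below are placeholders):

from scipy.io import wavfile

fs, sig = wavfile.read('speech.wav')       # placeholder audio file
vad = MLP_VAD('vad_model_params')          # placeholder model file for load_model
frame_labels = vad.classify(fs, sig)       # one class index (0 or 1) per analysis frame
print('fraction of frames assigned class 1: %.2f' % frame_labels.mean())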

Example 12: __init__

 def __init__(self,input_size,output_size,n_hidden=500,learning_rate=0.01, 
         L1_reg=0.00, L2_reg=0.0001, 
         n_epochs=1000,batch_size=20):
     self.learning_rate = learning_rate
     self.L1_reg = L1_reg
     self.L2_reg = L2_reg
     self.n_epochs = n_epochs
     self.batch_size=batch_size
     self.n_hidden = n_hidden
     self.x = T.matrix('x')      
     self.mlp =  MLP(input = self.x, n_in = input_size, \
                  n_hidden = n_hidden, n_out = output_size)
Developer: swarbrickjones, Project: easyNN, Lines of code: 12, Source: mLPClassifier.py

Example 13: test_add_layer

    def test_add_layer(self):
        a = MLP()
        with self.assertRaises(AssertionError):
            a.add_layer('')

        a.add_layer(Layer(1))
        a.add_layer(Layer(2))
        a.add_layer(Layer(3))
        self.assertEqual(len(a.layers), 3)
        for l in a.layers:
            self.assertIsInstance(l, Layer)
Developer: jozo-styrak, Project: mlp, Lines of code: 11, Source: mlp_test.py

Example 14: testMLP

	def testMLP(self):
		'''
		Using MLP of one hidden layer and one softmax layer
		'''
		conf_filename = './snippet_mlp.conf'
		start_time = time.time()
		configer = MLPConfiger(conf_filename)
		mlpnet = MLP(configer, verbose=True)
		end_time = time.time()
		pprint('Time used to build the architecture of MLP: %f seconds' % (end_time-start_time))
		# Training
		start_time = time.time()
		for i in xrange(configer.nepoch):
			cost, accuracy = mlpnet.train(self.snippet_train_set, self.snippet_train_label)
			pprint('epoch %d, cost = %f, accuracy = %f' % (i, cost, accuracy))
		end_time = time.time()
		pprint('Time used for training MLP network on Snippet task: %f minutes' % ((end_time-start_time)/60))
		# Test
		test_size = self.snippet_test_label.shape[0]
		prediction = mlpnet.predict(self.snippet_test_set)
		accuracy = np.sum(prediction == self.snippet_test_label) / float(test_size)
		pprint('Test accuracy: %f' % accuracy)
Developer: appscluster, Project: sentiment-CNN, Lines of code: 22, Source: exp_snippet.py

Example 15: __init__

    def __init__(self, model_file):
        rng = np.random.RandomState(1234)

        self.x = T.matrix('x')

        self.classifier = MLP(
            rng=rng,
            input=self.x,
            n_in=200,
            n_hidden=180,
            n_out=2
        )

        self.classifier.load_model(model_file)
Developer: braindead, Project: nnvad, Lines of code: 14, Source: mlp_vad.py


Note: The mlp.MLP class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow the corresponding projects' licenses. Please do not reproduce without permission.