

Python MLP.fit Method Code Examples

This article collects typical usage examples of the mlp.MLP.fit method in Python. If you are struggling with questions such as how exactly to use MLP.fit, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, mlp.MLP.


Four code examples of the MLP.fit method are shown below, sorted by popularity by default.
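
Before the examples, here is a minimal sketch of the common calling pattern. Note that mlp is a project-specific module rather than a published library, so the constructor arguments below simply mirror those used in Example 4; their exact meaning depends on that project's MLP implementation and is an assumption for any other project.

# Minimal usage sketch, assuming an mlp module whose MLP class exposes a
# scikit-learn-style fit/predict interface, as in Example 4 below.
import numpy as np
from mlp import MLP

X = np.random.rand(100, 4)           # 100 samples, 4 features
y = np.random.randint(0, 3, 100)     # 3 integer class labels per sample

classifier = MLP((3, 3), 0.2, 30)    # constructor arguments as used in Example 4 below
classifier.fit(X, y)
predictions = classifier.predict(X)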

Example 1: fit_model

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import fit [as alias]
def fit_model(self, X, Y, num_classes):
  if self.modeltype == "mlp" or self.modeltype == "rnn":
    if self.modeltype == "mlp":
      classifier = MLP(self.input_size, self.hidden_sizes, num_classes)
    else:
      classifier = RNN(self.input_size, self.hidden_size, num_classes)
    train_func = classifier.get_train_func(self.learning_rate)
    for num_iter in range(self.max_iter):
      for x, y in zip(X, Y):
        train_func(x, y)
  elif self.modeltype == "lstm":
    classifier = Sequential()
    classifier.add(LSTM(input_dim=self.input_size, output_dim=self.input_size/2))
    #classifier.add(Dropout(0.3))
    classifier.add(Dense(num_classes, activation='softmax'))
    classifier.compile(loss='categorical_crossentropy', optimizer='adam')
    Y_indexed = numpy.zeros((len(Y), num_classes))
    for i in range(len(Y)):
      Y_indexed[i][Y[i]] = 1
    classifier.fit(X, Y_indexed, nb_epoch=20)
  return classifier
Developer ID: edvisees, Project: exp-parser, Lines of code: 23, Source file: nn_classifier.py
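
The LSTM branch of Example 1 one-hot encodes the integer labels with an explicit loop before calling Keras' fit. The same encoding can be written with numpy fancy indexing; the snippet below is an equivalent sketch and not part of the original project.

# One-hot encoding via numpy indexing (equivalent to the loop in Example 1).
import numpy as np

Y = np.asarray([0, 2, 1, 2])          # example integer class labels
num_classes = 3
Y_indexed = np.eye(num_classes)[Y]    # shape (len(Y), num_classes), a single 1 per row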

Example 2: __init__

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import fit [as alias]
class CWS:
    def __init__(self, s):
        self.mlp = MLP(s['ne'], s['de'], s['win'], s['nh'], 4, s['L2_reg'], np.random.RandomState(s['seed']))
        self.s = s

    def fit(self, lex, label):
        s = self.s
        n_sentences = len(lex)
        n_train = int(n_sentences * (1. - s['valid_size']))
        s['clr'] = s['lr']
        best_f = 0
        for e in xrange(s['n_epochs']):
            shuffle([lex, label], s['seed'])
            train_lex, valid_lex = lex[:n_train], lex[n_train:]
            train_label, valid_label = label[:n_train], label[n_train:]
            tic = time.time()
            cost = 0
            for i in xrange(n_train):
                if len(train_lex[i]) == 2: continue
                words = np.asarray(contextwin(train_lex[i], s['win']), dtype='int32')
                labels = [0] + train_label[i] + [0]
                y_pred = self.mlp.predict(words)
                cost += self.mlp.fit(words, [0] + y_pred, [0] + labels, s['clr'])
                self.mlp.normalize()
                if s['verbose']:
                    print '[learning] epoch %i >> %2.2f%%' % (e+1, (i+1)*100./n_train), 'completed in %s << \r' % time_format(time.time() - tic),
                    sys.stdout.flush()
            print '[learning] epoch %i >> cost = %f' % (e+1, cost / n_train), ', %s used' % time_format(time.time() - tic)
            pred_y = self.predict(valid_lex)
            p, r, f = evaluate(pred_y, valid_label)
            print '           P: %2.2f%% R: %2.2f%% F: %2.2f%%' % (p*100., r*100., f*100.)
            '''
            if f > best_f:
                best_f = f
                self.save()
            '''

    def predict(self, lex):
        s = self.s
        y = [self.mlp.predict(np.asarray(contextwin(x, s['win'])).astype('int32'))[1:-1] for x in lex]
        return y

    def save(self):
        if not os.path.exists('params'): os.mkdir('params')
        self.mlp.save()

    def load(self):
        self.mlp.load()
Developer ID: zbxzc35, Project: cws, Lines of code: 50, Source file: cws.py
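
Example 2 depends on a project-specific contextwin helper that turns each sentence (a list of word indices) into per-token context windows before they are passed to MLP.predict and MLP.fit. The sketch below is a hypothetical reconstruction of such a helper, padding the sentence borders with -1; the actual implementation in the cws project may differ, and it evidently also produces windows for boundary positions, given that Example 2 pads the label sequence with a 0 at each end.

# Hypothetical sketch of a context-window helper like the one Example 2 assumes.
def contextwin(sentence, win):
    assert win % 2 == 1, "window size must be odd"
    pad = win // 2
    padded = [-1] * pad + list(sentence) + [-1] * pad
    # one window of length win centred on each token
    return [padded[i:i + win] for i in range(len(sentence))]

# contextwin([4, 8, 15], 3) -> [[-1, 4, 8], [4, 8, 15], [8, 15, -1]]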

Example 3:

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import fit [as alias]
		'mlp_{}_{}_l{}_lr{}_m{}_di{}_dh{}.npz'.format(
		model.optimization,
		model.activation,
		'-'.join(map(str, model.layers)), 
		model.learning_rate,
		model.momentum,
		model.dropout_p_input,
		model.dropout_p_hidden))

	# check if this configuration has already been tried
	if os.path.exists(fname):
		continue
	# if not continue with training
	print 'Trying the following configuration:', try_params
	t0 = time.time()
	train_cost_history = model.fit(train_x, train_y, epochs=25)
	print 'Training completed in {:.2f} sec'.format(time.time() - t0)

	# and validation
	valid_y_pred = model.predict(valid_x)
	valid_accuracy = np.sum(valid_y_pred == valid_y, dtype=np.float32) / valid_y.shape[0]
	print 'Validation accuracy: {:.2f}'.format(valid_accuracy * 100)

	if best_valid is None or best_valid < valid_accuracy:
		best_valid = valid_accuracy
		best_conf = try_params
		test_y_pred = model.predict(test_x)
		best_test = np.sum(test_y_pred == test_y, dtype=np.float32) / test_y.shape[0]

	# finally save the training costs and the validation accuracy to disk
	np.savez_compressed(fname, train_cost=train_cost_history, valid_accuracy=valid_accuracy)
Developer ID: aciccarelli, Project: DNN_Lab_UPF, Lines of code: 33, Source file: mlp_opt.py
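
Example 3 is an excerpt from a hyperparameter search loop: each configuration's training cost history and validation accuracy are written to a compressed .npz file named after its hyperparameters. The snippet below is a hedged sketch, not taken from the original script, of how those files could later be read back to find the best configuration; the glob pattern assumes the files sit in the current directory.

# Hypothetical sketch: scan the saved .npz result files for the best run.
import glob
import numpy as np

best_fname, best_acc = None, -1.0
for fname in glob.glob('mlp_*.npz'):
    with np.load(fname) as results:
        acc = float(results['valid_accuracy'])
    if acc > best_acc:
        best_fname, best_acc = fname, acc

print('Best configuration: {} (validation accuracy {:.2f}%)'.format(best_fname, best_acc * 100))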

Example 4: main

# Required import: from mlp import MLP [as alias]
# Or: from mlp.MLP import fit [as alias]
def main(argv):

    # Handle command line arguments:
    #   no arguments  - use the built-in iris data set and the default
    #                   fraction of the data for training (70%)
    #   one argument  - a number x (0 <= x < 1) giving the fraction of
    #                   the data set to use for training
    #   two arguments - the filename of a CSV data set, followed by the
    #                   training fraction as above
    tpercent = 0.7
    filename = ""

    if len(argv) == 2:
        tpercent = float(argv[1])
        # make sure it's in the legal range
        if tpercent < 0.0:
            tpercent *= -1
        while tpercent >= 1.0:
            tpercent -= 1.0

    elif len(argv) == 3:
        filename = argv[1]

        tpercent = float(argv[2])
        # make sure it's in the legal range
        if tpercent < 0.0:
            tpercent *= -1
        while tpercent >= 1.0:
            tpercent -= 1.0

    # get the time for use as a random seed
    #   this can be replaced by something else,
    #   but using the time will allow for a
    #   different shuffle each time
    seed = int(time.time())

    if filename == "":
        iris = datasets.load_iris()
        data = iris.data
        targets = iris.target

    # load from a file instead if that's the correct approach
    else:
        csv = np.genfromtxt(filename, delimiter=",")
        numcols = len(csv[0])
        data = csv[:, : numcols - 1]  # the first columns are the data
        targets = csv[:, numcols - 1]  # the last column is the targets

    # shuffle based on the time
    #   this uses the same seed (the time)
    #   in both the data and the targets so
    #   they match up after the shuffle
    np.random.seed(seed)
    np.random.shuffle(data)
    # reset the seed
    np.random.seed(seed)
    np.random.shuffle(targets)

    # determine the correct sizes of the sets
    tsize = int(tpercent * targets.size)
    psize = targets.size - tsize

    tdata = data[:tsize]
    pdata = data[tsize : tsize + psize]

    ttargets = targets[:tsize]
    ptargets = targets[tsize : tsize + psize]

    # train the classifier
    # classifier = KnnClassifier(3)
    # classifier = KNeighborsClassifier(n_neighbors=3)
    # classifier = DTreeClassifier()
    # classifier = tree.DecisionTreeClassifier()
    classifier = MLP((3, 3), 0.2, 30)
    classifier.fit(tdata, ttargets)

    # see how it did
    numcorrect = 0
    predictions = classifier.predict(pdata)
    for i in range(psize):
        if predictions[i] == ptargets[i]:
            numcorrect += 1

    percentcorrect = (numcorrect / psize) * 100.0

    print("Completed. Predicted", str(percentcorrect), "% correctly.")
Developer ID: justinrixx, Project: Classifiers, Lines of code: 92, Source file: main.py
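
Example 4 keeps the data and target arrays aligned by reseeding the random number generator before each shuffle. An equivalent idiom, shown here only as a sketch and not part of the original project, is to draw a single index permutation and apply it to both arrays.

# Shuffle data and targets in unison with one permutation of the row indices.
import numpy as np

data = np.arange(12).reshape(6, 2)
targets = np.array([0, 1, 0, 1, 0, 1])

perm = np.random.permutation(targets.size)
data, targets = data[perm], targets[perm]    # rows stay paired after the shuffle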


Note: The mlp.MLP.fit examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.