

Python DBN.fit Method Code Examples

This article collects typical usage examples of the DBN.fit method from the Python class nolearn.dbn.DBN. If you are wondering what DBN.fit does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, nolearn.dbn.DBN.


The sections below present 15 code examples of the DBN.fit method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
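All of the examples share one basic pattern: construct a DBN with a list of layer sizes (input width, one or more hidden layer sizes, number of output classes), choose learn_rates, learn_rate_decays and epochs, then call fit on the training arrays and predict on new data. The sketch below is a minimal illustration of that pattern, not taken from any of the examples; the data shapes and hyperparameter values are assumptions, and it requires an older nolearn release that still ships the dbn module.

# Minimal sketch of the common DBN.fit call pattern (shapes and hyperparameters are illustrative).
import numpy as np
from nolearn.dbn import DBN

X_train = np.random.rand(1000, 64)        # 1000 samples, 64 features
y_train = np.random.randint(0, 10, 1000)  # integer labels for 10 classes

dbn = DBN(
    [X_train.shape[1], 300, 10],  # input width, one 300-unit hidden layer, 10 output units
    learn_rates=0.3,
    learn_rate_decays=0.9,
    epochs=10,
    verbose=1)
dbn.fit(X_train, y_train)     # train the network

preds = dbn.predict(X_train)  # predict_proba is also available for class probabilities

Because DBN follows the familiar scikit-learn estimator interface, several of the examples below pass the fitted model straight to helpers such as classification_report, log_loss, or joblib.dump.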

Example 1: training_dbn

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def training_dbn(train_dataset, n_targets=2, learn_rates=0.3, learn_rate_decays=0.9, epochs=1000, n_hidden_layers=5, n_hidden_layer_nodes=100, 
                 verbose=True):
    layers = np.ones(n_hidden_layers, dtype=int) * n_hidden_layer_nodes
    print(layers.tolist())
    X_train, y_train = train_dataset
    ff = [X_train.shape[1]]
    ff.extend(layers.tolist())
    ff.append(n_targets)
    # Create the dbn
    clf = DBN(
              ff,
              learn_rates=learn_rates,
              learn_rate_decays=learn_rate_decays,
              epochs=epochs,
              dropouts=0.1,
              verbose=verbose)

    # Counting time for training
    start = time.time()
    clf.fit(X_train, y_train) # training
    end = time.time()

    exec_time = end - start
    print('Exec time was {} secs'.format(exec_time))
    return clf, exec_time
Developer: caiobelfort; Project: DeepLearningProject; Lines: 27; Source file: dbn.py

Example 2: train_model

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def train_model(data_set_path='/home/devin.fisher/Kingdoms/treadstone/_samples/still_data/still_training_data.pkl'):
    # data_set = None
    with open(data_set_path, 'rb') as f:
        data_set = pickle.load(f)

    # with open('/home/devin.fisher/Kingdoms/lol/still_training_data2.pkl', 'rb') as f:
    #     data_set = pickle.load(f)

    # (train_x, test_x, train_y, test_y) = train_test_split(data_set['data'], data_set['target'], test_size=0.1)

    train_x = data_set['data']
    test_x = data_set['data']
    train_y = data_set['target']
    test_y = data_set['target']

    dbn = DBN(
        [-1, 300, -1],
        learn_rates=0.3,
        learn_rate_decays=0.9,
        epochs=60,
        verbose=1)
    dbn.fit(train_x, train_y)

    joblib.dump(dbn, 'digit_model.pkl', compress=9)

    # dbn = joblib.load('digit_model.pkl')

    # compute the predictions for the test data and show a classification report
    preds = dbn.predict(test_x)
    print classification_report(test_y, preds)
Developer: devin-fisher; Project: treadstone; Lines: 32; Source file: video_still_model_builder.py

Example 3: benchmark

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def benchmark(k, epochs):
  print("*" * 80)
  print("k: %d, epochs: %d\n" % (k, epochs))

  #select = SelectKBest(score_func=chi2, k=k)
  select = TruncatedSVD(n_components=k)
  X_train_trunc = select.fit_transform(X_train, Y_train)
  X_test_trunc = select.transform(X_test)

  print('done truncating')

  clf = DBN([X_train_trunc.shape[1], k, 4], learn_rates=0.3, learn_rate_decays=0.9, epochs=epochs, verbose=1)
  clf.fit(X_train_trunc, Y_train)
  pred = clf.predict(X_test_trunc)

  if CREATE_SUBMISSION:
    X_submit_trunc = select.transform(X_submit)
    pred_submit = clf.predict(X_submit_trunc)
    dump_csv(pred_submit, k, epochs)

  score = metrics.f1_score(Y_test, pred)
  print("f1-score:   %0.3f" % score)

  print("classification report:")
  print(metrics.classification_report(Y_test, pred))

  print("confusion matrix:")
  print(metrics.confusion_matrix(Y_test, pred))
Developer: alireza-saberi; Project: Applied_MachineLearning_COMP_598_MiniProject2; Lines: 30; Source file: dbn_test.py

Example 4: main

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def main():
  data_id = 'B'
  data_path = '/broad/compbio/maxwshen/data/1-MAKETRAINTEST/complete/golf/'

  print 'train...', datetime.datetime.now()
  train_set = readin(data_id, 'train', data_path)
  print 'valid...', datetime.datetime.now()
  valid_set = readin(data_id, 'valid', data_path)
  print 'test...', datetime.datetime.now()
  test_set = readin(data_id, 'test', data_path)

  # readin() is assumed to return an (X, y) pair for each split
  dat_train, y_train = train_set
  dat_valid, y_valid = valid_set
  dat_test, y_test = test_set

  # Input to 300-node RBM to 2-node output
  dbn = DBN(
    [dat_train.shape[1], 300, 2],
    learn_rates = 5,
    learn_rate_decays = 0.9,
    epochs = 31,
    verbose = 1)
  dbn.fit(dat_train, y_train)

  preds = dbn.predict(dat_test)
  print classification_report(y_test, preds)

  # Dump to the open file handle, not the filename string
  out_fn = 'dbn.pickle'
  with open(out_fn, 'wb') as f:
    pickle.dump(dbn, f)

  return
Developer: maxwshen; Project: Kellis; Lines: 30; Source file: dbn_nolearn.py

Example 5: train

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def train(X, Y, alphabet):
    model = DBN([13, 1000, len(alphabet)],
                learn_rates=0.3,
                learn_rate_decays=0.9,
                epochs=10,
                verbose=1)

    model.fit(X, Y)
    return model
Developer: larisahax; Project: Dialect; Lines: 11; Source file: nn.py

Example 6: run

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def run():
    X_train, Y_train = load_training_data()

    X_train, Y_train = rotate_dataset(X_train, Y_train, 8)
    X_train, Y_train = nudge_dataset(X_train, Y_train)

    n_features = X_train.shape[1]
    n_classes = 10
    classifier = DBN([n_features, 8000, n_classes],
        learn_rates=0.4, learn_rate_decays=0.9, epochs=75, verbose=1)

    classifier.fit(X_train, Y_train)

    test_data = get_test_data_set()
    predictions = classifier.predict(test_data)
    write_predictions_to_csv(predictions)
Developer: bin2000; Project: kaggle-mnist-digits; Lines: 18; Source file: predict.py

Example 7: train_clf

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def train_clf(dim, X, y, classificator):
    print("Training for {} classes".format(dim[2]))
    if classificator == "DBN":
        clf = DBN(dim,
                  learn_rates=dbn_learn_rates,
                  learn_rate_decays=dbn_learn_rate_decays,
                  epochs=dbn_epochs,
                  minibatch_size=dbn_minibatch_size,
                  verbose=dbn_verbose,
                  dropouts=dbn_dropouts
              )
    elif classificator == "GaussianNB":
        clf = GaussianNB()

    clf.fit(X, y)

    return clf
Developer: presight; Project: happy-cerberus; Lines: 19; Source file: train.py

Example 8: DigitProphet

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
class DigitProphet(object):
	def __init__(self):
		# load train.csv
		# train = pd.read_csv("data/train.csv")
		# data_train=train.as_matrix()
		# values_train=data_train[:,0]
		# images_train=data_train[:,1:]
		# trainX, _trainX, trainY, _trainY = train_test_split(images_train/255.,values_train,test_size=0.5)

		# #load test.csv
		# test = pd.read_csv("data/test.csv")
		# data_test=test.as_matrix()
		# testX, _testX = train_test_split(data_test/255.,test_size=0.99)
		
		# Random Forest
		# self.clf = RandomForestClassifier()
		
		# Stochastic Gradient Descent
		# self.clf = SGDClassifier()
		
		# Support Vector Machine
		# self.clf = LinearSVC()
		
		# Nearest Neighbors
		# self.clf = KNeighborsClassifier(n_neighbors=13)
		
		
		train = pd.read_csv("data/train.csv")
		data_train=train.as_matrix()
		values_train=data_train[:,0]
		images_train=data_train[:,1:]
		trainX, _trainX, trainY, _trainY = train_test_split(images_train/255.,values_train,test_size=0.995)
		
		# Neural Network
		self.clf = DBN([trainX.shape[1], 300, 10],learn_rates=0.3,learn_rate_decays=0.9,epochs=10,verbose = 1)
		
		#Training
		self.clf.fit(trainX, trainY)
		
		pass

	def predictImage(self,array):
		image=np.atleast_2d(array)
		return self.clf.predict(image)[0]
Developer: Type-of-Python; Project: redigit; Lines: 46; Source file: clf.py

Example 9: train_dbn_dataset

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def train_dbn_dataset(dataset, x_test, y_test, alpha, nhidden, epochs, batch_size, noises=[]):
    from nolearn.dbn import DBN
    num_classes = len(set(y_test))
    print "Number of classes", num_classes
    x_train, y_train = dataset
    dbn_model = DBN([x_train.shape[1], nhidden, num_classes],
                    learn_rates = alpha,
                    learn_rate_decays = 0.9,
                    epochs = epochs,
                    verbose = 1,
                    nesterov=False,
                    minibatch_size=batch_size,
                    noises = noises)

    dbn_model.fit(x_train, y_train)
    from sklearn.metrics import classification_report, accuracy_score, roc_auc_score
    y_true, y_pred = y_test, dbn_model.predict(x_test)  # Get our predictions
    print(classification_report(y_true, y_pred))  # Per-class precision/recall/F1
    print(roc_auc_score(y_true, y_pred))  # Overall ROC AUC
    return y_pred, roc_auc_score(y_true, y_pred)
Developer: viveksck; Project: nolearn; Lines: 22; Source file: adult_dbn.py

Example 10: dbn_clf

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def dbn_clf(X, y, hidden_sizes=[300], num_epochs=10):
    """ deep belief network """
    Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.25, random_state=0)
    output_categories = np.load(os.path.join(loaddir,'submit_col_name.npy'))

    print('Start training Neural Network...')

    dbn = DBN(
        [Xtrain.shape[1]] + hidden_sizes + [len(output_categories)],
        learn_rates = 0.3,
        learn_rate_decays = 0.9,
        epochs = num_epochs,
        verbose = 1)
    dbn.fit(Xtrain, ytrain)
    
    ypred = dbn.predict_proba(Xtest)
    score = log_loss(ytest, ypred)
    print('Log loss = {}'.format(score))

    return dbn, score
Developer: wkvictor; Project: Kaggle-TalkingData; Lines: 22; Source file: train_models.py

Example 11: test

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def test(self):
    # iris = datasets.load_iris()
    # X, y = iris.data, iris.target
    X, y = self.dataMat, self.labelMat
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.6, random_state=12)
    # clf = RandomForestClassifier(max_depth=6, min_samples_split=9, min_samples_leaf=15, n_estimators=5)
    # clf = DBN([X.shape[1], 24, 2], scales=0.5, learn_rates=0.02, learn_rate_decays=0.95, learn_rate_minimums=0.001, epochs=500, l2_costs=0.02*0.031, dropouts=0.2, verbose=0)
    # cvnum = ShuffleSplit(2013, n_iter=10, test_size=0.6, train_size=0.4, random_state=0)
    for scal in arange(4.5, 5.0, 0.5):
        print "**************************************************************"
        print "DBN scal=", scal
        clf = DBN([X.shape[1], 24, 48, 2], scales=0.5, learn_rates=0.01, learn_rate_decays=0.95,
                  learn_rate_minimums=0.001, epochs=50, l2_costs=0.02*0.001, dropouts=0.0, verbose=0)
        clf.fit(X_train, y_train)
        scores = cross_val_score(clf, X, y, cv=3, scoring='roc_auc')
        y_pred = clf.predict(X_test)
        y_predprob = clf.predict_proba(X_test)
        prf = precision_recall_fscore_support(y_test, y_pred, average='binary')
        print ("Accuracy: %0.5f (+/- %0.5f)" % (scores.mean(), scores.std() * 2))
        print classification_report(y_test, y_pred)
        print 'The accuracy is: ', accuracy_score(y_test, y_pred)
        print 'The log loss is:', log_loss(y_test, y_predprob)
        print 'The ROC score is:', roc_auc_score(y_test, y_predprob[:, 1])
Developer: kevinmtian; Project: Kaggle-Contests; Lines: 24; Source file: cross_valid_NN.py

Example 12: main

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def main():
    data_fn = "/home/ec2-user/Kellis/data/bravo.formatted/dat.all.txt"
    blacklist_fn = "/home/ec2-user/Kellis/data/bravo.formatted/dat.blacklist.txt"
    y_fn = "/home/ec2-user/Kellis/data/bravo.formatted/dat.y.txt"

    data = read_delimited_txt(data_fn, "\t")
    blacklist = read_delimited_txt(blacklist_fn, "\t")
    y = read_delimited_txt(y_fn, "\t")

    # Get names and remove the first element of each row which is the row number
    names = data[0]
    data = data[1:]
    for i in range(len(data)):
        data[i] = data[i][1:]

    y = y[1:]
    for i in range(len(y)):
        y[i] = y[i][-1]
    y = convert_y_binary(y)

    # Normalizes column-wise so all values are between 0 and 1
    data = normalize_0_1(data)

    # Split into training, testing
    xtrain, xtest, ytrain, ytest = train_test_split(data, y, test_size=0.2, random_state=1)

    # Input to 300 node RBM to 2 node output
    dbn = DBN([xtrain.shape[1], 300, 2], learn_rates=5, learn_rate_decays=0.9, epochs=501, verbose=1)
    dbn.fit(xtrain, ytrain)

    preds = dbn.predict(xtest)
    print classification_report(ytest, preds)

    # Dump to the open file handle, not the filename string
    out_fn = "dbn.pickle"
    with open(out_fn, "wb") as f:
        pickle.dump(dbn, f)

    return
Developer: maxwshen; Project: Kellis; Lines: 40; Source file: dbn.py

Example 13: runOfflineML

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
def runOfflineML(y, X, classifiers, savemodel=False):
    X_train, X_test, y_train, y_test = train_test_split(X, y.astype("int0"), test_size=0.20, random_state=0)
    data = dict(x_train=X_train, x_test=X_test, y_train=y_train, y_test=y_test)
    cls_stats = initClsStats(classifiers)
    for cls_name, cls in classifiers.items():
        cls_stats[cls_name]["n_train"] = data["x_train"].shape[0]
        cls_stats[cls_name]["n_test"] = data["x_test"].shape[0]
        cls_stats[cls_name]["n_features"] = data["x_train"].shape[1]
        tick = time.time()
        if cls_name == "DBN":
            data = dataNormalise(data)
            clf = DBN([data["x_train"].shape[1], 300, 2], learn_rates=0.3, learn_rate_decays=0.9, epochs=10, verbose=1)
            clf.fit(data["x_train"], data["y_train"])
        else:
            clf = classifiers[cls_name].fit(data["x_train"], data["y_train"])
        if savemodel:
            pickle.dump(clf, open(cls_name + ".dat", "w"))
            clf = pickle.load(open(cls_name + ".dat", "r"))
        cls_stats[cls_name]["training_time"] += time.time() - tick
        # check the accuracy on the training set
        tick = time.time()
        predicted = clf.predict(data["x_test"])
        cls_stats[cls_name]["testing_time"] += time.time() - tick
        acc = metrics.accuracy_score(data["y_test"], predicted)
        cls_stats[cls_name]["accuracy"] = acc
        print cls_name, "accuracy is: " + str(acc)
        # auc = metrics.roc_auc_score(data['y_test'], probs[:, 1])
        conf_matrix = metrics.confusion_matrix(data["y_test"], predicted)
        cls_stats[cls_name]["conf_matrix"] = conf_matrix
        # print conf_matrix
        precision, recall, fscore, support = metrics.precision_recall_fscore_support(data["y_test"], predicted)
        cls_stats[cls_name]["precision"] = precision
        cls_stats[cls_name]["recall"] = recall
        cls_stats[cls_name]["fscore"] = fscore
        cls_stats[cls_name]["support"] = support
    return cls_stats
Developer: Nik0l; Project: UTemPro; Lines: 38; Source file: OfflineLearning.py

Example 14: range

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
if __name__=='__main__':
    dbn_list=[]
    for i in range(2,8):
        dat,lab=db_load(tup(i),i)
        try:
            dbn = joblib.load("pickles/dbn_"+str(tup(i))+"x"+str(i)+".pkl") 
            dbn_list.append(dbn)
        except:
            dbn = DBN(
                [i*tup(i), 400, 10],
                learn_rates = 0.3,
                learn_rate_decays = 0.9,
                epochs = 50,
                verbose = 1
                )
            dbn.fit(dat,lab)
            dbn_list.append(dbn)
            joblib.dump(dbn,"pickles/dbn_"+str(tup(i))+"x"+str(i)+".pkl")
        finally:
            #print dat.shape
            #print lab.shape
            print len(dbn_list)
            print ("trained ! ready to predict!")
            #print "training report for {}x{}:".format(tup(i),i)
            tes,labt=test_load(tup(i),i)
            preds=dbn.predict(tes)
            sampleClassificationReport=classification_report(labt,preds)
            #print sampleClassificationReport

    while(1):
        dst="."
Developer: Yami-Bitshark; Project: DBN_MULTI_RESOLUTION_DIGITS; Lines: 33; Source file: vid-multi-dbn.py

Example 15: train_test_split

# Required import: from nolearn.dbn import DBN [as alias]
# Or: from nolearn.dbn.DBN import fit [as alias]
    # Split data to train and test
    X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=TEST_SIZE, random_state=0)
    X_train = X_train.todense()
    X_test = X_test.todense()


    # Train --------------------------------------------------------------
    print "Training..."
    t1 = datetime.now()

    dbn = DBN(
        [-1, 300, 300, -1],
        learn_rates=0.1,
        learn_rate_decays=0.9,
        epochs=10,
        verbose=1)
    dbn.fit(X_train, y_train)


    print "Training %f secs" % (datetime.now() - t1).total_seconds()

    if TEST_SIZE > 0:
        tlabel = dbn.predict(X_test)
        print 'Error: %f' % error_track_0(tlabel, y_test)

    if DUMP:
        # Dump model --------------------------------------------------------------
        print "Dumping model..."
        joblib.dump(dbn, '../model/deep/%s.pkl' % MODEL_NAME)
Developer: bingo4508; Project: ML-handwriting-recognition; Lines: 31; Source file: deep_learning.py


Note: The nolearn.dbn.DBN.fit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code, and do not reproduce this article without permission.