

Python BernoulliRBM.transform Method Code Examples

This article collects typical usage examples of the Python method sklearn.neural_network.BernoulliRBM.transform. If you are wondering what exactly BernoulliRBM.transform does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of its containing class, sklearn.neural_network.BernoulliRBM.


The following presents 15 code examples of the BernoulliRBM.transform method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
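Before the examples, here is a minimal, self-contained sketch of the fit/transform pattern that most of them share. The random input data and the hyperparameter values below are illustrative assumptions, not values taken from any of the listed projects.

import numpy as np
from sklearn.neural_network import BernoulliRBM

# Illustrative data: BernoulliRBM expects inputs scaled to the [0, 1] range
X = np.random.RandomState(0).rand(200, 64)

rbm = BernoulliRBM(n_components=32, learning_rate=0.06, n_iter=10, random_state=0)
rbm.fit(X)            # unsupervised training; no labels are needed
H = rbm.transform(X)  # latent representation of X, shape (200, 32)
print(H.shape)

Since transform returns the hidden-unit activation probabilities, its output can be fed directly into a downstream classifier, which is exactly what several of the examples below do with LogisticRegression or an SVM.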

Example 1: testRBM

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
def testRBM():
  X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
  print(X)
  model = BernoulliRBM(n_components=2)
  model.fit(X)
  print(dir(model))
  print(model.transform(X))
  print(model.score_samples(X))
  print(model.gibbs)
Author: chrissly31415, Project: amimanera, Lines: 11, Source: plankton.py

Example 2: rbm_001

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
def rbm_001():
    s = 15
    crop = 150
    n_patches = 400000
    rf_size = 5

    train_x_crop_scale = CropScaleImageTransformer(training=True,
                                                   result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),
                                                   crop_size=crop,
                                                   scaled_size=s,
                                                   n_jobs=-1,
                                                   memmap=True)

    patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,
                                                         patch_size=rf_size,
                                                         n_jobs=-1)
    images = train_x_crop_scale.transform()
    images = images.reshape((images.shape[0], 15 * 15 * 3))

    # rbm needs inputs to be between 0 and 1
    scaler = MinMaxScaler()
    images = scaler.fit_transform(images)

    # Training takes a long time - the verbose output reports about 80 seconds per iteration, but it seems longer,
    # and that is with only the default 256 components
    rbm = BernoulliRBM(verbose=1)
    rbm.fit(images)

    train_x = rbm.transform(images)
    train_y = classes.train_solutions.data

    # 0.138 CV on 50% of the dataset
    wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 500}, n_jobs=-1)
    wrapper.cross_validation(train_x, train_y, sample=0.5, parallel_estimator=True)
Author: cyberport-kaggle, Project: galaxy-zoo, Lines: 36, Source: rbm_001.py

Example 3: test_transform

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
def test_transform():
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
    rbm1.fit(X)

    Xt1 = rbm1.transform(X)
    Xt2 = rbm1._mean_hiddens(X)

    assert_array_equal(Xt1, Xt2)
Author: amitmse, Project: scikit-learn, Lines: 11, Source: test_rbm.py

Example 4: __init__

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
class DeepRbmMnistClassifier:

    def __init__(self):
        self.n_components_first = 500
        self.n_components_second = 500
        self.n_components_third = 2000
        self.n_iter_first = 20
        self.n_iter_second = 20
        self.n_iter_third = 20
        self.learning_rate_first = 0.06
        self.learning_rate_second = 0.06
        self.learning_rate_third = 0.06
        self.verbose = True

    def label_to_feature(self,y):
        feature = [0]*10
        feature[y] = 1
        return feature

    def fit(self,X,y):
        self.rbm_1 = BernoulliRBM(verbose=self.verbose,
                            n_components=self.n_components_first,
                            n_iter=self.n_iter_first,
                            learning_rate=self.learning_rate_first)
        self.rbm_2 = BernoulliRBM(verbose=self.verbose,
                            n_components=self.n_components_second,
                            n_iter=self.n_iter_second,
                            learning_rate=self.learning_rate_second)
        self.first_pipeline = Pipeline(steps=[('rbm_1',self.rbm_1), ('rbm_2',self.rbm_2)])
        self.first_pipeline.fit(X,y)

        # TODO improve. Look at how it is done in classify
        new_features = []
        for example,label in zip(X,y):
            transformed = self.first_pipeline.transform(example)[0]
            new_features.append(np.concatenate((transformed,self.label_to_feature(label))))

        self.rbm_3 = BernoulliRBM(verbose=self.verbose,
                            n_components=self.n_components_third,
                            n_iter=self.n_iter_third,
                            learning_rate=self.learning_rate_third)
        self.rbm_3.fit(new_features,y)

    def classify(self,X):
        transformed = self.first_pipeline.transform(X)
        transformed = np.concatenate((transformed,[[0]*10]*len(transformed)),axis=1)

        # The inverse of rbm_3 to go from hidden layer to visible layer
        rbm_aux = BernoulliRBM()
        rbm_aux.intercept_hidden_ = self.rbm_3.intercept_visible_
        rbm_aux.intercept_visible_ = self.rbm_3.intercept_hidden_
        rbm_aux.components_ = np.transpose(self.rbm_3.components_)
        results = rbm_aux.transform(self.rbm_3.transform(transformed))
        results = results[:,-10:]
        return np.argmax(results,axis=1)
Author: costapt, Project: kaggle_digit_recognizer, Lines: 57, Source: deep_rbm.py

Example 5: classify

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
    def classify(self,X):
        transformed = self.first_pipeline.transform(X)
        transformed = np.concatenate((transformed,[[0]*10]*len(transformed)),axis=1)

        # The inverse of rbm_3 to go from hidden layer to visible layer
        rbm_aux = BernoulliRBM()
        rbm_aux.intercept_hidden_ = self.rbm_3.intercept_visible_
        rbm_aux.intercept_visible_ = self.rbm_3.intercept_hidden_
        rbm_aux.components_ = np.transpose(self.rbm_3.components_)
        results = rbm_aux.transform(self.rbm_3.transform(transformed))
        results = results[:,-10:]
        return np.argmax(results,axis=1)
Author: costapt, Project: kaggle_digit_recognizer, Lines: 14, Source: deep_rbm.py

Example 6: _RBM

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
    def _RBM(self, X, y):

        from sklearn.neural_network import BernoulliRBM

        # RBM used as the feature extraction method (number of components = self.k_features).
        # Applied here (after sampling) because we are creating a universal model,
        # not one specific to this dataset.
        neural_network = BernoulliRBM(n_components=self.k_features)

        neural_network.fit(X, y)
        X = neural_network.transform(X)

        self.feature_reduction_method = neural_network

        return X
Author: mikbuch, Project: pymri, Lines: 17, Source: datasets.py

Example 7: temp

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
def temp(features):
    [featuresNorm, MAX, MIN] = normalizeFeatures(features)
    [X, Y] = listOfFeatures2Matrix(featuresNorm)
    rbm = BernoulliRBM(n_components = 10, n_iter = 1000, learning_rate = 0.01,  verbose = False)
    X1 = X[0::2]
    X2 = X[1::2]
    Y1 = Y[0::2]
    Y2 = Y[1::2]    
    rbm.fit(X1,Y1)
    YY = rbm.transform(X1)

    for i in range(10):plt.plot(YY[i,:],'r')
    for i in range(10):plt.plot(YY[i+10,:],'g')
    for i in range(10):plt.plot(YY[i+20,:],'b')
    plt.show()
Author: NathanYC, Project: pyImageClassification, Lines: 17, Source: train.py

Example 8: trainRBM_SVM

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
def trainRBM_SVM(features, Cparam, nComponents):
    [X, Y] = listOfFeatures2Matrix(features)
    rbm = BernoulliRBM(n_components = nComponents, n_iter = 30, learning_rate = 0.2,  verbose = True)
    rbm.fit(X,Y)
    newX = rbm.transform(X)
#    colors = ["r","g","b"]
#    for i in range(1,Y.shape[0],5):
#        plt.plot(newX[i,:], colors[int(Y[i])])
#    plt.show()

    classifier = {}
    classifier["rbm"] = rbm    
    svm = sklearn.svm.SVC(C = Cparam, kernel = 'linear',  probability = True)        
    svm.fit(newX,Y)

    classifier["svm"] = svm

    return classifier    
Author: NathanYC, Project: pyImageClassification, Lines: 20, Source: train.py

Example 9: BoWFeature

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
class BoWFeature(BaseEstimator, TransformerMixin):
    def __init__(self, patch_num=10000, patch_size=(8, 8), sample_num = 300,\
                n_components=256, learning_rate=0.03, n_iter=100, batch_size=100):
        self.patch_num = patch_num
        self.patch_size = patch_size
        self.sample_num = sample_num
        
        self.n_components = n_components
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.batch_size = batch_size

    
    def fit(self, X, y=None):
        num = self.patch_num // X.size
        data = []
        for item in X:
            img = imread(str(item[0]))
            img = img_as_ubyte(rgb2gray(img))
            #img = self.binary(img)  # binarize the image
            tmp = extract_patches_2d(img, self.patch_size, max_patches = num,\
                                    random_state=np.random.RandomState())
            data.append(tmp)
        
        data = np.vstack(data)
        data = data.reshape(data.shape[0], -1)
        data = np.asarray(data, 'float32')
        
        # after binarization this 0-1 scaling would not be needed
        data = data - np.min(data, 0)
        data = data/(np.max(data, 0) + 0.0001)  # 0-1 scaling
        
        self.rbm = BernoulliRBM(n_components=self.n_components,\
                        learning_rate=self.learning_rate, \
                        n_iter=self.n_iter,\
                        batch_size=self.batch_size,\
                        verbose=True)
        self.rbm.fit(data)
        return self
    
    def transform(self, X):
        results = []
        for sample in X:
            img = imread(str(sample[0]))
            img = img_as_ubyte(rgb2gray(img))
            #img = self.binary(img)
            patches = extract_patches_2d(img, self.patch_size,\
                                         max_patches = self.sample_num,\
                                         random_state=np.random.RandomState())
            
            patches = patches.reshape(patches.shape[0], -1)
            patches = np.asarray(patches, 'float32')
            
            patches = patches-np.min(patches, 0)
            patches = patches/(np.max(patches, 0) + 0.0001)

            patches = self.rbm.transform(patches)
            results.append(patches.sum(axis=0))
        return np.vstack(results)
    
    def get_params(self, deep=True):
        return {"patch_num": self.patch_num,
                "sample_num":self.sample_num,
                "patch_size":self.patch_size,
                "learning_rate":self.learning_rate,
                "n_components":self.n_components,
                "n_iter":self.n_iter,
                "batch_size":self.batch_size}
    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            self.__setattr__(parameter, value)
        return self
        
    def binary(self, img):
        edge = sobel(img)
        thresh = threshold_otsu(edge)
        edge = edge>=thresh
        return edge.astype(np.int)
Author: AI42, Project: CNN-detection-tracking, Lines: 80, Source: rbm.py

Example 10: PLDA

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
# ====== plda ====== #
plda = PLDA(n_phi=NUM_DIM, random_state=SEED)
plda.fit(X_train, y_train)
X_train_plda = plda.predict_log_proba(X_train)
X_score_plda = plda.predict_log_proba(X_score)
# ====== gmm ====== #
gmm = GaussianMixture(n_components=NUM_DIM, max_iter=100, covariance_type='full',
                      random_state=SEED)
gmm.fit(X_train)
X_train_gmm = gmm._estimate_weighted_log_prob(X_train)
X_score_gmm = gmm._estimate_weighted_log_prob(X_score)
# ====== rbm ====== #
rbm = BernoulliRBM(n_components=NUM_DIM, batch_size=8, learning_rate=0.0008,
                   n_iter=8, verbose=2, random_state=SEED)
rbm.fit(X_train)
X_train_rbm = rbm.transform(X_train)
X_score_rbm = rbm.transform(X_score)
# ===========================================================================
# Deep Learning
# ===========================================================================

# ===========================================================================
# Visualize
# ===========================================================================
def plot(train, score, title, applying_pca=False):
  if applying_pca:
    pca = PCA(n_components=NUM_DIM)
    pca.fit(train)
    train = pca.transform(train)
    score = pca.transform(score)
  plot_figure(nrow=6, ncol=12)
Author: imito, Project: odin, Lines: 33, Source: iris_latent_space.py

Example 11: RBMtest01

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
def RBMtest01():
	# Use an RBM for non-linear feature extraction.
	# Compared with plain logistic regression, RBM features can improve classification accuracy.

	import numpy as np
	import matplotlib.pyplot as plt

	from scipy.ndimage import convolve
	from sklearn import linear_model, datasets, metrics
	from sklearn.cross_validation import train_test_split
	from sklearn.neural_network import BernoulliRBM
	from sklearn.pipeline import Pipeline

	def nudge_dataset(X, Y):
		direction_vectors = [
			[[0, 1, 0],
			 [0, 0, 0],
			 [0, 0, 0]],

			[[0, 0, 0],
			 [1, 0, 0],
			 [0, 0, 0]],

			[[0, 0, 0],
			 [0, 0, 1],
			 [0, 0, 0]],

			[[0, 0, 0],
			 [0, 0, 0],
			 [0, 1, 0]]
		]

		shift = lambda x, w: convolve(x.reshape((8, 8)), mode = 'constant', weights = w).ravel()

		X = np.concatenate([X] + [np.apply_along_axis(shift, 1, X, vector) for vector in direction_vectors])
		Y = np.concatenate([Y for _ in range(5)], axis = 0)

		return X, Y

	digits = datasets.load_digits()
	X = np.asarray(digits.data, 'float32')  # just a dtype conversion (list to float32 array)

	X, Y = nudge_dataset(X, digits.target)  # effectively regenerates X, Y at 5x the original size

	#print np.max(X, 0)
	#print np.min(X, 0)
	X = (X - np.min(X, 0)) / (np.max(X, 0) - np.min(X, 0) + 0.0001)  # 0-1 scaling (each feature is normalized independently)

	X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)


	print(set(Y_train))

	# Build the models
	logistic = linear_model.LogisticRegression()
	rbm = BernoulliRBM(random_state = 0, verbose = True)

	# The Pipeline here just chains fit/transform steps;
	# the RBM's transform output is the latent representation of the data.

	classifier = Pipeline(steps = [('rbm', rbm), ('logistic', logistic)])

	# Training
	# These hyperparameters were chosen via cross-validation (GridSearchCV)
	rbm.learning_rate = 0.06
	rbm.n_iter = 20
	rbm.n_components = 100  # the RBM learns 100 features
	logistic.C = 6000


	#rbm.fit(X_train, Y_train)
	rbm.fit(X_train)


	# Viewed in terms of dimensionality, the RBM is first trained unsupervised: it learns N representative
	# vectors from X_train, then projects the original X_train onto those N vectors to obtain a new
	# N-dimensional feature representation, similar in spirit to PCA.

	predicted_Y = rbm.transform(X_train)

	print(rbm.components_)  # rbm.components_ is a 100 x 64 matrix
	print(len(rbm.components_))
	print(len(rbm.components_[0]))

	print(predicted_Y)
	print(len(predicted_Y))
	print(len(predicted_Y[0]))
	print(len(X_train))
	print(len(X_train[0]))


	# Training the RBM-Logistic Pipeline
	# The input here is still the per-feature normalized X_train,
	# and the corresponding Y_train labels are still the digits 0-9
	print("Start Training RBM-Logistic Pipeline")
	classifier.fit(X_train, Y_train)




#.........part of the code is omitted here.........
Author: hyliu0302, Project: scikit-learn-notes, Lines: 103, Source: myScikitLearnFcns.py

Example 12: map

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
#'MVDREQLVQKARLAEQAERYDDMAAAMKNVTELNEPLSNEERNLLSVAYKNVVGARRSSWRVISSIEQKTSADGNEKKIEMVRAYREKIEKELEAVCQDVLSLLDNYLIKNCSETQYESKVFYLKMKGDYYRYLAEVATGEKRATVVESSEKAYSEAHEISKEHMQPTHPIRLGLALNYSVFYYEIQNAPEQACHLAKTAFDDAIAELDTLNEDSYKDSTLIMQLLRDNLTLWTSDQQDD',
#'MAVMAPRTLVLLLSGALALTQTWAGSHSMRYFFTSVSRPGRGEPRFIAVGYVDDTQFVRFDSDAASQRMEPRAPWIEQEGPEYWDGETRKVKAHSQTHRVDLGTLRGYYNQSEAGSHTVQRMYGCDVGSDWRFLRGYHQYAYDGKDYIALKEDLRSWTAADMAAQTTKHKWEAAHVAEQLRAYLEGTCVEWLRRYLENGKETLQRTDAPKTHMTHHAVSDHEATLRCWALSFYPAEITLTWQRDGEDQTQDTELVETRPAGDGTFQKWAAVVVPSGQEQRYTCHVQHEGLPKPLTLRWEPSSQPTIPIVGIIAGLVLFGAVITGAVVAAVMWRRKSSDRKGGSYSQAASSDSAQGSDVSL',
#'MTMDKSELVQKAKLAEQAERYDDMAAAMKAVTEQGHELSNEERNLLSVAYKNVVGARRSSWRVISSIEQKTERNEKKQQMGKEYREKIEAELQDICNDVLELLDKYLIPNATQPESKVFYLKMKGDYFRYLSEVASGDNKQTTVSNSQQAYQEAFEISKKEMQPTHPIRLGLALNFSVFYYEILNSPEKACSLAKTAFDEAIAELDTLNEESYKDSTLIMQLLRDNLTLWTSENQGDEGD',
#]

comblength = 7

X = list(map(lambda s: np.array(createAAFreqVector(s, Lmap, comblength)), seqs))
#print X

#X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001)  # 0-1 scaling
#print X.shape

rbm.fit(X)
ssss ='MAVMAPRTLVLLLSGALALTQTWAGSHSMRYFFTSVSRPGRGEPRFIAVGYVDDTQFVRFDSDAASQRMEPRAPWIEQEGPEYWDGETRKVKAHSQTHRVDLGTLRGYYNQSEAGSHTVQRMYGCDVGSDWRFLRGYHQYAYDGKDYIALKEDLRSWTAADMAAQTTKHKWEAAHVAEQLRAYLEGTCVEWLRRYLENGKETLQRTDAPKTHMTHHAVSDHEATLRCWALSFYPAEITLTWQRDGEDQTQDTELVETRPAGDGTFQKWAAVVVPSGQEQRYTCHVQHEGLPKPLTLRWEPSSQPTIPIVGIIAGLVLFGAVITGAVVAAVMWRRKSSDRKGGSYSQAASSDSAQGSDVSL'
transformedSeq = rbm.transform(np.array(createAAFreqVector(ssss,Lmap,comblength)))
print(transformedSeq)
print('len', len(transformedSeq))
# Training RBM-Logistic Pipeline
#classifier.fit(X_train, Y_train)

# Training Logistic regression
#logistic_classifier = linear_model.LogisticRegression(C=100.0)
#logistic_classifier.fit(X_train, Y_train)

###############################################################################
# Evaluation

print()

###############################################################################
Author: PurinLord, Project: DBM-s_Proteinas, Lines: 33, Source: RBMseq.py

Example 13: len

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
    x = x_all[:length_train]
    t = x_all[length_train:]

    label = np.array(label)

    length_test = len(test)

    n = label.shape[1]

    print "x shape",x.shape
    print "t shape",t.shape

    print "rbm"
    rbm  = BernoulliRBM(n_components=2000,n_iter=20,batch_size=66)
    rbm.fit(x)
    x = rbm.transform(x)
    t = rbm.transform(t)

    print "rbm x shape",x.shape
    print "rbm t shape",t.shape

    # build the result (answer) matrix
    answer = []

    

    print "开始回归"

    for i in range(n):
        print "第%s个"%(i)
        clf = linear_model.Ridge(alpha=2,fit_intercept=True,normalize=True,tol=1e-9)
Author: lavizhao, Project: sentiment, Lines: 33, Source: rbm_reg.py

Example 14: BernoulliRBM

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
#bigMatrixTrain = (bigMatrixTrain - np.min(bigMatrixTrain, 0)) / (np.max(bigMatrixTrain, 0) + 0.0001)  # 0-1 scaling
#Divide dataset for cross validation purposes
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    bigMatrixTrain, y, test_size = 0.4, random_state = 0) #fix this

# specify parameters and distributions to sample from
# Models we will use
rbm = BernoulliRBM(random_state=0, verbose=True)

#classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
rbm.learning_rate = 0.04
rbm.n_iter = 30
# More components tend to give better prediction performance, but larger fitting time
rbm.n_components = 300
X_train = rbm.fit_transform(X_train)
X_test = rbm.transform(X_test)

# Train a logistic model
print("Fitting the classifier to the training set")
logisticModel = linear_model.LogisticRegression()
t0 = time()
param_grid = {'C': [10, 30, 100, 300, 1000]}
logisticModel = GridSearchCV(logisticModel, param_grid = param_grid)
logisticModel = logisticModel.fit(X_train, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(logisticModel.best_estimator_)

#logistic.C = 6000.0

# Train a SVM classification model
Author: wacax, Project: DogsVsCats, Lines: 33, Source: CatsDogsBernoulli.py

Example 15: BernoulliRBM

# Required import: from sklearn.neural_network import BernoulliRBM [as alias]
# Or: from sklearn.neural_network.BernoulliRBM import transform [as alias]
# X_test = X_test[test_permut, :]
# y_test = y_test[test_permut]


# rbm learning
# TODO: try to find better parameters with a grid search
rbm = BernoulliRBM(random_state=0, verbose=True)
rbm.learning_rate = 0.1
rbm.n_iter = 30
rbm.n_components = 16

print(X_train)
print(X_train.shape)
rbm.fit(all_feats)
X_train = np.concatenate((rbm.transform(X_train), X_train_preserved), 1)
X_test = np.concatenate((rbm.transform(X_test), X_test_preserved), 1)
print(X_train)
print(X_train.shape)


ens_lbls = []
ens_probs = []
# iterate over classifiers
for name, clf in zip(names, classifiers):
    print "[{}] learning starting ...".format(name)
    clf.fit(X_train, y_train)
    print "[{}] learning finished".format(name)
    probs = clf.predict_proba(X_test)[:, [1]]
    dump_to_file(name+"_res_probs", ids, probs)
Author: boocheck, Project: santander, Lines: 31, Source: main.py


Note: The sklearn.neural_network.BernoulliRBM.transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code; do not republish without permission.