This article collects typical usage examples of the Python method sklearn.decomposition.PCA.predict. If you are wondering what PCA.predict does, how to use it, or where to find examples of it, the curated code samples below may help. You can also read more about the class this method belongs to, sklearn.decomposition.PCA.
Four code examples of PCA.predict are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
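As context for the examples below: scikit-learn's PCA itself exposes fit, transform, fit_transform, and score_samples, but no predict method, so code that appears to call PCA.predict is typically a custom wrapper or relies on transform. Here is a minimal, self-contained sketch of the PCA calls the examples actually depend on, using synthetic data:

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).rand(20, 5)  # 20 synthetic samples, 5 features
pca = PCA(n_components=2)
pca.fit(X)                                # learn the principal components
Z = pca.transform(X)                      # project samples onto the components
print(Z.shape)                            # (20, 2)
print(pca.explained_variance_ratio_)      # variance captured by each component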
Example 1: RegressionDriver
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import predict [as alias]
class RegressionDriver(BaseDriver):

    def __init__(self):
        super(RegressionDriver, self).__init__()
        if REGRESSOR == "LOG":
            self.driver = LogisticRegression()
        elif REGRESSOR == "RFR":
            self.driver = RandomForestRegressor(n_estimators=N_ESTIMATORS, n_jobs=N_JOBS)
        elif REGRESSOR == "GBR":
            self.driver = GradientBoostingClassifier(n_estimators=300, max_depth=5, learning_rate=0.05)
        elif REGRESSOR == "PCA":
            self.driver = PCA(n_components=1)
        else:
            raise Exception("Regressor: %s not supported." % REGRESSOR)
        genuineX = []
        forgeryX = []
        genuineY = []
        forgeryY = []
        # Training process
        for sigs in self.train_set:
            personTrain = PersonTraining(sigs)
            genuine, forgery = personTrain.calc_train_set()
            genuineX.extend(genuine)
            forgeryX.extend(forgery)
        # To adjust the PCA result, 0 means genuine and 1 means forgery
        genuineY = [0.0] * len(genuineX)
        forgeryY = [1.0] * len(forgeryX)
        trainX = genuineX + forgeryX
        trainY = genuineY + forgeryY
        self.driver.fit(trainX, trainY)
    def test(self):
        LOGGER.info("Start test")
        count = 1
        test_set = self.test_set
        if TRAIN_SET_INCLUDE:
            test_set.extend(self.train_set)
        forgery_test_result = []
        genuine_test_result = []
        random_test_result = []
        genuine_test_dis = []
        forgery_test_dis = []
        falseRejectCount = 0
        falseAcceptSkillCount = 0
        falseAcceptRandomCount = 0
        for i in range(len(test_set)):
            one_test_set = test_set[i]
            LOGGER.info("Test signature: %d" % count)
            count += 1
            personTest = PersonTest(one_test_set["genuine"][0:REF_COUNT])
            genuine_set = one_test_set["genuine"][REF_COUNT:]
            forgery_set = one_test_set["forgery"]
            random_set = []
            for j in range(len(genuine_set)):
                sig = genuine_set[j]
                dis = personTest.calc_dis(sig)
                if REGRESSOR == "PCA":
                    res = self.driver.transform(dis)
                    res = res.tolist()[0][0]
                else:
                    res = self.driver.predict(dis)
                    res = res.tolist()[0]
                genuine_test_dis.append(res)
                LOGGER.info("Genuine Test: Result: %s, %s" % (res, dis))
                genuine_test_result.append(res)
                if res > 0.5:
                    LOGGER.fatal("FalseReject: uid: %d, sid: %d" % (i, j))
                    falseRejectCount += 1
            for j in range(len(forgery_set)):
                sig = forgery_set[j]
                dis = personTest.calc_dis(sig)
                if REGRESSOR == "PCA":
                    res = self.driver.transform(dis)
                    res = res.tolist()[0][0]
                else:
                    res = self.driver.predict(dis)
                    res = res.tolist()[0]
                forgery_test_dis.append(res)
                LOGGER.info("Forgery Test: Result: %s, %s" % (res, dis))
                forgery_test_result.append(res)
                if res <= 0.5:
                    LOGGER.fatal("FalseAccept: uid: %d, sid: %d" % (i, j))
                    falseAcceptSkillCount += 1
            if RANDOM_FORGERY_INCLUDE:
                for j in range(len(test_set)):
                    if i == j:
                        continue
                    random_set.extend(test_set[j]["genuine"])
#......... the rest of this example is omitted .........
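A self-contained sketch of the REGRESSOR == "PCA" branch above, using synthetic stand-ins for the distance vectors produced by PersonTraining/PersonTest (hypothetical data, not the original features). It also shows why the 0.5 threshold is fragile: PCA.fit ignores the 0/1 labels, and the sign and scale of a 1-component projection are arbitrary.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(42)
genuineX = rng.normal(0.0, 0.1, size=(50, 4))  # stand-in genuine distance vectors
forgeryX = rng.normal(1.0, 0.1, size=(50, 4))  # stand-in forgery distance vectors

driver = PCA(n_components=1)
driver.fit(np.vstack([genuineX, forgeryX]))    # unsupervised: labels play no role

dis = forgeryX[0:1]                            # one test vector, kept 2-D
res = driver.transform(dis).tolist()[0][0]     # scalar projection, as in test()
print("forgery" if res > 0.5 else "genuine", res)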
Example 2: KNN_A
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import predict [as alias]
def KNN_A(rootdir, posdir, posnum, negnum_p):
    pos = []
    neg = []
    pathpos = []
    pathneg = []
    folders = []
    imgspos = []
    imgsneg = []
    with open('list.txt', 'r') as f:
        for line in f:
            folders.append(line.strip())
    gbf = igbf.GABOR_FEAT()
    for folder in folders:
        fname = os.path.join(rootdir, folder)
        if folder == posdir:
            fvs, imgs = gbf.gen_folder(fname, posnum)
            if fvs is None:
                print('pos None ', fname)
                continue
            pos.extend(fvs)
            imgspos.extend(imgs)
            pathpos.extend([folder for k in range(len(fvs))])
        else:
            fvs, imgs = gbf.gen_folder(fname, negnum_p)
            if fvs is None:
                print('neg None ', fname)
                continue
            neg.extend(fvs)
            imgsneg.extend(imgs)
            pathneg.extend([folder for k in range(len(fvs))])
    label0 = [0 for k in range(len(pos))]
    label1 = [1 for k in range(len(neg))]
    samples = np.array(pos + neg)
    labels = np.array(label0 + label1)
    paths = pathpos + pathneg
    imgs = imgspos + imgsneg
    clf = PCA(100)
    print('before pca : ', samples.shape)
    samples = clf.fit_transform(samples)
    print('after pca : ', samples.shape)
    if 0:  # disabled branch: supervised k-NN classification
        clf = KNeighborsClassifier(5)
        clf.fit(samples, labels)
        res = []
        for k in range(samples.shape[0]):
            # predict expects a 2-D array, so keep the row as shape (1, n_features)
            prd = clf.predict(samples[k:k + 1, :])
            res.append((paths[k], prd))
        res = sorted(res, key=lambda k: k[0])
        line = ""
        for path, prd in res:
            line += path + ' ' + str(prd) + '\n'
        with open('result.txt', 'w') as f:
            f.writelines(line)
    else:  # active branch: unsupervised 5-nearest-neighbor lookup
        clf = NearestNeighbors(5).fit(samples)
        dists, idxs = clf.kneighbors(samples, 5)
        line = ""
        for k in range(len(idxs)):
            for j in range(len(idxs[k])):
                line += paths[idxs[k][j]] + ' '
            line += '\n'
        with open('result.txt', 'w') as f:
            f.writelines(line)
    return
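A minimal sketch of the PCA-then-k-NN pattern used in KNN_A, on synthetic data (the real vectors come from igbf.GABOR_FEAT, which is not shown here). Reducing the dimensionality before the neighbor search makes the distance computations cheaper without changing the overall flow:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors

rng = np.random.RandomState(0)
samples = rng.rand(200, 500)              # 200 synthetic Gabor-like vectors
labels = rng.randint(0, 2, size=200)      # binary pos/neg labels

samples = PCA(n_components=100).fit_transform(samples)  # 500 -> 100 dims

clf = KNeighborsClassifier(n_neighbors=5).fit(samples, labels)
print(clf.predict(samples[:3]))           # predict takes a 2-D batch of rows

nn = NearestNeighbors(n_neighbors=5).fit(samples)
dists, idxs = nn.kneighbors(samples[:3])  # 5 nearest neighbors per query row
print(idxs.shape)                         # (3, 5)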
Example 3: float
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import predict [as alias]
train_predict = clf.predict(train_data)
eval_predict = clf.predict(eval_data)
eval_result = np.sum(eval_predict == eval_label) / float(eval_label.shape[0])
train_result = np.sum(train_predict == train_label) / float(train_label.shape[0])
print(eval_predict)
print(eval_result)
print(train_result)
input()  # pause until the user presses Enter
feature = 41
pca = PCA(n_components=41, whiten=True)
pca.fit(train_data, train_label)  # PCA.fit ignores the label argument
print("rf done")
out = pca.predict(eval_data)  # NB: sklearn's PCA defines no predict(); this call only works with a custom wrapper
print(np.sum(out == eval_label) / float(eval_label.shape[0]))
input()
matrix = np.ndarray([SIZE, feature])
for i in range(data.shape[0]):
    data_T = np.reshape(data[i], [1, -1])
    matrix[i] = pca.transform(data_T)
data_length = data.shape[0]
f = open(FILENAME, mode="w+")
for x in range(data_length):
    info = []
    str_label = str(label[x]) + " "
    info.append(str_label)
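PCA.transform accepts a full 2-D (n_samples, n_features) array, so the per-row loop near the end of this example can be collapsed into a single vectorized call. A self-contained sketch on synthetic data (the array shapes here are assumptions, not the original's):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
data = rng.rand(100, 8, 8)                       # e.g. 100 images of 8x8 pixels
flat = data.reshape(data.shape[0], -1)           # (100, 64) feature matrix
pca = PCA(n_components=16, whiten=True).fit(flat)
matrix = pca.transform(flat)                     # one call instead of a Python loop
print(matrix.shape)                              # (100, 16)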
Example 4: FeatureExtractor
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import predict [as alias]
#......... code before this point is omitted .........
        n_jobs : int
            Number of CPU cores used for training; -1 means use all cores.
        """
        sift = SIFT_create()
        descs = np.array([sift.detectAndCompute(img, None)[1] for img in images])
        # Sometimes a descriptor is None; turn it into an np.ndarray.
        descs = [d if isinstance(d, np.ndarray) else
                 np.array([]).reshape(0, 128).astype('float32') for d in descs]
        # Store the trained clustering model in self.red
        self.red = KMeans(n_clusters=n_clusters, n_jobs=n_jobs,
                          random_state=42).fit(np.vstack(descs))

    def sift_extract(self, image):
        """
        Extract a feature vector from the given image using SIFT. The feature
        extractor must be initialized before calling this method.

        Parameters
        ----------
        image : 2-D numpy array
            A grayscale image.

        Returns
        -------
        1-D numpy array
            The feature vector of the image.
        """
        assert self.red, "self.red should be initialized!"
        n_clusters = self.red.n_clusters  # number of clusters
        features = np.zeros(n_clusters)   # the extracted feature vector
        sift = SIFT_create()
        descriptors = sift.detectAndCompute(image, None)[1]
        if descriptors is None:  # if no descriptor is found, return an all-zero array
            return features
        y = self.red.predict(descriptors)  # assign each descriptor to a cluster
        features[list(set(y))] = 1         # mark the clusters that occur in the image
        return features

    def lbp_train(self, images, n_components=0.95):
        """
        Train the feature extractor (i.e. the PCA) for LBP features.

        Each image is split evenly into 6 regions (top-left, top-right,
        middle-left, middle-right, bottom-left, bottom-right). LBP features
        are computed for each region, a 256-bin histogram is built from the
        frequencies of those features, and the 6 histograms are concatenated
        into a 1536-dimensional feature vector. Because that dimensionality
        is high, PCA is used to reduce it, without sacrificing accuracy, to
        speed up later computation. This method trains that PCA so the
        1536-dimensional vectors can be mapped to a lower-dimensional space.

        Parameters
        ----------
        images : list
            The collection of training images. Each image is a 2-D numpy
            array (grayscale).
        n_components : int or float
            The number of features to keep. An integer greater than 1 keeps
            n_components features; a float smaller than 1 keeps enough
            components to explain that fraction of the variance.
        """
        X = np.array([]).reshape(0, 1536)
        for img in images:
            height, width = img.shape
            w = width // 2
            h = height // 3
            feature = np.array([])
            # Split the image into 6 regions and compute the LBP feature of each
            for i in range(2):
                for j in range(3):
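The excerpt above is truncated just as the region loop begins. As a hedged illustration of the 6-region LBP histogram the docstring describes, here is a self-contained sketch that assumes skimage's local_binary_pattern as the LBP routine (the original's LBP implementation is not shown):

import numpy as np
from skimage.feature import local_binary_pattern

def lbp_feature_sketch(img):
    """Concatenate 256-bin LBP histograms of 6 image regions (2 cols x 3 rows)."""
    height, width = img.shape
    w, h = width // 2, height // 3
    feature = np.array([])
    for i in range(2):                # two columns: left / right
        for j in range(3):            # three rows: top / middle / bottom
            region = img[j * h:(j + 1) * h, i * w:(i + 1) * w]
            lbp = local_binary_pattern(region, P=8, R=1)   # codes in [0, 255]
            hist, _ = np.histogram(lbp, bins=256, range=(0, 256))
            feature = np.concatenate([feature, hist])
    return feature                    # shape: (1536,)

print(lbp_feature_sketch(np.random.RandomState(0).rand(60, 40)).shape)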