This page collects typical usage examples of the Python method facerec.model.PredictableModel.predict. If you have been wondering what PredictableModel.predict does, how to call it, or what real code using it looks like, the hand-picked examples below should help. You can also look further into usage examples of the containing class, facerec.model.PredictableModel.
The following 12 code examples of PredictableModel.predict are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
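Before diving into the examples, here is a minimal sketch of the usual PredictableModel workflow, using synthetic data purely to exercise the API. It assumes the facerec package is installed; note that the examples below reflect two slightly different facerec APIs, some treating the return value of predict() as a bare label and others unpacking a [label, classifier_output] pair.

from facerec.feature import Fisherfaces
from facerec.classifier import NearestNeighbor
from facerec.distance import EuclideanDistance
from facerec.model import PredictableModel
import numpy as np

# two tiny synthetic "subjects", four random 30x30 images each -- just enough
# to exercise the API; real code would load grayscale face images instead
X = [np.random.randint(0, 255, (30, 30)).astype(np.uint8) for _ in range(8)]
y = [0, 0, 0, 0, 1, 1, 1, 1]

feature = Fisherfaces()
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
model = PredictableModel(feature=feature, classifier=classifier)
model.compute(X, y)          # learn the feature space and fill the classifier
print(model.predict(X[0]))   # bare label (older API) or [label, classifier_output] (newer API)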
Example 1: App
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
class App(object):
    def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn=join(curpath, 'haarcascade_frontalface_alt2.xml')):
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        ret, self.frame = self.cam.read()
        self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
        # define the feature extraction chain & the classifier
        feature = ChainOperator(TanTriggsPreprocessing(), LBP())
        classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
        # build the predictable model
        self.predictor = PredictableModel(feature, classifier)
        # read the data & compute the predictor
        self.dataSet = DataSet(filename=dataset_fn, sz=self.face_sz)
        self.predictor.compute(self.dataSet.data, self.dataSet.labels)

    def run(self):
        while True:
            ret, frame = self.cam.read()
            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # get the face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.face_sz, interpolation=cv2.INTER_CUBIC)
                # get a prediction
                prediction = self.predictor.predict(face)
                # draw the face area
                cv2.rectangle(imgout, (x0,y0), (x1,y1), (0,255,0), 2)
                # draw the predicted name (the folder name...)
                draw_str(imgout, (x0-20,y0-20), self.dataSet.names[prediction])
            cv2.imshow('videofacerec', imgout)
            # get the pressed key
            ch = cv2.waitKey(10)
            if ch == 27:
                break
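A hypothetical way to start the App class above (the script's own __main__ block is not shown here); video_src=0 would select the default webcam and dataset_fn a folder-per-person image database:

if __name__ == '__main__':
    App(video_src=0, dataset_fn='/path/to/face_database').run()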
Example 2: checkFace
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
def checkFace(origin_img):
    # To do
    model = PredictableModel(Fisherfaces(), NearestNeighbor())
    result_name = 'unknown'
    [X, y, subject_names] = read_images(path)
    list_of_labels = list(xrange(max(y)+1))
    subject_dictionary = dict(zip(list_of_labels, subject_names))
    model.compute(X, y)
    gray = cv2.cvtColor(origin_img, cv2.COLOR_BGR2GRAY)
    sampleImage = cv2.resize(gray, (256,256))
    [predicted_label, generic_classifier_output] = model.predict(sampleImage)
    print [predicted_label, generic_classifier_output]
    if int(generic_classifier_output['distances']) <= 700:
        result_name = str(subject_dictionary[predicted_label])
    return result_name
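Note that converting the whole 'distances' entry with int() only works because NearestNeighbor is built with its defaults here, so k=1 and there is a single distance to the best match; with a larger k you would index the closest candidate explicitly, e.g. generic_classifier_output['distances'][0], as Example 11 below does.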
Example 3: test_one_method
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
def test_one_method(input_faces, test_faces, feature, classifier, chain=True):
    if chain:
        feature = ChainOperator(TanTriggsPreprocessing(), feature)
    model = PredictableModel(feature, classifier)
    id_list, face_list = zip(*input_faces)
    start = time.clock()
    model.compute(face_list, id_list)
    stop = time.clock()
    training_time = stop - start
    res_list = []
    start = time.clock()
    for id, image in test_faces:
        res = model.predict(image)
        res_list.append([id] + res)
    stop = time.clock()
    predict_time = stop - start
    return (training_time, predict_time, res_list)
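A hypothetical call to test_one_method(), assuming input_faces and test_faces are lists of (id, image) pairs as the zip() and the loop above imply:

from facerec.feature import Fisherfaces
from facerec.classifier import NearestNeighbor
from facerec.distance import ChiSquareDistance

training_time, predict_time, res_list = test_one_method(
    input_faces, test_faces,
    feature=Fisherfaces(),
    classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
print("trained in %.2fs, predicted %d faces in %.2fs" % (training_time, len(res_list), predict_time))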
Example 4: range
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
mod3.compute(Xtrain, ytrain)
mod4.compute(Xtrain, ytrain)
mod5.compute(Xtrain, ytrain)
mod6.compute(Xtrain, ytrain)
mod7.compute(Xtrain, ytrain)
mod8.compute(Xtrain, ytrain)
mod9.compute(Xtrain, ytrain)
mod10.compute(Xtrain, ytrain)
# For Training Size 3
p = np.array(np.ones(len(Xtest))*9, dtype=int)
count = 0
for i in range(len(Xtest)):
    d10 = mod10.predict(Xtest[i])
    if (d10[1]['distances'] < 0.33):
        count += 1
        p[i] = int(d10[0])
        # print 'mod10', (d10[1]['distances']), p[i], ytest[i]
        continue
    d9 = mod9.predict(Xtest[i])
    if (d9[1]['distances'] < 40):
        count += 1
        p[i] = int(d9[0])
        # print 'mod9', abs(d9[1]['distances']), p[i], ytest[i]
        continue
    d6 = mod6.predict(Xtest[i])
    if (abs(d6[1]['distances']) > 0.68):
        count += 1
        p[i] = int(d6[0])
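This snippet queries a fixed cascade of models, each with a threshold tuned to its own distance metric (the > 0.68 test on mod6 suggests that metric grows with similarity rather than distance); continue stops at the first model that claims the face, and any test image that falls through every check keeps the default label 9, which presumably stands for "unknown".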
Example 5: dataset
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
# load a dataset (e.g. the AT&T Facedatabase)
dataSet = DataSet("/root/libface/img/yalefaces")
# define Fisherfaces as the feature extraction method
feature = Fisherfaces()
# define a 1-NN classifier with Euclidean Distance
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# compute the model on the dataset
model.compute(dataSet.data, dataSet.labels)
# try to recognize
im = Image.open("/root/libface/img/reg.jpg")
im = im.convert("L")
ar = []
ar.append(np.asarray(im, dtype=np.uint8))
print(dataSet.names[model.predict(ar)])
# turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)
"""
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
    e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
    E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))
# plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.pdf")
# perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(dataSet.data, dataSet.labels)
print cv
"""
Example 6: int
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
rval, frame = vc.read()
img = frame
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.2, 3)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
    sampleImage = gray[y:y+h, x:x+w]
    sampleImage = cv2.resize(sampleImage, (256,256))
    # figure out whose face this is
    [predicted_label, generic_classifier_output] = model.predict(sampleImage)
    print [predicted_label, generic_classifier_output]
    # threshold chosen at 700: a higher threshold means lower accuracy, and vice versa
    if int(generic_classifier_output['distances']) <= 700:
        cv2.putText(img, 'tu sei : '+str(subject_dictionary[predicted_label]), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,250), 3, 1)
cv2.imshow('result', img)
if cv2.waitKey(10) == 27:
    break
cv2.destroyAllWindows()
vc.release()
Example 7: PredictableModel
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
model = PredictableModel(feature=feature, classifier=classifier)
# Compute the Fisherfaces on the given data (in X) and labels (in y):
# ---------------------------------------------
# print "Generating model"
if not os.path.exists("./temp/mymodel"):
    model.compute(X, y)
    save_model("./temp/mymodel", model)  # saving the model here - CHANGE THIS
    exit()
# print "loading model"
model = load_model("./temp/mymodel")
# print "loaded model"
urlForImage = sys.argv[2]
tmpfilename = "./temp/" + str(urlForImage.split('/')[-1])  # saving the image here - CHANGE THIS
urllib.urlretrieve(urlForImage, tmpfilename)
im = Image.open(tmpfilename)  # add a rotate of 90? Don't think so.
im = im.resize((648,486), Image.ANTIALIAS)
im = im.convert("L")
# print "hello", str(im.size)
im.show()
to_predict_x = np.asarray(im, dtype=np.uint8)
li = model.predict(to_predict_x)
if int(li[1]['distances']) < 10000:
    # print str(li)
    # print str(d)
    print str(d[li[0]])
    # print "Authenticated as ", str(li[0]), ":", str(d[li[0]]), " with distance : ", str(li[1]['distances'])  # set threshold as 10000
else:
    print '-1'
    # print "Could not Authenticate with distance : ", str(li[1]['distances'][0]), " for ", str(li[0]), ":", str(d[li[0]])
Example 8: Fisherfaces
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add the handler to the facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Define Fisherfaces as the feature extraction method:
feature = Fisherfaces()
# Define a 1-NN classifier with Euclidean Distance:
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# Define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# Compute the Fisherfaces on the given data (in X) and labels (in y):
model.compute(X, y)
prediction = model.predict(X)
predicted_label = prediction[0]
classifier_output = prediction[1]
distance = classifier_output['distances'][0]
print distance
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
    e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
    E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))
# Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
# Perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(X, y)
Example 9: PredictableModel
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
model = PredictableModel(feature=feature, classifier=classifier)
# Compute the Fisherfaces on the given data (in X) and labels (in y):
model.compute(X, y)
# Then turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)
# E = []
# for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
#     e = model.feature.eigenvectors[:, i].reshape(X[0].shape)
#     E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))
img_path = 'rawand1.jpg'
coverted_img_path = "temp_%s" % img_path
detect_face(img_path, outfile=coverted_img_path)
img = Image.open(coverted_img_path)
img = img.convert("L")
p = model.predict(img)[0]
label = keys[p]
print label
[X, y, keys] = read_images("../faces2/", keys=keys)
model.classifier.update(X, y)
p = model.predict(img)[0]
label = keys[p]
print label
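Note that the second prediction reuses the Fisherfaces projection learned by the original model.compute call: model.classifier.update(X, y) presumably only extends the nearest-neighbour gallery with the newly read images, which keeps the update cheap but means the added identities are matched in a feature space that was fitted without them; recomputing the whole model is the slower but more thorough alternative.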
Example 10: threshold_function
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
# threshold_lpq_normalized = threshold_function(0.67, 0.3)
threshold_lpq_chisquared = threshold_function(70, 35)
# threshold_spatial_cosine = threshold_function(0.908, 0.908)
# threshold_spatial_chisuearbrd = threshold_function()
# threshold = threshold_lpq_normalized
threshold = threshold_lpq_chisquared
# threshold = threshold_spatial_cosine
for image, id in test_list:
    target_full_name = os.path.join(test_path, image)
    prufu_mynd = utils.read_image(target_full_name)
    # prufu_mynd = fp.process_image(utils.read_image(target_full_name))
    if prufu_mynd is not None:
        res = model.predict(prufu_mynd)
        found_id = threshold(res)  # result_from_res(res)
        print found_id, ",", id
    else:
        print "Gat ekki opnað prufumynd"
"""
p1 = fp.process_image(utils.read_image("/Users/matti/Documents/forritun/att_faces/arora_01.jpg"))
p2 = utils.read_image("/Users/matti/Dropbox/Skjöl/Meistaraverkefni/server/test_faces_to_search_for/arora_01.png")
res1 = model.predict(p1)
res2 = model.predict(p2)
print res1
print res2
"""
"""
Example 11: int
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
#
# If we're learning, skip back to the top of the loop
#
continue
#
# If we don't have anything in the database, skip the recognition part
#
if X == []:
    break
#
# Do we recognize the current face?
# The "predict" method will return the closest match of the current image to the database
#
finalimage = sampleImage & facefilter
[predicted_label, generic_classifier_output] = model.predict(finalimage)
#
# Determine if the prediction is within a certain "threshold". This is actually the
# "distance" between the image and the database. The closer the distance is to "0", the
# closer a match it really is.
#
# Higher thresholds result in less accuracy or more mis-identified pictures.
#
if int(generic_classifier_output['distances'][0]) > current_threshold * 4:
    high = current_threshold * 4
else:
    high = int(generic_classifier_output['distances'][0])
#
# The percentage is calculated to tell us how close a match we have to the current image
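The clamp above caps the value stored in high at current_threshold * 4, presumably so that the percentage derived from it in the (truncated) continuation stays bounded even for faces that are nowhere near any database entry.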
Example 12: FaceDatabase
# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import predict [as alias]
class FaceDatabase(object):
    def __init__(self, database_folder, feature_parameter="LPQ", metric="chi", k=3):
        self.model = None
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger = logging.getLogger("facerec")
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        path = database_folder
        start = time.clock()
        input_faces = utils.read_images_from_single_folder(path)
        stop = time.clock()
        print("read {} images from {} in {} seconds.".format(len(input_faces), path, stop-start))
        feature = None
        m = {
            "fisher": Fisherfaces,
            "fisher80": Fisherfaces,
            "pca": PCA,
            "pca10": PCA,
            "lda": LDA,
            "spatial": SpatialHistogram,
            "LPQ": SpatialHistogram
        }
        if feature_parameter in m:
            if feature_parameter == 'LPQ':
                feature = SpatialHistogram(LPQ())
                self.threshold = threshold_function(71.4, 70)
            elif feature_parameter == 'fisher80':
                feature = Fisherfaces(80)
                self.threshold = threshold_function(0.61, 0.5)
            elif feature_parameter == 'fisher':
                feature = Fisherfaces()
                self.threshold = threshold_function(0.61, 0.5)
            elif feature_parameter == 'pca80':
                feature = PCA(80)
            else:
                feature = m[feature_parameter]()
        metric_param = None
        d = {"euclid": EuclideanDistance,
             "cosine": CosineDistance,
             "normal": NormalizedCorrelation,
             "chi": ChiSquareDistance,
             "histo": HistogramIntersection,
             "l1b": L1BinRatioDistance,
             "chibrd": ChiSquareBRD
             }
        if metric in d:
            metric_param = d[metric]()
        else:
            metric_param = ChiSquareDistance()
        classifier = NearestNeighbor(dist_metric=metric_param, k=k)
        feature = ChainOperator(TanTriggsPreprocessing(), feature)
        # feature = ChainOperator(TanTriggsPreprocessing(0.1, 10.0, 1.0, 3.0), feature)
        self.model = PredictableModel(feature, classifier)
        # images in one list, ids in another
        id_list, face_list = zip(*input_faces)
        print "Train the model"
        start = time.clock()
        # model.compute(X, y)
        self.model.compute(face_list, id_list)
        stop = time.clock()
        print "Training done in", stop-start, " next...find a face"
        # threshold_lpq_normalized = threshold_function(0.67, 0.3)
        # threshold_lpq_chisquared = threshold_function(71.4, 70)
        # threshold_spatial_cosine = threshold_function(0.908, 0.908)
        # threshold_spatial_chisuearbrd = threshold_function()
        # threshold = threshold_lpq_normalized

    def find_face(self, input_face_image):
        assert self.model, "Model is not valid"
        res = self.model.predict(input_face_image)
        print res
        return self.threshold(res)
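A hypothetical use of the FaceDatabase class above; the folder and probe image paths are placeholders, and utils.read_image is the same helper the class itself relies on:

db = FaceDatabase("/path/to/known_faces", feature_parameter="LPQ", metric="chi", k=3)
probe = utils.read_image("/path/to/probe_face.png")
print(db.find_face(probe))   # the configured threshold_function decides whether to accept the nearest match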