

Python model.PredictableModel Class Code Examples

This article collects typical usage examples of the facerec.model.PredictableModel class in Python. If you are wondering what the PredictableModel class is for, how to use it, or what real-world code that uses it looks like, the curated class examples below should help.


The sections below present 15 code examples of the PredictableModel class, sorted by popularity by default.
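Before the individual examples, here is a minimal sketch of the workflow they all share: combine a feature extraction method with a classifier into a PredictableModel, train it with compute, and query it with predict. The read_images helper and the facerec.serialization import path are assumptions used only for illustration (each project below ships its own read_images):

from facerec.feature import Fisherfaces
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.serialization import save_model  # import path assumed

# read_images is a hypothetical helper returning a list of grayscale numpy arrays (X)
# and the corresponding integer labels (y), like the helpers defined in the projects below.
[X, y] = read_images("/path/to/face/database")

# Combine a feature extraction method with a classifier into one model:
model = PredictableModel(feature=Fisherfaces(), classifier=NearestNeighbor())
# Learn the model from the images (X) and labels (y):
model.compute(X, y)
# predict returns [predicted_label, generic_classifier_output]:
[predicted_label, classifier_output] = model.predict(X[0])
# The trained model can be persisted with pickle:
save_model('model.pkl', model)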

Example 1: create_model_file

def create_model_file(username, image_path, feature, classifier):
    # read images and set labels
    [X, y] = read_images(image_path)
    # Define the model as the combination
    model = PredictableModel(feature=feature.value, classifier=classifier.value)

    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)

    # We then save the model, which uses Pythons pickle module:
    model_name = username + "_model.pkl"
    save_model(model_name, model)
Developer ID: davideberdin, Project: Tinder-ML, Lines of code: 12, Source: model_creator.py
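As a hedged companion to this example: the saved pickle can later be reloaded and used for prediction. The facerec.serialization import path, the file name, and the test_image variable are assumptions for illustration only:

from facerec.serialization import load_model  # import path assumed

# Reload the model written by create_model_file (hypothetical file name) and classify
# a single grayscale face image of the same size as the training images:
model = load_model("alice_model.pkl")
[predicted_label, classifier_output] = model.predict(test_image)  # test_image: placeholder numpy array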

Example 2: computeAndSaveModel

def computeAndSaveModel(path_to_database, path_for_model_output, size, model_type="Fisherfaces", num_components=0, classifier_neighbours=1):
    print "\n[+] Saving new model (confirmed below)."    
    [X,y,names] = read_images(path_to_database, sz=size)
    if model_type == "Eigenfaces":
        model = PredictableModel(PCA(num_components=num_components), NearestNeighbor(k=classifier_neighbours), dimensions=size, namesDict=names)
    elif model_type == "Fisherfaces":
        model = PredictableModel(Fisherfaces(num_components=num_components), NearestNeighbor(k=classifier_neighbours), dimensions=size, namesDict=names)
    else:
        print "[-] specify the type of model you want to comput as either 'Fisherface' or 'Eigenface' in the computeAndSaveModel function."
        return False

    model.compute(X,y)   
    save_model(path_for_model_output, model)
    print "\n[+] Saving confirmed. New model saved to:", path_for_model_output
Developer ID: leoneckert, Project: facerec, Lines of code: 14, Source: facerec_tools.py

Example 3: create_model_db

def create_model_db(user, modelpath, feature, classifier, setsize=None):
    [X, y], testpersons = read_images_db(user, setsize)
    # Define the model as the combination
    model = PredictableModel(feature=feature.value, classifier=classifier.value)

    # Compute the feature-algorithm on the given data (in X) and labels (in y):
    model.compute(X, y)

    # We then save the model, which uses Pythons pickle module:
    model_name = "{}_{}_model.pkl".format(user.username, user.id)
    testpersons_name = "{}_{}_testpersons.pkl".format(user.username, user.id)
    #save_model(os.path.join(modelpath, model_name), model)
    #with open(os.path.join(modelpath, testpersons_name), "w") as picklefile:
    #    pickle.dump(testpersons, picklefile)

    return model, testpersons
Developer ID: davideberdin, Project: Tinder-ML, Lines of code: 16, Source: model_creator.py

Example 4: App

class App(object):
    def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn=join(curpath, 'haarcascade_frontalface_alt2.xml')):
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        ret, self.frame = self.cam.read()
        self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
        # define the feature extraction chain and the classifier
        feature = ChainOperator(TanTriggsPreprocessing(), LBP())
        classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
        # build the predictable model
        self.predictor = PredictableModel(feature, classifier)
        # read the data & compute the predictor
        self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
        self.predictor.compute(self.dataSet.data,self.dataSet.labels)

    def run(self):
        while True:
            ret, frame = self.cam.read()

            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
            imgout = img.copy()
            for i,r in enumerate(self.detector.detect(img)):
                x0,y0,x1,y1 = r

                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.face_sz, interpolation = cv2.INTER_CUBIC)

                # get a prediction
                prediction = self.predictor.predict(face)

                # draw the face area
                cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)

                # draw the predicted name (folder name...)
                draw_str(imgout, (x0-20,y0-20), self.dataSet.names[prediction])

            cv2.imshow('videofacerec', imgout)

            # get pressed key
            ch = cv2.waitKey(10)
            if ch == 27:
                break
Developer ID: heynemann, Project: facerec, Lines of code: 45, Source: videofacerec.py
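For context, a hypothetical entry point for the App class above might look like the sketch below; the camera index and dataset path are placeholders, not taken from the original script:

# Hypothetical usage: capture from camera 0 and train on a folder of face images.
if __name__ == '__main__':
    App(video_src=0, dataset_fn='/path/to/face/dataset').run()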

Example 5: test_one_method

def test_one_method(input_faces, test_faces, feature, classifier, chain=True):
    if chain:
        feature = ChainOperator(TanTriggsPreprocessing(), feature)

    model = PredictableModel(feature, classifier)
    id_list, face_list = zip(*input_faces)

    start = time.clock()
    model.compute(face_list, id_list)
    stop = time.clock()
    training_time = stop-start

    res_list = []
    start = time.clock()
    for id, image in test_faces:
        res = model.predict(image)
        res_list.append([id]+res)
    stop = time.clock()
    predict_time = stop-start

    return (training_time, predict_time, res_list)
Developer ID: orvitinn, Project: msc, Lines of code: 21, Source: maelingar_01.py
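A possible invocation of test_one_method, assuming input_faces and test_faces are lists of (label, grayscale image) tuples as the zip/unpacking above implies; the feature/classifier pairing is just one plausible combination from this page, and the facerec.distance import path is assumed:

from facerec.feature import Fisherfaces
from facerec.classifier import NearestNeighbor
from facerec.distance import EuclideanDistance  # import path assumed

# Hypothetical benchmark run: Fisherfaces with a 1-NN Euclidean classifier,
# wrapped in a TanTriggsPreprocessing chain by the default chain=True.
feature = Fisherfaces()
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
training_time, predict_time, results = test_one_method(input_faces, test_faces, feature, classifier)
# Each row in results is [true_label, predicted_label, generic_classifier_output].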

Example 6: checkFace

def checkFace(origin_img):
    #To do
    model = PredictableModel(Fisherfaces(), NearestNeighbor())
    
    result_name = 'unknown'
    
    [X,y,subject_names] = read_images(path)
    list_of_labels = list(xrange(max(y)+1))
    subject_dictionary = dict(zip(list_of_labels, subject_names))
    model.compute(X,y)

    gray = cv2.cvtColor(origin_img, cv2.COLOR_BGR2GRAY)
    sampleImage = cv2.resize(gray, (256,256))
        
    [ predicted_label, generic_classifier_output] = model.predict(sampleImage)
    print [ predicted_label, generic_classifier_output]
        
    if int(generic_classifier_output['distances'][0]) <= 700:
        result_name = str(subject_dictionary[predicted_label])

    return result_name
Developer ID: ChanMinPark, Project: RaspiVision, Lines of code: 21, Source: zzpcm_facerec_2_server.py

Example 7: __init__

 def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn=join(curpath, 'haarcascade_frontalface_alt2.xml')):
     self.face_sz = face_sz
     self.cam = create_capture(video_src)
     ret, self.frame = self.cam.read()
     self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
     # define the feature extraction chain and the classifier
     feature = ChainOperator(TanTriggsPreprocessing(), LBP())
     classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
     # build the predictable model
     self.predictor = PredictableModel(feature, classifier)
     # read the data & compute the predictor
     self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
     self.predictor.compute(self.dataSet.data,self.dataSet.labels)
Developer ID: heynemann, Project: facerec, Lines of code: 13, Source: videofacerec.py

Example 8: __init__

	def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn="/Users/george/job/__webdocs/webguerillas/opencv/project/facereg/opencv/OpenCV-2.4.2/data/haarcascades/haarcascade_frontalface_alt2.xml"):
		self.face_sz = face_sz
		self.cam = create_capture(video_src)
		ret, self.frame = self.cam.read()
		self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
		# define the feature extraction chain and the classifier
		feature = ChainOperator(TanTriggsPreprocessing(), LBP())
		classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
		# build the predictable model
		self.predictor = PredictableModel(feature, classifier)
		# read the data & compute the predictor
		self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
		self.predictor.compute(self.dataSet.data,self.dataSet.labels)
Developer ID: georgiee, Project: face-recognition-rails, Lines of code: 13, Source: videofacerec.py

Example 9: train

def train(train_path):
    # Now read in the image data. This must be a valid path!
    [X,y,class_names] = read_images(train_path)
    print X,y,class_names
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)
    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
        E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", 
        colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()
    save_model('model.pkl', model, class_names)
    return [model,class_names]
Developer ID: bazilik, Project: nazi-camera, Lines of code: 36, Source: LDA.py

Example 10: __init__

 def __init__(self, feature, classifier, image_size, subject_names):
     PredictableModel.__init__(self, feature=feature, classifier=classifier)
     self.image_size = image_size
     self.subject_names = subject_names
Developer ID: ianjuma, Project: recogniseD, Lines of code: 4, Source: helper.py

Example 11: read_images

 # Now read in the image data. This must be a valid path!
 [X,y] = read_images(sys.argv[1])
 # Then set up a handler for logging:
 handler = logging.StreamHandler(sys.stdout)
 formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 handler.setFormatter(formatter)
 # Add handler to facerec modules, so we see what's going on inside:
 logger = logging.getLogger("facerec")
 logger.addHandler(handler)
 logger.setLevel(logging.DEBUG)
 # Define the Fisherfaces as Feature Extraction method:
 feature = Fisherfaces()
 # Define a 1-NN classifier with Euclidean Distance:
 classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
 # Define the model as the combination
 my_model = PredictableModel(feature=feature, classifier=classifier)
 # Compute the Fisherfaces on the given data (in X) and labels (in y):
 my_model.compute(X, y)
 # We then save the model, which uses Pythons pickle module:
 save_model('model.pkl', my_model)
 model = load_model('model.pkl')
 # Then turn the first (at most) 16 eigenvectors into grayscale
 # images (note: eigenvectors are stored by column!)
 E = []
 for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
     e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
     E.append(minmax_normalize(e,0,255, dtype=np.uint8))
 # Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
 subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
 # Perform a 10-fold cross validation
 cv = KFoldCrossValidation(model, k=10)
Developer ID: UieLinux, Project: uiefaces, Lines of code: 31, Source: create_model.py

Example 12: PredictableModel

# coding=utf-8
from facerec.feature import Fisherfaces
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from PIL import Image
import numpy as np
import sys, os
import time
#sys.path.append("../..")
import cv2
import multiprocessing



model = PredictableModel(Fisherfaces(), NearestNeighbor())

cascPath = sys.argv[1]
vc=cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier(cascPath)


# once a db of faces has been obtained (next step), the
def read_images(path, sz=(256,256)):
    """Reads the images in a given folder, resizes images on the fly if size is given.
    Args:
        path: Path to a folder with subfolders representing the subjects (persons).
        sz: A tuple with the target size; images are resized to it on the fly if given.
    Returns:
        A list [X,y]
            X: The images, which is a Python list of numpy arrays.
Developer ID: Alanbaraban, Project: first, Lines of code: 31, Source: italiano.py

Example 13: DataSet

# load a dataset
random.seed()
dataSet = DataSet("/Users/gsj987/Desktop/毕设资料/faces_girls")
#dataSet.shuffle()
#idx = np.argsort([random.random() for i in xrange(len(dataSet.labels))])
#dataSet.labels = dataSet.labels[idx]
for ind in range(20, 400, 20):
# define an SVM classifier (libsvm parameters: polynomial kernel)
  classifier = SVM(svm_parameter("-t 1 -c 1 -g 1 -r 262144 -q"))
# define a HistogramEqualization + PCA chain as the feature extraction method

  feature = ChainOperator(HistogramEqualization(), PCA(ind))

#print feature.compute(dataSet.data, dataSet.labels)
# now stuff them into a PredictableModel
  model = PredictableModel(feature=feature, classifier=classifier)
# show fisherfaces
  model.compute(dataSet.data,dataSet.labels)

#print model.feature.model2.eigenvectors.shape, dataSet.data
#es = model.feature.model2.eigenvectors

#img = smp.toimage(np.dot(es,dd[0]).reshape(120,120))
#img.save("pca100.jpg")
#plot_eigenvectors(model.feature.model2.eigenvectors, 9, sz=dataSet.data[0].shape, filename=None)
# perform a 5-fold cross validation
  cv = KFoldCrossValidation(model, 5)
  cv.validate(dataSet.data, dataSet.labels)

  print ind,cv.tp, cv.fp, "%.4f" %(cv.tp/(cv.tp+cv.fp+0.0001))
Developer ID: gsj987, Project: facerec, Lines of code: 30, Source: pca.py

Example 14: len

#    print "read images"
#    print len(X),len(y)
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
#---------------------------------------------
#    print "Generating model"
    if(not os.path.exists("./temp/mymodel")):
      model.compute(X, y)
      save_model("./temp/mymodel", model)  #saving model here - CHANGE THIS
      exit()
    
#    print "loading model"
    model = load_model("./temp/mymodel")
#    print "loaded model"
    urlForImage = sys.argv[2]
    tmpfilename = "./temp/"+str(urlForImage.split('/')[-1])  #saving image here - CHANGE THIS
    urllib.urlretrieve(urlForImage, tmpfilename)
    im = Image.open(tmpfilename) #add rotate of 90? Don't think so.
Developer ID: sgbrainwaves, Project: GroupC, Lines of code: 31, Source: FaceRecognition.py

Example 15: read_images

    # Now read in the image data. This must be a valid path!
    [X,y] = read_images(sys.argv[1])
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)
    prediction = model.predict(X[0])  # predict a single image (here: the first training image)
    predicted_label = prediction[0]
    classifier_output = prediction[1]

    distance = classifier_output['distances'][0]
    print distance

    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
        E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
Developer ID: shriyanka, Project: opencv_scripts, Lines of code: 31, Source: recface.py


Note: The facerec.model.PredictableModel class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.