

Python PredictableModel.compute Method Code Examples

This article collects and summarizes typical usage examples of the Python method facerec.model.PredictableModel.compute. If you have been wondering what exactly PredictableModel.compute does, how to call it, or where to find it used in practice, the curated examples below should help. You can also explore further usage examples of the containing class, facerec.model.PredictableModel.


The sections below present 15 code examples of the PredictableModel.compute method, ordered by popularity by default. Upvoting the examples you like or find useful helps the site recommend better Python code examples.
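Every example below follows the same basic pattern: combine a feature-extraction method with a classifier into a PredictableModel, train it with compute(X, y), and query it with predict(). The minimal sketch below illustrates that pattern on toy data; it is an assumption-based sketch rather than code from any single project: the import paths follow the bytefish/facerec layout used by these examples, and the random arrays merely stand in for the grayscale face images that a read_images() helper would normally return.

import numpy as np
from facerec.feature import Fisherfaces          # assumed module path (bytefish/facerec layout)
from facerec.classifier import NearestNeighbor   # assumed module path
from facerec.distance import EuclideanDistance   # assumed module path
from facerec.model import PredictableModel

# Toy stand-in data: two subjects with ten random "images" each. In the real
# examples X holds grayscale face images (2-D uint8 arrays) loaded by read_images().
X = [np.random.randint(0, 256, (100, 100)).astype(np.uint8) for _ in range(20)]
y = [i // 10 for i in range(20)]

feature = Fisherfaces()                                              # feature extraction method
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)   # 1-NN classifier
model = PredictableModel(feature=feature, classifier=classifier)     # combine both into one model
model.compute(X, y)                                                  # train on images X and labels y
prediction = model.predict(X[0])                                     # classify a probe face image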

Example 1: create_model_file

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
def create_model_file(username, image_path, feature, classifier):
    # read images and set labels
    [X, y] = read_images(image_path)
    # Define the model as the combination
    model = PredictableModel(feature=feature.value, classifier=classifier.value)

    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)

    # We then save the model, which uses Python's pickle module:
    model_name = username + "_model.pkl"
    save_model(model_name, model)
Author: davideberdin, Project: Tinder-ML, Lines: 14, Source: model_creator.py

Example 2: computeAndSaveModel

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
def computeAndSaveModel(path_to_database, path_for_model_output, size, model_type="Fisherfaces", num_components=0, classifier_neighbours=1):
    print "\n[+] Saving new model (confirmed below)."    
    [X,y,names] = read_images(path_to_database, sz=size)
    if model_type == "Eigenfaces":
        model = PredictableModel(PCA(num_components=num_components), NearestNeighbor(k=classifier_neighbours), dimensions=size, namesDict=names)
    elif model_type == "Fisherfaces":
        model = PredictableModel(Fisherfaces(num_components=num_components), NearestNeighbor(k=classifier_neighbours), dimensions=size, namesDict=names)
    else:
        print "[-] Specify the type of model you want to compute as either 'Fisherfaces' or 'Eigenfaces' in the computeAndSaveModel function."
        return False

    model.compute(X,y)   
    save_model(path_for_model_output, model)
    print "\n[+] Saving confirmed. New model saved to:", path_for_model_output
Author: leoneckert, Project: facerec, Lines: 16, Source: facerec_tools.py

Example 3: create_model_db

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
def create_model_db(user, modelpath, feature, classifier, setsize=None):
    [X, y], testpersons = read_images_db(user, setsize)
    # Define the model as the combination
    model = PredictableModel(feature=feature.value, classifier=classifier.value)

    # Compute the feature-algorithm on the given data (in X) and labels (in y):
    model.compute(X, y)

    # We then save the model, which uses Python's pickle module:
    model_name = "{}_{}_model.pkl".format(user.username, user.id)
    testpersons_name = "{}_{}_testpersons.pkl".format(user.username, user.id)
    #save_model(os.path.join(modelpath, model_name), model)
    #with open(os.path.join(modelpath, testpersons_name), "w") as picklefile:
    #    pickle.dump(testpersons, picklefile)

    return model, testpersons
Author: davideberdin, Project: Tinder-ML, Lines: 18, Source: model_creator.py

Example 4: App

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
class App(object):
    def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn=join(curpath, 'haarcascade_frontalface_alt2.xml')):
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        ret, self.frame = self.cam.read()
        self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
        # define the feature extraction chain & classifier
        feature = ChainOperator(TanTriggsPreprocessing(), LBP())
        classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
        # build the predictable model
        self.predictor = PredictableModel(feature, classifier)
        # read the data & compute the predictor
        self.dataSet = DataSet(filename=dataset_fn,sz=self.face_sz)
        self.predictor.compute(self.dataSet.data,self.dataSet.labels)

    def run(self):
        while True:
            ret, frame = self.cam.read()

            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
            imgout = img.copy()
            for i,r in enumerate(self.detector.detect(img)):
                x0,y0,x1,y1 = r

                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.face_sz, interpolation = cv2.INTER_CUBIC)

                # get a prediction
                prediction = self.predictor.predict(face)

                # draw the face area
                cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)

                # draw the predicted name (folder name...)
                draw_str(imgout, (x0-20,y0-20), self.dataSet.names[prediction])

            cv2.imshow('videofacerec', imgout)

            # get pressed key
            ch = cv2.waitKey(10)
            if ch == 27:
                break
Author: heynemann, Project: facerec, Lines: 47, Source: videofacerec.py

Example 5: test_one_method

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
def test_one_method(input_faces, test_faces, feature, classifier, chain=True):
    if chain:
        feature = ChainOperator(TanTriggsPreprocessing(), feature)

    model = PredictableModel(feature, classifier)
    id_list, face_list = zip(*input_faces)

    start = time.clock()
    model.compute(face_list, id_list)
    stop = time.clock()
    training_time = stop-start

    res_list = []
    start = time.clock()
    for id, image in test_faces:
        res = model.predict(image)
        res_list.append([id]+res)
    stop = time.clock()
    predict_time = stop-start

    return (training_time, predict_time, res_list)
Author: orvitinn, Project: msc, Lines: 23, Source: maelingar_01.py

Example 6: checkFace

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
def checkFace(origin_img):
    #To do
    model = PredictableModel(Fisherfaces(), NearestNeighbor())
    
    result_name = 'unknown'
    
    [X,y,subject_names] = read_images(path)
    list_of_labels = list(xrange(max(y)+1))
    subject_dictionary = dict(zip(list_of_labels, subject_names))
    model.compute(X,y)

    gray = cv2.cvtColor(origin_img, cv2.COLOR_BGR2GRAY)
    sampleImage = cv2.resize(gray, (256,256))
        
    [ predicted_label, generic_classifier_output] = model.predict(sampleImage)
    print [ predicted_label, generic_classifier_output]
        
    if int(generic_classifier_output['distances']) <=  700:
        result_name = str(subject_dictionary[predicted_label])

    return result_name
Author: ChanMinPark, Project: RaspiVision, Lines: 23, Source: zzpcm_facerec_2_server.py

Example 7: train

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
def train(train_path):
    # Now read in the image data. This must be a valid path!
    [X,y,class_names] = read_images(train_path)
    print X,y,class_names
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)
    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
        E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", 
        colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()
    save_model('model.pkl', model, class_names)
    return [model,class_names]
Author: bazilik, Project: nazi-camera, Lines: 38, Source: LDA.py

Example 8:

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
Xtrain,ytrain=read_images('/home/rishabh/f1/faces aviral train/',(100,129))
Xtest,ytest=read_images('/home/rishabh/f1/faces aviral/',(100,129))

mod1=PredictableModel(PCA(num_components=50),NearestNeighbor(k=1))
mod2=PredictableModel(PCA(num_components=50),NearestNeighbor(k=1,dist_metric=CosineDistance()))    
mod3=PredictableModel(Fisherfaces(num_components=50),NearestNeighbor(k=1))
mod4=PredictableModel(Fisherfaces(num_components=50),NearestNeighbor(k=1,dist_metric=CosineDistance()))
mod5=PredictableModel(SpatialHistogram(),NearestNeighbor(k=1))
mod6=PredictableModel(SpatialHistogram(),NearestNeighbor(k=1,dist_metric=CosineDistance())) 
mod7=PredictableModel(SpatialHistogram(lbp_operator=LPQ()),NearestNeighbor(k=1))
mod8=PredictableModel(SpatialHistogram(lbp_operator=LPQ()),NearestNeighbor(k=1,dist_metric=CosineDistance()))
mod9=PredictableModel(SpatialHistogram(),NearestNeighbor(k=1,dist_metric=ChiSquareDistance())) 
mod10=PredictableModel(SpatialHistogram(),NearestNeighbor(k=1,dist_metric=NormalizedCorrelation())) 

mod1.compute(Xtrain,ytrain)
mod2.compute(Xtrain,ytrain)
mod3.compute(Xtrain,ytrain)
mod4.compute(Xtrain,ytrain)
mod5.compute(Xtrain,ytrain)
mod6.compute(Xtrain,ytrain)
mod7.compute(Xtrain,ytrain)
mod8.compute(Xtrain,ytrain)
mod9.compute(Xtrain,ytrain)
mod10.compute(Xtrain,ytrain)


#For Training Size 3

p=np.array(np.ones(len(Xtest))*9,dtype=int)
count=0
Author: rishabhjain141, Project: FaceRecCode, Lines: 32, Source: Rec.py

Example 9: dataset

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add handler to facerec modules
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# load a dataset (e.g. AT&T Facedatabase)
dataSet = DataSet("/root/libface/img/yalefaces")
# define Fisherfaces as feature extraction method
feature = Fisherfaces()
# define a 1-NN classifier with Euclidean Distance
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# compute the model on the given data and labels
model.compute(dataSet.data, dataSet.labels)
# try to recognize
im = Image.open("/root/libface/img/reg.jpg")
im = im.convert("L")
ar = []
ar.append(np.asarray(im, dtype=np.uint8))
print(dataSet.names[model.predict(ar)])
# turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)

"""
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
	e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
	E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
Author: jief123, Project: lian, Lines: 33, Source: fisherfaces_example.py

Example 10: read_images

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
    [X,y] = read_images(sys.argv[1])
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)
    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
        E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    print cv
Author: androidxtreme, Project: facerec, Lines: 32, Source: simple_example.py

Example 11: Fisherfaces

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
 # Then set up a handler for logging:
 handler = logging.StreamHandler(sys.stdout)
 formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 handler.setFormatter(formatter)
 # Add handler to facerec modules, so we see what's going on inside:
 logger = logging.getLogger("facerec")
 logger.addHandler(handler)
 logger.setLevel(logging.DEBUG)
 # Define the Fisherfaces as Feature Extraction method:
 feature = Fisherfaces()
 # Define a 1-NN classifier with Euclidean Distance:
 classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
 # Define the model as the combination
 my_model = PredictableModel(feature=feature, classifier=classifier)
 # Compute the Fisherfaces on the given data (in X) and labels (in y):
 my_model.compute(X, y)
 # We then save the model, which uses Python's pickle module:
 save_model('model.pkl', my_model)
 model = load_model('model.pkl')
 # Then turn the first (at most) 16 eigenvectors into grayscale
 # images (note: eigenvectors are stored by column!)
 E = []
 for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
     e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
     E.append(minmax_normalize(e,0,255, dtype=np.uint8))
 # Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
 subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
 # Perform a 10-fold cross validation
 cv = KFoldCrossValidation(model, k=10)
 cv.validate(X, y)
 # And print the result:
Author: UieLinux, Project: uiefaces, Lines: 33, Source: create_model.py

Example 12: PredictableModel

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
    # Define the model as the combination
    # model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the model on the given data (in X) and labels (in y):

    feature = ChainOperator(TanTriggsPreprocessing(), feature)
    # classifier = NearestNeighbor()
    model = PredictableModel(feature, classifier)


    # images in one list, id's on another
    id_list, face_list = zip(*input_faces)

    print "Train the model"
    start = time.clock()
    # model.compute(X, y)
    model.compute(face_list, id_list)
    stop = time.clock()
    print "Training done in", stop-start, " next...find a face"

    # test_path = "/Users/matti/Documents/forritun/att_faces/"
    test_path = "/Users/matti/Dropbox/Skjöl/Meistaraverkefni/server/test_faces_02"
    """
    target = "10.bmp"
    if len(sys.argv) > 3:
        target = sys.argv[3]
    """

    fp = utils.FaceProcessor()
    
    test_list = [
        ("10.bmp", 41),
Author: orvitinn, Project: msc, Lines: 33, Source: face_rec2.py

Example 13: read_images

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
##count = 0
##if not os.path.exists(os.path.join(pathdir+name)):
##    os.makedirs(os.path.join(pathdir+name))
##    print 'No path'
    
######################################
#Going through the database
[X,y,subject_names] = read_images(pathdir)

#Creates a list of the number of members
list_of_labels = list(xrange(max(y)+1))
#Maps a dictionary between the numbers and the names of the individuals
subject_dictionary = dict(zip(list_of_labels, subject_names))

#Using the 3 Models to compute Similarities Based on Data Sets
model.compute(X,y)
model1.compute(X,y)
model2.compute(X,y)

######################################
#Loading the Pictures
pictures = open('links.txt','r')
for i in pictures:
    i= i.strip()
    if i[:4] == 'http':
        req = urllib.urlopen(i)
        arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
        #Each picture we analyze is stored in img
        img = cv2.imdecode(arr, -1)

        #Now doing facial Detection. For more information, refer to Facial Detection.py. The process is the same as the one done here
Author: NathanJiangCS, Project: Project_Twice, Lines: 33, Source: Facial+Recognition.py

Example 14: list

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
            #   [ 1, first_name in database]
            #   [ 2, second_name in database]
            #   ...
            #   [ n, last_name in database]
            #
            # This dictionary is used in for the greeting and labeling
            #
            list_of_labels = list(xrange(max(y)+1))
            subject_dictionary = dict(zip(list_of_labels, subject_names))
     
            #
            # This constructs the linear discriminant analysis matrix, which is used for facial identification
            #
            initial_time = time.time()
            print "Constructing linear discriminant analysis matrix for facial identification: "
            model.compute(Z,y)

            print "Construction completed in {0:.2f} seconds.\n".format(time.time() - initial_time)

        current_state = "Tracking"

    # 
    # Get a new frame from the webcam
    #
    rval, frame = vc.read()

    # 
    # Copy the frame and convert the whole thing to black and white to make recognition easier
    #
    img = frame
    rows,cols,ch = frame.shape
Author: ADVALAIN596, Project: pyrobotlab, Lines: 33, Source: faceidentification.py

Example 15: FaceDatabase

# Required module import: from facerec.model import PredictableModel [as alias]
# Or: from facerec.model.PredictableModel import compute [as alias]
class FaceDatabase(object):

    def __init__(self, database_folder, feature_parameter="LPQ", metric="chi", k=3):
        self.model = None
        
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger = logging.getLogger("facerec")
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

        path = database_folder

        start = time.clock()
        input_faces = utils.read_images_from_single_folder(path)
        stop = time.clock()

        print("read {}, images from {} in {} seconds.".format(len(input_faces), path, stop-start))

        feature = None
        m = {
          "fisher": Fisherfaces,
          "fisher80": Fisherfaces,
          "pca": PCA,
          "pca10": PCA,
          "lda": LDA,
          "spatial": SpatialHistogram,
          "LPQ": SpatialHistogram
        }

        if feature_parameter in m:
            if feature_parameter == 'LPQ':
                feature = SpatialHistogram(LPQ())
                self.threshold = threshold_function(71.4, 70)
            elif feature_parameter == 'fisher80':
                feature = Fisherfaces(80)
                self.threshold = threshold_function(0.61, 0.5)
            elif feature_parameter == 'fisher':
                feature = Fisherfaces()
                self.threshold = threshold_function(0.61, 0.5)
            elif feature_parameter == 'pca80':
                feature = PCA(80)
            else:
                feature = m[feature_parameter]()

        metric_param = None
        d = {"euclid": EuclideanDistance,
             "cosine": CosineDistance,
             "normal": NormalizedCorrelation,
             "chi":  ChiSquareDistance,
             "histo": HistogramIntersection,
             "l1b": L1BinRatioDistance,
             "chibrd": ChiSquareBRD
             }
        if metric in d:
            metric_param = d[metric]()
        else:
            metric_param = ChiSquareDistance()

        classifier = NearestNeighbor(dist_metric=metric_param, k=k)
        feature = ChainOperator(TanTriggsPreprocessing(), feature)
        # feature = ChainOperator(TanTriggsPreprocessing(0.1, 10.0, 1.0, 3.0), feature)
        self.model = PredictableModel(feature, classifier)

        # images in one list, id's on another
        id_list, face_list = zip(*input_faces)

        print "Train the model"
        start = time.clock()
        # model.compute(X, y)
        self.model.compute(face_list, id_list)
        stop = time.clock()
        print "Training done in", stop-start, " next...find a face"

        # threshold_lpq_normalized = threshold_function(0.67, 0.3)
        # threshold_lpq_chisquared = threshold_function(71.4, 70)
        # threshold_spatial_cosine = threshold_function(0.908, 0.908)
        # threshold_spatial_chisuearbrd = threshold_function()
        # threshold = threshold_lpq_normalized

    def find_face(self, input_face_image):
        assert self.model, "Model is not valid"
        res = self.model.predict(input_face_image)
        print res
        return self.threshold(res)
Author: orvitinn, Project: msc, Lines: 88, Source: facedb.py


Note: The facerec.model.PredictableModel.compute examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code. Do not reproduce this article without permission.