

Java FaceDetector.Face Code Examples

This article collects typical usage examples of android.media.FaceDetector.Face in Java (strictly speaking, Face is a nested class of FaceDetector rather than a method). If you are wondering what FaceDetector.Face is for, how to use it, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, android.media.FaceDetector.


A total of 15 code examples of FaceDetector.Face are shown below, sorted by popularity by default.
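Before diving into the individual examples, the snippet below shows the core pattern that nearly all of them share: copy the input into an RGB_565 bitmap, run FaceDetector.findFaces, and read the midpoint (plus eyesDistance() and confidence(), if needed) from the resulting FaceDetector.Face objects. This is a minimal sketch for orientation only; the method name detectFirstFace and its parameter are illustrative and do not come from any of the projects below.

import android.graphics.Bitmap;
import android.graphics.PointF;
import android.media.FaceDetector;

/**
 * Minimal sketch: returns the point midway between the eyes of the first
 * detected face, or null if no face is found. FaceDetector only accepts
 * RGB_565 bitmaps, and the bitmap width should be even.
 */
static PointF detectFirstFace(Bitmap source) {
    Bitmap bmp = source.copy(Bitmap.Config.RGB_565, true);
    FaceDetector detector = new FaceDetector(bmp.getWidth(), bmp.getHeight(), 1);
    FaceDetector.Face[] faces = new FaceDetector.Face[1];
    PointF mid = null;
    if (detector.findFaces(bmp, faces) > 0) {
        mid = new PointF();
        faces[0].getMidPoint(mid); // midpoint between the eyes, in bitmap pixels
    }
    bmp.recycle();
    return mid;
}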

Example 1: onStart

import android.media.FaceDetector; // import the required package/class
@Override
public void onStart() {
    super.onStart();
    initView();

    mFaces = new FaceResult[MAX_FACE_COUNT];
    mPreviousFaces = new FaceResult[MAX_FACE_COUNT];
    mDetectedFaces = new FaceDetector.Face[MAX_FACE_COUNT];
    for (int i = 0; i < MAX_FACE_COUNT; i++) {
        mFaces[i] = new FaceResult();
        mPreviousFaces[i] = new FaceResult();
    }
    mFacesCountMap = new SparseIntArray();

    presenter = new SignInPresenter(this, getContext());
    presenter.start();
}
 
Author: lazyparser, Project: xbot_head, Lines: 18, Source: SignInFragment.java

Example 2: findFaceMid

import android.media.FaceDetector; // import the required package/class
private static PointF findFaceMid(Bitmap in) {
    PointF mid = new PointF();
    // FaceDetector requires an RGB_565 bitmap
    Bitmap bitmap565 = in.copy(Bitmap.Config.RGB_565, true);

    FaceDetector fd = new FaceDetector(in.getWidth(), in.getHeight(), 1);
    FaceDetector.Face[] faces = new FaceDetector.Face[1];
    fd.findFaces(bitmap565, faces);

    FaceDetector.Face face = faces[0];
    if (face != null) {
        try {
            face.getMidPoint(mid);
            return mid;
        } catch (NullPointerException n) {
            // fall through and return null
        }
    }
    return null;
}
 
Author: mots, Project: haxsync, Lines: 20, Source: BitmapUtil.java

Example 3: detectFace

import android.media.FaceDetector; // import the required package/class
private int detectFace() {
    Bitmap bitmap = mOriginal.copy(Bitmap.Config.RGB_565, true);

    FaceDetector.Face[] faces = new FaceDetector.Face[MAX_FACES];
    FaceDetector detector = new FaceDetector(bitmap.getWidth(), bitmap.getHeight(), MAX_FACES);
    int count = detector.findFaces(bitmap, faces);
    if (count > 0) {
        FaceDetector.Face face = faces[0];
        face.getMidPoint(mCenterPoint);
        mEyeDistance = face.eyesDistance();
    } else {
        mCenterPoint.x = 0.0f;
        mCenterPoint.y = 0.0f;
        mEyeDistance = 0.0f;
    }

    bitmap.recycle();

    return count;
}
 
Author: Tinker-S, Project: FaceBarCodeDemo, Lines: 21, Source: FaceComposeActivity.java

Example 4: detectFace

import android.media.FaceDetector; // import the required package/class
private int detectFace() {
    Bitmap bitmap = mOriginal.copy(Bitmap.Config.RGB_565, true);

    FaceDetector.Face[] faces = new FaceDetector.Face[MAX_FACES];
    FaceDetector detector = new FaceDetector(bitmap.getWidth(), bitmap.getHeight(), MAX_FACES);
    int count = detector.findFaces(bitmap, faces);
    if (count > 0) {
        FaceDetector.Face face = faces[0];
        face.getMidPoint(mCenterPoint);
        mEyeDistance = face.eyesDistance();
    } else {
        mCenterPoint.x = 0.0f;
        mCenterPoint.y = 0.0f;
        mEyeDistance = 0.0f;
    }

    bitmap.recycle();
    bitmap = null;

    return count;
}
 
Author: Tinker-S, Project: FaceBarCodeDemo, Lines: 22, Source: ImageComposeDemoActivity.java

Example 5: detectFace

import android.media.FaceDetector; // import the required package/class
@Nullable private static PointF detectFace(@NonNull Bitmap testBitmap) {
    final int maxFaces = 1;
    long millis = System.currentTimeMillis();
    // initialize the face detector, and look for only one face...
    FaceDetector fd = new FaceDetector(testBitmap.getWidth(), testBitmap.getHeight(), maxFaces);
    FaceDetector.Face[] faces = new FaceDetector.Face[maxFaces];
    int numFound = fd.findFaces(testBitmap, faces);
    PointF facePos = null;
    if (numFound > 0) {
        facePos = new PointF();
        faces[0].getMidPoint(facePos);
        // center on the nose, not on the eyes
        facePos.y += faces[0].eyesDistance() / 2;
        // normalize the position to [0, 1]
        facePos.set(MathUtil.constrain(facePos.x / testBitmap.getWidth(), 0, 1),
                MathUtil.constrain(facePos.y / testBitmap.getHeight(), 0, 1));
        L.d("Found face at " + facePos.x + ", " + facePos.y);
    }
    L.d("Face detection took " + (System.currentTimeMillis() - millis) + "ms");
    return facePos;
}
 
Author: wikimedia, Project: apps-android-wikipedia, Lines: 22, Source: FacePostprocessor.java

Example 6: getFaces

import android.media.FaceDetector; // import the required package/class
List<FaceDetector.Face> getFaces(UQI uqi) {
    int max = 10;
    List<FaceDetector.Face> faces = new ArrayList<>();
    Bitmap bitmap = this.getBitmapRGB565(uqi);
    if (bitmap == null) return faces;
    FaceDetector detector = new FaceDetector(bitmap.getWidth(), bitmap.getHeight(), max);
    FaceDetector.Face[] facesArray = new FaceDetector.Face[max];
    int count = detector.findFaces(bitmap, facesArray);
    for (int i = 0; i < count; i++) {
        FaceDetector.Face face = facesArray[i];
        if (face != null && face.confidence() > 0.3)
            faces.add(face);
    }
    return faces;
}
 
Author: PrivacyStreams, Project: PrivacyStreams, Lines: 16, Source: ImageData.java

Example 7: getFaceRect

import android.media.FaceDetector; // import the required package/class
/**
 * Compute the face bounding rectangles.
 */
private void getFaceRect() {
    Rect[] faceRectList = new Rect[mDetectorData.getFacesCount()];
    Rect rect = null;
    float distance = 0;
    for (int i = 0; i < mDetectorData.getFacesCount(); i++) {
        faceRectList[i] = new Rect();
        FaceDetector.Face face = mFaces[i];
        if (face != null) {
            float eyeDistance = face.eyesDistance();
            eyeDistance = eyeDistance * mZoomRatio;
            if (eyeDistance > distance) {
                distance = eyeDistance;
                rect = faceRectList[i];
            }
            PointF midEyesPoint = new PointF();
            face.getMidPoint(midEyesPoint);
            midEyesPoint.x = midEyesPoint.x * mZoomRatio;
            midEyesPoint.y = midEyesPoint.y * mZoomRatio;
            ViseLog.i("eyeDistance:" + eyeDistance + ",midEyesPoint.x:" + midEyesPoint.x
                    + ",midEyesPoint.y:" + midEyesPoint.y);
            faceRectList[i].set((int) (midEyesPoint.x - eyeDistance),
                    (int) (midEyesPoint.y - eyeDistance),
                    (int) (midEyesPoint.x + eyeDistance),
                    (int) (midEyesPoint.y + eyeDistance));
            ViseLog.i("FaceRectList[" + i + "]:" + faceRectList[i]);
        }
    }
    mDetectorData.setLightIntensity(FaceUtil.getYUVLight(mDetectorData.getFaceData(), rect, mCameraWidth));
    mDetectorData.setFaceRectList(faceRectList);
    if (mCameraWidth > 0) {
        mDetectorData.setDistance(distance * 2 / mCameraWidth);
    }
}
 
Author: xiaoyaoyou1212, Project: ViseFace, Lines: 37, Source: NormalFaceDetector.java

Example 8: detectFaces

import android.media.FaceDetector; // import the required package/class
@ProtoMethod(description = "Detect faces in a bitmap", example = "")
@ProtoMethodParam(params = {"Bitmap", "numFaces"})
public int detectFaces(Bitmap bmp, int num_faces) {
    FaceDetector face_detector = new FaceDetector(bmp.getWidth(), bmp.getHeight(), num_faces);
    FaceDetector.Face[] faces = new FaceDetector.Face[num_faces];
    int face_count = face_detector.findFaces(bmp, faces);

    return face_count;
}
 
Author: victordiaz, Project: phonk, Lines: 10, Source: PUtil.java

Example 9: CommentaryFragment

import android.media.FaceDetector; // import the required package/class
public CommentaryFragment() {
    faces = new FaceResult[MAX_FACE_COUNT];
    previousFaces = new FaceResult[MAX_FACE_COUNT];
    detectedFaces = new FaceDetector.Face[MAX_FACE_COUNT];
    for (int i = 0; i < MAX_FACE_COUNT; i++) {
        faces[i] = new FaceResult();
        previousFaces[i] = new FaceResult();
    }

    recyclerViewBitmapList = new ArrayList<>();
    facesCountMap = new SparseIntArray();
}
 
Author: lazyparser, Project: xbot_head, Lines: 13, Source: CommentaryFragment.java

Example 10: onStart

import android.media.FaceDetector; // import the required package/class
@Override
public void onStart() {
    super.onStart();

    mFaces = new FaceResult[MAX_FACE_COUNT];
    mPreviousFaces = new FaceResult[MAX_FACE_COUNT];
    mDetectedFaces = new FaceDetector.Face[MAX_FACE_COUNT];
    for (int i = 0; i < MAX_FACE_COUNT; i++) {
        mFaces[i] = new FaceResult();
        mPreviousFaces[i] = new FaceResult();
    }
    mFacesCountMap = new SparseIntArray();

}
 
Author: lazyparser, Project: xbot_head, Lines: 15, Source: InteractionFragment.java

Example 11: performTask

import android.media.FaceDetector; // import the required package/class
@Override
@Nullable
public PointF performTask() {
    // boost this thread's priority a bit
    Thread.currentThread().setPriority(Thread.MAX_PRIORITY - 1);
    long millis = System.currentTimeMillis();
    // create a new bitmap onto which we'll draw the original bitmap,
    // because the FaceDetector requires it to be a 565 bitmap, and it
    // must also be even width. Reduce size of copy for performance.
    Bitmap testBmp = new565ScaledBitmap(srcBitmap);

    // initialize the face detector, and look for only one face...
    FaceDetector fd = new FaceDetector(testBmp.getWidth(), testBmp.getHeight(), 1);
    FaceDetector.Face[] faces = new FaceDetector.Face[1];
    int numFound = fd.findFaces(testBmp, faces);

    PointF facePos = null;
    if (numFound > 0) {
        facePos = new PointF();
        faces[0].getMidPoint(facePos);
        // scale back to proportions of original image
        facePos.x = (facePos.x * srcBitmap.getWidth() / BITMAP_COPY_WIDTH);
        facePos.y = (facePos.y * srcBitmap.getHeight() / testBmp.getHeight());
        L.d("Found face at " + facePos.x + ", " + facePos.y);
    }
    // free our temporary bitmap
    testBmp.recycle();

    L.d("Face detection took " + (System.currentTimeMillis() - millis) + "ms");

    return facePos;
}
 
Author: gnosygnu, Project: xowa_android, Lines: 33, Source: ImageViewWithFace.java
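The helper new565ScaledBitmap is referenced above but not shown. Based only on the constraints its comment spells out (the FaceDetector input must be RGB_565, must have an even width, and is scaled down to BITMAP_COPY_WIDTH for performance), a plausible sketch might look like the following; the constant value and the exact scaling logic are assumptions, not the project's actual implementation.

import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Rect;

// Hypothetical reconstruction of new565ScaledBitmap, not the project's actual code.
private static final int BITMAP_COPY_WIDTH = 256; // assumed target width

private static Bitmap new565ScaledBitmap(Bitmap src) {
    int width = BITMAP_COPY_WIDTH & ~1; // FaceDetector requires an even width
    int height = Math.max(1, src.getHeight() * width / src.getWidth());
    // Draw the source into an RGB_565 bitmap, since FaceDetector rejects other configs.
    Bitmap copy = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
    Canvas canvas = new Canvas(copy);
    canvas.drawBitmap(src,
            new Rect(0, 0, src.getWidth(), src.getHeight()),
            new Rect(0, 0, width, height),
            null);
    return copy;
}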

Example 12: handleFace

import android.media.FaceDetector; // import the required package/class
private void handleFace(FaceDetector.Face f) {
    PointF midPoint = new PointF();

    int r = ((int) (f.eyesDistance() * mScale)) * 2;
    f.getMidPoint(midPoint);
    midPoint.x *= mScale;
    midPoint.y *= mScale;

    int midX = (int) midPoint.x;
    int midY = (int) midPoint.y;

    HighlightView hv = new HighlightView(mImageView);

    int width = mBitmap.getWidth();
    int height = mBitmap.getHeight();

    Rect imageRect = new Rect(0, 0, width, height);

    RectF faceRect = new RectF(midX, midY, midX, midY);
    faceRect.inset(-r, -r);
    if (faceRect.left < 0) {
        faceRect.inset(-faceRect.left, -faceRect.left);
    }

    if (faceRect.top < 0) {
        faceRect.inset(-faceRect.top, -faceRect.top);
    }

    if (faceRect.right > imageRect.right) {
        faceRect.inset(faceRect.right - imageRect.right, faceRect.right - imageRect.right);
    }

    if (faceRect.bottom > imageRect.bottom) {
        faceRect.inset(faceRect.bottom - imageRect.bottom, faceRect.bottom - imageRect.bottom);
    }

    hv.setup(mImageMatrix, imageRect, faceRect, mCircleCrop, mAspectX != 0 && mAspectY != 0);

    mImageView.add(hv);
}
 
Author: MaestroDroid, Project: TakeAndCrop, Lines: 41, Source: CropImageActivity.java

Example 13: setFaces

import android.media.FaceDetector; // import the required package/class
public void setFaces(PreProcessorFactory.PreprocessingMode preprocessingMode) {
    List<Mat> images = getImages();

    PreferencesHelper preferencesHelper = new PreferencesHelper(context);
    if (preferencesHelper.getDetectionMethod()){
        faces = faceDetection.getFaces(images.get(0));
        angle = faceDetection.getAngle();
    } else {
        Mat img = images.get(0);
        FaceDetector faceDetector = new FaceDetector(img.cols(), img.rows(), 1);
        Bitmap bmp = Bitmap.createBitmap(img.cols(), img.rows(), Bitmap.Config.RGB_565);
        Utils.matToBitmap(img, bmp);
        FaceDetector.Face[] facesAndroid = new FaceDetector.Face[1];
        if (faceDetector.findFaces(bmp, facesAndroid) > 0){
            faces = new Rect[facesAndroid.length];
            for (int i=0; i<facesAndroid.length; i++){
                PointF pointF = new PointF();
                facesAndroid[i].getMidPoint(pointF);
                int xWidth = (int) (1.34 * facesAndroid[i].eyesDistance());
                int yWidth = (int) (1.12 * facesAndroid[i].eyesDistance());
                int dist = (int) (2.77 * facesAndroid[i].eyesDistance());
                Rect face = new Rect((int) pointF.x - xWidth, (int) pointF.y - yWidth, dist, dist);
                faces[i] = face;
            }
        }
    }

    if (preprocessingMode == PreProcessorFactory.PreprocessingMode.RECOGNITION && preferencesHelper.getDetectionMethod()){
        // Change the image rotation to the angle where the face was detected
        images.remove(0);
        images.add(faceDetection.getImg());
        setImages(images);
    }
}
 
Author: Qualeams, Project: Android-Face-Recognition-with-Deep-Learning-Library, Lines: 35, Source: PreProcessor.java

Example 14: handleFace

import android.media.FaceDetector; // import the required package/class
private void handleFace(FaceDetector.Face f) {
    PointF midPoint = new PointF();

    int r = ((int) (f.eyesDistance() * mScale)) * 2;
    f.getMidPoint(midPoint);
    midPoint.x *= mScale;
    midPoint.y *= mScale;

    int midX = (int) midPoint.x;
    int midY = (int) midPoint.y;

    HighlightView hv = new HighlightView(mImageView);

    int width = mBitmap.getWidth();
    int height = mBitmap.getHeight();

    Rect imageRect = new Rect(0, 0, width, height);

    RectF faceRect = new RectF(midX, midY, midX, midY);
    faceRect.inset(-r, -r);
    if (faceRect.left < 0) {
        faceRect.inset(-faceRect.left, -faceRect.left);
    }

    if (faceRect.top < 0) {
        faceRect.inset(-faceRect.top, -faceRect.top);
    }

    if (faceRect.right > imageRect.right) {
        faceRect.inset(faceRect.right - imageRect.right, faceRect.right - imageRect.right);
    }

    if (faceRect.bottom > imageRect.bottom) {
        faceRect.inset(faceRect.bottom - imageRect.bottom, faceRect.bottom - imageRect.bottom);
    }

    hv.setup(mImageMatrix, imageRect, faceRect, false, true);

    mImageView.add(hv);
}
 
Author: bangqu, Project: eshow-android, Lines: 41, Source: CropImage.java

Example 15: updateMeasurement

import android.media.FaceDetector; // import the required package/class
private void updateMeasurement(final FaceDetector.Face currentFace) {
    if (currentFace == null) {
        // _facesFoundInMeasurement--;
        return;
    }

    _foundFace = _currentFaceDetectionThread.getCurrentFace();

    _points.add(new Point(_foundFace.eyesDistance(),
            CALIBRATION_DISTANCE_A4_MM
                    * (_distanceAtCalibrationPoint / _foundFace
                    .eyesDistance())));

    while (_points.size() > _threashold) {
        _points.remove(0);
    }

    float sum = 0;
    for (Point p : _points) {
        sum += p.getEyeDistance();
    }

    _currentAvgEyeDistance = sum / _points.size();

    _currentDistanceToFace = CALIBRATION_DISTANCE_A4_MM
            * (_distanceAtCalibrationPoint / _currentAvgEyeDistance);

    _currentDistanceToFace = Util.MM_TO_CM(_currentDistanceToFace);

    MeasurementStepMessage message = new MeasurementStepMessage();
    message.setConfidence(currentFace.confidence());
    message.setCurrentAvgEyeDistance(_currentAvgEyeDistance);
    message.setDistToFace(_currentDistanceToFace);
    message.setEyesDistance(currentFace.eyesDistance());
    message.setMeasurementsLeft(_calibrationsLeft);
    message.setProcessTimeForLastFrame(_processTimeForLastFrame);

    MessageHUB.get().sendMessage(MessageHUB.MEASUREMENT_STEP, message);
}
 
Author: ZhuFengdaaa, Project: Chuangxinbei, Lines: 40, Source: CameraSurfaceView.java
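The distance estimate above relies on the fact that, for a fixed camera, the apparent eye distance in pixels scales inversely with the distance to the face: at calibration time the device is held at a known distance (the A4-based constant in the snippet) and the eye distance in pixels is recorded, so a later measurement can be converted back with a single multiplication. A condensed sketch of just that conversion follows; the parameter names are assumptions mirroring the snippet, not the project's actual fields, and it assumes _distanceAtCalibrationPoint stores the eye distance (in pixels) measured at the calibration distance.

// Sketch of the inverse-proportionality estimate used above.
static float estimateDistanceMm(float eyesDistanceNowPx,
                                float eyesDistanceAtCalibrationPx,
                                float calibrationDistanceMm) {
    // apparent size ~ 1 / distance  =>  distance_now = d_cal * (eyes_cal / eyes_now)
    return calibrationDistanceMm * (eyesDistanceAtCalibrationPx / eyesDistanceNowPx);
}

// Example: if the calibration distance were 297 mm and the calibrated eye distance
// 80 px, a later reading of 40 px would put the face at roughly 594 mm (about 59 cm).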


Note: The android.media.FaceDetector.Face examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their original authors, who retain copyright over the source code; for distribution and use, please refer to each project's license. Do not reproduce without permission.