本文整理汇总了Java中com.google.android.gms.vision.face.FaceDetector类的典型用法代码示例。如果您正苦于以下问题:Java FaceDetector类的具体用法?Java FaceDetector怎么用?Java FaceDetector使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FaceDetector类属于com.google.android.gms.vision.face包,在下文中一共展示了FaceDetector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: creteCameraTracker
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Builds the face detector and the front-facing camera source used for
 * user-aware playback.
 *
 * <p>Tracking is disabled and full classification is requested so each frame
 * reports eye/smile probabilities. If the detector's native dependencies are
 * not yet available, the video view is notified of the error.
 */
private void creteCameraTracker() {
    FaceDetector.Builder detectorBuilder = new FaceDetector.Builder(mActivity);
    detectorBuilder.setTrackingEnabled(false);
    detectorBuilder.setClassificationType(FaceDetector.ALL_CLASSIFICATIONS);
    mDetector = detectorBuilder.build();
    mDetector.setProcessor(
            new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory()).build());

    if (!mDetector.isOperational()) {
        // The native face-detection library has not finished downloading yet.
        mUserAwareVideoView.onErrorOccurred();
        Log.e("Start Tracking", "Face tracker is not operational.");
    }

    CameraSource.Builder cameraBuilder = new CameraSource.Builder(mActivity, mDetector);
    cameraBuilder.setRequestedPreviewSize(640, 480);
    cameraBuilder.setFacing(CameraSource.CAMERA_FACING_FRONT);
    cameraBuilder.setRequestedFps(30.0f);
    mCameraSource = cameraBuilder.build();
}
示例2: createCameraSource
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Creates the face detector and the front-facing camera source that feeds it.
 */
private void createCameraSource() {
    Context context = getApplicationContext();
    FaceDetector detector = createFaceDetector(context);
    detector.setProcessor(
            new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory()).build());

    if (!detector.isOperational()) {
        // Native library not downloaded yet; detector becomes operational later.
        Timber.d("Face detector dependencies are not yet available.");
    }

    CameraSource.Builder builder = new CameraSource.Builder(context, detector);
    // Camera will decide actual size, and we'll crop accordingly in layout.
    builder.setRequestedPreviewSize(640, 480);
    builder.setFacing(CameraSource.CAMERA_FACING_FRONT);
    builder.setRequestedFps(MAX_FRAME_RATE);
    builder.setAutoFocusEnabled(true);
    cameraSource = builder.build();
}
示例3: onUpdate
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Update the position/characteristics of the face within the overlay.
 *
 * <p>Takes a photo when the subject is smiling and appears to be winking
 * (a large difference between the two eye-open probabilities).
 *
 * @param detectionResults detection metadata for the current frame
 * @param face             the face being tracked
 */
@Override
public void onUpdate(FaceDetector.Detections<Face> detectionResults, Face face) {
    mOverlay.add(mFaceGraphic);
    // getIsSmilingProbability() returns -1 (UNCOMPUTED_PROBABILITY) when
    // classification is unavailable; -1 never exceeds the threshold, so
    // isSmiling safely stays false in that case.
    boolean isSmiling = face.getIsSmilingProbability() > SMILING_THRESHOLD;
    if (isSmiling) {
        float leftEye = face.getIsLeftEyeOpenProbability();
        float rightEye = face.getIsRightEyeOpenProbability();
        // Fix: skip frames where either eye probability is uncomputed (-1).
        // Previously |(-1) - p| could spuriously exceed WINK_THRESHOLD and
        // trigger an unwanted shot.
        boolean eyesComputed = leftEye >= 0f && rightEye >= 0f;
        if (eyesComputed && Math.abs(leftEye - rightEye) >= WINK_THRESHOLD) {
            takeShot();
        }
    }
    mFaceGraphic.setIsReady(isSmiling);
    mFaceGraphic.updateFace(face);
}
示例4: createCameraSource
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Creates the face detector and the camera.
 */
private void createCameraSource() {
    Context context = getApplicationContext();
    FaceDetector detector = createFaceDetector(context);

    // Select whichever camera the user has toggled to.
    int facing = mIsFrontFacing
            ? CameraSource.CAMERA_FACING_FRONT
            : CameraSource.CAMERA_FACING_BACK;

    // The camera source is initialized to use either the front or rear facing
    // camera. A relatively low preview resolution is used because it is
    // sufficient for this app and the face detector runs faster on smaller
    // frames. Note the speed/accuracy trade-off: lower resolutions may miss
    // small faces, landmarks, or eye open/closed classification; raise the
    // resolution if those become a problem.
    mCameraSource = new CameraSource.Builder(context, detector)
            .setFacing(facing)
            .setRequestedPreviewSize(320, 240)
            .setRequestedFps(60.0f)
            .setAutoFocusEnabled(true)
            .build();
}
示例5: createCameraSource
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Creates and starts the camera source feeding a face detector configured
 * with full classification (eye-open / smiling probabilities), landmark
 * detection, and accurate mode.
 */
private void createCameraSource() {
Context context = getApplicationContext();
FaceDetector detector = new FaceDetector.Builder(context)
.setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
.setLandmarkType(FaceDetector.ALL_LANDMARKS)
.setMode(FaceDetector.ACCURATE_MODE)
.build();
// One tracker instance is created per detected face by the factory.
detector.setProcessor(
new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory())
.build());
if (!detector.isOperational()) {
// Note: The first time that an app using face API is installed on a device, GMS will
// download a native library to the device in order to do detection. Usually this
// completes before the app is run for the first time. But if that download has not yet
// completed, then the above call will not detect any faces.
//
// isOperational() can be used to check if the required native library is currently
// available. The detector will automatically become operational once the library
// download completes on device.
Log.w(TAG, "Face detector dependencies are not yet available.");
}
mCameraSource = new CameraSource.Builder(context, detector)
.setRequestedPreviewSize(640, 480)
.setFacing(CameraSource.CAMERA_FACING_FRONT)
.setRequestedFps(30.0f)
.build();
}
示例6: createCameraSource
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Creates and starts the camera. Note that this uses a higher resolution in comparison
 * to other detection examples to enable the ocr detector to detect small text samples
 * at long distances.
 * <p>
 * Suppressing InlinedApi since there is a check that the minimum version is met before using
 * the constant.
 *
 * @param autoFocus whether to request continuous-picture focus mode
 * @param useFlash  whether to keep the torch on while scanning
 */
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
Context context = getApplicationContext();
// A text recognizer is created to find text. An associated multi-processor instance
// is set to receive the text recognition results, track the text, and maintain
// graphics for each text block on screen. The factory is used by the multi-processor to
// create a separate tracker instance for each text block.
textRecognizer = new TextRecognizer.Builder(context).build();
textRecognizer.setProcessor(new OcrDetectorProcessor(mGraphicOverlay, this));
// A face detector is created to track faces. An associated multi-processor instance
// is set to receive the face detection results, track the faces, and maintain graphics for
// each face on screen. The factory is used by the multi-processor to create a separate
// tracker instance for each face.
FaceDetector faceDetector = new FaceDetector.Builder(context).build();
FaceTrackerFactory faceFactory = new FaceTrackerFactory(mGraphicOverlay, this);
faceDetector.setProcessor(
new MultiProcessor.Builder<>(faceFactory).build());
// A multi-detector groups the two detectors together as one detector. All images received
// by this detector from the camera will be sent to each of the underlying detectors, which
// will do face detection and text recognition, respectively. The detection results from
// each are then sent to associated tracker instances which maintain per-item graphics on
// the screen.
multiDetector = new MultiDetector.Builder()
.add(faceDetector)
.add(textRecognizer)
.build();
if (!multiDetector.isOperational()) {
Log.w(TAG, "Detector dependencies are not yet available.");
// Check for low storage. If there is low storage, the native library will not be
// downloaded, so detection will not become operational.
IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;
if (hasLowStorage) {
Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
Log.w(TAG, getString(R.string.low_storage_error));
}
}
// Creates and starts the camera. Note that this uses a higher resolution in comparison
// to other detection examples to enable the text recognizer to detect small pieces of text.
mCameraSource =
new CameraSource.Builder(getApplicationContext(), multiDetector)
.setFacing(CameraSource.CAMERA_FACING_BACK)
.setRequestedPreviewSize(1600, 1024)
.setRequestedFps(2.0f)
.setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
.setFocusMode(autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null)
.build();
}
示例7: call
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * ANE bridge: reports whether the on-device face-detection library is
 * available, as a boolean {@link FREObject}.
 *
 * @return {@code FREObject} wrapping {@code isOperational()}, or {@code null}
 *         if the FRE object could not be created on this thread
 */
@Override
public FREObject call( FREContext context, FREObject[] args ) {
    super.call( context, args );
    AIR.log( "FaceDetection::isOperational" );
    Activity activity = AIR.getContext().getActivity();
    FaceDetector.Builder fb = new FaceDetector.Builder( activity.getApplicationContext() );
    final FaceDetector detector = fb.build();
    try {
        return FREObject.newObject( detector.isOperational() );
    } catch( FREWrongThreadException e ) {
        e.printStackTrace();
    } finally {
        // Fix: the original leaked the detector's native resources;
        // release it whether or not newObject() succeeds.
        detector.release();
    }
    return null;
}
示例8: setDetector
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Runs one-shot face detection over {@code mbitmap} with an accurate-mode,
 * landmark-enabled detector, storing the results in {@code mFaces}.
 *
 * <p>Shows a toast instead when the detector's native dependencies are not
 * yet available.
 */
public void setDetector(){
    FaceDetector detector = new FaceDetector.Builder(this)
            .setTrackingEnabled(true)
            .setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setMode(FaceDetector.ACCURATE_MODE)
            .build();
    // SafeFaceDetector works around a known GMS crash on small frames.
    Detector<Face> safeDetector = new SafeFaceDetector(detector);
    try {
        if (!safeDetector.isOperational()) {
            Toast.makeText(this, "Detector are having issues", Toast.LENGTH_LONG).show();
        } else {
            Frame frame = new Frame.Builder().setBitmap(mbitmap).build();
            mFaces = safeDetector.detect(frame);
        }
    } finally {
        // Fix: the original leaked the detector when it was not operational;
        // release native resources on every path.
        safeDetector.release();
    }
}
示例9: findLargestFace
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Detects faces in {@code inputImage} and returns the one covering the
 * largest area, or an absent {@code Optional} when no face is found.
 *
 * @param inputImage bitmap to scan for faces
 * @return the largest detected face, if any
 */
private Optional<Face> findLargestFace(Bitmap inputImage) {
    final FaceDetector detector = createDetector();
    Face largestFace = null;
    try {
        final SparseArray<Face> faces =
                detector.detect(new Frame.Builder().setBitmap(inputImage).build());
        Timber.d("found " + faces.size() + " faces in photo");
        float largestSize = 0f;
        for (int i = 0; i < faces.size(); ++i) {
            final Face face = faces.valueAt(i);
            // Bounding-box area decides which face "wins".
            final float faceSize = face.getHeight() * face.getWidth();
            if (faceSize > largestSize) {
                largestFace = face;
                largestSize = faceSize;
            }
        }
    } finally {
        // Fix: the original skipped release() if detect() threw;
        // always free the detector's native resources.
        detector.release();
    }
    return Optional.fromNullable(largestFace);
}
示例10: createCameraSourceFront
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Builds the preview face detector and starts the front camera, preferring
 * the Camera2 API and falling back to Camera1 on LEGACY-level hardware.
 */
private void createCameraSourceFront() {
// Fast-mode detector limited to the most prominent face, with tracking and
// full classification (eye/smile probabilities) enabled.
previewFaceDetector = new FaceDetector.Builder(context)
.setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
.setLandmarkType(FaceDetector.ALL_LANDMARKS)
.setMode(FaceDetector.FAST_MODE)
.setProminentFaceOnly(true)
.setTrackingEnabled(true)
.build();
// NOTE(review): the processor is only attached when the detector is already
// operational; if the native library finishes downloading later, no
// processor will be set — confirm this is intended.
if(previewFaceDetector.isOperational()) {
previewFaceDetector.setProcessor(new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory()).build());
} else {
Toast.makeText(context, "FACE DETECTION NOT AVAILABLE", Toast.LENGTH_SHORT).show();
}
if(useCamera2) {
mCamera2Source = new Camera2Source.Builder(context, previewFaceDetector)
.setFocusMode(Camera2Source.CAMERA_AF_AUTO)
.setFlashMode(Camera2Source.CAMERA_FLASH_AUTO)
.setFacing(Camera2Source.CAMERA_FACING_FRONT)
.build();
// If the Camera2 hardware level is LEGACY, Camera2 is only emulated:
// flip the flag and recurse once into the Camera1 path below.
if(mCamera2Source.isCamera2Native()) {
startCameraSource();
} else {
useCamera2 = false;
if(usingFrontCamera) createCameraSourceFront(); else createCameraSourceBack();
}
} else {
// Camera1 fallback path.
mCameraSource = new CameraSource.Builder(context, previewFaceDetector)
.setFacing(CameraSource.CAMERA_FACING_FRONT)
.setRequestedFps(30.0f)
.build();
startCameraSource();
}
}
示例11: createCameraSourceBack
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Builds the preview face detector and starts the back camera, preferring
 * the Camera2 API and falling back to Camera1 on LEGACY-level hardware.
 */
private void createCameraSourceBack() {
// Fast-mode detector limited to the most prominent face, with tracking and
// full classification (eye/smile probabilities) enabled.
previewFaceDetector = new FaceDetector.Builder(context)
.setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
.setLandmarkType(FaceDetector.ALL_LANDMARKS)
.setMode(FaceDetector.FAST_MODE)
.setProminentFaceOnly(true)
.setTrackingEnabled(true)
.build();
// NOTE(review): the processor is only attached when the detector is already
// operational; if the native library finishes downloading later, no
// processor will be set — confirm this is intended.
if(previewFaceDetector.isOperational()) {
previewFaceDetector.setProcessor(new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory()).build());
} else {
Toast.makeText(context, "FACE DETECTION NOT AVAILABLE", Toast.LENGTH_SHORT).show();
}
if(useCamera2) {
mCamera2Source = new Camera2Source.Builder(context, previewFaceDetector)
.setFocusMode(Camera2Source.CAMERA_AF_AUTO)
.setFlashMode(Camera2Source.CAMERA_FLASH_AUTO)
.setFacing(Camera2Source.CAMERA_FACING_BACK)
.build();
// If the Camera2 hardware level is LEGACY, Camera2 is only emulated:
// flip the flag and recurse once into the Camera1 path below.
if(mCamera2Source.isCamera2Native()) {
startCameraSource();
} else {
useCamera2 = false;
if(usingFrontCamera) createCameraSourceFront(); else createCameraSourceBack();
}
} else {
// Camera1 fallback path.
mCameraSource = new CameraSource.Builder(context, previewFaceDetector)
.setFacing(CameraSource.CAMERA_FACING_BACK)
.setRequestedFps(30.0f)
.build();
startCameraSource();
}
}
示例12: getFaceDetector
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Returns the shared tracking-enabled face detector, building it lazily on
 * first use.
 *
 * @return the cached {@link FaceDetector} instance
 */
@Override
public FaceDetector getFaceDetector() {
    if (mFaceDetector != null) {
        return mFaceDetector;
    }
    FaceDetector.Builder builder = new FaceDetector.Builder(this);
    builder.setTrackingEnabled(true);
    mFaceDetector = builder.build();
    return mFaceDetector;
}
示例13: createCameraSource
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Creates and starts the camera source feeding a face detector configured
 * with full classification (eye-open / smiling probabilities), using the
 * back-facing camera.
 */
private void createCameraSource() {
Context context = getApplicationContext();
FaceDetector detector = new FaceDetector.Builder(context)
.setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
.build();
// One tracker instance is created per detected face by the factory.
detector.setProcessor(
new MultiProcessor.Builder<>(new GraphicFaceTrackerFactory())
.build());
if (!detector.isOperational()) {
// Note: The first time that an app using face API is installed on a device, GMS will
// download a native library to the device in order to do detection. Usually this
// completes before the app is run for the first time. But if that download has not yet
// completed, then the above call will not detect any faces.
//
// isOperational() can be used to check if the required native library is currently
// available. The detector will automatically become operational once the library
// download completes on device.
Log.w(TAG, "Face detector dependencies are not yet available.");
}
mCameraSource = new CameraSource.Builder(context, detector)
.setRequestedPreviewSize(640, 480)
.setFacing(CameraSource.CAMERA_FACING_BACK)
.setRequestedFps(30.0f)
.build();
}
示例14: createCameraSource
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Creates the front-facing camera source that feeds the given face detector.
 *
 * @param detector detector that will receive preview frames
 */
private void createCameraSource(FaceDetector detector) {
    Context context = getApplicationContext();
    CameraSource.Builder builder = new CameraSource.Builder(context, detector);
    builder.setFacing(CameraSource.CAMERA_FACING_FRONT);
    builder.setRequestedFps(24f);
    mCameraSource = builder.build();
}
示例15: onUpdate
import com.google.android.gms.vision.face.FaceDetector; //导入依赖的package包/类
/**
 * Maps the smile probability of the tracked face onto the view's color alpha.
 *
 * @param detectionResults detection metadata for the current frame
 * @param face             the face being tracked
 */
@Override
public void onUpdate(FaceDetector.Detections<Face> detectionResults, final Face face) {
    smileProbability = face.getIsSmilingProbability();
    // -1 is UNCOMPUTED_PROBABILITY: classification was unavailable this frame.
    if (smileProbability != -1) {
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                setColorAlpha(smileProbability * 255);
            }
        });
    } else {
        // Fix: onUpdate is invoked on a detector thread; the original called
        // setColorAlpha(0) directly here while the branch above posted to the
        // UI thread. Post this view update to the UI thread as well.
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                setColorAlpha(0);
            }
        });
    }
}