This article collects typical usage examples of the Java method com.google.android.gms.vision.face.Face.getWidth. If you have been wondering what Face.getWidth does, how to call it, or what real-world usage looks like, the curated snippets below should help. You can also explore further usage of the enclosing class, com.google.android.gms.vision.face.Face.
The following presents 8 code examples of the Face.getWidth method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
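Face.getWidth() returns the width, in image pixels, of a detected face's bounding area; together with getPosition() and getHeight() it yields the face's bounding box. Before the examples, here is a minimal, hedged sketch of how Face objects are typically obtained (the class and method names in this sketch are illustrative, not taken from any example below):

import android.content.Context;
import android.graphics.Bitmap;
import android.util.SparseArray;
import com.google.android.gms.vision.Frame;
import com.google.android.gms.vision.face.Face;
import com.google.android.gms.vision.face.FaceDetector;

public class FaceWidthDemo {
    // Detect faces in a Bitmap and log each bounding box.
    public static void logFaceBounds(Context context, Bitmap bitmap) {
        FaceDetector detector = new FaceDetector.Builder(context)
                .setTrackingEnabled(false)
                .build();
        SparseArray<Face> faces = detector.detect(
                new Frame.Builder().setBitmap(bitmap).build());
        for (int i = 0; i < faces.size(); i++) {
            Face face = faces.valueAt(i);
            // getPosition() is the top-left corner; getWidth()/getHeight()
            // span the detected face region in image pixels.
            System.out.println("Face at (" + face.getPosition().x + ", "
                    + face.getPosition().y + "), width=" + face.getWidth()
                    + ", height=" + face.getHeight());
        }
        detector.release();
    }
}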
Example 1: drawFaceBox
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
/**
 * drawFaceBox(Canvas canvas, double scale) is where things get interesting: the detected
 * faces are stored in mFaces as position data, and this method uses the width and height
 * in that data to draw a bounding box (blue in this snippet) over each detected face.
 * It creates its own Paint object, then loops through the SparseArray to read each face's
 * position, width, and height, and uses those values to draw a rectangle on the canvas.
 */
private void drawFaceBox(Canvas canvas, double scale) {
    // Create the paint used for the face boxes
    Paint paint = new Paint();
    paint.setColor(Color.BLUE);
    paint.setStrokeWidth(5);
    paint.setStyle(Paint.Style.STROKE);
    for (int i = 0; i < mFaces.size(); i++) {
        // valueAt(i) iterates the SparseArray by index, regardless of its keys
        Face face = mFaces.valueAt(i);
        float left = (float) scale * face.getPosition().x;
        float top = (float) scale * face.getPosition().y;
        float right = (float) scale * (face.getPosition().x + face.getWidth());
        float bottom = (float) scale * (face.getPosition().y + face.getHeight());
        canvas.drawRect(left, top, right, bottom, paint);
    }
}
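The scale argument maps coordinates from the bitmap the detector ran on to the canvas being drawn on. A minimal sketch of how it might be derived, assuming the view is scaled uniformly by width (the helper name is mine):

// Hypothetical helper: the detector reports positions in bitmap pixels, while
// drawing happens in view pixels, so every coordinate is scaled before use.
private static double computeScale(int viewWidth, int bitmapWidth) {
    return (double) viewWidth / (double) bitmapWidth;
}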
Example 2: getLandmarkPosition
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
/**
* Finds a specific landmark position, or approximates the position based on past observations
* if it is not present.
*/
private PointF getLandmarkPosition(Face face, int landmarkId) {
for (Landmark landmark : face.getLandmarks()) {
if (landmark.getType() == landmarkId) {
return landmark.getPosition();
}
}
PointF prop = mPreviousProportions.get(landmarkId);
if (prop == null) {
return null;
}
float x = face.getPosition().x + (prop.x * face.getWidth());
float y = face.getPosition().y + (prop.y * face.getHeight());
return new PointF(x, y);
}
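This snippet reads mPreviousProportions, which is not shown; a plausible declaration consistent with the get/put calls here and in Example 6 (the exact container in the original class may differ):

// Assumed field: maps a Landmark type to that landmark's last observed
// position, normalized by the face's width and height.
private final SparseArray<PointF> mPreviousProportions = new SparseArray<>();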
Example 3: findLargestFace
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
private Optional<Face> findLargestFace(Bitmap inputImage) {
final FaceDetector detector = createDetector();
final SparseArray<Face> faces = detector.detect(new Frame.Builder().setBitmap(inputImage).build());
Face largestFace = null;
float largestSize = 0f;
Timber.d("found " + faces.size() + " faces in photo");
for (int i = 0; i < faces.size(); ++i) {
final Face face = faces.valueAt(i);
final float faceSize = face.getHeight() * face.getWidth();
if (faceSize > largestSize) {
largestFace = face;
largestSize = faceSize;
}
}
detector.release();
return Optional.fromNullable(largestFace);
}
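createDetector() is not shown in this snippet; a hedged sketch of a configuration that fits one-shot detection on a still image (the context field and the method body are assumptions):

// Hypothetical implementation of createDetector(): tracking adds no value for
// a single Bitmap, and FAST_MODE trades some accuracy for speed.
private FaceDetector createDetector() {
    return new FaceDetector.Builder(context)
            .setTrackingEnabled(false)
            .setMode(FaceDetector.FAST_MODE)
            .build();
}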
Example 4: addBitmapToFace
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
/**
* Combines the original picture with the emoji bitmaps
*
* @param backgroundBitmap The original picture
* @param emojiBitmap The chosen emoji
* @param face The detected face
* @return The final bitmap, including the emojis over the faces
*/
private static Bitmap addBitmapToFace(Bitmap backgroundBitmap, Bitmap emojiBitmap, Face face) {
// Initialize the results bitmap to be a mutable copy of the original image
Bitmap resultBitmap = Bitmap.createBitmap(backgroundBitmap.getWidth(),
backgroundBitmap.getHeight(), backgroundBitmap.getConfig());
// Scale the emoji so it looks better on the face
float scaleFactor = EMOJI_SCALE_FACTOR;
    // Size the emoji to match the width of the face while preserving the emoji's aspect ratio
    int newEmojiWidth = (int) (face.getWidth() * scaleFactor);
    int newEmojiHeight = emojiBitmap.getHeight() * newEmojiWidth / emojiBitmap.getWidth();
// Scale the emoji
emojiBitmap = Bitmap.createScaledBitmap(emojiBitmap, newEmojiWidth, newEmojiHeight, false);
// Determine the emoji position so it best lines up with the face
float emojiPositionX =
(face.getPosition().x + face.getWidth() / 2) - emojiBitmap.getWidth() / 2;
float emojiPositionY =
(face.getPosition().y + face.getHeight() / 2) - emojiBitmap.getHeight() / 3;
// Create the canvas and draw the bitmaps to it
Canvas canvas = new Canvas(resultBitmap);
canvas.drawBitmap(backgroundBitmap, 0, 0, null);
canvas.drawBitmap(emojiBitmap, emojiPositionX, emojiPositionY, null);
return resultBitmap;
}
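A possible way to apply this helper to a whole photo (the driver method and variable names are illustrative; EMOJI_SCALE_FACTOR is assumed to be a float constant defined elsewhere in the class):

// Illustrative driver: run detection once, then overlay an emoji on each face.
private static Bitmap emojifyAllFaces(FaceDetector detector, Bitmap photo, Bitmap emoji) {
    SparseArray<Face> faces =
            detector.detect(new Frame.Builder().setBitmap(photo).build());
    Bitmap result = photo;
    for (int i = 0; i < faces.size(); i++) {
        result = addBitmapToFace(result, emoji, faces.valueAt(i));
    }
    return result;
}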
Example 5: getFaceBitmap
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
private Bitmap getFaceBitmap(Bitmap bmp, Context context) {
    FaceDetector faceDetector = new FaceDetector.Builder(context)
            .setTrackingEnabled(false)
            .build();
    if (!faceDetector.isOperational()) {
        System.out.println("Face detector not working");
        return null;
    }
    Bitmap faceBitmap = null;
    Frame frame = new Frame.Builder().setBitmap(bmp).build();
    SparseArray<Face> faces = faceDetector.detect(frame);
    // Note: if several faces are present, only the last crop survives the loop.
    for (int i = 0; i < faces.size(); i++) {
        Face thisFace = faces.valueAt(i);
        int faceWidth = (int) thisFace.getWidth();
        int faceHeight = (int) thisFace.getHeight();
        int x1 = (int) thisFace.getPosition().x;
        int y1 = (int) thisFace.getPosition().y;
        // Crop a region about 1.5x the face size, clamped to the bitmap bounds
        // so that createBitmap never reads outside the source.
        int left = x1 > faceWidth / 2 ? x1 - faceWidth / 2 : 0;
        int top = y1 > faceHeight / 2 ? y1 - faceHeight / 2 : 0;
        int width = Math.min((int) (1.5 * faceWidth), bmp.getWidth() - left);
        int height = Math.min((int) (1.5 * faceHeight), bmp.getHeight() - top);
        faceBitmap = Bitmap.createBitmap(bmp, left, top, width, height);
    }
    faceDetector.release();
    return faceBitmap != null ? faceBitmap : bmp;
}
Example 6: updatePreviousProportions
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
private void updatePreviousProportions(Face face) {
for (Landmark landmark : face.getLandmarks()) {
PointF position = landmark.getPosition();
float xProp = (position.x - face.getPosition().x) / face.getWidth();
float yProp = (position.y - face.getPosition().y) / face.getHeight();
mPreviousProportions.put(landmark.getType(), new PointF(xProp, yProp));
}
}
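This is the writer side of the cache that Example 2's getLandmarkPosition reads: each landmark position is stored as a fraction of the face box, so a missing landmark can later be approximated by re-scaling the cached fractions to the current face. A self-contained worked example of that round trip (all numbers illustrative):

// Worked example of the proportion round trip used by Examples 2 and 6.
public class ProportionDemo {
    public static void main(String[] args) {
        // Frame 1: face at x=100 with width 200; landmark observed at x=150.
        float xProp = (150f - 100f) / 200f;      // stored as 0.25
        // Frame 2: face moved to x=120, grew to width 240; landmark missing.
        float approxX = 120f + xProp * 240f;     // re-projected to 180.0
        System.out.println("xProp=" + xProp + ", approxX=" + approxX);
    }
}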
Example 7: FaceDetectionObj
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
public FaceDetectionObj(Face face) {
faceId = face.getId();
xPosition = face.getPosition().x;
yPosition = face.getPosition().y;
height = face.getHeight();
width = face.getWidth();
leftEye = face.getIsLeftEyeOpenProbability();
rightEye = face.getIsRightEyeOpenProbability();
isSmile = face.getIsSmilingProbability();
}
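The three probability getters only return meaningful values when classification is enabled on the detector; otherwise they return Face.UNCOMPUTED_PROBABILITY (-1). A hedged factory sketch (the method name is mine):

// Hypothetical detector factory matching what FaceDetectionObj consumes:
// classification enables the eye-open/smiling probabilities, and tracking
// keeps face.getId() stable across frames.
private static FaceDetector createClassifyingDetector(Context context) {
    return new FaceDetector.Builder(context)
            .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
            .setTrackingEnabled(true)
            .build();
}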
Example 8: detect
import com.google.android.gms.vision.face.Face; // import the package/class the method depends on
@Override
public void detect(
SharedBufferHandle frameData, int width, int height, DetectResponse callback) {
// The vision library will be downloaded the first time the API is used
// on the device; this happens "fast", but it might have not completed,
// bail in this case.
if (!mFaceDetector.isOperational()) {
Log.e(TAG, "FaceDetector is not operational");
// Fallback to Android's FaceDetectionImpl.
FaceDetectorOptions options = new FaceDetectorOptions();
options.fastMode = mFastMode;
options.maxDetectedFaces = mMaxFaces;
FaceDetectionImpl detector = new FaceDetectionImpl(options);
detector.detect(frameData, width, height, callback);
return;
}
Frame frame = SharedBufferUtils.convertToFrame(frameData, width, height);
if (frame == null) {
Log.e(TAG, "Error converting SharedMemory to Frame");
callback.call(new FaceDetectionResult[0]);
return;
}
final SparseArray<Face> faces = mFaceDetector.detect(frame);
FaceDetectionResult[] faceArray = new FaceDetectionResult[faces.size()];
for (int i = 0; i < faces.size(); i++) {
faceArray[i] = new FaceDetectionResult();
final Face face = faces.valueAt(i);
final PointF corner = face.getPosition();
faceArray[i].boundingBox = new RectF();
faceArray[i].boundingBox.x = corner.x;
faceArray[i].boundingBox.y = corner.y;
faceArray[i].boundingBox.width = face.getWidth();
faceArray[i].boundingBox.height = face.getHeight();
final List<Landmark> landmarks = face.getLandmarks();
ArrayList<org.chromium.shape_detection.mojom.Landmark> mojoLandmarks =
new ArrayList<org.chromium.shape_detection.mojom.Landmark>(landmarks.size());
for (int j = 0; j < landmarks.size(); j++) {
final Landmark landmark = landmarks.get(j);
final int landmarkType = landmark.getType();
if (landmarkType == Landmark.LEFT_EYE || landmarkType == Landmark.RIGHT_EYE
|| landmarkType == Landmark.BOTTOM_MOUTH) {
org.chromium.shape_detection.mojom.Landmark mojoLandmark =
new org.chromium.shape_detection.mojom.Landmark();
mojoLandmark.location = new org.chromium.gfx.mojom.PointF();
mojoLandmark.location.x = landmark.getPosition().x;
mojoLandmark.location.y = landmark.getPosition().y;
mojoLandmark.type = landmarkType == Landmark.BOTTOM_MOUTH ? LandmarkType.MOUTH
: LandmarkType.EYE;
mojoLandmarks.add(mojoLandmark);
}
}
faceArray[i].landmarks = mojoLandmarks.toArray(
new org.chromium.shape_detection.mojom.Landmark[mojoLandmarks.size()]);
}
callback.call(faceArray);
}
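For the landmark loop above to receive eye and mouth positions, mFaceDetector must be built with landmark detection enabled; a hedged sketch of such a configuration (the helper name is mine):

// Hypothetical configuration for mFaceDetector: without ALL_LANDMARKS,
// face.getLandmarks() returns an empty list and the landmark loop is skipped.
private static FaceDetector buildDetector(Context context, boolean fastMode) {
    return new FaceDetector.Builder(context)
            .setMode(fastMode ? FaceDetector.FAST_MODE : FaceDetector.ACCURATE_MODE)
            .setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .build();
}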