This article collects typical usage examples of the Java class com.google.android.gms.vision.Frame. If you are wondering what the Frame class does, how to use it, or what real-world usage looks like, the curated examples below may help.
The Frame class belongs to the com.google.android.gms.vision package. Fifteen code examples of the class are shown below, sorted by popularity.
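All fifteen examples follow the same three-step pattern: wrap an image in a Frame via Frame.Builder, hand the Frame to a detector's detect() method, and release the detector when finished. A minimal sketch of that pattern (context and bitmap are assumed to be available; the TextRecognizer is just one of the detectors used below):
// Minimal sketch: Bitmap -> Frame -> detect() -> release().
TextRecognizer recognizer = new TextRecognizer.Builder(context).build(); // context: assumption
if (recognizer.isOperational()) {
    Frame frame = new Frame.Builder().setBitmap(bitmap).build(); // bitmap: assumption
    SparseArray<TextBlock> blocks = recognizer.detect(frame);
    recognizer.release(); // free the native resources
}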
Example 1: detectText
import com.google.android.gms.vision.Frame; // import the required package/class
public void detectText(View view) {
    Bitmap textBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.cat);
    TextRecognizer textRecognizer = new TextRecognizer.Builder(this).build();
    if (!textRecognizer.isOperational()) {
        new AlertDialog.Builder(this)
                .setMessage("Text recognizer could not be set up on your device :(")
                .show();
        return;
    }
    Frame frame = new Frame.Builder().setBitmap(textBitmap).build();
    SparseArray<TextBlock> text = textRecognizer.detect(frame);
    for (int i = 0; i < text.size(); ++i) {
        TextBlock item = text.valueAt(i);
        if (item != null && item.getValue() != null) {
            // Note: each iteration overwrites the view, so only the last block is shown.
            detectedTextView.setText(item.getValue());
        }
    }
    textRecognizer.release(); // release the native resources when done
}
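As noted in the comment, the loop keeps only the last text block. A hedged variant that accumulates every block into the TextView instead (detectedTextView is the same field assumed above):
// Variant (assumption: same detectedTextView field): accumulate all blocks.
StringBuilder sb = new StringBuilder();
for (int i = 0; i < text.size(); ++i) {
    TextBlock item = text.valueAt(i);
    if (item != null && item.getValue() != null) {
        sb.append(item.getValue()).append('\n');
    }
}
detectedTextView.setText(sb.toString());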
Example 2: detectTextBlocks
import com.google.android.gms.vision.Frame; // import the required package/class
List<TextBlock> detectTextBlocks(UQI uqi) {
    List<TextBlock> result = new ArrayList<>();
    Bitmap bitmap = this.getBitmap(uqi);
    if (bitmap == null) return result;
    TextRecognizer textRecognizer = new TextRecognizer.Builder(uqi.getContext()).build();
    if (!textRecognizer.isOperational()) {
        Logging.warn("TextRecognizer is not operational");
        textRecognizer.release();
        return result;
    }
    Frame imageFrame = new Frame.Builder().setBitmap(bitmap).build();
    SparseArray<TextBlock> textBlocks = textRecognizer.detect(imageFrame);
    for (int i = 0; i < textBlocks.size(); i++) {
        TextBlock textBlock = textBlocks.get(textBlocks.keyAt(i));
        result.add(textBlock);
    }
    textRecognizer.release();
    return result;
}
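A possible caller, joining the returned blocks into a single string via TextBlock.getValue() (the uqi instance is an assumption):
// Hypothetical usage: join all detected blocks into one string.
List<TextBlock> blocks = detectTextBlocks(uqi); // uqi: assumed to be available
StringBuilder joined = new StringBuilder();
for (TextBlock block : blocks) {
    joined.append(block.getValue()).append('\n');
}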
Example 3: getDetectorOrientation
import com.google.android.gms.vision.Frame; // import the required package/class
private int getDetectorOrientation(int sensorOrientation) {
    switch (sensorOrientation) {
        case 0:
        case 360:
            return Frame.ROTATION_0;
        case 90:
            return Frame.ROTATION_90;
        case 180:
            return Frame.ROTATION_180;
        case 270:
            return Frame.ROTATION_270;
        default:
            return Frame.ROTATION_90;
    }
}
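The sensorOrientation argument is typically read from the camera itself. A hedged sketch using the camera2 API (cameraManager and cameraId are assumptions):
// Assumption: cameraManager and cameraId are already available.
try {
    CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId);
    Integer sensorOrientation = characteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
    int rotation = getDetectorOrientation(sensorOrientation != null ? sensorOrientation : 0);
} catch (CameraAccessException e) {
    // Handle the camera access failure.
}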
Example 4: getFrameRotation
import com.google.android.gms.vision.Frame; // import the required package/class
private int getFrameRotation() {
    switch (orientation) {
        case 90:
            return Frame.ROTATION_90;
        case 180:
            return Frame.ROTATION_180;
        case 270:
            return Frame.ROTATION_270;
        default:
            return Frame.ROTATION_0;
    }
}
Example 5: setDetector
import com.google.android.gms.vision.Frame; // import the required package/class
public void setDetector() {
    FaceDetector detector = new FaceDetector.Builder(this)
            .setTrackingEnabled(true)
            .setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setMode(FaceDetector.ACCURATE_MODE)
            .build();
    Detector<Face> safeDetector = new SafeFaceDetector(detector);
    if (!safeDetector.isOperational()) {
        Toast.makeText(this, "Detector is having issues", Toast.LENGTH_LONG).show();
    } else {
        Frame frame = new Frame.Builder().setBitmap(mbitmap).build();
        mFaces = safeDetector.detect(frame);
        safeDetector.release();
    }
}
Example 6: detectDocument
import com.google.android.gms.vision.Frame; // import the required package/class
Document detectDocument(Frame frame) {
    Size imageSize = new Size(frame.getMetadata().getWidth(), frame.getMetadata().getHeight());
    Mat src = new Mat();
    Utils.bitmapToMat(frame.getBitmap(), src);
    List<MatOfPoint> contours = CVProcessor.findContours(src);
    src.release();
    if (!contours.isEmpty()) {
        CVProcessor.Quadrilateral quad = CVProcessor.getQuadrilateral(contours, imageSize);
        if (quad != null) {
            quad.points = CVProcessor.getUpscaledPoints(quad.points, CVProcessor.getScaleRatio(imageSize));
            return new Document(frame, quad);
        }
    }
    return null;
}
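Utils.bitmapToMat comes from the OpenCV Android SDK, which must be loaded before any Mat is created. A minimal guard, assuming the OpenCV native library is bundled with the app:
// Assumption: OpenCV is bundled; initDebug() loads the native library synchronously.
if (!OpenCVLoader.initDebug()) {
    Log.e(TAG, "OpenCV initialization failed; detectDocument() cannot run"); // TAG: assumption
}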
Example 7: findLargestFace
import com.google.android.gms.vision.Frame; // import the required package/class
private Optional<Face> findLargestFace(Bitmap inputImage) {
    final FaceDetector detector = createDetector();
    final SparseArray<Face> faces = detector.detect(new Frame.Builder().setBitmap(inputImage).build());
    Face largestFace = null;
    float largestSize = 0f;
    Timber.d("found " + faces.size() + " faces in photo");
    for (int i = 0; i < faces.size(); ++i) {
        final Face face = faces.valueAt(i);
        final float faceSize = face.getHeight() * face.getWidth();
        if (faceSize > largestSize) {
            largestFace = face;
            largestSize = faceSize;
        }
    }
    detector.release();
    return Optional.fromNullable(largestFace);
}
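createDetector() is referenced but not shown. A plausible implementation, modeled on the FaceDetector builders used elsewhere on this page (the configuration values are assumptions):
// Hypothetical implementation of the helper referenced above.
private FaceDetector createDetector() {
    return new FaceDetector.Builder(context) // context: assumed field
            .setTrackingEnabled(false)       // single still image, no tracking needed
            .setMode(FaceDetector.ACCURATE_MODE)
            .build();
}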
Example 8: padFrameRight
import com.google.android.gms.vision.Frame; // import the required package/class
/**
 * Creates a new frame based on the original frame, with additional width on the right to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameRight(Frame originalFrame, int newWidth) {
    Frame.Metadata metadata = originalFrame.getMetadata();
    int width = metadata.getWidth();
    int height = metadata.getHeight();
    Log.i(TAG, "Padded image from: " + width + "x" + height + " to " + newWidth + "x" + height);
    ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
    int origOffset = origBuffer.arrayOffset();
    byte[] origBytes = origBuffer.array();
    // This can be changed to just .allocate in the future, when Frame supports non-direct
    // byte buffers.
    ByteBuffer paddedBuffer = ByteBuffer.allocateDirect(newWidth * height);
    int paddedOffset = paddedBuffer.arrayOffset();
    byte[] paddedBytes = paddedBuffer.array();
    Arrays.fill(paddedBytes, (byte) 0);
    for (int y = 0; y < height; ++y) {
        int origStride = origOffset + y * width;
        int paddedStride = paddedOffset + y * newWidth;
        System.arraycopy(origBytes, origStride, paddedBytes, paddedStride, width);
    }
    return new Frame.Builder()
            .setImageData(paddedBuffer, newWidth, height, ImageFormat.NV21)
            .setId(metadata.getId())
            .setRotation(metadata.getRotation())
            .setTimestampMillis(metadata.getTimestampMillis())
            .build();
}
Example 9: getFaceCount
import com.google.android.gms.vision.Frame; // import the required package/class
private int getFaceCount(@NonNull Bitmap img) {
    Frame frame = new Frame.Builder()
            .setBitmap(img)
            .build();
    SparseArray<Face> faces = detector.detect(frame);
    log.d("%d faces detected within image %s", faces.size(), img);
    return faces.size();
}
Example 10: performOCR
import com.google.android.gms.vision.Frame; // import the required package/class
private String performOCR(Bitmap bitmap) {
    getImageUri(OcrIdActivity.this, bitmap);
    String textResult = "";
    Frame frame = new Frame.Builder().setBitmap(bitmap).build();
    TextRecognizer textRecognizer = new TextRecognizer.Builder(this).build();
    SparseArray<TextBlock> textblock = textRecognizer.detect(frame);
    List<Text> texto = new ArrayList<>();
    for (int i = 0; i < textblock.size(); i++) {
        TextBlock tb = textblock.get(textblock.keyAt(i));
        Log.e("TEXT", tb.toString());
        texto.addAll(tb.getComponents());
    }
    textRecognizer.release(); // release the recognizer once detection is done
    for (Text t : texto) {
        for (Text t2 : t.getComponents()) {
            textResult += t2.getValue() + " ";
        }
        textResult += "\n";
    }
    if (!textResult.equals("")) {
        bitmap.recycle();
        return textResult;
    } else {
        Toast toast = Toast.makeText(this, R.string.ocr_fail, Toast.LENGTH_SHORT);
        toast.setGravity(Gravity.TOP, 0, 0);
        toast.show();
        return "";
    }
}
Example 11: scanBitmap
import com.google.android.gms.vision.Frame; // import the required package/class
public static void scanBitmap(Context context, Bitmap bitmap, int barcodeFormat, BarcodeRetriever barcodeRetriever) {
    BarcodeDetector detector = new BarcodeDetector.Builder(context)
            .setBarcodeFormats(barcodeFormat)
            .build();
    if (!detector.isOperational()) {
        barcodeRetriever.onRetrievedFailed("Could not set up the detector!");
        return;
    }
    Frame frame = new Frame.Builder().setBitmap(bitmap).build();
    barcodeRetriever.onBitmapScanned(detector.detect(frame));
}
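A hedged call site: the barcode format constants are bit flags and can be combined, so a caller restricted to QR codes and EAN-13 might look like this (retriever is assumed to be an existing BarcodeRetriever implementation):
// Hypothetical usage; formats are bit flags and can be OR-ed together.
scanBitmap(context, bitmap, Barcode.QR_CODE | Barcode.EAN_13, retriever);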
Example 12: setBitmap
import com.google.android.gms.vision.Frame; // import the required package/class
public void setBitmap(Bitmap bitmap) {
    mBitmap = bitmap;
    /* Set up face detection */
    FaceDetector detector = null;
    try {
        detector = new FaceDetector.Builder(getContext())
                .setTrackingEnabled(false)
                .setLandmarkType(FaceDetector.ALL_LANDMARKS)
                .setMode(FaceDetector.ACCURATE_MODE)
                .build();
    } catch (Exception e) {
        e.printStackTrace();
    }
    /**
     * Check whether the FaceDetector is operational. The first time a user runs face
     * detection on a device, Play Services must download a set of small native libraries
     * to handle the request. Although this usually happens before the app starts,
     * handling failure is still necessary.
     * If the FaceDetector is operational, convert the bitmap into a Frame object and
     * pass it to detect() for face analysis. When the analysis completes, release the
     * detector to prevent memory leaks. Finally, call invalidate() to trigger a redraw.
     */
    if (!detector.isOperational()) {
        // Not available yet; the native libraries are still loading
        Log.i("wangqihui", "not initial");
    } else {
        Frame frame = new Frame.Builder().setBitmap(bitmap).build();
        mFaces = detector.detect(frame);
        detector.release();
    }
    invalidate();
}
Example 13: onActivityResult
import com.google.android.gms.vision.Frame; // import the required package/class
@Override
protected void onActivityResult(int codigoSolicitud, int codigoResultado, Intent datos) {
    if (codigoSolicitud == CODIGO_SOLICITUD_TOMAR_IMAGEN) {
        if (codigoResultado == RESULT_OK) {
            // Store the photo in a Bitmap object
            Bitmap foto = (Bitmap) datos.getExtras().get("data");
            // Pass it to the QR detector
            Frame frame = new Frame.Builder().setBitmap(foto).build();
            SparseArray<Barcode> barcodes = detectorQR.detect(frame);
            if (barcodes.size() == 0) {
                // No QR code detected
                Toast.makeText(MainActivity.this, getString(R.string.no_qr_encontrado), Toast.LENGTH_LONG).show();
            } else {
                Barcode codigoQR = barcodes.valueAt(0);
                Toast.makeText(MainActivity.this, codigoQR.rawValue, Toast.LENGTH_LONG).show();
                if (validarMensaje(codigoQR.rawValue)) {
                    Intent intent = new Intent(this, NavegacionActivity.class);
                    intent.putExtra(EXTRA_MENSAJE, codigoQR.rawValue);
                    startActivity(intent);
                } else {
                    // Latitude and longitude not found in the message
                    Toast.makeText(MainActivity.this, getString(R.string.qr_incorrecto), Toast.LENGTH_LONG).show();
                }
            }
        } else if (codigoResultado == RESULT_CANCELED) {
            // The user cancelled the capture
        } else {
            // Image capture failed
        }
    }
}
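detectorQR is a field that must be built before this callback runs. A sketch modeled on Example 11, restricted to QR codes (initializing it in onCreate is an assumption):
// Hypothetical initialization, e.g. in onCreate(), mirroring Example 11.
detectorQR = new BarcodeDetector.Builder(this)
        .setBarcodeFormats(Barcode.QR_CODE)
        .build();
if (!detectorQR.isOperational()) {
    Toast.makeText(this, "QR detector not yet available", Toast.LENGTH_LONG).show();
}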
Example 14: faceDetection
import com.google.android.gms.vision.Frame; // import the required package/class
/***
 * Performs face detection; it is called from other methods to automate the process.
 * @param stroke the width of the line drawn around each face
 * @param color the color of the rectangle marking each face
 * @param activity the current activity
 * @param photo the photo to analyze
 * @return the photo with faces marked, or null on failure
 */
private Bitmap faceDetection(int stroke, int color, Activity activity, Bitmap photo) {
    this.detector = new FaceDetector.Builder(activity)
            .setMode(FaceDetector.ACCURATE_MODE)
            .setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
            .setTrackingEnabled(false)
            .build();
    try {
        if (!this.detector.isOperational()) {
            return null;
        }
        // Wrap the image in a Frame object
        Frame frame = new Frame.Builder()
                .setBitmap(photo)
                .build();
        // Detect all faces in the Frame
        SparseArray<Face> faceArray = detector.detect(frame);
        // Draw on the detected faces
        Bitmap outBitmap = drawOnFace(faceArray, photo, stroke, color);
        // Release the detector
        this.detector.release();
        return (outBitmap != null) ? outBitmap : photo;
    } catch (Exception ev) {
        return null;
    }
}
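drawOnFace() is referenced but not shown. A minimal sketch that outlines each detected face, using only public Face getters; the whole helper is an assumption, not the author's original code:
// Hypothetical helper: draw a rectangle around each detected face.
private Bitmap drawOnFace(SparseArray<Face> faces, Bitmap photo, int stroke, int color) {
    Bitmap out = photo.copy(Bitmap.Config.ARGB_8888, true); // mutable copy to draw on
    Canvas canvas = new Canvas(out);
    Paint paint = new Paint();
    paint.setColor(color);
    paint.setStrokeWidth(stroke);
    paint.setStyle(Paint.Style.STROKE);
    for (int i = 0; i < faces.size(); i++) {
        Face face = faces.valueAt(i);
        float x = face.getPosition().x;
        float y = face.getPosition().y;
        canvas.drawRect(x, y, x + face.getWidth(), y + face.getHeight(), paint);
    }
    return out;
}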
Example 15: padFrameBottom
import com.google.android.gms.vision.Frame; // import the required package/class
/**
 * Creates a new frame based on the original frame, with additional height on the bottom to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameBottom(Frame originalFrame, int newHeight) {
    Frame.Metadata metadata = originalFrame.getMetadata();
    int width = metadata.getWidth();
    int height = metadata.getHeight();
    Log.i(TAG, "Padded image from: " + width + "x" + height + " to " + width + "x" + newHeight);
    ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
    int origOffset = origBuffer.arrayOffset();
    byte[] origBytes = origBuffer.array();
    // This can be changed to just .allocate in the future, when Frame supports non-direct
    // byte buffers.
    ByteBuffer paddedBuffer = ByteBuffer.allocateDirect(width * newHeight);
    int paddedOffset = paddedBuffer.arrayOffset();
    byte[] paddedBytes = paddedBuffer.array();
    Arrays.fill(paddedBytes, (byte) 0);
    // Copy the image content from the original, without bothering to fill in the padded
    // bottom part.
    for (int y = 0; y < height; ++y) {
        int origStride = origOffset + y * width;
        int paddedStride = paddedOffset + y * width;
        System.arraycopy(origBytes, origStride, paddedBytes, paddedStride, width);
    }
    return new Frame.Builder()
            .setImageData(paddedBuffer, width, newHeight, ImageFormat.NV21)
            .setId(metadata.getId())
            .setRotation(metadata.getRotation())
            .setTimestampMillis(metadata.getTimestampMillis())
            .build();
}