本文整理汇总了Java中org.tensorflow.demo.env.ImageUtils.convertYUV420ToARGB8888方法的典型用法代码示例。如果您正苦于以下问题:Java ImageUtils.convertYUV420ToARGB8888方法的具体用法?Java ImageUtils.convertYUV420ToARGB8888怎么用?Java ImageUtils.convertYUV420ToARGB8888使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.tensorflow.demo.env.ImageUtils
的用法示例。
在下文中一共展示了ImageUtils.convertYUV420ToARGB8888方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: onImageAvailable
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
@Override
public void onImageAvailable(final ImageReader reader) {
  // Grabs the latest camera frame, converts it from YUV_420_888 to ARGB_8888,
  // draws the cropped preview bitmap, and runs classification in the background.
  Image image = null;
  try {
    image = reader.acquireLatestImage();
    if (image == null) {
      return;
    }
    // Drop this frame if the previous one is still being classified.
    if (computing) {
      image.close();
      return;
    }
    computing = true;
    Trace.beginSection("imageAvailable");
    final Plane[] planes = image.getPlanes();
    fillBytes(planes, yuvBytes);
    final int yRowStride = planes[0].getRowStride();
    // For YUV_420_888 the U and V planes are guaranteed to share strides.
    final int uvRowStride = planes[1].getRowStride();
    final int uvPixelStride = planes[1].getPixelStride();
    ImageUtils.convertYUV420ToARGB8888(
        yuvBytes[0],
        yuvBytes[1],
        yuvBytes[2],
        rgbBytes,
        previewWidth,
        previewHeight,
        yRowStride,
        uvRowStride,
        uvPixelStride,
        false);
    image.close();
  } catch (final Exception e) {
    if (image != null) {
      image.close();
    }
    // BUG FIX: reset the busy flag on failure. Previously an exception thrown
    // after 'computing = true' left the flag stuck, so every subsequent frame
    // was dropped and the pipeline froze permanently.
    computing = false;
    LOGGER.e(e, "Exception!");
    Trace.endSection();
    return;
  }
  rgbFrameBitmap.setPixels(rgbBytes, 0, previewWidth, 0, 0, previewWidth, previewHeight);
  final Canvas canvas = new Canvas(croppedBitmap);
  canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);
  // For examining the actual TF input.
  if (SAVE_PREVIEW_BITMAP) {
    ImageUtils.saveBitmap(croppedBitmap);
  }
  runInBackground(
      new Runnable() {
        @Override
        public void run() {
          final long startTime = SystemClock.uptimeMillis();
          final List<Classifier.Recognition> results = classifier.recognizeImage(croppedBitmap);
          lastProcessingTimeMs = SystemClock.uptimeMillis() - startTime;
          cropCopyBitmap = Bitmap.createBitmap(croppedBitmap);
          resultsView.setResults(results);
          requestRender();
          // Release the frame gate only after inference completes.
          computing = false;
        }
      });
  Trace.endSection();
}
示例2: onImageAvailable
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
/**
* Callback for Camera2 API
*/
@Override
public void onImageAvailable(final ImageReader reader) {
//We need wait until we have some size from onPreviewSizeChosen
if (previewWidth == 0 || previewHeight == 0) {
return;
}
if (rgbBytes == null) {
rgbBytes = new int[previewWidth * previewHeight];
}
try {
final Image image = reader.acquireLatestImage();
if (image == null) {
return;
}
if (isProcessingFrame) {
image.close();
return;
}
isProcessingFrame = true;
Trace.beginSection("imageAvailable");
final Plane[] planes = image.getPlanes();
fillBytes(planes, yuvBytes);
yRowStride = planes[0].getRowStride();
final int uvRowStride = planes[1].getRowStride();
final int uvPixelStride = planes[1].getPixelStride();
imageConverter =
new Runnable() {
@Override
public void run() {
ImageUtils.convertYUV420ToARGB8888(
yuvBytes[0],
yuvBytes[1],
yuvBytes[2],
previewWidth,
previewHeight,
yRowStride,
uvRowStride,
uvPixelStride,
rgbBytes);
}
};
postInferenceCallback =
new Runnable() {
@Override
public void run() {
image.close();
isProcessingFrame = false;
}
};
processImage();
} catch (final Exception e) {
LOGGER.e(e, "Exception!");
Trace.endSection();
return;
}
Trace.endSection();
}
示例3: onImageAvailable
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
@Override
public void onImageAvailable(final ImageReader reader) {
  // Grabs the latest frame, converts YUV_420_888 to ARGB_8888, and renders
  // the cropped bitmap (optionally saving it for inspecting the TF input).
  imageReader = reader;
  Image image = null;
  try {
    image = reader.acquireLatestImage();
    if (image == null) {
      return;
    }
    // Drop this frame if a save or a classification is already in progress.
    if (savingImage || computing) {
      image.close();
      return;
    }
    savingImage = true;
    Trace.beginSection("imageAvailable");
    final Plane[] planes = image.getPlanes();
    fillBytes(planes, yuvBytes);
    final int yRowStride = planes[0].getRowStride();
    // For YUV_420_888 the U and V planes are guaranteed to share strides.
    final int uvRowStride = planes[1].getRowStride();
    final int uvPixelStride = planes[1].getPixelStride();
    ImageUtils.convertYUV420ToARGB8888(yuvBytes[0], yuvBytes[1], yuvBytes[2], rgbBytes,
        previewWidth, previewHeight, yRowStride, uvRowStride, uvPixelStride, false);
    image.close();
  } catch (final Exception e) {
    if (image != null) {
      image.close();
    }
    // BUG FIX: reset the busy flag on failure. Previously an exception thrown
    // after 'savingImage = true' left the flag stuck, so every subsequent
    // frame was dropped.
    savingImage = false;
    LOGGER.e(e, "Exception!");
    Trace.endSection();
    return;
  }
  rgbFrameBitmap.setPixels(rgbBytes, 0, previewWidth, 0, 0, previewWidth, previewHeight);
  final Canvas canvas = new Canvas(croppedBitmap);
  canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);
  // For examining the actual TF input.
  if (SAVE_PREVIEW_BITMAP) {
    ImageUtils.saveBitmap(croppedBitmap);
  }
  savingImage = false;
  Trace.endSection();
}