本文整理汇总了Java中org.tensorflow.demo.env.ImageUtils.getTransformationMatrix方法的典型用法代码示例。如果您正苦于以下问题:Java ImageUtils.getTransformationMatrix方法的具体用法?Java ImageUtils.getTransformationMatrix怎么用?Java ImageUtils.getTransformationMatrix使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.tensorflow.demo.env.ImageUtils
的用法示例。
在下文中一共展示了ImageUtils.getTransformationMatrix方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: onPreviewSizeChosen
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
// Called once the camera preview size is known; allocates the frame buffers and
// builds the frame-to-model-input transform.
public void onPreviewSizeChosen(final Size size, final int rotation) {
previewWidth = size.getWidth();
previewHeight = size.getHeight();
// Total rotation = camera sensor rotation plus current display rotation.
final Display display = getWindowManager().getDefaultDisplay();
final int screenOrientation = display.getRotation();
LOGGER.i("Sensor orientation: %d, Screen orientation: %d", rotation, screenOrientation);
sensorOrientation = rotation + screenOrientation;
LOGGER.i("Initializing at size %dx%d", previewWidth, previewHeight);
// Pixel buffer for the raw preview frame, plus bitmaps for the full frame and
// the square, model-sized crop.
rgbBytes = new int[previewWidth * previewHeight];
rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Bitmap.Config.ARGB_8888);
croppedBitmap = Bitmap.createBitmap(INPUT_SIZE, INPUT_SIZE, Bitmap.Config.ARGB_8888);
// Matrix mapping the full preview frame into the INPUT_SIZE x INPUT_SIZE crop.
frameToCropTransform =
ImageUtils.getTransformationMatrix(previewWidth, previewHeight, INPUT_SIZE, INPUT_SIZE,
sensorOrientation, MAINTAIN_ASPECT);
// NOTE(review): the inverse transform is computed into a LOCAL variable and then
// discarded. Other variants of this method assign it to a cropToFrameTransform
// field so detection results can be mapped back to frame coordinates — verify
// dropping it here is intentional.
Matrix cropToFrameTransform = new Matrix();
frameToCropTransform.invert(cropToFrameTransform);
// Planes of the YUV camera image are filled in later by the frame callback.
yuvBytes = new byte[3][];
}
示例2: onPreviewSizeChosen
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
@Override
@Override
public void onPreviewSizeChosen(final Size size, final int rotation) {
  // Label text size in raw pixels, converted from dp for this screen density.
  final float labelTextSizePx =
      TypedValue.applyDimension(
          TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
  borderedText = new BorderedText(labelTextSizePx);
  borderedText.setTypeface(Typeface.MONOSPACE);

  // Choose the YOLO graph by the globally selected mode (0 == face, otherwise hand);
  // everything else about detector construction is identical.
  final String yoloModelFile =
      (TensorFlowYoloDetector.selectedModel == 0) ? YOLO_MODEL_FILE_FACE : YOLO_MODEL_FILE_HAND;
  classifier =
      TensorFlowYoloDetector.create(
          getAssets(),
          yoloModelFile,
          YOLO_INPUT_SIZE,
          YOLO_INPUT_NAME,
          YOLO_OUTPUT_NAMES,
          YOLO_BLOCK_SIZE);

  previewWidth = size.getWidth();
  previewHeight = size.getHeight();

  // Total rotation = camera sensor rotation plus current display rotation.
  final Display defaultDisplay = getWindowManager().getDefaultDisplay();
  final int displayRotation = defaultDisplay.getRotation();
  LOGGER.i("Sensor orientation: %d, Screen orientation: %d", rotation, displayRotation);
  sensorOrientation = rotation + displayRotation;

  LOGGER.i("Initializing at size %dx%d", previewWidth, previewHeight);
  // Buffers for the raw preview frame and the square model-input crop.
  rgbBytes = new int[previewWidth * previewHeight];
  rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
  croppedBitmap = Bitmap.createBitmap(YOLO_INPUT_SIZE, YOLO_INPUT_SIZE, Config.ARGB_8888);

  // Transform from the full frame into the model input, and its inverse for
  // mapping detections back to frame coordinates.
  frameToCropTransform =
      ImageUtils.getTransformationMatrix(
          previewWidth, previewHeight,
          YOLO_INPUT_SIZE, YOLO_INPUT_SIZE,
          sensorOrientation, MAINTAIN_ASPECT);
  cropToFrameTransform = new Matrix();
  frameToCropTransform.invert(cropToFrameTransform);

  // YUV planes are populated later by the frame callback.
  yuvBytes = new byte[3][];
}
示例3: draw
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
/** Renders every tracked recognition box, with its confidence label, onto the canvas. */
public synchronized void draw(final Canvas canvas) {
  // TODO(andrewharp): This may not work for non-90 deg rotations.
  // Scale chosen so the rotated frame fits entirely within the canvas.
  final float scale =
      Math.min(canvas.getWidth() / (float) frameHeight, canvas.getHeight() / (float) frameWidth);
  frameToCanvasMatrix =
      ImageUtils.getTransformationMatrix(
          frameWidth,
          frameHeight,
          (int) (scale * frameHeight),
          (int) (scale * frameWidth),
          sensorOrientation,
          false);

  for (final TrackedRecognition recognition : trackedObjects) {
    // Prefer the live tracker position; fall back to the static detection box.
    final RectF box;
    if (objectTracker != null) {
      box = recognition.trackedObject.getTrackedPositionInPreviewFrame();
    } else {
      box = new RectF(recognition.location);
    }
    getFrameToCanvasMatrix().mapRect(box);
    boxPaint.setColor(recognition.color);

    final float cornerRadius = Math.min(box.width(), box.height()) / 8.0f;
    canvas.drawRoundRect(box, cornerRadius, cornerRadius, boxPaint);

    // Label is "<title> <confidence>" when a title exists, else just the confidence.
    final String label;
    if (TextUtils.isEmpty(recognition.title)) {
      label = String.format("%.2f", recognition.detectionConfidence);
    } else {
      label = String.format("%s %.2f", recognition.title, recognition.detectionConfidence);
    }
    borderedText.drawText(canvas, box.left + cornerRadius, box.bottom, label);
  }
}
示例4: draw
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
/** Draws all tracked objects onto the canvas; no-op until the tracker exists. */
public synchronized void draw(final Canvas canvas) {
  if (objectTracker == null) {
    return;
  }

  // TODO(andrewharp): This may not work for non-90 deg rotations.
  // Scale chosen so the rotated frame fits entirely within the canvas.
  final float scale =
      Math.min(canvas.getWidth() / (float) frameHeight, canvas.getHeight() / (float) frameWidth);
  frameToCanvasMatrix =
      ImageUtils.getTransformationMatrix(
          frameWidth,
          frameHeight,
          (int) (scale * frameHeight),
          (int) (scale * frameWidth),
          sensorOrientation,
          false);

  for (final TrackedRecognition recognition : trackedObjects) {
    final RectF position = recognition.trackedObject.getTrackedPositionInPreviewFrame();
    getFrameToCanvasMatrix().mapRect(position);
    boxPaint.setColor(recognition.color);

    final float cornerRadius = Math.min(position.width(), position.height()) / 8.0f;
    canvas.drawRoundRect(position, cornerRadius, cornerRadius, boxPaint);

    // Label is "<title> <confidence>" when a title exists, else just the confidence.
    final String label =
        TextUtils.isEmpty(recognition.title)
            ? String.format("%.2f", recognition.detectionConfidence)
            : String.format("%s %.2f", recognition.title, recognition.detectionConfidence);
    borderedText.drawText(canvas, position.left + cornerRadius, position.bottom, label);
  }
}
示例5: onPreviewSizeChosen
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
@Override
@Override
public void onPreviewSizeChosen(final Size size, final int rotation) {
  // Label text size in raw pixels, converted from dp for this screen density.
  final float textPx =
      TypedValue.applyDimension(
          TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
  borderedText = new BorderedText(textPx);
  borderedText.setTypeface(Typeface.MONOSPACE);

  // Build the image classifier from the bundled model and label assets.
  classifier =
      TensorFlowImageClassifier.create(
          getAssets(),
          MODEL_FILE,
          LABEL_FILE,
          INPUT_SIZE,
          IMAGE_MEAN,
          IMAGE_STD,
          INPUT_NAME,
          OUTPUT_NAME);
  resultsView = (ResultsView) findViewById(R.id.results);

  previewWidth = size.getWidth();
  previewHeight = size.getHeight();

  // Total rotation = camera sensor rotation plus current display rotation.
  final int displayRotation = getWindowManager().getDefaultDisplay().getRotation();
  LOGGER.i("Sensor orientation: %d, Screen orientation: %d", rotation, displayRotation);
  sensorOrientation = rotation + displayRotation;

  LOGGER.i("Initializing at size %dx%d", previewWidth, previewHeight);
  // Buffers for the raw preview frame and the square model-input crop.
  rgbBytes = new int[previewWidth * previewHeight];
  rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
  croppedBitmap = Bitmap.createBitmap(INPUT_SIZE, INPUT_SIZE, Config.ARGB_8888);

  // Transform from the full frame into the model input, and its inverse for
  // mapping results back to frame coordinates.
  frameToCropTransform =
      ImageUtils.getTransformationMatrix(
          previewWidth, previewHeight,
          INPUT_SIZE, INPUT_SIZE,
          sensorOrientation, MAINTAIN_ASPECT);
  cropToFrameTransform = new Matrix();
  frameToCropTransform.invert(cropToFrameTransform);

  // YUV planes are populated later by the frame callback.
  yuvBytes = new byte[3][];

  // Overlay debug information on every rendered frame.
  addCallback(
      new DrawCallback() {
        @Override
        public void drawCallback(final Canvas canvas) {
          renderDebug(canvas);
        }
      });
}
示例6: onPreviewSizeChosen
import org.tensorflow.demo.env.ImageUtils; //导入方法依赖的package包/类
@Override
@Override
public void onPreviewSizeChosen(final Size size, final int rotation) {
  // Label text size in raw pixels, converted from dp for this screen density.
  final float textPx =
      TypedValue.applyDimension(
          TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics());
  borderedText = new BorderedText(textPx);
  borderedText.setTypeface(Typeface.MONOSPACE);

  // Build the image classifier from the bundled model and label assets.
  classifier =
      TensorFlowImageClassifier.create(
          getAssets(),
          MODEL_FILE,
          LABEL_FILE,
          INPUT_SIZE,
          IMAGE_MEAN,
          IMAGE_STD,
          INPUT_NAME,
          OUTPUT_NAME);

  previewWidth = size.getWidth();
  previewHeight = size.getHeight();

  // Total rotation = camera sensor rotation plus current display rotation.
  final int displayRotation = getWindowManager().getDefaultDisplay().getRotation();
  LOGGER.i("Sensor orientation: %d, Screen orientation: %d", rotation, displayRotation);
  sensorOrientation = rotation + displayRotation;

  LOGGER.i("Initializing at size %dx%d", previewWidth, previewHeight);
  // Bitmaps for the full preview frame and the square model-input crop.
  rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Config.ARGB_8888);
  croppedBitmap = Bitmap.createBitmap(INPUT_SIZE, INPUT_SIZE, Config.ARGB_8888);

  // Transform from the full frame into the model input, and its inverse for
  // mapping results back to frame coordinates.
  frameToCropTransform =
      ImageUtils.getTransformationMatrix(
          previewWidth, previewHeight,
          INPUT_SIZE, INPUT_SIZE,
          sensorOrientation, MAINTAIN_ASPECT);
  cropToFrameTransform = new Matrix();
  frameToCropTransform.invert(cropToFrameTransform);

  // Overlay debug information on every rendered frame.
  addCallback(
      new DrawCallback() {
        @Override
        public void drawCallback(final Canvas canvas) {
          renderDebug(canvas);
        }
      });
}