当前位置: 首页>>代码示例>>Java>>正文


Java ImageFormat类代码示例

本文整理汇总了Java中android.graphics.ImageFormat的典型用法代码示例。如果您正苦于以下问题:Java ImageFormat类的具体用法?Java ImageFormat怎么用?Java ImageFormat使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


ImageFormat类属于android.graphics包,在下文中一共展示了ImageFormat类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createFromNV21

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Converts an NV21 camera preview frame to a cropped JPEG byte array.
 *
 * @param data           raw NV21 preview bytes
 * @param width          source frame width in pixels
 * @param height         source frame height in pixels
 * @param rotation       clockwise rotation to apply, in degrees (90-degree steps expected)
 * @param croppingRect   region (in rotated-frame coordinates) to compress
 * @param flipHorizontal whether to mirror the frame while rotating
 * @return JPEG-encoded bytes of the cropped region
 * @throws IOException if rotation or compression fails
 */
public static byte[] createFromNV21(@NonNull final byte[] data,
                                    final int width,
                                    final int height,
                                    int rotation,
                                    final Rect croppingRect,
                                    final boolean flipHorizontal)
    throws IOException
{
  byte[] rotated = rotateNV21(data, width, height, rotation, flipHorizontal);
  // A 90°/270° rotation swaps the frame's width and height.
  final int rotatedWidth  = rotation % 180 > 0 ? height : width;
  final int rotatedHeight = rotation % 180 > 0 ? width  : height;
  YuvImage previewImage = new YuvImage(rotated, ImageFormat.NV21,
                                       rotatedWidth, rotatedHeight, null);

  // try-with-resources guarantees the stream is released even if compression throws.
  try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
    previewImage.compressToJpeg(croppingRect, 80, outputStream);
    return outputStream.toByteArray();
  }
}
 
开发者ID:XecureIT,项目名称:PeSanKita-android,代码行数:21,代码来源:BitmapUtil.java

示例2: configureCamera

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Applies the preview format, focus mode, flash capability check and preview
 * size to the camera. Shows a toast instead of crashing if the driver rejects
 * the configuration.
 */
private void configureCamera() {
    final Camera.Parameters parameters = camera.getParameters();
    try {
        parameters.setPreviewFormat(ImageFormat.NV21);

        // Prefer continuous video focus when the device supports it.
        final List<String> supportedFocus = parameters.getSupportedFocusModes();
        final boolean hasContinuousFocus = supportedFocus != null
                && supportedFocus.contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
        if (hasContinuousFocus) {
            parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
        }

        // Record whether a torch is available for later flash toggling.
        final List<String> supportedFlash = parameters.getSupportedFlashModes();
        cameraFlashIsSupported = supportedFlash != null
                && supportedFlash.contains(Camera.Parameters.FLASH_MODE_TORCH);

        final Camera.Size bestPreviewSize = getBestPreviewSize();
        photoProcessor.setPreviewSize(bestPreviewSize.width, bestPreviewSize.height);
        parameters.setPreviewSize(bestPreviewSize.width, bestPreviewSize.height);
        camera.setParameters(parameters);
    } catch (RuntimeException exception) {
        // Best-effort: a device that rejects these parameters just reports failure.
        Toast.makeText(getContext(), R.string.camera_configuration_failed, Toast.LENGTH_SHORT).show();
    }
}
 
开发者ID:humaniq,项目名称:humaniq-android,代码行数:26,代码来源:PhotoFragment.java

示例3: setParams

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Configures the camera for NV21 preview frames and portrait (90°) orientation,
 * then commits the parameters back to the camera.
 *
 * The original version iterated over getSupportedPreviewFormats() and
 * getSupportedPreviewSizes() purely to log them, but the log calls were
 * commented out, leaving two loops with no effect — removed here.
 */
private void setParams() {
    Camera.Parameters parameters = camera.getParameters();
    parameters.setPreviewFormat(ImageFormat.NV21);
    // Rotate both the on-screen preview and the captured picture to portrait.
    camera.setDisplayOrientation(90);
    parameters.setRotation(90);
    camera.setParameters(parameters);
}
 
开发者ID:FreeSunny,项目名称:Amazing,代码行数:21,代码来源:CameraActivity.java

示例4: saveFace

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Asynchronously crops the detected face rectangle (x, y) .. (r, b) out of the
 * latest NV21 preview frame and saves it as a bitmap file.
 *
 * @param x left edge of the face rect (may be negative; clamped to the frame)
 * @param y top edge of the face rect (may be negative; clamped to the frame)
 * @param r right edge of the face rect
 * @param b bottom edge of the face rect
 */
private void saveFace(final int x, final int y, final int r, final int b) {
    if (DEBUG) Log.d(TAG, "[saveFace()]");
    new Thread(new Runnable() {
        @Override
        public void run() {
            // Snapshot the shared preview buffer while holding its lock.
            synchronized (mVideoSource) {
                mImageYuv = new YuvImage(mVideoSource, ImageFormat.NV21, CameraWrapper.IMAGE_WIDTH, CameraWrapper.IMAGE_HEIGHT, null);
            }
            ByteArrayOutputStream stream = new ByteArrayOutputStream();
            mImageYuv.compressToJpeg(new Rect(0, 0, CameraWrapper.IMAGE_WIDTH, CameraWrapper.IMAGE_HEIGHT), 100, stream);
            Bitmap bitmap = BitmapFactory.decodeByteArray(stream.toByteArray(), 0, stream.size());

            // Clamp the face rect to the frame bounds before cropping.
            int left = Math.max(x, 0);
            int top  = Math.max(y, 0);
            // Fix: width must be clamped against IMAGE_WIDTH and height against
            // IMAGE_HEIGHT (the original mixed the two up), measured from the
            // clamped left/top corner.
            int cropWidth  = Math.min(r, CameraWrapper.IMAGE_WIDTH  - 1) - left;
            int cropHeight = Math.min(b, CameraWrapper.IMAGE_HEIGHT - 1) - top;

            mImage = Bitmap.createBitmap(bitmap, left, top, cropWidth, cropHeight, null, false);
            // Fix: check for null BEFORE dereferencing mImage (the original logged
            // mImage.getHeight()/getWidth() ahead of its null check).
            if (null != mImage) {
                if (DEBUG) Log.d(TAG, "[saveFace()] x:" + x + "  y:" + y + "\n" +
                        "[saveFace()] h:" + mImage.getHeight() + "  w:" + mImage.getWidth());
                FaceUtil.saveBitmapToFile(mImage);
            }
        }
    }).start();
}
 
开发者ID:Yee-chen,项目名称:seeta4Android,代码行数:26,代码来源:FaceDetector.java

示例5: createPreviewBuffer

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Creates one buffer for the camera preview callback.  The size of the buffer is based off of
 * the camera preview size and the format of the camera image.
 *
 * @param previewSize the preview dimensions the camera is configured for
 * @return a new preview buffer of the appropriate size for the current camera settings
 */
private byte[] createPreviewBuffer(Size previewSize) {
    int bitsPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.NV21);
    // Fix: widen to long BEFORE multiplying — the original multiplied three ints
    // and only then assigned to long, so very large previews could overflow.
    long sizeInBits = (long) previewSize.getHeight() * previewSize.getWidth() * bitsPerPixel;
    // +1 guards against any rounding slack required by the camera HAL.
    int bufferSize = (int) Math.ceil(sizeInBits / 8.0d) + 1;

    //
    // NOTICE: This code only works when using play services v. 8.1 or higher.
    //

    // Creating the byte array this way and wrapping it, as opposed to using .allocate(),
    // should guarantee that there will be an array to work with.
    byte[] byteArray = new byte[bufferSize];
    ByteBuffer buffer = ByteBuffer.wrap(byteArray);
    if (!buffer.hasArray() || (buffer.array() != byteArray)) {
        // I don't think that this will ever happen.  But if it does, then we wouldn't be
        // passing the preview content to the underlying detector later.
        throw new IllegalStateException("Failed to create valid buffer for camera source.");
    }

    // Register the buffer so the frame callback can map it back to its ByteBuffer.
    mBytesToByteBuffer.put(byteArray, buffer);
    return byteArray;
}
 
开发者ID:victoraldir,项目名称:BuddyBook,代码行数:29,代码来源:CameraSource.java

示例6: decodeToBitMap

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Decodes an NV21 preview frame into a Bitmap by compressing it to an
 * in-memory JPEG and decoding that.
 *
 * @param data NV21 bytes sized for PREVIEW_WIDTH x PREVIEW_HEIGHT
 * @return the decoded bitmap, or null if decoding failed
 */
private Bitmap decodeToBitMap(byte[] data) {
	try {
		// Note: new YuvImage(...) never returns null, so the original's
		// `if (image != null)` guard was dead code and has been removed.
		YuvImage image = new YuvImage(data, ImageFormat.NV21, PREVIEW_WIDTH,
				PREVIEW_HEIGHT, null);
		ByteArrayOutputStream stream = new ByteArrayOutputStream();
		image.compressToJpeg(new Rect(0, 0, PREVIEW_WIDTH, PREVIEW_HEIGHT),
				80, stream);
		Bitmap bmp = BitmapFactory.decodeByteArray(
				stream.toByteArray(), 0, stream.size());
		stream.close();
		return bmp;
	} catch (Exception ex) {
		// Fix: log the throwable itself — ex.getMessage() may be null and
		// discards the stack trace.
		Log.e("Sys", "Error decoding preview frame", ex);
	}
	return null;
}
 
开发者ID:JosephPai,项目名称:WithYou,代码行数:19,代码来源:VideoVerify.java

示例7: createPreviewBuffer

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Creates one buffer for the camera preview callback.  The size of the buffer is based off of
 * the camera preview size and the format of the camera image.
 *
 * @param previewSize the preview dimensions the camera is configured for
 * @return a new preview buffer of the appropriate size for the current camera settings
 */
private byte[] createPreviewBuffer(Size previewSize) {
    int bitsPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.NV21);
    // Fix: widen to long BEFORE multiplying — the original multiplied three ints
    // and only then assigned to long, so very large previews could overflow.
    long sizeInBits = (long) previewSize.getHeight() * previewSize.getWidth() * bitsPerPixel;
    // +1 guards against any rounding slack required by the camera HAL.
    int bufferSize = (int) Math.ceil(sizeInBits / 8.0d) + 1;
    //
    // NOTICE: This code only works when using play services v. 8.1 or higher.
    //
    // Creating the byte array this way and wrapping it, as opposed to using .allocate(),
    // should guarantee that there will be an array to work with.
    byte[] byteArray = new byte[bufferSize];
    ByteBuffer buffer = ByteBuffer.wrap(byteArray);
    if (!buffer.hasArray() || (buffer.array() != byteArray)) {
        // I don't think that this will ever happen.  But if it does, then we wouldn't be
        // passing the preview content to the underlying detector later.
        throw new IllegalStateException("Failed to create valid buffer for camera source.");
    }
    // Register the buffer so the frame callback can map it back to its ByteBuffer.
    mBytesToByteBuffer.put(byteArray, buffer);
    return byteArray;
}
 
开发者ID:EzequielAdrianM,项目名称:Camera2Vision,代码行数:26,代码来源:CameraSource.java

示例8: rgba

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Converts the captured YUV preview frame to an RGBA Mat.
 * Only the NV21 and YV12 preview formats are supported.
 *
 * @return the current frame as a 4-channel RGBA Mat
 */
@Override
public Mat rgba() {
    switch (mPreviewFormat) {
        case ImageFormat.NV21:
            Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2RGBA_NV21, 4);
            break;
        case ImageFormat.YV12:
            // COLOR_YUV2RGBA_YV12 produces inverted colors, so convert via I420.
            Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2RGB_I420, 4);
            break;
        default:
            throw new IllegalArgumentException("Preview Format can be NV21 or YV12");
    }
    return mRgba;
}
 
开发者ID:typer9527,项目名称:FaceDetectDemo,代码行数:12,代码来源:JavaCameraView.java

示例9: extract

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Extracts the Y-plane from a YUV_420_888 image into a new IntensityPlane.
 * The plane data is copied, so the source image may be closed afterwards.
 *
 * @param img the captured camera image to read from
 * @throws IllegalArgumentException if the image is not in the YUV_420_888 format
 */
@NonNull
public static IntensityPlane extract(@NonNull Image img) {
    if (img.getFormat() != ImageFormat.YUV_420_888) {
        throw new IllegalArgumentException("image format must be YUV_420_888");
    }

    // Plane 0 of a YUV_420_888 image is the luminance (Y) plane.
    final Image.Plane luminancePlane = img.getPlanes()[0];
    final ByteBuffer buffer = luminancePlane.getBuffer();
    final byte[] yPlane = new byte[buffer.remaining()];
    buffer.get(yPlane);

    return new IntensityPlane(img.getWidth(), img.getHeight(), yPlane, luminancePlane.getRowStride());
}
 
开发者ID:BioID-GmbH,项目名称:BWS-Android,代码行数:23,代码来源:IntensityPlane.java

示例10: setupPreviewSizeAndImageReader

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Lazily initializes the preview size and the ImageReader that receives
 * YUV_420_888 preview frames, forwarding each captured frame (plus its
 * display rotation) to the presenter.
 */
private void setupPreviewSizeAndImageReader() {
    if (previewSize == null) {
        previewSize = cameraHelper.selectPreviewSize(openCamera);
    }

    if (imageReader == null) {
        int maxImages = 2;  // should be at least 2 according to ImageReader.acquireLatestImage() documentation
        imageReader = ImageReader.newInstance(previewSize.getWidth(), previewSize.getHeight(), ImageFormat.YUV_420_888, maxImages);
        imageReader.setOnImageAvailableListener(new ImageReader.OnImageAvailableListener() {
            @Override
            public void onImageAvailable(ImageReader reader) {
                // acquireLatestImage() may return null when no new frame is ready.
                Image img = reader.acquireLatestImage();
                if (img != null) {

                    // Make a in memory copy of the image to close the image from the reader as soon as possible.
                    // This helps the thread running the preview staying up to date.
                    IntensityPlane imgCopy = IntensityPlane.extract(img);
                    img.close();

                    int imageRotation = cameraHelper.getImageRotation(openCamera, getRelativeDisplayRotation());

                    presenter.onImageCaptured(imgCopy, imageRotation);
                }
            }
        }, null);  // null handler: callback runs on the calling thread's looper
    }
}
 
开发者ID:BioID-GmbH,项目名称:BWS-Android,代码行数:31,代码来源:FacialRecognitionFragment.java

示例11: startStream

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Starts streaming to the given URL. Must be called after prepareVideo and/or
 * prepareAudio. This method overrides the resolution of startPreview with the
 * resolution set in prepareVideo; if you never called startPreview, it starts
 * the preview for you at that resolution.
 *
 * @param url stream endpoint of the form protocol://ip:port/application/streamName, e.g.:
 *
 * RTSP: rtsp://192.168.1.1:1935/live/pedroSG94
 * RTSPS: rtsps://192.168.1.1:1935/live/pedroSG94
 * RTMP: rtmp://192.168.1.1:1935/live/pedroSG94
 * RTMPS: rtmps://192.168.1.1:1935/live/pedroSG94
 */
public void startStream(String url) {
  // OpenGL rendering path requires API 18+ (MediaCodec input surface support).
  if (openGlView != null && Build.VERSION.SDK_INT >= 18) {
    // A 90°/270° encoder rotation swaps the encoder's width and height.
    if (videoEncoder.getRotation() == 90 || videoEncoder.getRotation() == 270) {
      openGlView.setEncoderSize(videoEncoder.getHeight(), videoEncoder.getWidth());
    } else {
      openGlView.setEncoderSize(videoEncoder.getWidth(), videoEncoder.getHeight());
    }
    openGlView.startGLThread();
    openGlView.addMediaCodecSurface(videoEncoder.getInputSurface());
    cameraManager =
        new Camera1ApiManager(openGlView.getSurfaceTexture(), openGlView.getContext());
    cameraManager.prepareCamera(videoEncoder.getWidth(), videoEncoder.getHeight(),
        videoEncoder.getFps(), ImageFormat.NV21);
  }
  // NOTE(review): cameraManager is used below even when openGlView is null —
  // presumably it is initialized elsewhere (e.g. by startPreview); verify.
  startStreamRtp(url);
  videoEncoder.start();
  audioEncoder.start();
  cameraManager.start();
  microphoneManager.start();
  streaming = true;
  onPreview = true;
}
 
开发者ID:pedroSG94,项目名称:rtmp-rtsp-stream-client-java,代码行数:36,代码来源:Camera1Base.java

示例12: startPreview

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Builds and starts a camera2 preview capture session rendered onto the
 * stored SurfaceTexture, sized to best fit the TextureView.
 */
private void startPreview() {
    try {
        CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId);
        // NOTE(review): characteristics.get(...) can return null for some HALs;
        // confirm before dereferencing configMap below.
        StreamConfigurationMap configMap = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);

        // Pick the output size that best matches the TextureView's dimensions.
        Size previewSize = Util.getPreferredPreviewSize(
                configMap.getOutputSizes(ImageFormat.JPEG),textureView.getWidth(), textureView.getHeight());

        surfaceTexture.setDefaultBufferSize(previewSize.getWidth(),previewSize.getHeight());
        Surface surface = new Surface(surfaceTexture);
        captureBuilder = cameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
        captureBuilder.addTarget(surface);

        cameraDevice.createCaptureSession(Arrays.asList(surface),captureSessionCallback,backgroundHandler);
    } catch (CameraAccessException e) {
        // NOTE(review): prefer Log.e with the throwable over printStackTrace().
        e.printStackTrace();
    }
}
 
开发者ID:lazyparser,项目名称:xbot_head,代码行数:19,代码来源:CommentaryFragment.java

示例13: setDefaultCameraParameters

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Applies sensible default parameters (JPEG picture format, best picture and
 * preview sizes, auto focus/flash/scene where supported, no color effect,
 * sensor-matched rotation) and commits them to the camera.
 *
 * @param camera     the opened camera to configure
 * @param cameraInfo info describing the camera's facing and sensor orientation
 */
public void setDefaultCameraParameters(Camera camera, Camera.CameraInfo cameraInfo) {
    Camera.Parameters parameters = camera.getParameters();

    parameters.setPictureFormat(ImageFormat.JPEG);

    List<Camera.Size> supportedSizes = parameters.getSupportedPictureSizes();
    Camera.Size pictureSize = getBestSize(supportedSizes, 0);
    parameters.setPictureSize(pictureSize.width, pictureSize.height);

    // Match the preview aspect ratio to the chosen picture size.
    float whRatio = (float) pictureSize.width / pictureSize.height;

    List<Camera.Size> previewSupportedSizes = parameters.getSupportedPreviewSizes();
    Camera.Size previewSize = getBestSize(previewSupportedSizes, whRatio);
    parameters.setPreviewSize(previewSize.width, previewSize.height);

    List<String> supportedFocusModes = parameters.getSupportedFocusModes();
    boolean hasAutoFocus = supportedFocusModes != null && supportedFocusModes.contains(Camera.Parameters.FOCUS_MODE_AUTO);

    if (hasAutoFocus) {
        parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_AUTO);
    }

    // Front cameras commonly lack a flash; only enable auto flash on the back camera.
    if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
        parameters.setFlashMode(Camera.Parameters.FLASH_MODE_AUTO);
    }

    List<String> supportedSceneModes = parameters.getSupportedSceneModes();
    // Fix: the original tested supportedFocusModes here, so SCENE_MODE_AUTO was
    // gated on the wrong capability list; scene support must be checked against
    // the scene-mode list itself.
    boolean hasAutoScene = supportedSceneModes != null && supportedSceneModes.contains(Camera.Parameters.SCENE_MODE_AUTO);
    if (hasAutoScene) {
        parameters.setSceneMode(Camera.Parameters.SCENE_MODE_AUTO);
    }

    parameters.setColorEffect(Camera.Parameters.EFFECT_NONE);

    // Rotate stored pictures to match the sensor's natural orientation.
    parameters.setRotation(cameraInfo.orientation);

    camera.setParameters(parameters);
}
 
开发者ID:cuonghuynhvan,项目名称:react-native-camera-android-simple,代码行数:40,代码来源:CameraManager.java

示例14: setPreviewFormat

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Sets the camera preview-callback frame format to NV21 and commits the
 * parameters to the camera.
 *
 * @param camera     the camera to configure
 * @param parameters the parameter set to modify and apply
 * @throws CameraNotSupportException if the device rejects the NV21 preview format
 */
public static void setPreviewFormat(Camera camera, Camera.Parameters parameters) throws CameraNotSupportException{
    // Set the image format delivered to the preview callback.
    try {
        parameters.setPreviewFormat(ImageFormat.NV21);
        camera.setParameters(parameters);
    } catch (Exception e) {
        // NOTE(review): the original cause is dropped here; consider chaining `e`
        // if CameraNotSupportException has a cause-accepting constructor.
        throw new CameraNotSupportException();
    }
}
 
开发者ID:wuyisheng,项目名称:libRtmp,代码行数:10,代码来源:AndroidUntil.java

示例15: initCamera

import android.graphics.ImageFormat; //导入依赖的package包/类
/**
 * Configures the opened camera for NV21 preview with triple callback buffers
 * sized for the best-matching resolution, then starts the preview.
 */
private void initCamera() {
    if (this.mCamera != null) {
        this.mCameraParamters = this.mCamera.getParameters();
        this.mCameraParamters.setPreviewFormat(ImageFormat.NV21);
        this.mCameraParamters.setFlashMode("off");
        this.mCameraParamters.setWhiteBalance(Camera.Parameters.WHITE_BALANCE_AUTO);
        this.mCameraParamters.setSceneMode(Camera.Parameters.SCENE_MODE_AUTO);
        Point p = MyApplication.getBestCameraResolution(this.mCameraParamters, MyApplication.getScreenMetrics());
        IMAGE_WIDTH = p.x;
        IMAGE_HEIGHT = p.y;
        this.mCameraParamters.setPreviewSize(IMAGE_WIDTH, IMAGE_HEIGHT);
        mCameraPreviewCallback = new CameraPreviewCallback();
        // NV21 uses 12 bits per pixel, so each frame needs width * height * 3 / 2 bytes.
        // Three buffers keep the preview pipeline from stalling while frames are processed.
        int bufferSize = IMAGE_WIDTH * IMAGE_HEIGHT * 3 / 2;
        for (int i = 0; i < 3; i++) {
            mCamera.addCallbackBuffer(new byte[bufferSize]);
        }
        mCamera.setPreviewCallbackWithBuffer(mCameraPreviewCallback);
        // Fix: getSupportedFocusModes() can return null on some devices (the original
        // dereferenced it unchecked), and the SDK constant replaces the raw
        // "continuous-video" string (same value, no behavior change).
        List<String> focusModes = this.mCameraParamters.getSupportedFocusModes();
        if (focusModes != null && focusModes.contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
            this.mCameraParamters
                    .setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
        }
        this.mCamera.setParameters(this.mCameraParamters);
        this.mCamera.startPreview();

        this.mIsPreviewing = true;
    }
}
 
开发者ID:Yee-chen,项目名称:seeta4Android,代码行数:31,代码来源:CameraWrapper.java


注:本文中的android.graphics.ImageFormat类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。