This article collects typical usage examples of the Java method android.graphics.Bitmap.getPixels. If you have been wondering what Bitmap.getPixels does, how to call it, or what real-world uses look like, the curated samples below should help. You can also read further about the enclosing class, android.graphics.Bitmap.
Below are 15 code examples of Bitmap.getPixels, sorted by popularity by default.
Example 1: process
import android.graphics.Bitmap; // import the package/class this method depends on
@Override
public void process(Bitmap bitmap) {
final int w = bitmap.getWidth();
final int h = bitmap.getHeight();
final int[] pixels = new int[w * h];
/*
* Using {@link Bitmap#getPixels} reduces the number of Java-JNI calls and passes all the image
* pixels in one call. This allows us to edit all the data in the Java world and then hand back
* the final result later.
*/
bitmap.getPixels(pixels, 0, w, 0, 0, w, h);
for (int x = 0; x < w; x++) {
for (int y = 0; y < h; y++) {
final int offset = y * w + x;
pixels[offset] = SlowGreyScalePostprocessor.getGreyColor(pixels[offset]);
}
}
bitmap.setPixels(pixels, 0, w, 0, 0, w, h);
}
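The SlowGreyScalePostprocessor.getGreyColor helper is not part of this excerpt; a minimal sketch of what such a helper might look like, assuming a standard integer luma weighting (not necessarily the original implementation):
static int getGreyColor(int color) {
    // Keep the original alpha and replace R/G/B with a single luma value.
    final int alpha = color & 0xFF000000;
    final int r = (color >> 16) & 0xFF;
    final int g = (color >> 8) & 0xFF;
    final int b = color & 0xFF;
    final int luma = (r * 299 + g * 587 + b * 114) / 1000;
    return alpha | (luma << 16) | (luma << 8) | luma;
}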
Example 2: getYUV420sp
import android.graphics.Bitmap; // import the package/class this method depends on
/**
 * Convert a Bitmap's ARGB pixels into YUV420sp (NV21) bytes.
 *
 * @param inputWidth  width of the bitmap
 * @param inputHeight height of the bitmap
 * @param scaled      the bitmap to convert; it is recycled before returning
 * @return the shared YUV byte buffer (reused across calls)
 */
public static byte[] getYUV420sp(int inputWidth, int inputHeight, Bitmap scaled) {
int[] argb = new int[inputWidth * inputHeight];
scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
/*
 * Round the dimensions up to even numbers; otherwise the buffer allocated for
 * YUV420 encoding may be too small and overflow.
 */
int requiredWidth = inputWidth % 2 == 0 ? inputWidth : inputWidth + 1;
int requiredHeight = inputHeight % 2 == 0 ? inputHeight : inputHeight + 1;
int byteLength = requiredWidth * requiredHeight * 3 / 2;
if (yuvs == null || yuvs.length < byteLength) {
yuvs = new byte[byteLength];
} else {
Arrays.fill(yuvs, (byte) 0);
}
encodeYUV420SP(yuvs, argb, inputWidth, inputHeight);
scaled.recycle();
return yuvs;
}
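The encodeYUV420SP helper is referenced but not shown here; the sketch below is a widely used ARGB-to-NV21 conversion and is only an assumption about what the project's helper does:
static void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            int pixel = argb[j * width + i];
            int r = (pixel >> 16) & 0xFF;
            int g = (pixel >> 8) & 0xFF;
            int b = pixel & 0xFF;
            // RGB -> YUV (BT.601 integer approximation).
            int y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
            int u = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
            int v = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
            yuv420sp[yIndex++] = (byte) Math.max(0, Math.min(255, y));
            // One V/U pair per 2x2 block; NV21 interleaves V before U.
            if (j % 2 == 0 && i % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) Math.max(0, Math.min(255, v));
                yuv420sp[uvIndex++] = (byte) Math.max(0, Math.min(255, u));
            }
        }
    }
}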
Example 3: isBlackBitmap
import android.graphics.Bitmap; // import the package/class this method depends on
public static boolean isBlackBitmap(Bitmap bitmap) throws Throwable {
if (bitmap == null || bitmap.isRecycled()) {
return true;
}
int[] pixels = new int[bitmap.getWidth() * bitmap.getHeight()];
bitmap.getPixels(pixels, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
for (int pixel : pixels) {
// A non-zero RGB value (alpha masked off) means the bitmap is not all black.
if ((pixel & ViewCompat.MEASURED_SIZE_MASK) != 0) {
return false;
}
}
return true;
}
Example 4: convertGreyImg
import android.graphics.Bitmap; // import the package/class this method depends on
/**
 * Convert a color bitmap to a greyscale bitmap.
 *
 * @param img source Bitmap
 * @return the converted greyscale bitmap
 */
public static Bitmap convertGreyImg(Bitmap img) {
int width = img.getWidth(); // bitmap width
int height = img.getHeight(); // bitmap height
int[] pixels = new int[width * height]; // pixel buffer sized to the bitmap
img.getPixels(pixels, 0, width, 0, 0, width, height);
int alpha = 0xFF << 24;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
int grey = pixels[width * i + j];
int red = ((grey & 0x00FF0000) >> 16);
int green = ((grey & 0x0000FF00) >> 8);
int blue = (grey & 0x000000FF);
grey = (int) ((float) red * 0.3 + (float) green * 0.59 + (float) blue * 0.11);
grey = alpha | (grey << 16) | (grey << 8) | grey;
pixels[width * i + j] = grey;
}
}
Bitmap result = Bitmap.createBitmap(width, height, Config.RGB_565);
result.setPixels(pixels, 0, width, 0, 0, width, height);
return result;
}
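A hypothetical call site; the resource id, getResources() context and ImageView are placeholders rather than names from the original project:
Bitmap source = BitmapFactory.decodeResource(getResources(), R.drawable.photo);
Bitmap grey = convertGreyImg(source);
imageView.setImageBitmap(grey);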
Example 5: getBGRAImageByte
import android.graphics.Bitmap; // import the package/class this method depends on
/**
 * Get the pixel data of an ARGB_8888 Bitmap as an int[] array.
 * @param image the source bitmap
 * @return the pixel array, or null if the bitmap is not ARGB_8888
 */
public static int[] getBGRAImageByte(Bitmap image) {
int width = image.getWidth();
int height = image.getHeight();
if(image.getConfig().equals(Config.ARGB_8888)) {
int[] imgData = new int[width * height];
image.getPixels(imgData, 0, width, 0, 0, width, height);
return imgData;
// byte[] imgPixels = new byte[width * height];
// for (int i = 0; i < imgData.length; ++i) {
// int p = 0;
// //p += ((imgData[i] >> 24) & 0xFF);
// p += ((imgData[i] >> 16) & 0xFF);
// p += ((imgData[i] >> 8) & 0xFF);
// p += ((imgData[i] >> 0) & 0xFF);
// imgPixels[i] = (byte) (p/3);
// }
} else {
// TODO
}
return null;
}
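One hedged way to fill in the TODO branch above is to copy non-ARGB_8888 bitmaps into an ARGB_8888 working copy before reading the pixels; Bitmap.copy can return null, so the sketch guards for that:
// Hypothetical body for the else branch (non-ARGB_8888 configs).
Bitmap argbCopy = image.copy(Config.ARGB_8888, false);
if (argbCopy != null) {
    int[] imgData = new int[width * height];
    argbCopy.getPixels(imgData, 0, width, 0, 0, width, height);
    argbCopy.recycle();
    return imgData;
}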
Example 6: RGBLuminanceSource
import android.graphics.Bitmap; // import the package/class this method depends on
public RGBLuminanceSource(Bitmap bitmap) {
super(null==bitmap?0:bitmap.getWidth(), null==bitmap?0:bitmap.getHeight());
int width = null==bitmap?0:bitmap.getWidth();
int height = null==bitmap?0:bitmap.getHeight();
// super(bitmap.getWidth(), bitmap.getHeight());
// int width = bitmap.getWidth();
// int height = bitmap.getHeight();
int[] pixels = new int[width * height];
bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
// In order to measure pure decoding speed, we convert the entire image to a
// greyscale array up front, which is the same as the Y channel of the
// YUVLuminanceSource in the real app.
luminances = new byte[width * height];
for (int y = 0; y < height; y++) {
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = pixels[offset + x];
int r = (pixel >> 16) & 0xff;
int g = (pixel >> 8) & 0xff;
int b = pixel & 0xff;
if (r == g && g == b) {
// Image is already greyscale, so pick any channel.
luminances[offset + x] = (byte) r;
} else {
// Calculate luminance cheaply, favoring green.
luminances[offset + x] = (byte) ((r + g + g + b) >> 2);
}
}
}
}
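Because this class extends ZXing's LuminanceSource, it also needs getRow and getMatrix; a minimal sketch over the luminances array built above (the real class may validate arguments more strictly):
@Override
public byte[] getRow(int y, byte[] row) {
    int width = getWidth();
    if (row == null || row.length < width) {
        row = new byte[width];
    }
    System.arraycopy(luminances, y * width, row, 0, width);
    return row;
}

@Override
public byte[] getMatrix() {
    return luminances;
}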
Example 7: convertYUV21FromRGB
import android.graphics.Bitmap; // import the package/class this method depends on
public static byte[] convertYUV21FromRGB(Bitmap bitmap){
bitmap = rotaingImageView(90, bitmap);
int inputWidth = bitmap.getWidth();
int inputHeight = bitmap.getHeight();
int[] argb = new int[inputWidth * inputHeight];
bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
bitmap.recycle();
return yuv;
}
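The rotaingImageView helper is not included in this excerpt; a common Matrix-based sketch of such a rotation (assuming android.graphics.Matrix is imported) is:
public static Bitmap rotaingImageView(int angle, Bitmap bitmap) {
    // Rotate the bitmap by the given angle.
    Matrix matrix = new Matrix();
    matrix.postRotate(angle);
    return Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
}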
Example 8: getAlplaBitmap
import android.graphics.Bitmap; // import the package/class this method depends on
public static Bitmap getAlplaBitmap(Bitmap sourceImg, int alpha) {
int[] argb = new int[sourceImg.getWidth() * sourceImg.getHeight()];
sourceImg.getPixels(argb, 0, sourceImg.getWidth(), 0, 0, sourceImg.getWidth(), sourceImg.getHeight());
alpha = alpha * 255 / 100; // alpha is given as a percentage (0-100); scale it to 0-255
for (int i = 0; i < argb.length; i++) {
argb[i] = (alpha << 24) | (argb[i] & 0x00FFFFFF);
}
sourceImg = Bitmap.createBitmap(argb, sourceImg.getWidth(), sourceImg.getHeight(), Bitmap.Config.ARGB_8888);
return sourceImg;
}
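A hypothetical call; the second argument is treated as a percentage (0-100), so this renders the source at roughly 40% opacity:
Bitmap faded = getAlplaBitmap(original, 40);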
Example 9: inputFromImage
import android.graphics.Bitmap; // import the package/class this method depends on
static public float[] inputFromImage(Bitmap[] bmps, float meanR, float meanG, float meanB) {
if (bmps.length == 0) return null;
int width = bmps[0].getWidth();
int height = bmps[0].getHeight();
float[] buf = new float[height * width * 3 * bmps.length];
for (int x=0; x<bmps.length; x++) {
Bitmap bmp = bmps[x];
if (bmp.getWidth() != width || bmp.getHeight() != height)
return null;
int[] pixels = new int[ height * width ];
bmp.getPixels(pixels, 0, width, 0, 0, width, height);
int start = width * height * 3 * x;
for (int i=0; i<height; i++) {
for (int j=0; j<width; j++) {
int pos = i * width + j;
int pixel = pixels[pos];
buf[start + pos] = Color.red(pixel) - meanR;
buf[start + width * height + pos] = Color.green(pixel) - meanG;
buf[start + width * height * 2 + pos] = Color.blue(pixel) - meanB;
}
}
}
return buf;
}
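A hypothetical call that packs two equally sized frames into one planar (per-channel) float buffer; frameA, frameB and the channel means are placeholders, not values from the original project:
float[] input = inputFromImage(new Bitmap[]{frameA, frameB}, 123.68f, 116.78f, 103.94f);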
Example 10: getImageData
import android.graphics.Bitmap; // import the package/class this method depends on
/**
 * Convert a bitmap into a flat pixel array.
 * @param img the image to convert
 * @return the int[] pixel representation of the bitmap
 */
private int[] getImageData(Bitmap img) {
int w = img.getWidth();
int h = img.getHeight();
int[] data = new int[w * h];
img.getPixels(data, 0, w, 0, 0, w, h);
return data;
}
Example 11: Preprocess
import android.graphics.Bitmap; // import the package/class this method depends on
float[] Preprocess(Bitmap imBitmap){
imBitmap = Bitmap.createScaledBitmap(imBitmap, IMAGE_SIZE, IMAGE_SIZE, true);
int[] intValues = new int[IMAGE_SIZE * IMAGE_SIZE];
float[] floatValues = new float[IMAGE_SIZE * IMAGE_SIZE * 3];
imBitmap.getPixels(intValues, 0, IMAGE_SIZE, 0, 0, IMAGE_SIZE, IMAGE_SIZE);
for (int i = 0; i < intValues.length; ++i) {
final int val = intValues[i];
floatValues[i * 3] = ((float)((val >> 16) & 0xFF))/255;//R
floatValues[i * 3 + 1] = ((float)((val >> 8) & 0xFF))/255;//G
floatValues[i * 3 + 2] = ((float)((val & 0xFF)))/255;//B
}
return floatValues;
}
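A hypothetical call site, assuming IMAGE_SIZE is the model's expected square input edge and imagePath is a decodable image file:
Bitmap photo = BitmapFactory.decodeFile(imagePath);
float[] modelInput = Preprocess(photo);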
Example 12: getPixelsFromBitmap
import android.graphics.Bitmap; // import the package/class this method depends on
/**
 * Get the pixels of a bitmap and convert them into a two-dimensional array, indexed as [x][y].
 */
private int[][] getPixelsFromBitmap(Bitmap bitmap) {
int[] imagePixels = new int[bitmap.getWidth() * bitmap.getHeight()];
int[][] pixelsBitmap = new int[bitmap.getWidth()][bitmap.getHeight()];
bitmap.getPixels(imagePixels, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
for (int i = 0; i < bitmap.getWidth(); i++) {
for (int j = 0; j < bitmap.getHeight(); j++) {
pixelsBitmap[i][j] = imagePixels[(j * bitmap.getWidth()) + i];
}
}
return pixelsBitmap;
}
Example 13: BitmapLuminanceSource
import android.graphics.Bitmap; // import the package/class this method depends on
public BitmapLuminanceSource(Bitmap bitmap) {
super(bitmap.getWidth(), bitmap.getHeight());
// First, read out the image's pixel array.
int[] data = new int[bitmap.getWidth() * bitmap.getHeight()];
this.bitmapPixels = new byte[bitmap.getWidth() * bitmap.getHeight()];
bitmap.getPixels(data, 0, getWidth(), 0, 0, getWidth(), getHeight());
// Convert the int array to a byte array, i.e. keep only the blue channel of each pixel as the luminance content.
for (int i = 0; i < data.length; i++) {
this.bitmapPixels[i] = (byte) data[i];
}
}
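A hypothetical decode path that uses this source with ZXing; MultiFormatReader, BinaryBitmap, HybridBinarizer, Result and NotFoundException come from the com.google.zxing packages, while the surrounding error handling is illustrative:
try {
    Result result = new MultiFormatReader().decode(
            new BinaryBitmap(new HybridBinarizer(new BitmapLuminanceSource(bitmap))));
    String decodedText = result.getText();
} catch (NotFoundException e) {
    // No barcode was found in this bitmap.
}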
Example 14: recognizeImage
import android.graphics.Bitmap; // import the package/class this method depends on
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
// Log this method so that it can be analyzed with systrace.
Trace.beginSection("recognizeImage");
Trace.beginSection("preprocessBitmap");
// Preprocess the image data from 0-255 int to normalized float based
// on the provided parameters.
bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
for (int i = 0; i < intValues.length; ++i) {
floatValues[i * 3 + 0] = ((intValues[i] & 0xFF) - imageMean) / imageStd;
floatValues[i * 3 + 1] = (((intValues[i] >> 8) & 0xFF) - imageMean) / imageStd;
floatValues[i * 3 + 2] = (((intValues[i] >> 16) & 0xFF) - imageMean) / imageStd;
}
Trace.endSection(); // preprocessBitmap
// Copy the input data into TensorFlow.
Trace.beginSection("fillNodeFloat");
inferenceInterface.fillNodeFloat(
inputName, new int[] {1, inputSize, inputSize, 3}, floatValues);
Trace.endSection();
// Run the inference call.
Trace.beginSection("runInference");
inferenceInterface.runInference(outputNames);
Trace.endSection();
// Copy the output Tensor back into the output array.
Trace.beginSection("readNodeFloat");
final float[] outputScoresEncoding = new float[numLocations];
final float[] outputLocationsEncoding = new float[numLocations * 4];
inferenceInterface.readNodeFloat(outputNames[0], outputLocationsEncoding);
inferenceInterface.readNodeFloat(outputNames[1], outputScoresEncoding);
Trace.endSection();
outputLocations = decodeLocationsEncoding(outputLocationsEncoding);
outputScores = decodeScoresEncoding(outputScoresEncoding);
// Find the best detections.
final PriorityQueue<Recognition> pq =
new PriorityQueue<Recognition>(
1,
new Comparator<Recognition>() {
@Override
public int compare(final Recognition lhs, final Recognition rhs) {
// Intentionally reversed to put high confidence at the head of the queue.
return Float.compare(rhs.getConfidence(), lhs.getConfidence());
}
});
// Scale them back to the input size.
for (int i = 0; i < outputScores.length; ++i) {
final RectF detection =
new RectF(
outputLocations[4 * i] * inputSize,
outputLocations[4 * i + 1] * inputSize,
outputLocations[4 * i + 2] * inputSize,
outputLocations[4 * i + 3] * inputSize);
pq.add(new Recognition("" + i, null, outputScores[i], detection));
}
final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
for (int i = 0; i < Math.min(pq.size(), MAX_RESULTS); ++i) {
recognitions.add(pq.poll());
}
Trace.endSection(); // "recognizeImage"
return recognitions;
}
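The intValues and floatValues buffers used above are fields of the detector class; a hedged guess at how they would be sized for a square inputSize x inputSize input (hypothetical initialization, e.g. in the constructor):
intValues = new int[inputSize * inputSize];
floatValues = new float[inputSize * inputSize * 3];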
Developer ID: jxtz518, project: Tensorflow_Andriod_With_Audio_Output, lines of code: 70, source file: TensorFlowMultiBoxDetector.java
Example 15: sharpen
import android.graphics.Bitmap; // import the package/class this method depends on
/**
 * Apply a sharpening filter to a bitmap.
 *
 * @param bitmap the source image
 * @return the sharpened image
 */
public static Bitmap sharpen(Bitmap bitmap) {
// Laplacian convolution kernel
int[] laplacian = new int[]{-1, -1, -1, -1, 9, -1, -1, -1, -1};
int width = bitmap.getWidth();
int height = bitmap.getHeight();
Bitmap newBitmap = Bitmap.createBitmap(width, height,
Config.RGB_565);
int pixR = 0;
int pixG = 0;
int pixB = 0;
int pixColor = 0;
int newR = 0;
int newG = 0;
int newB = 0;
int idx = 0;
float alpha = 0.3F;
int[] pixels = new int[width * height];
bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
for (int i = 1, length = height - 1; i < length; i++) {
for (int k = 1, len = width - 1; k < len; k++) {
idx = 0;
for (int m = -1; m <= 1; m++) {
for (int n = -1; n <= 1; n++) {
pixColor = pixels[(i + n) * width + k + m];
pixR = Color.red(pixColor);
pixG = Color.green(pixColor);
pixB = Color.blue(pixColor);
newR = newR + (int) (pixR * laplacian[idx] * alpha);
newG = newG + (int) (pixG * laplacian[idx] * alpha);
newB = newB + (int) (pixB * laplacian[idx] * alpha);
idx++;
}
}
newR = Math.min(255, Math.max(0, newR));
newG = Math.min(255, Math.max(0, newG));
newB = Math.min(255, Math.max(0, newB));
pixels[i * width + k] = Color.argb(255, newR, newG, newB);
newR = 0;
newG = 0;
newB = 0;
}
}
newBitmap.setPixels(pixels, 0, width, 0, 0, width, height);
return newBitmap;
}
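A hypothetical call that sharpens a downscaled copy to limit per-pixel work; the 800-pixel target width is an illustrative choice, not something from the original code:
int targetWidth = 800;
int targetHeight = targetWidth * original.getHeight() / original.getWidth();
Bitmap sharpened = sharpen(Bitmap.createScaledBitmap(original, targetWidth, targetHeight, true));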