This article collects typical usage examples of the Java method org.opencv.highgui.Highgui.imdecode. If you are wondering what Highgui.imdecode does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also read more about its containing class, org.opencv.highgui.Highgui.
The following presents 11 code examples of the Highgui.imdecode method, ordered by popularity by default.
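Before the individual examples, here is a minimal sketch of the call itself (OpenCV 2.4.x Java bindings; the class name and the assumption that the byte array holds an encoded JPEG/PNG image are mine, not taken from any of the projects below):

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.highgui.Highgui;

public class ImdecodeSketch {
    static {
        // The native library must be loaded once before any OpenCV call.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    /** Decodes an encoded image buffer (JPEG, PNG, ...) into a 3-channel BGR Mat. */
    public static Mat decode(byte[] encodedBytes) {
        MatOfByte buffer = new MatOfByte(encodedBytes);
        return Highgui.imdecode(buffer, Highgui.CV_LOAD_IMAGE_COLOR);
    }
}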
Example 1: loadResource
import org.opencv.highgui.Highgui; // import the package/class the method depends on
public static Mat loadResource(Context context, int resourceId, int flags) throws IOException
{
    // Read the raw Android resource fully into memory
    InputStream is = context.getResources().openRawResource(resourceId);
    ByteArrayOutputStream os = new ByteArrayOutputStream(is.available());
    byte[] buffer = new byte[4096];
    int bytesRead;
    while ((bytesRead = is.read(buffer)) != -1) {
        os.write(buffer, 0, bytesRead);
    }
    is.close();
    // Wrap the encoded bytes in a 1xN 8-bit Mat and decode them
    Mat encoded = new Mat(1, os.size(), CvType.CV_8U);
    encoded.put(0, 0, os.toByteArray());
    os.close();
    Mat decoded = Highgui.imdecode(encoded, flags);
    encoded.release();
    return decoded;
}
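A hedged call-site sketch for this helper from an Android Activity (R.raw.sample is a placeholder resource id, not part of the original project):

// Hypothetical call from inside an Activity; R.raw.sample is a placeholder.
Mat logo = loadResource(this, R.raw.sample, Highgui.CV_LOAD_IMAGE_COLOR);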
Example 2: bufferedImageToMat
import org.opencv.highgui.Highgui; // import the package/class the method depends on
protected static Mat bufferedImageToMat(BufferedImage buf) throws IOException {
    // Get the encoded bytes of the image
    byte[] bytes = bufferedImageToBytes(buf);
    // Box the primitive bytes so they can be wrapped in a List
    Byte[] bigByteArray = new Byte[bytes.length];
    for (int i = 0; i < bytes.length; i++)
        bigByteArray[i] = Byte.valueOf(bytes[i]);
    // Convert the byte list into a Mat and decode it
    List<Byte> matlist = Arrays.asList(bigByteArray);
    Mat img = Converters.vector_char_to_Mat(matlist);
    img = Highgui.imdecode(img, Highgui.CV_LOAD_IMAGE_COLOR);
    return img;
}
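The Byte[] boxing and Converters detour can typically be avoided, because MatOfByte accepts a primitive byte[] directly. A sketch, assuming the same bufferedImageToBytes helper from the example above:

protected static Mat bufferedImageToMatDirect(BufferedImage buf) throws IOException {
    // MatOfByte wraps the primitive array without boxing each byte.
    MatOfByte mob = new MatOfByte(bufferedImageToBytes(buf));
    return Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_COLOR);
}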
Example 3: byteswritableToMat
import org.opencv.highgui.Highgui; // import the package/class the method depends on
public static Mat byteswritableToMat(BytesWritable inputBW) {
    // BytesWritable.getBytes() returns the backing array, which may be padded;
    // only the first getLength() bytes are valid image data
    byte[] imageFileBytes = Arrays.copyOf(inputBW.getBytes(), inputBW.getLength());
    Byte[] bigByteArray = new Byte[imageFileBytes.length];
    for (int i = 0; i < imageFileBytes.length; i++) {
        bigByteArray[i] = Byte.valueOf(imageFileBytes[i]);
    }
    // To list
    List<Byte> matlist = Arrays.asList(bigByteArray);
    // Convert into an image matrix and decode
    Mat img = Converters.vector_char_to_Mat(matlist);
    img = Highgui.imdecode(img, Highgui.CV_LOAD_IMAGE_COLOR);
    return img;
}
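A hedged sketch of how this helper might be called from a Hadoop map task. Only BytesWritable comes from the example above; the key/value types and the emitted row count are assumptions, not part of the original code:

import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.opencv.core.Mat;

// Hypothetical mapper that decodes each image record and emits its row count.
public static class DecodeMapper extends Mapper<Text, BytesWritable, Text, IntWritable> {
    @Override
    protected void map(Text key, BytesWritable value, Context context)
            throws IOException, InterruptedException {
        Mat img = byteswritableToMat(value);
        context.write(key, new IntWritable(img.rows()));
    }
}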
Example 4: calculateDescriptors
import org.opencv.highgui.Highgui; // import the package/class the method depends on
/**
 * Calculates descriptors as defined by the detectorType and
 * descriptorType provided at construction for the provided image
 * @param buffer the encoded image bytes to compute descriptors for
 * @return matrix of descriptors, one row per detected keypoint
 * @throws IOException
 */
private Mat calculateDescriptors(byte[] buffer) throws IOException{
    MatOfByte mob = new MatOfByte(buffer);
    Mat image = Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_ANYCOLOR);
    FeatureDetector siftDetector = FeatureDetector.create(detectorType);
    MatOfKeyPoint mokp = new MatOfKeyPoint();
    siftDetector.detect(image, mokp);
    Mat descriptors = new Mat();
    DescriptorExtractor extractor = DescriptorExtractor.create(descriptorType);
    extractor.compute(image, mokp, descriptors);
    return descriptors;
}
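A hedged sketch of how the detectorType/descriptorType fields and a call site might look. The SIFT constants exist in the 2.4.x Java bindings; the concrete values and the file path are assumptions, not taken from the original project:

// Hypothetical field values for the constructor-provided types used above.
private int detectorType = FeatureDetector.SIFT;
private int descriptorType = DescriptorExtractor.SIFT;

// Hypothetical call with bytes read from disk (the path is a placeholder).
byte[] buffer = Files.readAllBytes(Paths.get("input.jpg"));
Mat descriptors = calculateDescriptors(buffer);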
Example 5: execute
import org.opencv.highgui.Highgui; // import the package/class the method depends on
@Override
public List<CVParticle> execute(CVParticle input) throws Exception {
    ArrayList<CVParticle> result = new ArrayList<CVParticle>();
    Frame frame = (Frame)input;
    if(frame.getImageType().equals(Frame.NO_IMAGE)) return result;
    // Decode the frame's encoded bytes into a BGR image
    MatOfByte mob = new MatOfByte(frame.getImageBytes());
    Mat image = Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_COLOR);
    /*
    mob = new MatOfByte();
    Highgui.imencode(".png", image, mob);
    BufferedImage bi = ImageUtils.bytesToImage(mob.toArray());
    ImageIO.write(bi, "png", new File("testOutput/"+sf.getStreamId()+"_"+sf.getSequenceNr()+".png"));
    */
    // Run the Haar cascade and wrap each detection in a Descriptor
    MatOfRect haarDetections = new MatOfRect();
    haarDetector.detectMultiScale(image, haarDetections, scaleFactor, minNeighbors, flags, new Size(minSize[0], minSize[1]), new Size(maxSize[0], maxSize[1]));
    ArrayList<Descriptor> descriptors = new ArrayList<Descriptor>();
    for(Rect rect : haarDetections.toArray()){
        Rectangle box = new Rectangle(rect.x, rect.y, rect.width, rect.height);
        descriptors.add(new Descriptor(input.getStreamId(), input.getSequenceNr(), box, 0, new float[0]));
    }
    Feature feature = new Feature(input.getStreamId(), input.getSequenceNr(), name, 0, descriptors, null);
    if(outputFrame){
        frame.getFeatures().add(feature);
        result.add(frame);
    }else{
        result.add(feature);
    }
    return result;
}
Example 6: bytesToMatrix
import org.opencv.highgui.Highgui; // import the package/class the method depends on
public static final Mat bytesToMatrix(int height, int width, byte[] bytes) {
    // CvType.makeType(0, 1) is CV_8UC1: a single-channel byte container for the encoded data
    Mat raw = new Mat(height, width, CvType.makeType(0, 1));
    raw.put(0, 0, bytes);
    Mat result = Highgui.imdecode(raw, Highgui.CV_LOAD_IMAGE_COLOR);
    return result;
}
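Because imdecode only needs a buffer holding the encoded byte stream, a 1 x bytes.length container is enough for an already-encoded image. A hedged call-site sketch (the path is a placeholder):

byte[] jpegBytes = Files.readAllBytes(Paths.get("input.jpg")); // placeholder path
Mat decoded = bytesToMatrix(1, jpegBytes.length, jpegBytes);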
Example 7: execute
import org.opencv.highgui.Highgui; // import the package/class the method depends on
@Override
public List<CVParticle> execute(List<CVParticle> input) throws Exception {
    List<CVParticle> result = new ArrayList<CVParticle>();
    if(input.size() != 2 || !(input.get(0) instanceof Frame) || !(input.get(1) instanceof Frame))
        return result;
    Frame frame1 = (Frame)input.get(0);
    Frame frame2 = (Frame)input.get(1);
    // Decode both frames and convert them to grayscale
    MatOfByte mob1 = new MatOfByte(frame1.getImageBytes());
    Mat image1 = Highgui.imdecode(mob1, Highgui.CV_LOAD_IMAGE_ANYCOLOR);
    Mat image1Gray = new Mat( image1.size(), CvType.CV_8UC1 );
    Imgproc.cvtColor( image1, image1Gray, Imgproc.COLOR_RGB2GRAY );
    MatOfByte mob2 = new MatOfByte(frame2.getImageBytes());
    Mat image2 = Highgui.imdecode(mob2, Highgui.CV_LOAD_IMAGE_ANYCOLOR);
    Mat image2Gray = new Mat( image2.size(), CvType.CV_8UC1 );
    Imgproc.cvtColor( image2, image2Gray, Imgproc.COLOR_RGB2GRAY );
    // Dense optical flow: every pixel gets a (dx, dy) displacement
    Mat opticalFlow = new Mat( image1Gray.size(), CvType.CV_32FC2 );
    Video.calcOpticalFlowFarneback( image1Gray, image2Gray, opticalFlow, 0.5, 1, 1, 1, 7, 1.5, 1 );
    // Copy the flow field into a float[cols][rows][2] array
    int cols = opticalFlow.cols();
    int rows = opticalFlow.rows();
    int maxz = opticalFlow.get(0,0).length;
    float[] tmp = new float[maxz];
    float[][][] dense = new float[cols][rows][maxz];
    for(int y=0; y<opticalFlow.rows(); y++){
        for(int x=0; x<opticalFlow.cols(); x++){
            opticalFlow.get(y,x, tmp);
            dense[x][y][0] = tmp[0];
            dense[x][y][1] = tmp[1];
        }
    }
    Feature feature = new Feature(frame1.getStreamId(), frame1.getSequenceNr(), name, frame2.getSequenceNr()-frame1.getSequenceNr(), null, dense);
    if(outputFrame){
        frame1.getFeatures().add(feature);
        result.add(frame1);
    }else{
        result.add(feature);
    }
    return result;
}
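A possible follow-up to this example: the dense array holds a (dx, dy) vector per pixel, so the flow magnitude is sqrt(dx^2 + dy^2). The helper below is a sketch of my own, not part of the original code:

// Reduces a float[cols][rows][2] dense flow array to per-pixel magnitudes.
private static float[][] flowMagnitude(float[][][] dense) {
    float[][] magnitude = new float[dense.length][dense[0].length];
    for (int x = 0; x < dense.length; x++) {
        for (int y = 0; y < dense[0].length; y++) {
            float dx = dense[x][y][0];
            float dy = dense[x][y][1];
            magnitude[x][y] = (float) Math.sqrt(dx * dx + dy * dy);
        }
    }
    return magnitude;
}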
Example 8: execute
import org.opencv.highgui.Highgui; // import the package/class the method depends on
@Override
public List<CVParticle> execute(CVParticle particle) throws Exception {
    List<CVParticle> result = new ArrayList<CVParticle>();
    if(!(particle instanceof Frame)) return result;
    Frame frame = (Frame)particle;
    if(frame.getImageType().equals(Frame.NO_IMAGE)) return result;
    try{
        MatOfByte mob = new MatOfByte(frame.getImageBytes());
        Mat image = Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_ANYCOLOR);
        // Detect keypoints and compute their descriptors
        FeatureDetector siftDetector = FeatureDetector.create(detectorType);
        MatOfKeyPoint mokp = new MatOfKeyPoint();
        siftDetector.detect(image, mokp);
        List<KeyPoint> keypoints = mokp.toList();
        Mat descriptors = new Mat();
        DescriptorExtractor extractor = DescriptorExtractor.create(descriptorType);
        extractor.compute(image, mokp, descriptors);
        // Copy each descriptor row into a Descriptor anchored at its keypoint
        List<Descriptor> descrList = new ArrayList<Descriptor>();
        float[] tmp = new float[1];
        for(int r=0; r<descriptors.rows(); r++){
            float[] values = new float[descriptors.cols()];
            for(int c=0; c<descriptors.cols(); c++){
                descriptors.get(r, c, tmp);
                values[c] = tmp[0];
            }
            descrList.add(new Descriptor(frame.getStreamId(), frame.getSequenceNr(), new Rectangle((int)keypoints.get(r).pt.x, (int)keypoints.get(r).pt.y, 0, 0), 0, values));
        }
        Feature feature = new Feature(frame.getStreamId(), frame.getSequenceNr(), featureName, 0, descrList, null);
        if(outputFrame){
            frame.getFeatures().add(feature);
            result.add(frame);
        }else{
            result.add(feature);
        }
    }catch(Exception e){
        // catching the exception at this point prevents a failure message from being sent upstream!
        logger.warn("Unable to extract features for frame!", e);
    }
    return result;
}
Example 9: execute
import org.opencv.highgui.Highgui; // import the package/class the method depends on
@Override
public List<Feature> execute(CVParticle input) throws Exception
{
    Frame sf = (Frame)input;
    MatOfByte mob = new MatOfByte(sf.getImageBytes());
    Mat image = Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_COLOR);
    Mat hist = new Mat();
    MatOfInt chans;
    MatOfInt histsize;
    MatOfFloat ranges;
    List<Mat> images = new ArrayList<Mat>();
    ArrayList<Feature> result = new ArrayList<Feature>();
    ArrayList<Descriptor> hist_descriptors = new ArrayList<Descriptor>();
    // Bounding box spanning the whole decoded image
    Rectangle box = new Rectangle(0, 0, (int) image.size().width, (int) image.size().height);
    images.add(image);
    for (int i = 0; i < chansj.length; i++){
        chans = new MatOfInt(chansj[i]);
        histsize = new MatOfInt(histsizej[i]);
        ranges = new MatOfFloat(rangesj[i*2], rangesj[i*2+1]);
        Imgproc.calcHist(images, chans, new Mat(), hist, histsize, ranges);
        // The histogram is a single-column matrix: copy its bins into a float array
        float[] tmp = new float[1];
        int rows = (int) hist.size().height;
        float[] values = new float[rows];
        int c = 0;
        for (int r = 0; r < rows; r++) // loop over histogram bins (rows)
        {
            hist.get(r, c, tmp);
            values[r] = tmp[0];
        }
        hist_descriptors.add(new Descriptor(input.getStreamId(), input.getSequenceNr(), box, 0, values));
    }
    // Add the per-channel histograms to the result as a single Feature
    if ( hist_descriptors.size() > 0 )
        result.add( new Feature( input.getStreamId(), input.getSequenceNr(), name, 0, hist_descriptors, null ) );
    return result;
}
Example 10: Image2Mat
import org.opencv.highgui.Highgui; // import the package/class the method depends on
/**
 * Creates a Mat object for the provided image
 * @param image the image to be converted to a Mat
 * @param imageType the encoding to use, see {@link Frame}
 * @return Mat representation of the image, decoded with the Highgui.CV_LOAD_IMAGE_COLOR flag
 * @throws IOException if the image cannot be read or converted into binary format
 */
public static Mat Image2Mat(BufferedImage image, String imageType) throws IOException{
    MatOfByte mob = new MatOfByte( ImageUtils.imageToBytes(image, imageType) );
    return Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_COLOR);
}
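A hedged call-site sketch; the file path and the "png" encoding string are placeholders, since the valid imageType values are defined by the project's own Frame class:

// ImageIO is the standard javax.imageio API; the path is a placeholder.
BufferedImage img = ImageIO.read(new File("frame.png"));
Mat mat = Image2Mat(img, "png"); // encoding string is an assumption, see Frame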
Example 11: bytes2Mat
import org.opencv.highgui.Highgui; // import the package/class the method depends on
/**
 * Creates a Mat object directly from a set of bytes
 * @param bytes binary representation of an image
 * @return Mat object, decoded with the Highgui.CV_LOAD_IMAGE_COLOR flag
 */
public static Mat bytes2Mat(byte[] bytes){
    MatOfByte mob = new MatOfByte( bytes );
    return Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_COLOR);
}