This page collects typical usage examples of the Java class org.opencv.features2d.DescriptorExtractor. If you have been wondering what DescriptorExtractor does and how to use it in your own code, the curated examples below should help.
DescriptorExtractor belongs to the org.opencv.features2d package. 15 code examples are shown below, sorted by popularity by default.
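Before diving into the examples, here is a minimal end-to-end sketch of the typical DescriptorExtractor workflow: detect keypoints, compute descriptors, then match two images. It assumes the OpenCV 2.4.x Java bindings (in OpenCV 3.x these wrapper classes are deprecated in favor of Feature2D implementations such as ORB, and they were removed in OpenCV 4):

import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;

public class DescriptorExtractorSketch {
    // Detect ORB keypoints in both images, describe them, and brute-force match them.
    public static MatOfDMatch matchImages(Mat imgA, Mat imgB) {
        FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
        DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);

        MatOfKeyPoint kpA = new MatOfKeyPoint();
        MatOfKeyPoint kpB = new MatOfKeyPoint();
        detector.detect(imgA, kpA);
        detector.detect(imgB, kpB);

        Mat descA = new Mat();
        Mat descB = new Mat();
        extractor.compute(imgA, kpA, descA);
        extractor.compute(imgB, kpB, descB);

        MatOfDMatch matches = new MatOfDMatch();
        matcher.match(descA, descB, matches); // Hamming distance suits ORB's binary descriptors
        return matches;
    }
}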
Example 1: KMeansMatcher
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
public KMeansMatcher()
{
    model = null;
    featureDetector = FeatureDetector.create(FeatureDetector.PYRAMID_ORB);
    descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.BRIEF);
    matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_SL2);
}
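One caveat worth flagging in this constructor: BRIEF produces binary descriptors, while BRUTEFORCE_SL2 measures squared Euclidean distance and is intended for float descriptors such as SIFT or SURF. The conventional pairing for BRIEF is a Hamming-distance matcher:

// Binary descriptors (BRIEF, ORB, BRISK, FREAK) are conventionally matched
// with Hamming distance rather than (squared) L2:
matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);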
Example 2: ExtractPath
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
public ExtractPath() {
    super();
    mKeyPointsPrev = new MatOfKeyPoint();
    // set up feature detection
    try {
        mFeatureDectector = FeatureDetector.create(FeatureDetector.FAST);
    } catch (UnsatisfiedLinkError err) {
        Log.e(TAG, "Feature detector creation failed with an UnsatisfiedLinkError");
        err.printStackTrace();
    }
    // set up descriptor extraction
    mDescExtractor = DescriptorExtractor.create(DescriptorExtractor.BRISK);
    mDescMatcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    mPrevFrame = new Mat();
    prevKeyPoints = new MatOfKeyPoint();
    RGBFrame = new Mat();
    mForeGroundMask = new Mat();
    mContours = new ArrayList<MatOfPoint>();
    // creates a new BackgroundSubtractorMOG2 with the given history length,
    // variance threshold, and shadow detection enabled
    mBackgroundSub = Video.createBackgroundSubtractorMOG2(50, 0, true);
}
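A hedged sketch of how these fields are typically used per frame (the actual frame-processing method is not part of this excerpt, so this is an assumption about intent, not the project's code):

Mat descriptors = new Mat();
mBackgroundSub.apply(RGBFrame, mForeGroundMask);               // update the foreground mask
mFeatureDectector.detect(RGBFrame, mKeyPointsPrev);            // detect FAST keypoints
mDescExtractor.compute(RGBFrame, mKeyPointsPrev, descriptors); // compute BRISK descriptors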
Example 3: getDescriptorExtractorType
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
public int getDescriptorExtractorType(String extractorName){
    if(extractorName.equals("SURF")){
        return DescriptorExtractor.SURF;
    }else if(extractorName.equals("SIFT")){
        return DescriptorExtractor.SIFT;
    }else if(extractorName.equals("ORB")){
        return DescriptorExtractor.ORB;
    }else if(extractorName.equals("BRIEF")){
        return DescriptorExtractor.BRIEF;
    }else if(extractorName.equals("BRISK")){
        return DescriptorExtractor.BRISK;
    }else if(extractorName.equals("FREAK")){
        return DescriptorExtractor.FREAK;
    }else if(extractorName.equals("OpponentSIFT")){
        return DescriptorExtractor.OPPONENT_SIFT;
    }else if(extractorName.equals("OpponentSURF")){
        return DescriptorExtractor.OPPONENT_SURF;
    }else if(extractorName.equals("OpponentORB")){
        return DescriptorExtractor.OPPONENT_ORB;
    }else if(extractorName.equals("OpponentBRIEF")){
        return DescriptorExtractor.OPPONENT_BRIEF;
    }else if(extractorName.equals("OpponentBRISK")){
        return DescriptorExtractor.OPPONENT_BRISK;
    }else if(extractorName.equals("OpponentFREAK")){
        return DescriptorExtractor.OPPONENT_FREAK;
    }else {
        return 0;
    }
}
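An illustrative call site for this mapper (the image and keypoints variables are placeholders, not part of the original project). Note that the fallback value 0 does not correspond to any DescriptorExtractor constant, so passing an unrecognized name on to create() would likely fail at runtime:

int type = getDescriptorExtractorType("ORB");
DescriptorExtractor extractor = DescriptorExtractor.create(type);
Mat descriptors = new Mat();
extractor.compute(image, keypoints, descriptors); // image and keypoints prepared elsewhere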
Example 4: calculateDescriptors
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
/**
 * Calculates descriptors as defined by the detectorType and
 * descriptorType provided at construction for the provided image
 * @param buffer the encoded image bytes to decode and describe
 * @return the computed descriptor matrix
 * @throws IOException
 */
private Mat calculateDescriptors(byte[] buffer) throws IOException{
    MatOfByte mob = new MatOfByte(buffer);
    Mat image = Highgui.imdecode(mob, Highgui.CV_LOAD_IMAGE_ANYCOLOR);
    FeatureDetector siftDetector = FeatureDetector.create(detectorType);
    MatOfKeyPoint mokp = new MatOfKeyPoint();
    siftDetector.detect(image, mokp);
    Mat descriptors = new Mat();
    DescriptorExtractor extractor = DescriptorExtractor.create(descriptorType);
    extractor.compute(image, mokp, descriptors);
    return descriptors;
}
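A hypothetical driver for this private method, assuming the enclosing class was constructed with suitable detector and descriptor types (the file path is a placeholder):

byte[] buffer = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("query.jpg"));
Mat descriptors = calculateDescriptors(buffer);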
Example 5: onManagerConnected
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
@Override
public void onManagerConnected(int status) {
    switch (status) {
        case LoaderCallbackInterface.SUCCESS:
            try {
                target = Utils.loadResource(MyActivity.this, R.raw.f1, Highgui.CV_LOAD_IMAGE_COLOR);
                detector = FeatureDetector.create(FeatureDetector.ORB);
                extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
                MatOfKeyPoint keyPoint = new MatOfKeyPoint();
                Mat descriptors = new Mat();
                long time = System.currentTimeMillis();
                detector.detect(target, keyPoint);
                extractor.compute(target, keyPoint, descriptors);
                Log.d("opencv", "keypoint computation time (ms): " + (System.currentTimeMillis() - time) +
                        ", total keypoints: " + keyPoint.toArray().length);
                for (KeyPoint k : keyPoint.toArray()) {
                    Core.circle(target, k.pt, 5, new Scalar(255, 0, 0));
                }
                // note: Mat takes (rows, cols); the original swapped the two arguments
                Mat tmp = new Mat(target.rows(), target.cols(), CvType.CV_8U, new Scalar(4));
                Bitmap image = Bitmap.createBitmap(target.cols(), target.rows(), Bitmap.Config.ARGB_8888);
                Imgproc.cvtColor(target, tmp, Imgproc.COLOR_RGB2BGRA, 4);
                Utils.matToBitmap(tmp, image);
                myImageView.setImageBitmap(image);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            break;
        default:
            super.onManagerConnected(status);
    }
}
Example 6: detectMinutiae
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
private Mat detectMinutiae(Mat skeleton, int border) {
    HashSet<Minutiae> minutiaeSet = new HashSet<>();
    System.out.println("Detecting minutiae");
    for(int c = border; c<skeleton.cols()-border; c++){
        for(int r = border; r<skeleton.rows()-border; r++) {
            double point = skeleton.get(r, c)[0];
            if (point != 0) { // Not black
                int cn = neighbourCount(skeleton, r, c);
                if(cn == 1)
                    minutiaeSet.add(new Minutiae(c, r, Minutiae.Type.RIDGEENDING));
                else if(cn == 3)
                    minutiaeSet.add(new Minutiae(c, r, Minutiae.Type.BIFURCATION));
            }
        }
    }
    System.out.println("filtering minutiae");
    HashSet<Minutiae> filteredMinutiae = filterMinutiae(minutiaeSet, skeleton);
    System.out.println("number of minutiae: " + filteredMinutiae.size());
    Mat result = new Mat();
    System.out.println("Drawing minutiae");
    Imgproc.cvtColor(skeleton, result, Imgproc.COLOR_GRAY2RGB);
    double[] red = {255, 0, 0};
    double[] green = {0, 255, 0};
    for (Minutiae m : filteredMinutiae) {
        double[] color;
        if (m.type == Minutiae.Type.BIFURCATION) color = green;
        else color = red;
        result.put(m.y, m.x,   color);
        result.put(m.y, m.x-1, color);
        result.put(m.y, m.x+1, color);
        result.put(m.y-1, m.x, color);
        result.put(m.y+1, m.x, color);
    }
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    keypoints.fromArray(minutiaeToKeyPoints(skeleton, filteredMinutiae));
    keypointsField = keypoints;
    DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
    Mat descriptors = new Mat();
    extractor.compute(skeleton, keypoints, descriptors);
    descriptorsField = descriptors;
    return result;
}
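The neighbourCount helper is not shown in this excerpt. A common implementation is the crossing-number method — count black-to-white transitions while walking the 8-neighbourhood in a circle — which matches the cn == 1 (ridge ending) and cn == 3 (bifurcation) tests above. A sketch under that assumption, not the author's actual code:

// Assumed crossing-number style neighbour count: walk the 8 neighbours of
// (r, c) in a closed circle and count 0 -> nonzero transitions.
private int neighbourCount(Mat skeleton, int r, int c) {
    int[] dr = {-1, -1, -1,  0,  1, 1, 1,  0, -1};
    int[] dc = {-1,  0,  1,  1,  1, 0, -1, -1, -1};
    int transitions = 0;
    for (int i = 0; i < 8; i++) {
        boolean current = skeleton.get(r + dr[i], c + dc[i])[0] != 0;
        boolean next = skeleton.get(r + dr[i + 1], c + dc[i + 1])[0] != 0;
        if (!current && next) transitions++;
    }
    return transitions; // 1 ~ ridge ending, 3 ~ bifurcation
}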
Example 7: detectFeatures
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
@NonNull
private Mat detectFeatures(Mat skeleton, Mat edges) {
    // note: despite the variable names (star, brief), ORB is used for both detection and description
    FeatureDetector star = FeatureDetector.create(FeatureDetector.ORB);
    DescriptorExtractor brief = DescriptorExtractor.create(DescriptorExtractor.ORB);
    MatOfKeyPoint keypoints = new MatOfKeyPoint();
    star.detect(skeleton, keypoints);
    keypointsField = keypoints;
    KeyPoint[] keypointArray = keypoints.toArray();
    ArrayList<KeyPoint> filteredKeypointArray = new ArrayList<>(keypointArray.length);
    int filterCount = 0;
    for (KeyPoint k : keypointArray) {
        if (edges.get((int)k.pt.y, (int)k.pt.x)[0] <= 0.0) {
            k.size /= 8;
            filteredKeypointArray.add(k);
        } else {
            filterCount++;
        }
    }
    Log.d(TAG, String.format("Filtered %s Keypoints", filterCount));
    keypoints.fromList(filteredKeypointArray);
    Mat descriptors = new Mat();
    brief.compute(skeleton, keypoints, descriptors);
    descriptorsField = descriptors;
    Mat results = new Mat();
    Scalar color = new Scalar(255, 0, 0); // RGB
    Features2d.drawKeypoints(skeleton, keypoints, results, color, Features2d.DRAW_RICH_KEYPOINTS);
    return results;
}
Example 8: ObjectDetection
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
/**
 * Instantiate an object detector based on the FAST, BRIEF, and BRUTEFORCE_HAMMING algorithms
 */
public ObjectDetection() {
    detector = FeatureDetector.create(FeatureDetectorType.FAST.val());
    extractor = DescriptorExtractor.create(DescriptorExtractorType.BRIEF.val());
    matcher = DescriptorMatcher.create(DescriptorMatcherType.BRUTEFORCE_HAMMING.val());
}
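FeatureDetectorType, DescriptorExtractorType, and DescriptorMatcherType appear to be project-specific enums wrapping OpenCV's raw integer constants; their definitions are not shown here. A plausible shape, offered purely as an assumption:

// Assumed shape of the project's enum wrapper around OpenCV's int constants:
public enum DescriptorExtractorType {
    BRIEF(DescriptorExtractor.BRIEF),
    ORB(DescriptorExtractor.ORB); // further variants omitted

    private final int val;
    DescriptorExtractorType(int val) { this.val = val; }
    public int val() { return val; }
}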
Example 9: initializeOpenCVDependencies
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
private void initializeOpenCVDependencies() throws IOException {
    mOpenCvCameraView.enableView();
    detector = FeatureDetector.create(FeatureDetector.ORB);
    descriptor = DescriptorExtractor.create(DescriptorExtractor.ORB);
    matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    img1 = new Mat();
    AssetManager assetManager = getAssets();
    InputStream istr = assetManager.open("a.jpeg");
    Bitmap bitmap = BitmapFactory.decodeStream(istr);
    Utils.bitmapToMat(bitmap, img1);
    Imgproc.cvtColor(img1, img1, Imgproc.COLOR_RGB2GRAY);
    img1.convertTo(img1, 0); // convert to CV_8U (type 0) to match the type of the camera's image
    descriptors1 = new Mat();
    keypoints1 = new MatOfKeyPoint();
    detector.detect(img1, keypoints1);
    descriptor.compute(img1, keypoints1, descriptors1);
}
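With the reference image's keypoints and descriptors precomputed, each incoming camera frame can later be matched against them, typically in onCameraFrame. That matching step is not part of this snippet; a hedged sketch, with frameGray as a placeholder for the current frame:

MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
Mat descriptors2 = new Mat();
detector.detect(frameGray, keypoints2);              // keypoints in the live frame
descriptor.compute(frameGray, keypoints2, descriptors2);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptors1, descriptors2, matches);  // match reference vs. frame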
Example 10: surfaceCreated
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
@Override
public void surfaceCreated(SurfaceHolder holder) {
    matches = new MatOfDMatch();
    orbDetector = FeatureDetector.create(FeatureDetector.ORB);
    orbDescriptor = DescriptorExtractor.create(DescriptorExtractor.ORB);
    kp2 = new MatOfKeyPoint();
    desc2 = new Mat();
}
Example 11: ImageDetector
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
public ImageDetector() {
    this(FeatureDetector.ORB, DescriptorExtractor.ORB, DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
}
Example 12: compute
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
private static Mat compute(Mat img, MatOfKeyPoint points, DescriptorExtractor extractor) {
    // Compute descriptor
    Mat descriptor = new Mat();
    extractor.compute(img, points, descriptor);
    return descriptor;
}
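An illustrative call of this static helper (img stands in for a loaded grayscale image and is not part of the original snippet):

MatOfKeyPoint points = new MatOfKeyPoint();
FeatureDetector.create(FeatureDetector.ORB).detect(img, points);
Mat desc = compute(img, points, DescriptorExtractor.create(DescriptorExtractor.ORB));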
Example 13: testMatSerialization
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
private void testMatSerialization(){
    File storage = Environment.getExternalStorageDirectory();
    String path = storage.getAbsolutePath()+"/opencv/file.bin";
    FeatureDetector detector = FeatureDetector.create(FeatureDetector.GRID_ORB);
    DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
    MatOfKeyPoint kpts = new MatOfKeyPoint();
    detector.detect(mRgba, kpts);
    Mat descriptors = new Mat();
    // note: keypoints are detected on mRgba but descriptors are computed on mGray
    extractor.compute(mGray, kpts, descriptors);
    Log.d(TAG, "test - descriptors "+descriptors);
    UtilsOpenCV.matToJson(descriptors);
    //UtilsOpenCV.matStore(path, descriptors);
    //UtilsOpenCV.matRetrieve(path, rows, cols, type);
}
Example 14: main
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
public static void main(String[] args){
    // first some global (topology) configuration
    StormCVConfig conf = new StormCVConfig();
    /**
     * Sets the OpenCV library to be used, which depends on the system the topology is executed on
     */
    conf.put(StormCVConfig.STORMCV_OPENCV_LIB, "mac64_opencv_java248.dylib");
    conf.setNumWorkers(8); // number of workers in the topology
    conf.setMaxSpoutPending(32); // maximum un-acked/un-failed frames per spout (the spout blocks if this number is reached)
    conf.put(StormCVConfig.STORMCV_FRAME_ENCODING, Frame.JPG_IMAGE); // frames will be encoded as JPG throughout the topology (JPG is the default when not explicitly set)
    conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true); // whether Storm should time out messages
    conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 10); // the maximum amount of time given to the topology to fully process a message emitted by a spout (default = 30)
    conf.put(StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT, false); // indicates if the spout must be fault tolerant, i.e. whether spouts replay tuples on fail (here they do NOT)
    conf.put(StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC, 30); // TTL (seconds) for all elements in all caches throughout the topology (avoids memory overload)

    String userDir = System.getProperty("user.dir").replaceAll("\\\\", "/");
    // create a list with files to be processed, in this case just one. Multiple files would be spread over the available spouts
    List<String> files = new ArrayList<String>();
    files.add("file://" + userDir + "/resources/data/");

    int frameSkip = 13;

    // now create the topology itself (spout -> scale -> {face detection, sift} -> drawer -> streamer)
    TopologyBuilder builder = new TopologyBuilder();
    // just one spout reading video files, extracting 1 frame out of 25 (i.e. 1 per second)
    builder.setSpout("spout", new CVParticleSpout(new FileFrameFetcher(files).frameSkip(frameSkip)), 1);
    // add a bolt that scales frames down to 25% of their original size
    builder.setBolt("scale", new SingleInputBolt(new ScaleImageOp(0.25f)), 1)
            .shuffleGrouping("spout");
    // one bolt with a HaarCascade classifier detecting faces. This operation outputs a Frame including the Features with detected faces.
    // The xml file must be present on the classpath!
    builder.setBolt("face", new SingleInputBolt(new HaarCascadeOp("face", "lbpcascade_frontalface.xml").outputFrame(true)), 1)
            .shuffleGrouping("scale");
    // add a bolt that performs SIFT keypoint extraction
    builder.setBolt("sift", new SingleInputBolt(new FeatureExtractionOp("sift", FeatureDetector.SIFT, DescriptorExtractor.SIFT).outputFrame(false)), 2)
            .shuffleGrouping("scale");
    // batch bolt that waits for input from both the face and sift detection bolts and combines them into a single frame object
    builder.setBolt("combiner", new BatchInputBolt(new SequenceNrBatcher(2), new FeatureCombinerOp()), 1)
            .fieldsGrouping("sift", new Fields(FrameSerializer.STREAMID))
            .fieldsGrouping("face", new Fields(FrameSerializer.STREAMID));
    // simple bolt that draws Features (i.e. locations of features) into the frame
    builder.setBolt("drawer", new SingleInputBolt(new DrawFeaturesOp()), 1)
            .shuffleGrouping("combiner");
    // add a bolt that creates a webservice on port 8558 enabling users to view the result
    builder.setBolt("streamer", new BatchInputBolt(
            new SlidingWindowBatcher(2, frameSkip).maxSize(6), // note: the required batcher is used as a buffer and maintains the order of the frames
            new MjpegStreamingOp().port(8558).framerate(5)).groupBy(new Fields(FrameSerializer.STREAMID)),
            1)
            .shuffleGrouping("drawer");

    // NOTE: if the topology is started (locally), go to http://localhost:8558/streaming/tiles and click the image to see the stream!
    try {
        // run in local mode
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("multifeature", conf, builder.createTopology());
        Utils.sleep(120*1000); // run for two minutes and then kill the topology
        cluster.shutdown();
        System.exit(1);
        // to run on a storm cluster instead:
        // StormSubmitter.submitTopology("some_topology_name", conf, builder.createTopology());
    } catch (Exception e){
        e.printStackTrace();
    }
}
Example 15: main
import org.opencv.features2d.DescriptorExtractor; // import the required package/class
public static void main(String[] args){
    // first some global (topology) configuration
    StormCVConfig conf = new StormCVConfig();
    conf.put(StormCVConfig.STORMCV_OPENCV_LIB, "mac64_opencv_java248.dylib");
    conf.setNumWorkers(5); // number of workers in the topology
    conf.put(StormCVConfig.STORMCV_FRAME_ENCODING, Frame.JPG_IMAGE); // frames will be encoded as JPG throughout the topology (JPG is the default when not explicitly set)
    conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true); // whether Storm should time out messages
    conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 10); // the maximum amount of time given to the topology to fully process a message emitted by a spout (default = 30)
    conf.put(StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT, false); // indicates if the spout must be fault tolerant, i.e. whether spouts replay tuples on fail (here they do NOT)
    conf.put(StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC, 30); // TTL (seconds) for all elements in all caches throughout the topology (avoids memory overload)
    conf.put(Config.NIMBUS_TASK_LAUNCH_SECS, 30);

    String userDir = System.getProperty("user.dir").replaceAll("\\\\", "/");
    List<String> prototypes = new ArrayList<String>();
    prototypes.add("file://" + userDir + "/resources/data");

    // create a linear DRPC builder called 'match'
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("match");
    // add a FeatureMatchRequestOp that receives drpc requests
    builder.addBolt(new RequestBolt(new FeatureMatchRequestOp()), 1);
    // add two bolts that perform sift extraction (as used in other examples!)
    builder.addBolt(new SingleInputBolt(
            new FeatureExtractionOp("sift", FeatureDetector.SIFT, DescriptorExtractor.SIFT).outputFrame(false)
    ), 1).shuffleGrouping();
    // add a bolt that matches the queries it gets against the prototypes it loaded during prepare.
    // The prototypes are divided over the available tasks, which means each query has to be sent to all tasks (hence allGrouping).
    // The matcher only reports a match if at least 1 strong match has been found (can be set to 0).
    builder.addBolt(new SingleInputBolt(new PartialMatcher(prototypes, 0, 0.5f)), 2).allGrouping();
    // add a bolt that aggregates all the results it gets from the two matchers
    builder.addBolt(new BatchBolt(new FeatureMatchResultOp(true)), 1).fieldsGrouping(new Fields(CVParticleSerializer.REQUESTID));

    // create a local drpc server and cluster, and deploy the drpc topology on the cluster
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));

    // use all face images as queries (the same images as loaded by the matcher!)
    File queryDir = new File(userDir + "/resources/data/");
    for(String img : queryDir.list()){
        if(!img.endsWith(".jpg")) continue; // skip non-image files
        // execute the drpc request with the image as argument. Note that execute blocks.
        String matchesJson = drpc.execute("match", "file://" + userDir + "/resources/data/" + img);
        System.out.println(img + " : " + matchesJson);
    }
    cluster.shutdown();
    drpc.shutdown();
}