本文整理汇总了Java中net.semanticmetadata.lire.DocumentBuilder类的典型用法代码示例。如果您正苦于以下问题:Java DocumentBuilder类的具体用法?Java DocumentBuilder怎么用?Java DocumentBuilder使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
DocumentBuilder类属于net.semanticmetadata.lire包,在下文中一共展示了DocumentBuilder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: luceneIndexer
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Indexes a single image with the given LIRE feature builder and writes the
 * resulting Lucene document to the index directory derived from {@code prefix}.
 *
 * @param image      the image to extract features from
 * @param picture_id unique id of the picture; its string form is stored as the document identifier
 * @param prefix     prefix handed to {@code getPath(String)} to locate the index directory
 * @param builder    the LIRE document builder that produces the feature document
 * @param conf       configuration for the Lucene {@code IndexWriter}
 * @throws IOException if the index directory cannot be opened or the writer cannot be closed
 */
private static void luceneIndexer(BufferedImage image, UUID picture_id, String prefix, DocumentBuilder builder, IndexWriterConfig conf)
        throws IOException
{
    File path = getPath(prefix);
    log.debug("creating indexed path " + path.getAbsolutePath());
    IndexWriter iw = new IndexWriter(FSDirectory.open(path), conf);
    try {
        Document document = builder.createDocument(image, picture_id.toString());
        iw.addDocument(document);
    } catch (Exception e) {
        System.err.println("Error reading image or indexing it.");
        e.printStackTrace();
    } finally {
        // Always release the writer (and its write lock): the original closed it
        // outside any finally block, leaking the IndexWriter when an Error or an
        // exception thrown before addDocument escaped the try.
        iw.close();
    }
}
示例2: createDescriptorFields
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Extracts SIFT features from the given image and wraps each feature in a
 * stored Lucene field named {@code DocumentBuilder.FIELD_NAME_SIFT}.
 *
 * @param image the image to analyse
 * @return one stored field per extracted feature, or {@code null} when
 *         feature extraction fails with an {@code IOException}
 */
@Override
public Field[] createDescriptorFields(BufferedImage image) {
    Field[] descriptorFields = null;
    try {
        // Run the SIFT extractor on the image.
        List<Feature> siftFeatures = extractor.computeSiftFeatures(image);
        descriptorFields = new Field[siftFeatures.size()];
        // One stored field per feature, serialized as a byte array.
        int idx = 0;
        for (Feature feature : siftFeatures) {
            descriptorFields[idx++] = new StoredField(DocumentBuilder.FIELD_NAME_SIFT, feature.getByteArrayRepresentation());
        }
    } catch (IOException e) {
        logger.severe(e.getMessage());
    }
    return descriptorFields;
}
示例3: createDocument
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Builds a Lucene document containing one stored SIFT field per extracted
 * feature, plus an optional identifier field.
 *
 * @param image      the image to extract SIFT features from
 * @param identifier optional document id; skipped when {@code null}
 * @return the populated document, or {@code null} when extraction fails
 */
public Document createDocument(BufferedImage image, String identifier) {
    Document document = null;
    try {
        // Extract the SIFT features first; only then build the document.
        List<Feature> siftFeatures = extractor.computeSiftFeatures(image);
        document = new Document();
        // Serialize every feature into its own stored field.
        for (Feature feature : siftFeatures) {
            document.add(new StoredField(DocumentBuilder.FIELD_NAME_SIFT, feature.getByteArrayRepresentation()));
        }
        if (identifier != null)
            document.add(new StringField(DocumentBuilder.FIELD_NAME_IDENTIFIER, identifier, Field.Store.YES));
    } catch (IOException e) {
        logger.severe(e.getMessage());
    }
    return document;
}
示例4: createColorOnlyDocument
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Creates a document from a (non existent) one color image. Can be used for
 * color search.
 *
 * @param color the color for the image
 * @return the document for searching.
 */
public static Document createColorOnlyDocument(Color color) {
    assert (color != null);
    // Create a small uniformly colored image (64x64 px).
    int imgWidth = 64;
    BufferedImage img = new BufferedImage(imgWidth, imgWidth, BufferedImage.TYPE_INT_RGB);
    Graphics2D g = (Graphics2D) img.getGraphics();
    try {
        g.setColor(color);
        g.fillRect(0, 0, imgWidth, imgWidth);
    } finally {
        // Release the native resources held by the graphics context;
        // the original never disposed it.
        g.dispose();
    }
    // Hand it over to ColorLayout to create a descriptor:
    ColorLayout scd = new ColorLayout();
    scd.extract(img);
    // create the string representation
    String sc = scd.getStringRepresentation();
    logger.fine("Extraction from image finished");
    Document doc = new Document();
    if (sc != null)
        doc.add(new StringField(DocumentBuilder.FIELD_NAME_COLORLAYOUT, sc, Field.Store.YES));
    return doc;
}
示例5: createDocument
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Creates a fully fledged Document to be added to a Lucene index.
 *
 * @param image the image to index. Cannot be NULL.
 * @param identifier an id for the image, for instance the filename or an URL. Can be NULL.
 * @return the populated Lucene document
 */
public Document createDocument(BufferedImage image, String identifier) {
    assert (image != null);
    // sangupta: create a new document else code below
    // will throw a NPE
    Document doc = new Document();
    if (identifier != null) {
        doc.add(new StringField(DocumentBuilder.FIELD_NAME_IDENTIFIER, identifier, Field.Store.YES));
    }
    Field[] fields = createDescriptorFields(image);
    // createDescriptorFields implementations may return null when feature
    // extraction fails (see the SIFT builder); guard against the NPE the
    // original threw in that case.
    if (fields != null) {
        for (Field field : fields) {
            doc.add(field);
        }
    }
    return doc;
}
示例6: createDocument
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Builds a composite document by delegating to every registered builder and
 * merging all descriptor fields into a single Lucene document.
 *
 * @param image      the image to describe
 * @param identifier optional document id; stored when non-null
 * @return the merged document
 * @throws FileNotFoundException declared for interface compatibility
 */
public Document createDocument(BufferedImage image, String identifier) throws FileNotFoundException {
    docsCreated = true;
    Document document = new Document();
    if (identifier != null)
        document.add(new StoredField(DocumentBuilder.FIELD_NAME_IDENTIFIER, identifier));
    // Collect the descriptor fields of every chained builder. An empty
    // builder list simply yields a document with only the identifier.
    for (DocumentBuilder chained : builders) {
        for (Field descriptorField : chained.createDescriptorFields(image)) {
            document.add(descriptorField);
        }
    }
    return document;
}
示例7: indexFiles
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Indexes the given image files with the supplied builder, printing progress
 * every 100 files and a throughput summary at the end.
 *
 * @param images    paths of the image files to index
 * @param builder   the LIRE builder producing one document per image
 * @param indexPath directory of the Lucene index (created/overwritten)
 * @throws IOException if reading an image or writing the index fails
 */
private void indexFiles(ArrayList<String> images, DocumentBuilder builder, String indexPath) throws IOException {
    System.out.println(">> Indexing " + images.size() + " files.");
    IndexWriter iw = LuceneUtils.createIndexWriter(indexPath, true);
    int count = 0;
    long time = System.currentTimeMillis();
    try {
        for (String identifier : images) {
            // Close each stream after use; the original leaked one
            // FileInputStream per indexed image.
            FileInputStream in = new FileInputStream(identifier);
            try {
                Document doc = builder.createDocument(in, identifier);
                iw.addDocument(doc);
            } finally {
                in.close();
            }
            count++;
            if (count % 100 == 0) System.out.println(count + " files indexed.");
        }
        long timeTaken = (System.currentTimeMillis() - time);
        float sec = ((float) timeTaken) / 1000f;
        // Guard the per-image average: the original divided by zero on an
        // empty image list.
        if (count > 0) {
            System.out.println(sec + " seconds taken, " + (timeTaken / count) + " ms per image.");
        }
    } finally {
        iw.close();
    }
}
示例8: testSearchSpeed
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Benchmarks linear search speed for one feature class: (re)builds the index
 * with a parallel indexer, loads it into RAM, runs one search per query
 * document and prints the average time per query in milliseconds.
 *
 * @param images       image list (unused here; indexing reads from indexPath's source)
 * @param featureClass the LIRE feature implementation to index and search with
 * @throws IOException if the index cannot be read
 */
private void testSearchSpeed(ArrayList<String> images, final Class featureClass) throws IOException {
    // Index with 8 threads, storing the feature under the field name "feature".
    parallelIndexer = new ParallelIndexer(8, indexPath, testExtensive, true) {
        @Override
        public void addBuilders(ChainedDocumentBuilder builder) {
            builder.addBuilder(new GenericDocumentBuilder(featureClass, "feature"));
        }
    };
    parallelIndexer.run();
    // Load the whole on-disk index into RAM so I/O does not skew the timing.
    IndexReader reader = DirectoryReader.open(new RAMDirectory(FSDirectory.open(new File(indexPath)), IOContext.READONCE));
    Bits liveDocs = MultiFields.getLiveDocs(reader);
    double queryCount = 0d;
    ImageSearcher searcher = new GenericFastImageSearcher(100, featureClass, "feature");
    long ms = System.currentTimeMillis();
    for (int i = 0; i < reader.maxDoc(); i++) {
        if (reader.hasDeletions() && !liveDocs.get(i)) continue; // if it is deleted, just ignore it.
        String fileName = getIDfromFileName(reader.document(i).getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]);
        if (queries.keySet().contains(fileName)) {
            queryCount += 1d;
            // ok, we've got a query here for a document ...
            Document queryDoc = reader.document(i);
            // Results are intentionally discarded — only timing matters here.
            ImageSearchHits hits = searcher.search(queryDoc, reader);
        }
    }
    ms = System.currentTimeMillis() - ms;
    // Prints "<SimpleClassName> <avg ms per query>".
    System.out.printf("%s \t %3.1f \n", featureClass.getName().substring(featureClass.getName().lastIndexOf('.')+1), (double) ms / queryCount);
}
示例9: testSearchBenchmark
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Prints precision@K recall figures (K = 10..100) for several global
 * features, comparing plain linear search against bit-sampling (LSH-style)
 * hashed search over the same index. Each getRecall(...) call prints one
 * result row; disabled features are kept as commented-out lines.
 *
 * @throws IOException if the index at indexPath cannot be opened
 */
public void testSearchBenchmark() throws IOException {
    // Header row for the tab-separated result table.
    System.out.printf("%s\t%s\t%s\t%s\t%s\t%s\n", "Test", "[email protected]", "[email protected]", "[email protected]", "[email protected]", "[email protected]");
    IndexReader reader = DirectoryReader.open(MMapDirectory.open(new File(indexPath)));
    // --- linear (exhaustive) searchers, 50 results each ---
    getRecall("ColorLayout (linear)", new GenericFastImageSearcher(50, ColorLayout.class, DocumentBuilder.FIELD_NAME_COLORLAYOUT, true, reader), reader);
    getRecall("PHOG (linear)", new GenericFastImageSearcher(50, PHOG.class, DocumentBuilder.FIELD_NAME_PHOG, true, reader), reader);
    getRecall("JCD (linear)", new GenericFastImageSearcher(50, JCD.class, DocumentBuilder.FIELD_NAME_JCD, true, reader), reader);
    getRecall("EdgeHistogram (linear)", new GenericFastImageSearcher(50, EdgeHistogram.class, DocumentBuilder.FIELD_NAME_EDGEHISTOGRAM, true, reader), reader);
    // getRecall("CEDD (linear)", new GenericFastImageSearcher(50, CEDD.class, DocumentBuilder.FIELD_NAME_CEDD, true, reader), reader);
    // getRecall("JointHistogram (linear)", new GenericFastImageSearcher(50, JointHistogram.class, DocumentBuilder.FIELD_NAME_JOINT_HISTOGRAM, true, reader), reader);
    // getRecall("LocalBinaryPatterns (linear)", new GenericFastImageSearcher(50, LocalBinaryPatterns.class, DocumentBuilder.FIELD_NAME_LOCAL_BINARY_PATTERNS, true, reader), reader);
    // getRecall("Luminance Layout (linear)", new GenericFastImageSearcher(50, LuminanceLayout.class, DocumentBuilder.FIELD_NAME_LUMINANCE_LAYOUT, true, reader), reader);
    // getRecall("BinaryPatternsPyramid (linear)", new GenericFastImageSearcher(50, BinaryPatternsPyramid.class, DocumentBuilder.FIELD_NAME_BINARY_PATTERNS_PYRAMID, true, reader), reader);
    //
    // --- hashed searchers: candidate lookup via "<field>_hash", 1000 candidates ---
    getRecall("ColorLayout (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_COLORLAYOUT, DocumentBuilder.FIELD_NAME_COLORLAYOUT + "_hash", new ColorLayout(), 1000), reader);
    getRecall("PHOG (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_PHOG, DocumentBuilder.FIELD_NAME_PHOG + "_hash", new PHOG(), 1000), reader);
    getRecall("JCD (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_JCD, DocumentBuilder.FIELD_NAME_JCD + "_hash", new JCD(), 1000), reader);
    getRecall("EdgeHistogram (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_EDGEHISTOGRAM, DocumentBuilder.FIELD_NAME_EDGEHISTOGRAM + "_hash", new EdgeHistogram(), 1000), reader);
    // getRecall("CEDD (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_CEDD, DocumentBuilder.FIELD_NAME_CEDD + "_hash", new JCD(), 1000), reader);
    // getRecall("JointHistogram (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_JOINT_HISTOGRAM, DocumentBuilder.FIELD_NAME_JOINT_HISTOGRAM + "_hash", new JointHistogram(), 1000), reader);
    // getRecall("LocalBinaryPatterns (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_LOCAL_BINARY_PATTERNS, DocumentBuilder.FIELD_NAME_LOCAL_BINARY_PATTERNS + "_hash", new LocalBinaryPatterns(), 1000), reader);
    // getRecall("Luminance Layout (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_LUMINANCE_LAYOUT, DocumentBuilder.FIELD_NAME_LUMINANCE_LAYOUT + "_hash", new LuminanceLayout(), 1000), reader);
    // getRecall("BinaryPatternsPyramid (hashed)", new BitSamplingImageSearcher(50, DocumentBuilder.FIELD_NAME_BINARY_PATTERNS_PYRAMID, DocumentBuilder.FIELD_NAME_BINARY_PATTERNS_PYRAMID + "_hash", new LocalBinaryPatterns(), 1000), reader);
    // getRecall("VLAD (linear)", new GenericFastImageSearcher(1000, GenericByteLireFeature.class, DocumentBuilder.FIELD_NAME_SURF_VLAD, true, reader), reader);
}
示例10: testColorLayoutFastMap
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Collects every stored ColorLayout descriptor from the cached documents,
 * runs FastMap on the resulting feature list and prints the elapsed time.
 */
public void testColorLayoutFastMap() {
    // Rebuild the feature objects from their stored string representations.
    LinkedList<LireFeature> features = new LinkedList<LireFeature>();
    for (Document document : docs) {
        String[] stored = document.getValues(DocumentBuilder.FIELD_NAME_COLORLAYOUT);
        if (stored.length > 0) {
            ColorLayout colorLayout = new ColorLayout();
            colorLayout.setStringRepresentation(stored[0]);
            features.add(colorLayout);
        }
    }
    System.out.println("--------------- < COLORLAYOUT > ---------------");
    long nano = System.nanoTime();
    createFastMapForObjects(features, null);
    nano = System.nanoTime() - nano;
    System.out.println("Time taken: ~ " + (nano / (1000 * 1000 * 1000)) + " s");
}
示例11: testScalableColorFastMap
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Collects every stored ScalableColor descriptor from the cached documents,
 * runs FastMap with and without the array cache, and prints total time.
 */
public void testScalableColorFastMap() {
    // Rebuild the feature objects from their stored string representations.
    LinkedList<LireFeature> features = new LinkedList<LireFeature>();
    for (Document document : docs) {
        String[] stored = document.getValues(DocumentBuilder.FIELD_NAME_SCALABLECOLOR);
        if (stored.length > 0) {
            ScalableColor scalableColor = new ScalableColor();
            scalableColor.setStringRepresentation(stored[0]);
            features.add(scalableColor);
        }
    }
    System.out.println("--------------- < ScalableColor > ---------------");
    long nano = System.nanoTime();
    // Compare the uncached and the array-cached FastMap variants.
    System.out.println("---------< No cache >----------------");
    createFastMapForObjects(features, null);
    System.out.println("---------< Array cache >----------------");
    createArrayFastMapForObjects(features, null);
    nano = System.nanoTime() - nano;
    System.out.println("Time taken: ~ " + (nano / (1000 * 1000)) + " ms");
}
示例12: testEdgeHistogramFastMap
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Collects every stored EdgeHistogram descriptor from the cached documents,
 * runs FastMap on the resulting feature list and prints the elapsed time.
 */
public void testEdgeHistogramFastMap() {
    // Rebuild the feature objects from their stored string representations.
    LinkedList<LireFeature> features = new LinkedList<LireFeature>();
    for (Document document : docs) {
        String[] stored = document.getValues(DocumentBuilder.FIELD_NAME_EDGEHISTOGRAM);
        if (stored.length > 0) {
            EdgeHistogram edgeHistogram = new EdgeHistogram();
            edgeHistogram.setStringRepresentation(stored[0]);
            features.add(edgeHistogram);
        }
    }
    System.out.println("--------------- < EdgeHistogram > ---------------");
    long nano = System.nanoTime();
    createFastMapForObjects(features, null);
    nano = System.nanoTime() - nano;
    System.out.println("Time taken: ~ " + (nano / (1000 * 1000)) + " ms");
}
示例13: testAutoColorCorrelogramFastMap
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Collects every stored AutoColorCorrelogram descriptor from the cached
 * documents, runs FastMap on the feature list and prints the elapsed time.
 */
public void testAutoColorCorrelogramFastMap() {
    // Rebuild the feature objects from their stored string representations.
    LinkedList<LireFeature> features = new LinkedList<LireFeature>();
    for (Document document : docs) {
        String[] stored = document.getValues(DocumentBuilder.FIELD_NAME_AUTOCOLORCORRELOGRAM);
        if (stored.length > 0) {
            AutoColorCorrelogram correlogram = new AutoColorCorrelogram();
            correlogram.setStringRepresentation(stored[0]);
            features.add(correlogram);
        }
    }
    System.out.println("--------------- < AutoColorCorrelogram > ---------------");
    long nano = System.nanoTime();
    createFastMapForObjects(features, null);
    nano = System.nanoTime() - nano;
    System.out.println("Time taken: ~ " + (nano / (1000 * 1000)) + " ms");
}
示例14: rerank
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Re-scores the given hits by the inverse distance between each document's
 * stored opponent histogram and the query feature, and returns them sorted
 * by descending score.
 *
 * @param docs    the initial result list to re-rank
 * @param feature the query feature to compare against
 * @param reader  reader used to load each hit's stored feature bytes
 * @return a new TopDocs with re-scored, descending-sorted hits
 * @throws IOException            if a document cannot be read
 * @throws IllegalAccessException if the feature class cannot be instantiated
 * @throws InstantiationException if the feature class cannot be instantiated
 */
private TopDocs rerank(TopDocs docs, LireFeature feature, IndexReader reader) throws IOException, IllegalAccessException, InstantiationException {
    LireFeature tmp = feature.getClass().newInstance();
    ArrayList<ScoreDoc> res = new ArrayList<ScoreDoc>(docs.scoreDocs.length);
    float maxScore = 0f;
    for (int i = 0; i < docs.scoreDocs.length; i++) {
        tmp.setByteArrayRepresentation(reader.document(docs.scoreDocs[i].doc).getBinaryValue(DocumentBuilder.FIELD_NAME_OPPONENT_HISTOGRAM).bytes);
        // Compute the (possibly expensive) distance once per hit; the
        // original called getDistance twice for every document.
        float score = 1 / tmp.getDistance(feature);
        maxScore = Math.max(score, maxScore);
        res.add(new ScoreDoc(docs.scoreDocs[i].doc, score));
    }
    // Sort by descending score.
    Collections.sort(res, new Comparator<ScoreDoc>() {
        @Override
        public int compare(ScoreDoc o1, ScoreDoc o2) {
            // Float.compare avoids the int cast of Math.signum and handles
            // NaN deterministically.
            return Float.compare(o2.score, o1.score);
        }
    });
    return new TopDocs(50, res.toArray(new ScoreDoc[res.size()]), maxScore);
}
示例15: testIndexMissingFiles
import net.semanticmetadata.lire.DocumentBuilder; //导入依赖的package包/类
/**
 * Tests SurfFeatureHistogramBuilder.indexMissing(): first strips the visual
 * word fields from the first ~10% of documents in the index, then asks the
 * builder to recreate them for exactly those documents.
 *
 * @throws IOException if the index cannot be read or written
 */
public void testIndexMissingFiles() throws IOException {
    // first delete some of the existing ones ...
    System.out.println("Deleting visual words from docs ...");
    // Reader and writer are intentionally open on the same index at once:
    // documents are read via ir and re-written (minus the fields) via iw.
    IndexReader ir = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexWriter iw = LuceneUtils.createIndexWriter(index, false);
    int maxDocs = ir.maxDoc();
    for (int i = 0; i < maxDocs / 10; i++) {
        Document d = ir.document(i);
        // Strip the SURF visual-word fields so indexMissing() has work to do.
        d.removeFields(DocumentBuilder.FIELD_NAME_SURF_VISUAL_WORDS);
        d.removeFields(DocumentBuilder.FIELD_NAME_SURF_LOCAL_FEATURE_HISTOGRAM);
        // d.removeFields(DocumentBuilder.FIELD_NAME_SURF);
        // Replace the stored document keyed by its identifier field.
        iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER, d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d);
    }
    System.out.println("# of deleted docs: " + maxDocs / 10);
    System.out.println("Optimizing and closing ...");
    // Writer must be closed before reopening the index below.
    iw.close();
    ir.close();
    System.out.println("Creating new visual words ...");
    // Reopen the modified index and let the builder fill in the gaps.
    SurfFeatureHistogramBuilder surfFeatureHistogramBuilder = new SurfFeatureHistogramBuilder(DirectoryReader.open(FSDirectory.open(new File(index))), numSamples, clusters);
    surfFeatureHistogramBuilder.indexMissing();
    System.out.println("Finished.");
}