This page collects typical usage examples of the Java class net.semanticmetadata.lire.imageanalysis.Histogram: what the Histogram class does, how it is used, and where it shows up in real code.
The Histogram class belongs to the net.semanticmetadata.lire.imageanalysis package. Nine code examples using the class are shown below, sorted by popularity by default.
Example 1: getVisualWords
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
/**
 * Takes a single document, creates its visual words, and adds them to the document. The same document instance is returned.
*
* @param d the document to use for adding the visual words
 * @return the same document, with the visual-word fields added
 * @throws IOException if the cluster file cannot be read
*/
public Document getVisualWords(Document d) throws IOException {
clusters = Cluster.readClusters(clusterFile);
int[] tmpHist = new int[clusters.length];
LireFeature f = getFeatureInstance();
IndexableField[] fields = d.getFields(localFeatureFieldName);
// find the appropriate cluster for each feature:
for (int j = 0; j < fields.length; j++) {
f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset, fields[j].binaryValue().length);
tmpHist[clusterForFeature((Histogram) f)]++;
}
normalize(tmpHist);
d.add(new TextField(visualWordsFieldName, arrayToVisualWordString(tmpHist), Field.Store.YES));
d.add(new StringField(localFeatureHistFieldName, SerializationUtils.arrayToString(tmpHist), Field.Store.YES));
d.removeFields(localFeatureFieldName);
return d;
}
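The core of this method is the bag-of-visual-words step: every local feature votes for its nearest cluster centre, and the vote counts form the histogram that is then normalized and stored. Below is a minimal, self-contained sketch of that step with mock data (the class name, cluster means, and descriptors are invented for illustration; LIRE and Lucene are not required):

import java.util.Arrays;

public class BovwSketch {
    public static void main(String[] args) {
        double[][] clusterMeans = {{0, 0}, {10, 10}, {0, 10}}; // mock codebook
        double[][] descriptors = {{1, 1}, {9, 11}, {0.5, 0.2}, {1, 9}};
        int[] hist = new int[clusterMeans.length];
        for (double[] d : descriptors) {
            hist[nearest(clusterMeans, d)]++; // each descriptor votes for its nearest centre
        }
        System.out.println(Arrays.toString(hist)); // prints [2, 1, 1]
    }

    // Linear scan for the nearest cluster centre (squared Euclidean distance).
    static int nearest(double[][] means, double[] f) {
        int best = 0;
        double bestDist = Double.MAX_VALUE;
        for (int i = 0; i < means.length; i++) {
            double dist = 0;
            for (int k = 0; k < f.length; k++) {
                double diff = means[i][k] - f[k];
                dist += diff * diff;
            }
            if (dist < bestDist) {
                bestDist = dist;
                best = i;
            }
        }
        return best;
    }
}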
Example 2: getVisualWords
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
/**
 * Takes a single document, aggregates its local features into a VLAD vector, and stores it in the document. The same document instance is returned.
*
* @param d the document to use for adding the visual words
 * @return the same document, with the VLAD field added
 * @throws java.io.IOException if the cluster file cannot be read
*/
public Document getVisualWords(Document d) throws IOException { // TODO: Adapt to VLAD!
clusters = Cluster.readClusters(clusterFile);
double[] vlad = null;
LireFeature f = getFeatureInstance();
IndexableField[] fields = d.getFields(localFeatureFieldName);
// find the appropriate cluster for each feature:
for (int j = 0; j < fields.length; j++) {
f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset, fields[j].binaryValue().length);
if (vlad == null) {
vlad = new double[clusters.length * f.getDoubleHistogram().length];
Arrays.fill(vlad, 0d);
}
int clusterIndex = clusterForFeature((Histogram) f);
double[] mean = clusters[clusterIndex].getMean();
for (int k = 0; k < f.getDoubleHistogram().length; k++) {
vlad[clusterIndex * f.getDoubleHistogram().length + k] += f.getDoubleHistogram()[k] - mean[k];
}
}
normalize(vlad);
GenericByteLireFeature feat = new GenericByteLireFeature();
feat.setData(vlad);
d.add(new StoredField(vladFieldName, feat.getByteArrayRepresentation()));
// d.add(new StringField(localFeatureHistFieldName, SerializationUtils.arrayToString(tmpHist), Field.Store.YES));
d.removeFields(localFeatureFieldName);
return d;
}
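Unlike Example 1, this variant aggregates residuals rather than counts: for each local feature, the difference between the feature and the mean of its nearest cluster is accumulated into that cluster's slot of one long vector (VLAD, Vector of Locally Aggregated Descriptors). A minimal sketch of the aggregation with mock data (all names and values are invented for illustration):

import java.util.Arrays;

public class VladSketch {
    public static void main(String[] args) {
        double[][] means = {{0, 0}, {10, 10}};          // k = 2 mock cluster means, d = 2
        double[][] descriptors = {{1, 1}, {9, 11}, {0, 2}};
        int dim = means[0].length;
        double[] vlad = new double[means.length * dim]; // one aggregate vector of length k * d
        for (double[] d : descriptors) {
            int c = nearest(means, d);
            for (int k = 0; k < dim; k++) {
                vlad[c * dim + k] += d[k] - means[c][k]; // accumulate the residual
            }
        }
        System.out.println(Arrays.toString(vlad)); // prints [1.0, 3.0, -1.0, 1.0]
    }

    static int nearest(double[][] means, double[] f) {
        int best = 0;
        double bestDist = Double.MAX_VALUE;
        for (int i = 0; i < means.length; i++) {
            double dist = 0;
            for (int k = 0; k < f.length; k++) {
                double diff = means[i][k] - f[k];
                dist += diff * diff;
            }
            if (dist < bestDist) {
                bestDist = dist;
                best = i;
            }
        }
        return best;
    }
}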
Example 3: clusterForFeature
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
/**
* Find the appropriate cluster for a given feature.
*
 * @param f the feature to assign to a cluster
 * @return the index of the nearest cluster
*/
private int clusterForFeature(Histogram f) {
double distance = clusters[0].getDistance(f);
double tmp;
int result = 0;
for (int i = 1; i < clusters.length; i++) {
tmp = clusters[i].getDistance(f);
if (tmp < distance) {
distance = tmp;
result = i;
}
}
return result;
}
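Note that this is a plain linear scan: assigning one feature costs one distance computation per cluster, so building a histogram for a document with n local features and k clusters is O(n * k). For the codebook sizes typical of bag-of-visual-words setups this is usually acceptable; for very large codebooks an approximate nearest-neighbour structure would be the usual substitute.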
Example 4: indexMissing
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
public void indexMissing() throws IOException {
// Reading clusters from disk:
clusters = Cluster.readClusters(clusterFile);
// create & store histograms:
System.out.println("Creating histograms ...");
int[] tmpHist = new int[numClusters];
LireFeature f = getFeatureInstance();
// based on bug report from Einav Itamar <[email protected]>
IndexWriter iw = LuceneUtils.createIndexWriter(((DirectoryReader) reader).directory(),
false, LuceneUtils.AnalyzerType.WhitespaceAnalyzer);
for (int i = 0; i < reader.maxDoc(); i++) {
// if (!reader.isDeleted(i)) {
for (int j = 0; j < tmpHist.length; j++) {
tmpHist[j] = 0;
}
Document d = reader.document(i);
// Only if there are no values yet:
if (d.getValues(visualWordsFieldName) == null || d.getValues(visualWordsFieldName).length == 0) {
IndexableField[] fields = d.getFields(localFeatureFieldName);
// find the appropriate cluster for each feature:
for (int j = 0; j < fields.length; j++) {
f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset, fields[j].binaryValue().length);
tmpHist[clusterForFeature((Histogram) f)]++;
}
normalize(tmpHist);
d.add(new TextField(visualWordsFieldName, arrayToVisualWordString(tmpHist), Field.Store.YES));
d.add(new StringField(localFeatureHistFieldName, SerializationUtils.arrayToString(tmpHist), Field.Store.YES));
// now write the new one. we use the identifier to update ;)
iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER, d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d);
}
// }
}
iw.commit();
iw.close();
System.out.println("Finished.");
}
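The update pattern here is worth noting: Lucene documents cannot be modified in place, so the method rebuilds the stored document and then calls updateDocument with a Term on the identifier field, which deletes the old document matching that term and adds the new one in a single operation. This is why every document needs a unique value in DocumentBuilder.FIELD_NAME_IDENTIFIER.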
Example 5: getDistance
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
public double getDistance(Histogram f) {
return getDistance(f.descriptor);
}
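The delegation above hands the raw double[] descriptor to the cluster's distance function. The concrete metric depends on the Cluster implementation; a plain Euclidean (L2) distance, sketched below, is the common choice for this kind of centroid assignment (the metric is an assumption for illustration, not a quote of LIRE's code):

public class DistanceSketch {
    // Euclidean (L2) distance between two equally sized descriptors.
    static double l2(double[] a, double[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++) {
            double diff = a[i] - b[i];
            sum += diff * diff;
        }
        return Math.sqrt(sum);
    }

    public static void main(String[] args) {
        System.out.println(l2(new double[]{0, 0}, new double[]{3, 4})); // prints 5.0
    }
}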
Example 6: indexMissing
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
/**
 * Indexes all documents in the index that do not yet include the VLAD feature.
*
* @throws IOException
*/
public void indexMissing() throws IOException {
// Reading clusters from disk:
clusters = Cluster.readClusters(clusterFile);
// create & store histograms:
System.out.println("Creating histograms ...");
// int[] tmpHist = new int[numClusters];
LireFeature f = getFeatureInstance();
IndexWriter iw = LuceneUtils.createIndexWriter(((DirectoryReader) reader).directory(), true, LuceneUtils.AnalyzerType.WhitespaceAnalyzer);
for (int i = 0; i < reader.maxDoc(); i++) {
// if (!reader.isDeleted(i)) {
Document d = reader.document(i);
double[] vlad = null;
// Only if there are no values yet:
if (d.getValues(vladFieldName) == null || d.getValues(vladFieldName).length == 0) {
IndexableField[] fields = d.getFields(localFeatureFieldName);
// find the appropriate cluster for each feature:
for (int j = 0; j < fields.length; j++) {
f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset, fields[j].binaryValue().length);
if (vlad == null) { // init vlad if it is null.
vlad = new double[clusters.length * f.getDoubleHistogram().length];
Arrays.fill(vlad, 0d);
}
int clusterIndex = clusterForFeature((Histogram) f);
double[] mean = clusters[clusterIndex].getMean();
for (int k = 0; k < f.getDoubleHistogram().length; k++) {
vlad[clusterIndex * f.getDoubleHistogram().length + k] += f.getDoubleHistogram()[k] - mean[k];
}
}
normalize(vlad);
GenericByteLireFeature feat = new GenericByteLireFeature();
feat.setData(vlad);
// System.out.println(feat.getStringRepresentation());
d.add(new StoredField(vladFieldName, feat.getByteArrayRepresentation()));
iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER, d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d);
}
// }
}
iw.commit();
iw.close();
System.out.println("Finished.");
}
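One detail to check when reusing this snippet: here createIndexWriter is called with true as its second argument, whereas Example 4 passes false. If that flag is the usual Lucene create/append switch, true would open the index in create mode and discard existing documents, which runs against the purpose of indexMissing; verify the flag's semantics in your version of LuceneUtils before copying this code.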
Example 7: run
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
public void run() {
// int[] tmpHist = new int[numClusters];
LireFeature f = getFeatureInstance();
for (int i = start; i < end; i++) {
try {
// if (!reader.isDeleted(i)) { // TODO!
// for (int j = 0; j < tmpHist.length; j++) {
// tmpHist[j] = 0;
// }
Document d = reader.document(i);
IndexableField[] fields = d.getFields(localFeatureFieldName);
// remove the fields if they are already there ...
d.removeField(vladFieldName);
// d.removeField(localFeatureHistFieldName);
double[] vlad = null;
// VLAD - Vector of Locally Aggregated Descriptors
for (int j = 0; j < fields.length; j++) {
f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset, fields[j].binaryValue().length);
if (vlad == null) { // init vlad if it is null.
vlad = new double[clusters.length * f.getDoubleHistogram().length];
Arrays.fill(vlad, 0d);
}
int clusterIndex = clusterForFeature((Histogram) f);
// System.out.println("clusterIndex = " + clusterIndex);
double[] mean = clusters[clusterIndex].getMean();
for (int k = 0; k < f.getDoubleHistogram().length; k++) {
// System.out.println((clusterIndex*f.getDoubleHistogram().length+k) + " - mean: " + mean.length + " - feature: " + f.getDoubleHistogram().length);
vlad[clusterIndex * f.getDoubleHistogram().length + k] += f.getDoubleHistogram()[k] - mean[k];
}
}
normalize(vlad);
GenericByteLireFeature feat = new GenericByteLireFeature();
feat.setData(vlad);
// System.out.println(feat.getStringRepresentation());
d.add(new StoredField(vladFieldName, feat.getByteArrayRepresentation()));
// d.add(new StringField(localFeatureHistFieldName, SerializationUtils.arrayToString(tmpHist), Field.Store.YES));
// remove local features to save some space if requested:
if (DELETE_LOCAL_FEATURES) {
d.removeFields(localFeatureFieldName);
}
// now write the new one. we use the identifier to update ;)
iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER, d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d);
if (pm != null) {
double len = (double) (end - start);
double percent = (double) (i - start) / len * 45d + 50;
pm.setProgress((int) percent);
pm.setNote("Creating visual words, ~" + (int) percent + "% finished");
}
// }
} catch (IOException e) {
e.printStackTrace();
}
}
}
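The progress arithmetic maps this pass onto the 50-95% range of the monitor: (i - start) / (end - start) scales the thread's share of documents to [0, 1], and multiplying by 45 and adding 50 leaves the lower half of the bar for whatever ran before (presumably the clustering stage). The same pattern appears in Example 8.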
Example 8: run
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
public void run() {
int[] tmpHist = new int[numClusters];
LireFeature f = getFeatureInstance();
for (int i = start; i < end; i++) {
try {
// if (!reader.isDeleted(i)) { // TODO!
for (int j = 0; j < tmpHist.length; j++) {
tmpHist[j] = 0;
}
Document d = reader.document(i);
IndexableField[] fields = d.getFields(localFeatureFieldName);
// remove the fields if they are already there ...
d.removeField(visualWordsFieldName);
d.removeField(localFeatureHistFieldName);
// find the appropriate cluster for each feature:
for (int j = 0; j < fields.length; j++) {
f.setByteArrayRepresentation(fields[j].binaryValue().bytes, fields[j].binaryValue().offset, fields[j].binaryValue().length);
tmpHist[clusterForFeature((Histogram) f)]++;
}
normalize(tmpHist);
d.add(new TextField(visualWordsFieldName, arrayToVisualWordString(tmpHist), Field.Store.YES));
d.add(new StringField(localFeatureHistFieldName, SerializationUtils.arrayToString(tmpHist), Field.Store.YES));
// remove local features to save some space if requested:
if (DELETE_LOCAL_FEATURES) {
d.removeFields(localFeatureFieldName);
}
// now write the new one. we use the identifier to update ;)
iw.updateDocument(new Term(DocumentBuilder.FIELD_NAME_IDENTIFIER, d.getValues(DocumentBuilder.FIELD_NAME_IDENTIFIER)[0]), d);
if (pm != null) {
double len = (double) (end - start);
double percent = (double) (i - start) / len * 45d + 50;
pm.setProgress((int) percent);
pm.setNote("Creating visual words, ~" + (int) percent + "% finished");
}
// }
} catch (IOException e) {
e.printStackTrace();
}
}
}
Example 9: getClusterOfFeature
import net.semanticmetadata.lire.imageanalysis.Histogram; // import the Histogram class
/**
 * Finds the cluster of a feature that was actually used in the clustering process
 * (and is therefore known to the k-means class).
*
* @param f the feature to search for
* @return the index of the Cluster
*/
public int getClusterOfFeature(Histogram f) {
if (featureIndex == null) createIndex();
return featureIndex.get(f);
}
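This lookup only works for features that took part in clustering: createIndex() presumably fills a map from each training feature to its assigned cluster, so featureIndex.get(f) on an unseen feature would return null and fail on unboxing. A minimal sketch of such a lookup table, with invented names and a String key standing in for the feature object:

import java.util.HashMap;
import java.util.Map;

public class FeatureIndexSketch {
    // Maps a feature key to the cluster index it was assigned during training.
    private final Map<String, Integer> featureIndex = new HashMap<>();

    void record(String featureKey, int clusterIndex) {
        featureIndex.put(featureKey, clusterIndex);
    }

    int clusterOf(String featureKey) {
        Integer c = featureIndex.get(featureKey);
        if (c == null) {
            throw new IllegalArgumentException("feature was not part of the training set: " + featureKey);
        }
        return c;
    }

    public static void main(String[] args) {
        FeatureIndexSketch idx = new FeatureIndexSketch();
        idx.record("feature-42", 3);
        System.out.println(idx.clusterOf("feature-42")); // prints 3
    }
}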