本文整理汇总了Java中com.carrotsearch.hppc.IntIntOpenHashMap.containsKey方法的典型用法代码示例。如果您正苦于以下问题:Java IntIntOpenHashMap.containsKey方法的具体用法?Java IntIntOpenHashMap.containsKey怎么用?Java IntIntOpenHashMap.containsKey使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.carrotsearch.hppc.IntIntOpenHashMap的用法示例。
在下文中一共展示了IntIntOpenHashMap.containsKey方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: remapGapIndices
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/** Called by {@link CTReader#normalizeIndices(CTTree)}. */
/**
 * Recursively rewrites the gap indices of {@code curr} and its subtree so they
 * form a dense sequence. Called by {@link CTReader#normalizeIndices(CTTree)}.
 *
 * @param map       old gap index -> new gap index, shared across the traversal
 * @param lastIndex single-element array holding the next free index (mutable counter)
 * @param curr      the node whose subtree is remapped in place
 */
static private void remapGapIndices(IntIntOpenHashMap map, int[] lastIndex, CTNode curr)
{
	final int oldGap = curr.gapIndex;

	if (map.containsKey(oldGap))
	{
		// This gap index was seen before: reuse the index assigned back then.
		curr.gapIndex = map.get(oldGap);
	}
	else if (oldGap != -1)
	{
		// First occurrence of a real gap index: hand out the next free index.
		curr.gapIndex = lastIndex[0];
		map.put(oldGap, lastIndex[0]++);
	}

	for (CTNode child : curr.ls_children)
		remapGapIndices(map, lastIndex, child);
}
示例2: BoostedComp
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Builds a doc-id -> boosted-score map for the given boosted documents.
 * Iteration stops at the first non-boosted doc, so only a leading run of
 * boosted docs is recorded -- presumably the caller sorts boosted docs to
 * the front of {@code scoreDocs}; TODO confirm with the caller.
 *
 * @param boostedDocs map from doc id to its boost payload
 * @param scoreDocs   scored documents, boosted ones expected first
 * @param maxScore    base score added on top of each boost value
 */
public BoostedComp(IntIntOpenHashMap boostedDocs, ScoreDoc[] scoreDocs, float maxScore) {
// Presize to 2x the entry count to limit rehashing while filling.
this.boostedMap = new IntFloatOpenHashMap(boostedDocs.size()*2);
for(int i=0; i<scoreDocs.length; i++) {
if(boostedDocs.containsKey(scoreDocs[i].doc)) {
// HPPC idiom: lget() reads the value of the key just probed by containsKey().
boostedMap.put(scoreDocs[i].doc, maxScore+boostedDocs.lget());
} else {
// First non-boosted doc ends the scan; remaining docs are not inspected.
break;
}
}
}
示例3: merge
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Merges every span that is fully enclosed by another span into that
 * enclosing span and returns the reduced list. This object itself is used
 * as the comparator for the initial sort.
 *
 * @param spans the spans to check; the same list instance is returned when
 *              no span encloses another
 * @return the merged markings, or {@code spans} unchanged if nothing merged
 */
@SuppressWarnings("unchecked")
protected List<T> merge(List<T> spans) {
Span spanArray[] = spans.toArray(new Span[spans.size()]);
// Sort with this instance as comparator (ordering defined elsewhere in this class).
Arrays.sort(spanArray, this);
// Maps the index of an enclosed span to the index of the enclosing span found for it.
IntIntOpenHashMap enclosedByMap = new IntIntOpenHashMap();
boolean isEnclosed;
for (int i = 0; i < spanArray.length; ++i) {
isEnclosed = false;
// Scan from the end of the sorted array; stop at the first enclosing span.
for (int j = spanArray.length - 1; (j > i) && (!isEnclosed); --j) {
// if spanArray[i] is enclosed by spanArray[j]
if ((spanArray[i].getStartPosition() >= spanArray[j].getStartPosition())
&& ((spanArray[i].getStartPosition() + spanArray[i].getLength()) <= (spanArray[j]
.getStartPosition() + spanArray[j].getLength()))) {
enclosedByMap.put(i, j);
isEnclosed = true;
}
}
}
// if no match could be found
if (enclosedByMap.size() == 0) {
return spans;
}
List<T> mergedMarkings = new ArrayList<T>(spans.size());
// starting with the smallest span, check if a span is enclosed by
// another
int largerSpanId;
for (int i = 0; i < spanArray.length; ++i) {
if (enclosedByMap.containsKey(i)) {
// HPPC idiom: lget() reads the value of the key just probed by containsKey(i).
largerSpanId = enclosedByMap.lget();
// Fold the enclosed span into its enclosing span via the two-arg overload.
spanArray[largerSpanId] = merge(spanArray[i], spanArray[largerSpanId]);
} else {
mergedMarkings.add((T) spanArray[i]);
}
}
return mergedMarkings;
}
示例4: BoostedComp
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Builds a doc-id -> boosted-score map for the given boosted documents.
 * Iteration stops at the first non-boosted doc, so only a leading run of
 * boosted docs is recorded -- presumably the caller sorts boosted docs to
 * the front of {@code scoreDocs}; TODO confirm with the caller.
 *
 * @param boostedDocs map from doc id to its boost payload
 * @param scoreDocs   scored documents, boosted ones expected first
 * @param maxScore    base score added on top of each boost value
 */
public BoostedComp(IntIntOpenHashMap boostedDocs, ScoreDoc[] scoreDocs, float maxScore) {
// Presize to 2x the entry count to limit rehashing while filling.
this.boostedMap = new IntFloatOpenHashMap(boostedDocs.size()*2);
for(int i=0; i<scoreDocs.length; i++) {
if(boostedDocs.containsKey(scoreDocs[i].doc)) {
// HPPC idiom: lget() reads the value of the key just probed by containsKey().
boostedMap.put(scoreDocs[i].doc, maxScore+boostedDocs.lget());
} else {
// First non-boosted doc ends the scan; remaining docs are not inspected.
break;
}
}
}
示例5: requestDocumentsWithWord
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Collects, for the given word, the token positions at which it occurs in
 * every matching document across all index segments, and records each
 * document's length from the stored field {@code docLengthFieldName}.
 *
 * @param word            the term text looked up in {@code fieldName}
 * @param positionsInDocs global doc id -> per-word position lists; updated in place
 * @param docLengths      global doc id -> document length; filled in place
 * @param wordId          slot of this word inside each positions array
 * @param numberOfWords   total number of words (size of each positions array)
 */
protected void requestDocumentsWithWord(String word, IntObjectOpenHashMap<IntArrayList[]> positionsInDocs,
IntIntOpenHashMap docLengths, int wordId, int numberOfWords) {
DocsAndPositionsEnum docPosEnum = null;
Term term = new Term(fieldName, word);
int localDocId, globalDocId, baseDocId;
IntArrayList positions[];
try {
// Iterate over all segment readers of the index.
for (int i = 0; i < reader.length; i++) {
docPosEnum = reader[i].termPositionsEnum(term);
// Offset that converts segment-local doc ids into global doc ids.
baseDocId = contexts[i].docBase;
if (docPosEnum != null) {
while (docPosEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
localDocId = docPosEnum.docID();
globalDocId = localDocId + baseDocId;
// if this is the first word and we found a new document
if (!positionsInDocs.containsKey(globalDocId)) {
positions = new IntArrayList[numberOfWords];
positionsInDocs.put(globalDocId, positions);
} else {
positions = positionsInDocs.get(globalDocId);
}
// Lazily create the position list for this word in this document.
if (positions[wordId] == null) {
positions[wordId] = new IntArrayList();
}
// Go through the positions inside this document
for (int p = 0; p < docPosEnum.freq(); ++p) {
positions[wordId].add(docPosEnum.nextPosition());
}
// Record the document length only once per document.
if (!docLengths.containsKey(globalDocId)) {
// Get the length of the document
docLengths.put(globalDocId, reader[i].document(localDocId).getField(docLengthFieldName)
.numericValue().intValue());
}
}
}
}
} catch (IOException e) {
// Best effort: log and return; partial results may already be in the maps.
LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
}
}
示例6: checkMonotonicity
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/**
* Throws an exception, if the hierarchy is not monotonic.
*
* TODO: This is a potentially expensive check that should be done when loading the hierarchy
*
* @param manager
*/
/**
 * Throws an exception if the generalization hierarchy for this attribute is
 * not monotonic, i.e. if some value on one level maps to two different
 * values on the next level.
 *
 * TODO: This is a potentially expensive check that should be done when loading the hierarchy
 *
 * @param manager provides the generalized data and its dictionary
 */
public void checkMonotonicity(DataManager manager) {

	// Locate this attribute's dictionary in the generalized data header
	String[] dictionary = null;
	String[] header = manager.getDataGeneralized().getHeader();
	for (int column = 0; column < header.length; column++) {
		if (header[column].equals(attribute)) {
			dictionary = manager.getDataGeneralized().getDictionary().getMapping()[column];
		}
	}
	if (dictionary == null) {
		throw new IllegalStateException("Cannot obtain dictionary for attribute ("+attribute+")");
	}

	// For the current level: value -> its unique value on the next level
	final IntIntOpenHashMap successors = new IntIntOpenHashMap();

	// Walk level by level and verify each value has exactly one successor
	for (int level = 0; level < (map[0].length - 1); level++) {
		successors.clear();
		for (int row = 0; row < map.length; row++) {
			final int current = map[row][level];
			final int next = map[row][level + 1];
			if (!successors.containsKey(current)) {
				// First time this value is seen on this level: remember its successor
				successors.put(current, next);
			} else if (successors.get(current) != next) {
				// Conflicting successors: the mapping is ambiguous, not a hierarchy
				String in = dictionary[current];
				String out1 = dictionary[successors.get(current)];
				String out2 = dictionary[next];
				throw new IllegalArgumentException("The transformation rule for the attribute '" + attribute + "' is not a hierarchy. ("+in+") can either be transformed to ("+out1+") or to ("+out2+")");
			}
		}
	}
}
示例7: dirichletMultinomialLikelihoodRatio
import com.carrotsearch.hppc.IntIntOpenHashMap; //导入方法依赖的package包/类
/** What is the probability that these two observations were drawn from
 * the same multinomial with symmetric Dirichlet prior alpha, relative
 * to the probability that they were drawn from different multinomials
 * both drawn from this Dirichlet?
 *
 * @param countsX  observation counts (category id -> count) for sample X
 * @param countsY  observation counts (category id -> count) for sample Y
 * @param alpha    symmetric Dirichlet concentration parameter per category
 * @param alphaSum sum of alpha over all categories
 * @return the log likelihood ratio
 */
public static double dirichletMultinomialLikelihoodRatio(IntIntOpenHashMap countsX,
	IntIntOpenHashMap countsY,
	double alpha, double alphaSum) {
	// The likelihood for one DCM is
	// Gamma( alpha_sum )     prod Gamma( alpha + N_i )
	// prod Gamma ( alpha )   Gamma ( alpha_sum + N )
	// When we divide this by the product of two other DCMs with the same
	// alpha parameter, the first term in the numerator cancels with the
	// first term in the denominator. Then moving the remaining alpha-only
	// term to the numerator, we get
	// prod Gamma(alpha)    prod Gamma( alpha + X_i + Y_i )
	// Gamma (alpha_sum)    Gamma( alpha_sum + X_sum + Y_sum )
	// ----------------------------------------------------------
	// prod Gamma(alpha + X_i)      prod Gamma(alpha + Y_i)
	// Gamma( alpha_sum + X_sum )   Gamma( alpha_sum + Y_sum )
	double logLikelihood = 0.0;
	// Precompute log Gamma(alpha): it is added once per distinct key below.
	double logGammaAlpha = logGamma(alpha);
	int totalX = 0;
	int totalY = 0;
	int key, x, y;
	// Union of the category keys observed in either sample.
	IntOpenHashSet distinctKeys = new IntOpenHashSet();
	distinctKeys.addAll(countsX.keys());
	distinctKeys.addAll(countsY.keys());
	Iterator<IntCursor> iterator = distinctKeys.iterator();
	while (iterator.hasNext()) {
		key = iterator.next().value;
		x = 0;
		if (countsX.containsKey(key)) {
			x = countsX.get(key);
		}
		y = 0;
		if (countsY.containsKey(key)) {
			y = countsY.get(key);
		}
		totalX += x;
		totalY += y;
		// FIX: reuse the precomputed logGammaAlpha instead of recomputing
		// logGamma(alpha) on every iteration (the local was previously unused).
		logLikelihood += logGammaAlpha + logGamma(alpha + x + y)
			- logGamma(alpha + x) - logGamma(alpha + y);
	}
	logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY)
		- logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY);
	return logLikelihood;
}