本文整理汇总了Java中com.carrotsearch.hppc.ObjectIntOpenHashMap.lget方法的典型用法代码示例。如果您正苦于以下问题:Java ObjectIntOpenHashMap.lget方法的具体用法?Java ObjectIntOpenHashMap.lget怎么用?Java ObjectIntOpenHashMap.lget使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.carrotsearch.hppc.ObjectIntOpenHashMap
的用法示例。
在下文中一共展示了ObjectIntOpenHashMap.lget方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: register
import com.carrotsearch.hppc.ObjectIntOpenHashMap; //导入方法依赖的package包/类
/**
* Registers a new string at the dictionary.
*
* @param dimension
* the dimension
* @param string
* the string
* @return the int
*/
/**
 * Registers a string in the per-dimension dictionary and returns its id.
 * A string seen for the first time is assigned the next free id (the current
 * map size); a known string yields the id it was registered under before.
 *
 * @param dimension
 *            index into the {@code maps} array selecting the dictionary
 * @param string
 *            the string to register
 * @return the (new or existing) integer id of the string
 */
public int register(final int dimension, final String string) {
    final ObjectIntOpenHashMap<String> dictionary = maps[dimension];
    final int candidateId = dictionary.size();
    // putIfAbsent returns true when the key was newly inserted; when it
    // returns false, lget() reads the value located by its internal lookup.
    return dictionary.putIfAbsent(string, candidateId) ? candidateId : dictionary.lget();
}
示例2: createMatrix
import com.carrotsearch.hppc.ObjectIntOpenHashMap; //导入方法依赖的package包/类
/**
 * Builds an (entity mention x token) co-occurrence count matrix from the given
 * documents. For every entity occurrence, the tokens inside a window of
 * {@code windowSize} positions before its start and after its end are counted.
 * Every entity MENTION gets its own matrix row; repeated mentions of the same
 * entity are NOT merged into one row.
 *
 * @param tokenizedDocuments
 *            the documents, each providing its tokens and entity positions
 * @param windowSize
 *            number of token positions counted before and after each entity
 * @return a dense matrix of size (number of entity mentions) x (token vocabulary size)
 */
public static Matrix createMatrix(TokenizedDocument[] tokenizedDocuments, int windowSize) {
    // Map every distinct token string to a dense integer id and translate all
    // documents into arrays of those ids.
    ObjectIntOpenHashMap<String> tokenVocabulary = new ObjectIntOpenHashMap<String>();
    int tokenIds[][] = new int[tokenizedDocuments.length][];
    for (int i = 0; i < tokenizedDocuments.length; ++i) {
        String[] tokens = tokenizedDocuments[i].tokens;
        tokenIds[i] = new int[tokens.length];
        for (int j = 0; j < tokens.length; ++j) {
            if (tokenVocabulary.containsKey(tokens[j])) {
                // HPPC idiom: lget() reads the value found by containsKey() above.
                tokenIds[i][j] = tokenVocabulary.lget();
            } else {
                tokenIds[i][j] = tokenVocabulary.size();
                tokenVocabulary.put(tokens[j], tokenIds[i][j]);
            }
        }
    }
    // One matrix row per entity mention.
    int entityCount = 0;
    for (int i = 0; i < tokenizedDocuments.length; ++i) {
        entityCount += tokenizedDocuments[i].entities.length;
    }
    Matrix matrix = new Basic2DMatrix(entityCount, tokenVocabulary.size());
    // Walk every document and every entity mention inside it.
    int entityId = 0;
    for (int d = 0; d < tokenizedDocuments.length; ++d) {
        int[] docTokenIds = tokenIds[d];
        for (int e = 0; e < tokenizedDocuments[d].entities.length; ++e) {
            // NOTE(review): assumes entity.start is the index of the entity's
            // first token and entity.end the index just past its last token --
            // confirm against the TokenizedDocument contract.
            int start = tokenizedDocuments[d].entities[e].start;
            int entityEnd = tokenizedDocuments[d].entities[e].end;
            // Count the tokens in the window directly before the entity...
            for (int t = Math.max(0, start - windowSize); t < start; ++t) {
                matrix.set(entityId, docTokenIds[t], matrix.get(entityId, docTokenIds[t]) + 1);
            }
            // ...and in the window directly after it, clipped to the document end.
            int windowEnd = Math.min(docTokenIds.length, entityEnd + windowSize);
            for (int t = entityEnd; t < windowEnd; ++t) {
                matrix.set(entityId, docTokenIds[t], matrix.get(entityId, docTokenIds[t]) + 1);
            }
            ++entityId;
        }
    }
    return matrix;
}