This article collects typical usage examples of the Java method gnu.trove.map.hash.TObjectIntHashMap.get. If you have been wondering what TObjectIntHashMap.get does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, gnu.trove.map.hash.TObjectIntHashMap.
The following shows 12 code examples of the TObjectIntHashMap.get method, sorted by popularity by default.
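As a quick orientation before the examples: get returns a primitive int rather than an Integer, so an absent key yields the map's no-entry value (0 by default) instead of null. Below is a minimal, illustrative sketch; the class and variable names are made up for demonstration and do not appear in the examples.

import gnu.trove.map.hash.TObjectIntHashMap;

public class TObjectIntHashMapGetDemo {
    public static void main(String[] args) {
        TObjectIntHashMap<String> counts = new TObjectIntHashMap<>();
        counts.put("apple", 3);

        int apples = counts.get("apple");             // 3: the mapped primitive int
        int pears = counts.get("pear");               // 0: the no-entry value, not null
        boolean hasPear = counts.containsKey("pear"); // false: distinguishes "missing" from "mapped to 0"

        System.out.println(apples + " " + pears + " " + hasPear);
    }
}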
Example 1: writeChunkCounts
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
private static <T> void writeChunkCounts(JsonWriter writer, String name, final TObjectIntHashMap<T> map, int max) throws IOException
{
    List<T> sortedCoords = new ArrayList<T>(map.keySet());
    Collections.sort(sortedCoords, new Comparator<T>()
    {
        @Override
        public int compare(T s1, T s2)
        {
            return map.get(s2) - map.get(s1);
        }
    });
    int i = 0;
    writer.name(name).beginArray();
    for (T key : sortedCoords)
    {
        if ((max > 0) && (i++ > max))
        {
            break;
        }
        if (map.get(key) < 5)
        {
            continue;
        }
        writer.beginObject();
        writer.name("key").value(key.toString());
        writer.name("count").value(map.get(key));
        writer.endObject();
    }
    writer.endArray();
}
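One caveat about the comparator above: subtracting two int counts can overflow for very large values. A small alternative sketch of the same sort, using Integer.compare to keep the descending order without the overflow risk (it is a drop-in replacement for the sort call in Example 1, not part of the original code):

Collections.sort(sortedCoords, new Comparator<T>()
{
    @Override
    public int compare(T s1, T s2)
    {
        // Descending by count, without the overflow risk of subtraction.
        return Integer.compare(map.get(s2), map.get(s1));
    }
});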
Example 2: StrippedPartition
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
public StrippedPartition(String[] columnContent) {
    TObjectIntHashMap<String> valueToIndex = new TObjectIntHashMap<>();
    LinkedHashMap<Integer, TEquivalence> helpMap = new LinkedHashMap<>();
    for (int rowIndex = 0; rowIndex < columnContent.length; rowIndex++) {
        String value = columnContent[rowIndex];
        // if the value wasn't there yet, the row index becomes the representative
        // for that equivalence class
        if (!valueToIndex.containsKey(value)) {
            valueToIndex.put(value, rowIndex);
            TEquivalence equivalenceGroup = new EquivalenceGroupTIntHashSet();
            equivalenceGroup.add(rowIndex);
            helpMap.put(rowIndex, equivalenceGroup);
        }
        // otherwise find the right equivalence class and add the current element index
        else {
            int equivalenceGroupIndex = valueToIndex.get(value);
            TEquivalence equivalenceClass = helpMap.get(equivalenceGroupIndex);
            equivalenceClass.add(rowIndex);
        }
    }
    // remove equivalence classes with only one element
    for (Iterator<Map.Entry<Integer, TEquivalence>> it = helpMap.entrySet().iterator(); it.hasNext(); ) {
        Map.Entry<Integer, TEquivalence> entry = it.next();
        if (entry.getValue().size() <= 1) {
            it.remove();
        }
    }
    // sort the stripped partition by equivalence group sizes
    this.addAll(helpMap.values());
}
Example 3: getTermProbGivenField
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
public double getTermProbGivenField(String t, String f) {
    TObjectIntHashMap<String> fieldMap = termInFieldFrequencies.get(f);
    if (fieldMap == null) {
        return smoothing;
        //throw new IllegalArgumentException(String.format("Field %s not found.", f));
    } else {
        if (!fieldMap.containsKey(t)) {
            return smoothing;
        } else {
            double num = fieldMap.get(t) + 0.0;
            return num / fieldLengths.get(f);
        }
    }
}
Example 4: count
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
@Override
public void count(ItemVO<Integer, Integer> item1, ItemVO<Integer, Integer> item2)
        throws Exception {
    ItemVO<Integer, Integer> mainKey, subKey;
    TObjectIntHashMap<ItemVO<Integer, Integer>> set;
    if (item1.getItem() < item2.getItem()) {
        mainKey = item1;
        subKey = item2;
    } else {
        mainKey = item2;
        subKey = item1;
    }
    set = map.get(mainKey);
    if (set == null) {
        set = new TObjectIntHashMap<ItemVO<Integer, Integer>>();
        map.put(mainKey, set);
    }
    // TObjectIntHashMap.get returns a primitive int (the no-entry value when the key is absent),
    // so it can never be null; use containsKey to detect a previously unseen pair.
    int cnt;
    if (!set.containsKey(subKey)) {
        counter++;
        cnt = 1;
    } else {
        cnt = set.get(subKey) + 1;
    }
    set.put(subKey, cnt);
}
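A side note on the counting step above: Trove's adjustOrPutValue covers the same increment-or-insert logic in one call. It does not report whether the key was new, which is why the containsKey check is kept in Example 4 to maintain its counter field. A minimal, illustrative sketch with made-up names:

import gnu.trove.map.hash.TObjectIntHashMap;

public class TallySketch {
    public static void main(String[] args) {
        TObjectIntHashMap<String> tally = new TObjectIntHashMap<>();
        // Adds 1 to the existing count, or inserts the key with the value 1 if it is absent.
        tally.adjustOrPutValue("someKey", 1, 1);
        tally.adjustOrPutValue("someKey", 1, 1);
        System.out.println(tally.get("someKey")); // 2
    }
}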
Example 5: run
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
@Override
public void run(Parameters argp) throws Exception {
    TObjectIntHashMap<String> pubdateMap = Athena.init(argp).getDataset().getPubDateMap();
    String key = argp.getString("input");
    int year = pubdateMap.get(key);
    System.out.println(year);
}
Example 6: resultsMapToStringSet
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
@SuppressWarnings("rawtypes")
public static FixedSizeSortedSet<SearchResult> resultsMapToStringSet(SearchResultType resultType, TObjectIntHashMap<String> results, int limit) {
    FixedSizeSortedSet<SearchResult> orderedResults = new FixedSizeSortedSet<SearchResult>(new SearchResultComparator(), limit);
    int count = 0;
    for (Object result : results.keys()) {
        count = results.get(result);
        orderedResults.add(new SearchResult<String>(resultType, (String) result, count));
    }
    return orderedResults;
}
Example 7: addResultToMap
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
public static void addResultToMap(TObjectIntHashMap<String> results, TObjectIntHashMap<String> resultsToAdd) {
    Object[] resultsToAddKeys = resultsToAdd.keys();
    int[] resultsToAddValues = resultsToAdd.values();
    for (int i = 0; i < resultsToAddKeys.length; i++) {
        int count = 0;
        if (results.contains(resultsToAddKeys[i])) {
            count = results.get(resultsToAddKeys[i]);
        }
        count += resultsToAddValues[i];
        results.put((String) resultsToAddKeys[i], count);
    }
}
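An alternative worth noting: the same merge can be written with Trove's primitive-aware iteration, which avoids the parallel Object[]/int[] arrays and the String cast. A sketch under that assumption (the method name mergeInto is made up for illustration):

import gnu.trove.map.hash.TObjectIntHashMap;
import gnu.trove.procedure.TObjectIntProcedure;

public static void mergeInto(final TObjectIntHashMap<String> results, TObjectIntHashMap<String> resultsToAdd) {
    resultsToAdd.forEachEntry(new TObjectIntProcedure<String>() {
        @Override
        public boolean execute(String key, int value) {
            // Add value to the existing count, or insert the key with value if it is absent.
            results.adjustOrPutValue(key, value, value);
            return true; // keep iterating over all entries
        }
    });
}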
Example 8: getOrAssign
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
public final int getOrAssign(String input) {
    TObjectIntHashMap<String> bucket = getBucket(input);
    int id = bucket.get(input);
    if (id == bucket.getNoEntryValue()) {
        id = nextId++;
        bucket.put(input, id);
    }
    return id;
}
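The lookup above only works if no valid id ever equals the map's no-entry value (0 by default), which would collide with the first assigned id. Below is a simplified, single-bucket sketch that makes the sentinel explicit by constructing the map with -1 as its no-entry value; the class name IdAssigner, the -1 sentinel, and ids starting at 0 are assumptions for illustration, not part of the example above.

import gnu.trove.impl.Constants;
import gnu.trove.map.hash.TObjectIntHashMap;

public class IdAssigner {
    // -1 as the no-entry value can never collide with ids assigned from 0 upwards.
    private final TObjectIntHashMap<String> bucket =
            new TObjectIntHashMap<>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
    private int nextId = 0;

    public int getOrAssign(String input) {
        int id = bucket.get(input);
        if (id == bucket.getNoEntryValue()) {
            id = nextId++;
            bucket.put(input, id);
        }
        return id;
    }
}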
Example 9: readAscii
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
/**
 * Reads an ASCII STL file.
 *
 * @param is InputStream to read from.
 * @return Mesh
 * @throws IOException If an error occurs during reading.
 */
private Mesh readAscii(InputStream is) throws IOException {
    BufferedReader br = new BufferedReader(new InputStreamReader(is));
    String line = null;
    /* Prepare empty mesh. */
    Mesh mesh = new Mesh(100, 100);
    /* Prepare helper structures. */
    TObjectIntHashMap<Vector3f> vertexBuffer = new TObjectIntHashMap<>();
    int index = 0;
    int[] vertexindices = new int[3];
    while ((line = br.readLine()) != null && !line.startsWith("endsolid")) {
        line = line.trim();
        /* Detect end of STL file. */
        if (line.startsWith("endsolid")) {
            break;
        }
        /* Detect begin of facet. */
        if (line.startsWith("facet normal ")) {
            int vidx = 0;
            while ((line = br.readLine()) != null) {
                line = line.trim(); /* Trim line. */
                /* Detect end of facet. */
                if (line.equals("endfacet")) {
                    break;
                }
                /* Detect vertex. */
                if (line.startsWith("vertex")) {
                    String[] splitVertex = line.split("\\s+");
                    Vector3f vertex = new Vector3f(Float.parseFloat(splitVertex[1]), Float.parseFloat(splitVertex[2]), Float.parseFloat(splitVertex[3]));
                    if (!vertexBuffer.containsKey(vertex)) {
                        mesh.addVertex(vertex);
                        vertexBuffer.put(vertex, index);
                        index++;
                    }
                    vertexindices[vidx] = vertexBuffer.get(vertex);
                    vidx++;
                }
            }
            /* Add a new face to the Mesh. */
            mesh.addFace(new Vector3i(vertexindices[0], vertexindices[1], vertexindices[2]));
        }
    }
    /* Close the buffered reader. */
    br.close();
    /* This covers the case where the file starts with 'solid ' but is not an ASCII file. Unfortunately, such files do exist. */
    if (mesh.numberOfVertices() == 0) {
        LOGGER.warn("The provided ASCII STL file does not seem to contain any normals or vertices. Trying to decode it as binary STL even though it was marked as being ASCII.");
        InputStream newIs = Files.newInputStream(this.inputFile);
        return this.readBinary(newIs, 80);
    } else {
        return mesh;
    }
}
Example 10: readBinary
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
/**
 * Reads a binary STL file.
 *
 * @param is InputStream to read from.
 * @param skip Number of bytes to skip before reading the STL file.
 * @return Mesh
 * @throws IOException If an error occurs during reading.
 */
private Mesh readBinary(InputStream is, int skip) throws IOException {
    /* Prepare a ByteBuffer to read the rest of the STL file. */
    byte[] bytes = new byte[50];
    ByteBuffer buffer = ByteBuffer.wrap(bytes);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    /* Skip the STL header! */
    is.skip(skip);
    /* Read the bytes for the size (unsigned 32 bit int, little-endian). */
    byte[] sizeBytes = new byte[4];
    is.read(sizeBytes, 0, 4);
    long triangles = ((sizeBytes[0] & 0xFF)) | ((sizeBytes[1] & 0xFF) << 8) | ((sizeBytes[2] & 0xFF) << 16) | ((sizeBytes[3] & 0xFF) << 24);
    /* TODO: Properly handle models whose triangles > MAX_TRIANGLES. */
    if (triangles <= 0) {
        LOGGER.error("The number of triangles in the Mesh seems to be smaller than zero. This STL file is probably corrupt!");
        return null;
    } else if (triangles > MAX_TRIANGLES) {
        LOGGER.error("The number of triangles in the Mesh exceeds the limit that can currently be processed by STLMeshDecoder. The Mesh will be downsampled!");
        return null;
    }
    /* Prepare Mesh. */
    Mesh mesh = new Mesh((int) triangles, (int) triangles);
    /* Prepare helper structures. */
    TObjectIntHashMap<Vector3f> vertexBuffer = new TObjectIntHashMap<>();
    int index = 0;
    int[] vertexindices = new int[3];
    /* Now add all triangles. */
    for (int i = 0; i < triangles; i++) {
        /* Read the 50 bytes of one triangle record (12 floats plus the 2-byte attribute count) from the stream. */
        buffer.rewind();
        is.read(bytes);
        /* Read and ignore the three floats of the facet normal. */
        buffer.getFloat();
        buffer.getFloat();
        buffer.getFloat();
        /* Add the vertices and the vertex-normal to the mesh. */
        for (int vidx = 0; vidx < 3; vidx++) {
            Vector3f vertex = new Vector3f(buffer.getFloat(), buffer.getFloat(), buffer.getFloat());
            if (!vertexBuffer.containsKey(vertex)) {
                mesh.addVertex(vertex);
                vertexBuffer.put(vertex, index);
                index++;
            }
            vertexindices[vidx] = vertexBuffer.get(vertex);
        }
        /* Add a new face to the Mesh. */
        if (!mesh.addFace(new Vector3i(vertexindices[0], vertexindices[1], vertexindices[2]))) {
            LOGGER.warn("Could not add face {}/{}/{} because index points to non-existing vertex.", vertexindices[0], vertexindices[1], vertexindices[2]);
        }
    }
    /* Close the InputStream. */
    is.close();
    return mesh;
}
Example 11: parseThreeJSV4Geometry
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
/**
 * Parses a Base64 encoded data URL and treats it as Geometry JSON used by the Three.js JavaScript library.
 * Tries to parse the structure into a 3D mesh.
 *
 * @param dataUrl Data URL that should be parsed.
 * @return Mesh; if parsing fails, the returned Mesh will be empty!
 */
public static Mesh parseThreeJSV4Geometry(String dataUrl) {
    /* Convert Base64 string into byte array. */
    byte[] bytes = dataURLtoByteArray(dataUrl, MIME_TYPE);
    ObjectMapper mapper = new ObjectMapper();
    try {
        /* Read the JSON structure of the transmitted mesh data. */
        JsonNode node = mapper.readTree(bytes);
        JsonNode vertices = node.get(VERTICES_PROPERTY_NAME_THREEV4);
        if (vertices == null || !vertices.isArray() || vertices.size() == 0) {
            LOGGER.error("Submitted mesh does not contain any vertices. Aborting...");
            return Mesh.EMPTY;
        }
        /* Create new Mesh. */
        Mesh mesh = new Mesh(vertices.size() / 9, vertices.size() / 3);
        /* Prepare helper structures. */
        TObjectIntHashMap<Vector3f> vertexBuffer = new TObjectIntHashMap<>();
        int index = 0;
        int[] vertexindices = new int[3];
        /* Add all the vertices and normals in the structure. */
        for (int i = 0; i <= vertices.size() - 9; i += 9) {
            for (int j = 0; j < 3; j++) {
                int idx = i + 3 * j;
                Vector3f vertex = new Vector3f((float) vertices.get(idx).asDouble(), (float) vertices.get(idx + 1).asDouble(), (float) vertices.get(idx + 2).asDouble());
                if (!vertexBuffer.containsKey(vertex)) {
                    vertexBuffer.put(vertex, index++);
                    mesh.addVertex(vertex);
                }
                vertexindices[j] = vertexBuffer.get(vertex);
            }
            mesh.addFace(new Vector3i(vertexindices[0], vertexindices[1], vertexindices[2]));
        }
        return mesh;
    } catch (IOException e) {
        LOGGER.error("Could not create 3d mesh from Base64 input because the file-format is not supported. {}", LogHelper.getStackTrace(e));
        return Mesh.EMPTY;
    }
}
Example 12: calculate
import gnu.trove.map.hash.TObjectIntHashMap; // import the package/class required by this method
/**
 * The input should be a {@link Map} for each rater where the keys represent
 * all the subjects that were rated by the raters and the values represent
 * the annotations given by the raters. Agreement between the raters is
 * determined by {@link #equals(Object)} for the INSTANCE type. Annotations
 * for subjects which are not in both sets are ignored.
 *
 * @see "http://en.wikipedia.org/wiki/Cohen's_kappa"
 *
 * @param rater1
 *            The annotations from rater 1
 * @param rater2
 *            The annotations from rater 2
 * @return Cohen's Kappa [0,1]
 */
public static <K, A> double calculate(
        final Map<K, A> rater1,
        final Map<K, A> rater2)
{
    int totalCount = 0;
    int agreementCount = 0;
    final TObjectIntHashMap<A> answerCountsR1 = new TObjectIntHashMap<A>();
    final TObjectIntHashMap<A> answerCountsR2 = new TObjectIntHashMap<A>();
    for (final K subjectKey : rater1.keySet())
    {
        // We can only form an agreement if both raters rated this
        // specific subject, so let's check
        if (rater2.keySet().contains(subjectKey))
        {
            final A r1a = rater1.get(subjectKey);
            final A r2a = rater2.get(subjectKey);
            // It's possible that the key exists but is mapped to
            // a null value (for example, if majority voting was used
            // to generate the set and there was no majority).
            if (r1a == null || r2a == null)
                continue;
            // Get the answers from the raters
            final A annotation1 = r1a;
            final A annotation2 = r2a;
            // Count the agreements
            if (annotation1.equals(annotation2))
                agreementCount++;
            // Count each of the answers for each of the raters
            answerCountsR1.putIfAbsent(annotation1, 0);
            answerCountsR2.putIfAbsent(annotation2, 0);
            answerCountsR1.increment(annotation1);
            answerCountsR2.increment(annotation2);
            // Keep a running total
            totalCount++;
        }
    }
    System.out.println(answerCountsR1);
    final double PrA = agreementCount / (double) totalCount;
    System.out.println(PrA);
    double PrE = 0;
    for (final A ann : answerCountsR1.keySet())
    {
        // get() returns a primitive int (the no-entry value, 0 by default, when the key
        // is absent), so there is no need to box the result and compare it against null.
        final int countR2 = answerCountsR2.get(ann);
        final double PrAnnR1 = answerCountsR1.get(ann) / (double) totalCount;
        final double PrAnnR2 = countR2 / (double) totalCount;
        PrE += PrAnnR1 * PrAnnR2;
    }
    System.out.println(PrE);
    final double kappa = (PrA - PrE) / (1d - PrE);
    return kappa;
}
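For completeness, a small usage sketch of the method above; the enclosing class name CohensKappa is an assumption for illustration, since the example does not show it. With the two rater maps below, the observed agreement is 3/4 = 0.75, the chance agreement is 0.5 * 0.25 + 0.5 * 0.75 = 0.5, so the expected kappa is (0.75 - 0.5) / (1 - 0.5) = 0.5.

import java.util.HashMap;
import java.util.Map;

public class CohensKappaExample {
    public static void main(String[] args) {
        Map<Integer, String> rater1 = new HashMap<>();
        Map<Integer, String> rater2 = new HashMap<>();
        rater1.put(1, "A"); rater2.put(1, "A");
        rater1.put(2, "A"); rater2.put(2, "B");
        rater1.put(3, "B"); rater2.put(3, "B");
        rater1.put(4, "B"); rater2.put(4, "B");

        // CohensKappa is a placeholder name for whatever class declares calculate(...) above.
        double kappa = CohensKappa.calculate(rater1, rater2);
        System.out.println(kappa); // expected: 0.5
    }
}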