

Java Generics.newHashMap Method Code Examples

This article collects typical usage examples of the Java method edu.stanford.nlp.util.Generics.newHashMap, gathered from open-source projects. If you are wondering what Generics.newHashMap does, how to call it, or want to see it in real code, the curated examples below should help. You can also explore other usage examples from the containing class, edu.stanford.nlp.util.Generics.


The following presents 15 code examples of the Generics.newHashMap method, ordered by popularity.
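
Before the examples, here is a minimal sketch of our own (not taken from any project below) showing what Generics.newHashMap does: it is a static factory whose type parameters are inferred from the assignment target, which avoided spelling out new HashMap&lt;String, Integer&gt;() twice in the days before Java 7's diamond operator, and it gives the library a single place to change the underlying Map implementation.

import java.util.Map;

import edu.stanford.nlp.util.Generics;

public class GenericsDemo {
  public static void main(String[] args) {
    // K and V are inferred from the declared type of "counts";
    // equivalent to new HashMap<String, Integer>()
    Map<String, Integer> counts = Generics.newHashMap();
    counts.put("token", 1);
    System.out.println(counts);  // prints {token=1}
  }
}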

Example 1: getAllDependents

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * Returns all the dependencies of a certain node.
 *
 * @param node The node to return dependents for
 * @return map of dependencies
 */
private static Map<Class<? extends CoreAnnotation>, Set<TreeGraphNode>> getAllDependents(TreeGraphNode node) {
  Map<Class<? extends CoreAnnotation>, Set<TreeGraphNode>> newMap = Generics.newHashMap();

  for (Class<?> o : node.label.keySet()) {
    try {
      // asSubclass throws a ClassCastException unless o is a subclass of
      // GrammaticalRelationAnnotation, so only those keys end up in newMap
      o.asSubclass(GrammaticalRelationAnnotation.class);
      // javac rejects this call if the generics are fully specified (Eclipse accepts it)
      newMap.put((Class<? extends CoreAnnotation>) o,
                 (Set<TreeGraphNode>) node.label.get((Class<? extends CoreAnnotation>) o));
    } catch (ClassCastException e) {
      // ignore any non-GrammaticalRelationAnnotation key
    }
  }
  return newMap;
}
 
Developer ID: FabianFriedrich, Project: Text2Process, Lines: 22, Source: GrammaticalStructure.java
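
The try/catch around asSubclass is the key trick above: asSubclass either returns the class object, typed as a subclass, or throws ClassCastException. Below is a self-contained sketch of that idiom using hypothetical marker classes of ours in place of GrammaticalRelationAnnotation; Example 11 later in this article shows the cleaner isAssignableFrom variant of the same filter.

import java.util.ArrayList;
import java.util.List;

public class AsSubclassDemo {
  // hypothetical marker classes standing in for the annotation hierarchy
  static class Annotation {}
  static class RelationAnnotation extends Annotation {}
  static class OtherAnnotation extends Annotation {}

  public static void main(String[] args) {
    List<Class<?>> keys = new ArrayList<Class<?>>();
    keys.add(RelationAnnotation.class);
    keys.add(OtherAnnotation.class);
    keys.add(String.class);

    List<Class<? extends RelationAnnotation>> kept =
        new ArrayList<Class<? extends RelationAnnotation>>();
    for (Class<?> c : keys) {
      try {
        // returns c typed as a RelationAnnotation subclass, or throws
        kept.add(c.asSubclass(RelationAnnotation.class));
      } catch (ClassCastException e) {
        // skip keys of any other type
      }
    }
    System.out.println(kept);  // only RelationAnnotation survives
  }
}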

Example 2: SemanticGraph

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * Copies the given graph, and also fills in the mapping from
 * old vertices to their copies.
 */
public SemanticGraph(SemanticGraph g,
                     Map<IndexedWord, IndexedWord> prevToNewMap) {
  graph = new DirectedMultiGraph<IndexedWord, SemanticGraphEdge>();
  Collection<IndexedWord> oldRoots =
    new ArrayList<IndexedWord>(g.getRoots());
  if (prevToNewMap == null)
    prevToNewMap = Generics.newHashMap();
  Set<IndexedWord> vertexes = g.vertexSet();
  for (IndexedWord vertex : vertexes) {
    IndexedWord newVertex = new IndexedWord(vertex);
    addVertex(newVertex);
    prevToNewMap.put(vertex, newVertex);
  }
  roots = Generics.newHashSet();
  for (IndexedWord oldRoot : oldRoots) {
    roots.add(prevToNewMap.get(oldRoot));
  }
  for (SemanticGraphEdge edge : g.edgeIterable()) {
    IndexedWord newGov = prevToNewMap.get(edge.getGovernor());
    IndexedWord newDep = prevToNewMap.get(edge.getDependent());
    addEdge(newGov, newDep, edge.getRelation(), edge.getWeight(), edge.isExtra());
  }
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 28, Source: SemanticGraph.java
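
A hedged usage sketch of this constructor: the map argument is an out-parameter that the constructor fills, letting callers translate node references from the original graph into the copy. We assume the current package name edu.stanford.nlp.semgraph (older releases used edu.stanford.nlp.trees.semgraph), and the empty graph stands in for a real parse.

import java.util.Map;

import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.util.Generics;

public class SemanticGraphCopyDemo {
  public static void main(String[] args) {
    SemanticGraph original = new SemanticGraph();  // stand-in: real code would parse a sentence
    Map<IndexedWord, IndexedWord> oldToNew = Generics.newHashMap();
    // the constructor fills oldToNew as a side effect, so existing
    // references into the old graph can be translated to the copy
    SemanticGraph copy = new SemanticGraph(original, oldToNew);
    System.out.println(copy.vertexSet().size() + " vertices copied");
  }
}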

Example 3: TransducerGraph

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
public TransducerGraph() {
  arcs = new HashSet<Arc>();
  arcsBySource = Generics.newHashMap();
  arcsByTarget = Generics.newHashMap();
  arcsByInput = Generics.newHashMap();
  arcsBySourceAndInput = Generics.newHashMap();
  arcsByTargetAndInput = Generics.newHashMap();
  endNodes = new HashSet();
  setStartNode(DEFAULT_START_NODE);
}
 
Developer ID: FabianFriedrich, Project: Text2Process, Lines: 11, Source: TransducerGraph.java

Example 4: TextDateComponent

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
public TextDateComponent(DateTimeFieldType fieldType, Locale locale, Boolean isShort)
{
  this.fieldType = fieldType;
  this.locale = locale;
  this.isShort = isShort;
  
  MutableDateTime dt = new MutableDateTime(0L, DateTimeZone.UTC);
  MutableDateTime.Property property = dt.property(fieldType);
  minValue = property.getMinimumValueOverall();
  maxValue = property.getMaximumValueOverall();
  this.validValues = new ArrayList<String>(maxValue-minValue+1);
  this.valueMapping = Generics.newHashMap();
  for (int i = minValue; i <= maxValue; i++) {
    property.set(i);
    if (isShort != null) {
      if (isShort) {
        addValue(property.getAsShortText(locale), i);
      } else {
        addValue(property.getAsText(locale), i);
      }
    } else {
      addValue(property.getAsShortText(locale), i);
      addValue(property.getAsText(locale), i);
    }
  }
  // Order by length for regex
  Collections.sort(validValues, STRING_LENGTH_REV_COMPARATOR);
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 29, Source: TimeFormatter.java
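
The constructor's core loop sweeps the whole legal range of a date-time field and renders each value as locale-specific text. The same loop, extracted into a runnable Joda-Time sketch of ours that prints month names (requires joda-time on the classpath):

import java.util.Locale;

import org.joda.time.DateTimeFieldType;
import org.joda.time.DateTimeZone;
import org.joda.time.MutableDateTime;

public class MonthNamesDemo {
  public static void main(String[] args) {
    MutableDateTime dt = new MutableDateTime(0L, DateTimeZone.UTC);
    MutableDateTime.Property months = dt.property(DateTimeFieldType.monthOfYear());
    // walk the field's full value range, rendering each value as long and
    // short text, just as TextDateComponent does to build its value map
    for (int i = months.getMinimumValueOverall(); i <= months.getMaximumValueOverall(); i++) {
      months.set(i);
      System.out.println(i + " -> " + months.getAsText(Locale.ENGLISH)
          + " / " + months.getAsShortText(Locale.ENGLISH));
    }
  }
}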

Example 5: replaceMergedUsingTokenOffsets

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
public static List<? extends CoreMap> replaceMergedUsingTokenOffsets(List<? extends CoreMap> list,
                                                    List<? extends MatchedExpression> matchedExprs)
{
  if (matchedExprs == null) return list;
  Map<Integer, Integer> tokenBeginToListIndexMap = Generics.newHashMap();
  Map<Integer, Integer> tokenEndToListIndexMap = Generics.newHashMap();
  for (int i = 0; i < list.size(); i++) {
    CoreMap cm = list.get(i);
    if (cm.has(CoreAnnotations.TokenBeginAnnotation.class) && cm.has(CoreAnnotations.TokenEndAnnotation.class)) {
      tokenBeginToListIndexMap.put(cm.get(CoreAnnotations.TokenBeginAnnotation.class), i);
      tokenEndToListIndexMap.put(cm.get(CoreAnnotations.TokenEndAnnotation.class), i+1);
    } else {
      tokenBeginToListIndexMap.put(i, i);
      tokenEndToListIndexMap.put(i+1, i+1);
    }
  }
  Collections.sort(matchedExprs, EXPR_TOKEN_OFFSET_COMPARATOR);
  List<CoreMap> merged = new ArrayList<CoreMap>(list.size());   // Approximate size
  int last = 0;
  for (MatchedExpression expr:matchedExprs) {
    int start = expr.tokenOffsets.first();
    int end = expr.tokenOffsets.second();
    Integer istart = tokenBeginToListIndexMap.get(start);
    Integer iend = tokenEndToListIndexMap.get(end);
    if (istart != null && iend != null) {
      if (istart >= last) {
        merged.addAll(list.subList(last,istart));
        CoreMap m = expr.getAnnotation();
        merged.add(m);
        last = iend;
      }
    }
  }
  // Add rest of elements
  if (last < list.size()) {
    merged.addAll(list.subList(last, list.size()));
  }
  return merged;
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 40, Source: MatchedExpression.java
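
Stripped of the CoreMap machinery, the control flow above is: walk the matched spans in ascending token order, copy the not-yet-consumed elements before each span, emit one merged element for the span, and jump past its end. Here is that loop as a plain-strings sketch of ours (the merge helper and the underscore join are stand-ins, not part of the Stanford API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MergeSpansDemo {
  /** Replaces each [start, end) token span with a single merged string. */
  static List<String> merge(List<String> tokens, List<int[]> spans) {
    List<String> out = new ArrayList<String>(tokens.size());
    int last = 0;
    for (int[] span : spans) {  // spans assumed sorted and non-overlapping
      int start = span[0], end = span[1];
      if (start >= last) {
        out.addAll(tokens.subList(last, start));               // copy untouched prefix
        out.add(String.join("_", tokens.subList(start, end))); // one merged element
        last = end;                                            // skip the consumed tokens
      }
    }
    out.addAll(tokens.subList(last, tokens.size()));  // copy the tail
    return out;
  }

  public static void main(String[] args) {
    List<String> tokens = Arrays.asList("on", "June", "5", "we", "met");
    List<int[]> spans = new ArrayList<int[]>();
    spans.add(new int[]{1, 3});  // "June 5" becomes one element
    System.out.println(merge(tokens, spans));  // [on, June_5, we, met]
  }
}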

Example 6: deepCopyFromGraphs

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * Like makeFromGraphs, but it makes a deep copy of the graphs and
 * renumbers the index words.
 * <br>
 * <code>lengths</code> must be a vector containing the number of
 * tokens in each sentence.  This is used to reindex the tokens.
 */
public static SemanticGraph deepCopyFromGraphs(List<SemanticGraph> graphs,
                                               List<Integer> lengths) {
  SemanticGraph newGraph = new SemanticGraph();
  Map<Integer, IndexedWord> newWords = Generics.newHashMap();
  List<IndexedWord> newRoots = new ArrayList<IndexedWord>();
  int vertexOffset = 0;
  for (int i = 0; i < graphs.size(); ++i) {
    SemanticGraph graph = graphs.get(i);
    for (IndexedWord vertex : graph.vertexSet()) {
      IndexedWord newVertex = new IndexedWord(vertex);
      newVertex.setIndex(vertex.index() + vertexOffset);
      newGraph.addVertex(newVertex);
      newWords.put(newVertex.index(), newVertex);
    }
    for (SemanticGraphEdge edge : graph.edgeIterable()) {
      IndexedWord gov = newWords.get(edge.getGovernor().index() +
                                     vertexOffset);
      IndexedWord dep = newWords.get(edge.getDependent().index() +
                                     vertexOffset);
      if (gov == null || dep == null) {
        throw new AssertionError("Counting problem (or broken edge)");
      }
      newGraph.addEdge(gov, dep, edge.getRelation(), edge.getWeight(), edge.isExtra());
    }
    for (IndexedWord root : graph.getRoots()) {
      newRoots.add(newWords.get(root.index() + vertexOffset));
    }
    vertexOffset += lengths.get(i);
  }
  newGraph.setRoots(newRoots);
  return newGraph;
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 40, Source: SemanticGraphFactory.java
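
A hedged usage sketch: the lengths argument supplies per-sentence token counts, which drive the vertexOffset so that words of later sentences are renumbered past earlier ones. The package name is assumed from recent CoreNLP releases (older ones used edu.stanford.nlp.trees.semgraph), and the empty graphs stand in for real parses.

import java.util.Arrays;

import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphFactory;

public class CombineGraphsDemo {
  public static void main(String[] args) {
    SemanticGraph g1 = new SemanticGraph();  // pretend: a 5-token sentence
    SemanticGraph g2 = new SemanticGraph();  // pretend: a 7-token sentence
    // the lengths drive the offset: word index 1 of g2 becomes index 6
    SemanticGraph combined = SemanticGraphFactory.deepCopyFromGraphs(
        Arrays.asList(g1, g2), Arrays.asList(5, 7));
    System.out.println(combined.vertexSet().size());  // 0 for empty inputs
  }
}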

Example 7: colorChannel

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * Color the tag for a particular channel this color
 * @param channel The channel to color
 * @param color The color to use
 */
public void colorChannel(String channel, Color color){
  if(this.channelColors == null){
    this.channelColors = Generics.newHashMap();
  }
  this.channelColors.put(channel.toLowerCase(),color);
}
 
Developer ID: jaimeguzman, Project: data_mining, Lines: 12, Source: OutputHandler.java

Example 8: evaluate

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * returns null if one of the surgeries eliminates the tree entirely.  The
 * operated-on tree is not to be trusted in this instance.
 */
@Override
public Tree evaluate(Tree t, TregexMatcher m) {
  newNodeNames = Generics.newHashMap();
  coindexer.setLastIndex(t);
  for (TsurgeonPattern child : children) {
    t = child.evaluate(t, m);
    if (t == null) {
      return null;
    }
  }
  return t;
}
 
Developer ID: paulirwin, Project: Stanford.NER.Net, Lines: 17, Source: TsurgeonPatternRoot.java
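
This evaluate method is rarely called directly; typical client code compiles a Tregex match pattern plus a Tsurgeon operation and lets Tsurgeon drive the evaluation. A minimal sketch, where the toy tree and the prune operation are arbitrary choices of ours:

import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.tregex.TregexPattern;
import edu.stanford.nlp.trees.tregex.tsurgeon.Tsurgeon;
import edu.stanford.nlp.trees.tregex.tsurgeon.TsurgeonPattern;

public class TsurgeonDemo {
  public static void main(String[] args) {
    Tree tree = Tree.valueOf("(ROOT (S (NP (DT the) (NN dog)) (VP (VBZ barks))))");
    TregexPattern match = TregexPattern.compile("NP=np");
    TsurgeonPattern op = Tsurgeon.parseOperation("prune np");
    // processPattern applies the surgery wherever the pattern matches;
    // as the javadoc above warns, it can return null if the whole tree is deleted
    Tree result = Tsurgeon.processPattern(match, op, tree);
    System.out.println(result);
  }
}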

Example 9: purgeRules

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/** Remove A -&gt; A UnaryRules from bestRulesUnderMax. */
public final void purgeRules() {
  Map<UnaryRule,UnaryRule> bR = Generics.newHashMap();
  for (UnaryRule ur : bestRulesUnderMax.keySet()) {
    if (ur.parent != ur.child) {
      bR.put(ur, ur);
    } else {
      closedRulesWithParent[ur.parent].remove(ur);
      closedRulesWithChild[ur.child].remove(ur);
    }
  }
  bestRulesUnderMax = bR;
  makeCRArrays();
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 15, Source: UnaryGrammar.java
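
Note that purgeRules filters by building a replacement map instead of removing entries from bestRulesUnderMax while iterating over its key set, which would throw ConcurrentModificationException unless done through the iterator. The same rebuild idiom in a tiny sketch of ours:

import java.util.Map;

import edu.stanford.nlp.util.Generics;

public class RebuildFilterDemo {
  public static void main(String[] args) {
    Map<String, Integer> rules = Generics.newHashMap();
    rules.put("keep", 1);
    rules.put("drop", -1);
    // copy survivors into a fresh map rather than calling remove()
    // on the map being iterated
    Map<String, Integer> filtered = Generics.newHashMap();
    for (Map.Entry<String, Integer> e : rules.entrySet()) {
      if (e.getValue() > 0) {
        filtered.put(e.getKey(), e.getValue());
      }
    }
    rules = filtered;
    System.out.println(rules);  // {keep=1}
  }
}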

Example 10: simplifyNoTypeConversion

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
public CompositeValue simplifyNoTypeConversion(Env env, Object... args) {
  Map<String, Expression> m = value;
  Map<String, Expression> res = Generics.newHashMap(m.size());
  for (String s:m.keySet()) {
    res.put(s, m.get(s).simplify(env));
  }
  return new CompositeValue(res, true);
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 9, Source: Expressions.java
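
Two details here are worth copying: the newHashMap(int) overload pre-sizes the result so it never rehashes while being filled, and (a small tweak to the code above) iterating entrySet saves one hash lookup per key compared with keySet plus get. A sketch of ours showing the same copy-and-transform shape with plain values:

import java.util.Map;

import edu.stanford.nlp.util.Generics;

public class TransformDemo {
  public static void main(String[] args) {
    Map<String, Integer> src = Generics.newHashMap();
    src.put("a", 1);
    src.put("b", 2);
    // pre-size the destination, then transform each entry in one pass
    Map<String, Integer> dst = Generics.newHashMap(src.size());
    for (Map.Entry<String, Integer> e : src.entrySet()) {
      dst.put(e.getKey(), e.getValue() * 10);
    }
    System.out.println(dst);  // {a=10, b=20}
  }
}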

Example 11: getAllDependents

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * Returns all the dependencies of a certain node.
 *
 * @param node The node to return dependents for
 * @return map of dependencies
 */
private static <GR extends GrammaticalRelationAnnotation> // separating this out helps some compilers
Map<Class<? extends GrammaticalRelationAnnotation>, Set<TreeGraphNode>> getAllDependents(TreeGraphNode node) {
  Map<Class<? extends GrammaticalRelationAnnotation>, Set<TreeGraphNode>> newMap = Generics.newHashMap();

  for (Class<?> o : node.label.keySet()) {
    if (GrammaticalRelationAnnotation.class.isAssignableFrom(o)) {
      // ignore any non-GrammaticalRelationAnnotation element
      Class<GR> typedKey = ErasureUtils.uncheckedCast(o);
      newMap.put(typedKey, node.label.get(typedKey));
    }
  }
  return newMap;
}
 
Developer ID: paulirwin, Project: Stanford.NER.Net, Lines: 20, Source: GrammaticalStructure.java

Example 12: main

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * The main method reads (segmented, whitespace delimited) words from a file
 * and prints them with their English translation(s).
 *
 * The path and filename of the CEDict Lexicon can be supplied via the
 * "-dictPath" flag; otherwise the default filename "cedict_ts.u8" in the
 * current directory is checked.
 *
 * By default, only the first translation is printed.  If the "-all" flag
 * is given, all translations are printed.
 *
 * The input and output encoding can be specified using the "-encoding" flag.
 * Otherwise UTF-8 is assumed.
 */
public static void main(String[] args) throws IOException {
  Map<String, Integer> flagsToNumArgs = Generics.newHashMap();
  flagsToNumArgs.put("-dictPath", 1);
  flagsToNumArgs.put("-encoding", 1);
  Map<String, String[]> argMap = StringUtils.argsToMap(args, flagsToNumArgs);
  String[] otherArgs = argMap.get(null);
  if (otherArgs.length < 1) {
    System.err.println("usage: ChineseEnglishWordMap [-all] [-dictPath path] [-encoding enc_string] inputFile");
    System.exit(1);
  }
  String filename = otherArgs[0];
  boolean allTranslations = argMap.containsKey("-all");
  String charset = defaultCharset;
  if (argMap.containsKey("-encoding")) {
    charset = argMap.get("-encoding")[0];
  }
  BufferedReader r = new BufferedReader(new InputStreamReader(new FileInputStream(filename), charset));

  TreebankLanguagePack tlp = new ChineseTreebankLanguagePack();
  String[] dpString = argMap.get("-dictPath");
  ChineseEnglishWordMap cewm = (dpString == null) ? new ChineseEnglishWordMap() : new ChineseEnglishWordMap(dpString[0]);
  int totalWords = 0, coveredWords = 0;

  PrintWriter pw = new PrintWriter(new OutputStreamWriter(System.out, charset), true);

  for (String line = r.readLine(); line != null; line = r.readLine()) {
    String[] words = line.split("\\s", 1000);
    for (String word : words) {
      totalWords++;
      if (word.length() == 0) continue;
      pw.print(StringUtils.pad(word + ':', 8));
      if (tlp.isPunctuationWord(word)) {
        totalWords--;
        pw.print(word);
      } else if (isDigits(word)) {
        pw.print(word + " [NUMBER]");
      } else if (cewm.containsKey(word)) {
        coveredWords++;
        if (allTranslations) {
          List<String> trans = new ArrayList<String>(cewm.getAllTranslations(word));
          for (String s : trans) {
            pw.print((trans.indexOf(s) > 0 ? "|" : "") + s);
          }
        } else {
          pw.print(cewm.getFirstTranslation(word));
        }
      } else {
        pw.print("[UNK]");
      }
      pw.println();
    }
    pw.println();
  }
  r.close();
  System.err.print("Finished translating " + totalWords + " words (");
  System.err.println(coveredWords + " were in dictionary).");
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 72, Source: ChineseEnglishWordMap.java
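
The flag-parsing prologue of this main method is reusable on its own: flagsToNumArgs tells StringUtils.argsToMap how many values each flag consumes, unlisted flags consume zero, and positional arguments are collected under the null key. A small sketch of ours:

import java.util.Map;

import edu.stanford.nlp.util.Generics;
import edu.stanford.nlp.util.StringUtils;

public class ArgsDemo {
  public static void main(String[] args) {
    String[] argv = {"-encoding", "GB18030", "-all", "input.txt"};
    Map<String, Integer> flagsToNumArgs = Generics.newHashMap();
    flagsToNumArgs.put("-encoding", 1);  // -encoding consumes one value
    Map<String, String[]> argMap = StringUtils.argsToMap(argv, flagsToNumArgs);
    System.out.println(argMap.get("-encoding")[0]);  // GB18030
    System.out.println(argMap.containsKey("-all"));  // true (zero-arg flag)
    System.out.println(argMap.get(null)[0]);         // input.txt (positional)
  }
}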

Example 13: AnnotatorPool

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
/**
 * Create an empty AnnotatorPool.
 */
public AnnotatorPool() {
  this.annotators = Generics.newHashMap();
  this.factories = Generics.newHashMap();
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 8, Source: AnnotatorPool.java

Example 14: GrammaticalRelation

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
private GrammaticalRelation(Language language,
                           String shortName,
                           String longName,
                           Class<? extends GrammaticalRelationAnnotation> annotation,
                           GrammaticalRelation parent,
                           String sourcePattern,
                           TregexPatternCompiler tregexCompiler,
                           String[] targetPatterns,
                           String specificString) {
  this.language = language;
  this.shortName = shortName;
  this.longName = longName;
  this.parent = parent;
  this.specific = specificString; // this can be null!

  if (parent != null) {
    parent.addChild(this);
  }

  if (annotation != null) {
    if (GrammaticalRelation.annotationsToRelations.put(annotation, this) != null) {
      throw new IllegalArgumentException("Annotation cannot be associated with more than one relation!");
    }
    if (GrammaticalRelation.relationsToAnnotations.put(this, annotation) != null) {
      throw new IllegalArgumentException("There should only ever be one instance of each relation!");
    }
  }

  if (sourcePattern != null) {
    try {
      this.sourcePattern = Pattern.compile(sourcePattern);
    } catch (java.util.regex.PatternSyntaxException e) {
      throw new RuntimeException("Bad pattern: " + sourcePattern);
    }
  } else {
    this.sourcePattern = null;
  }

  for (String pattern : targetPatterns) {
    try {
      TregexPattern p = tregexCompiler.compile(pattern);
      this.targetPatterns.add(p);
    } catch (edu.stanford.nlp.trees.tregex.TregexParseException pe) {
      throw new RuntimeException("Bad pattern: " + pattern, pe);
    }
  }

  Map<String, GrammaticalRelation> sToR = stringsToRelations.get(language);
  if (sToR == null) {
    sToR = Generics.newHashMap();
    stringsToRelations.put(language, sToR);
  }
  GrammaticalRelation previous = sToR.put(toString(), this);
  if (previous != null) {
    if (!previous.isFromString() && !isFromString()) {
      throw new IllegalArgumentException("There is already a relation named " + toString() + '!');
    } else {
      /* We get here if we previously built a fake relation from
       * a string read in from a file.
       */
      // TODO is it worth copying all of the information from this real
      //      relation into the old fake one?
    }
  }
}
 
Developer ID: jaimeguzman, Project: data_mining, Lines: 66, Source: GrammaticalRelation.java
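
The lookup-then-insert sequence on stringsToRelations is the classic get-or-create pattern; on Java 8 and later it collapses into a single computeIfAbsent call. A sketch of ours with a hypothetical two-level map (the Language enum here is a stand-in for the real one):

import java.util.Map;

import edu.stanford.nlp.util.Generics;

public class GetOrCreateDemo {
  enum Language { English, Chinese }

  public static void main(String[] args) {
    Map<Language, Map<String, String>> byLanguage = Generics.newHashMap();
    // computeIfAbsent replaces the get / null-check / put sequence above
    Map<String, String> sToR =
        byLanguage.computeIfAbsent(Language.English, k -> Generics.newHashMap());
    sToR.put("nsubj", "nominal subject");
    System.out.println(byLanguage);  // {English={nsubj=nominal subject}}
  }
}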

Example 15: VariableStrings

import edu.stanford.nlp.util.Generics; // import the package/class the method depends on
public VariableStrings() {
  varsToStrings = Generics.newHashMap();
  numVarsSet = new IntCounter<String>();
}
 
Developer ID: benblamey, Project: stanford-nlp, Lines: 5, Source: VariableStrings.java


Note: The edu.stanford.nlp.util.Generics.newHashMap examples in this article are collected from open-source projects hosted on GitHub and similar platforms. Copyright of the source code remains with the original authors; consult each project's license before reusing or redistributing the code.