

Java ClassicCounter.incrementCount Method Code Examples

This article collects typical usage examples of the Java method edu.stanford.nlp.stats.ClassicCounter.incrementCount. If you are unsure what ClassicCounter.incrementCount does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of the enclosing class, edu.stanford.nlp.stats.ClassicCounter.


The sections below present 13 code examples of the ClassicCounter.incrementCount method, sorted by popularity by default.
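
Before the examples themselves, here is a minimal, self-contained sketch (not drawn from any of the projects below) of what incrementCount does: incrementCount(key) adds 1.0 to a key's count, incrementCount(key, amount) adds an arbitrary amount, and keys that have never been seen implicitly start at 0.0.

import edu.stanford.nlp.stats.ClassicCounter;

public class IncrementCountDemo {
  public static void main(String[] args) {
    ClassicCounter<String> counts = new ClassicCounter<String>();
    counts.incrementCount("apple");        // "apple" -> 1.0
    counts.incrementCount("apple", 2.5);   // "apple" -> 3.5
    counts.incrementCount("banana");       // "banana" -> 1.0
    System.out.println(counts.getCount("apple"));   // 3.5
    System.out.println(counts.getCount("banana"));  // 1.0
    System.out.println(counts.totalCount());        // 4.5
  }
}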

Example 1: getSegmentedWordLengthDistribution

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
private Distribution<Integer> getSegmentedWordLengthDistribution(Treebank tb) {
  // CharacterLevelTagExtender ext = new CharacterLevelTagExtender();
  ClassicCounter<Integer> c = new ClassicCounter<Integer>();
  for (Iterator iterator = tb.iterator(); iterator.hasNext();) {
    Tree gold = (Tree) iterator.next();
    StringBuilder goldChars = new StringBuilder();
    Sentence goldYield = gold.yield();
    for (Iterator wordIter = goldYield.iterator(); wordIter.hasNext();) {
      Word word = (Word) wordIter.next();
      goldChars.append(word);
    }
    Sentence ourWords = segmentWords(goldChars.toString());
    for (int i = 0; i < ourWords.size(); i++) {
      c.incrementCount(Integer.valueOf(ourWords.get(i).toString().length()));
    }
  }
  return Distribution.getDistribution(c);
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 19, Source file: ChineseMarkovWordSegmenter.java

Example 2: getSegmentedWordLengthDistribution

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
private Distribution<Integer> getSegmentedWordLengthDistribution(Treebank tb) {
  // CharacterLevelTagExtender ext = new CharacterLevelTagExtender();
  ClassicCounter<Integer> c = new ClassicCounter<Integer>();
  for (Iterator iterator = tb.iterator(); iterator.hasNext();) {
    Tree gold = (Tree) iterator.next();
    StringBuilder goldChars = new StringBuilder();
    ArrayList goldYield = gold.yield();
    for (Iterator wordIter = goldYield.iterator(); wordIter.hasNext();) {
      Word word = (Word) wordIter.next();
      goldChars.append(word);
    }
    List<HasWord> ourWords = segment(goldChars.toString());
    for (int i = 0; i < ourWords.size(); i++) {
      c.incrementCount(Integer.valueOf(ourWords.get(i).word().length()));
    }
  }
  return Distribution.getDistribution(c);
}
 
Developer: chbrown, Project: stanford-parser, Lines: 19, Source file: ChineseMarkovWordSegmenter.java

Example 3: cloneCounter

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
/**
 * Make a copy of the array of counters.
 */
public ClassicCounter<Integer>[] cloneCounter(ClassicCounter<Integer>[] counter) {
  ClassicCounter<Integer>[] newcount = ErasureUtils.<ClassicCounter<Integer>>mkTArray(ClassicCounter.class, counter.length);
  for( int xx = 0; xx < counter.length; xx++ ) {
    ClassicCounter<Integer> cc = new ClassicCounter<Integer>();
    newcount[xx] = cc;
    for( Integer key : counter[xx].keySet() )
      cc.incrementCount(key, counter[xx].getCount(key));
  }
  return newcount;
}
 
Developer: nchambers, Project: probschemas, Lines: 14, Source file: EntityModelInstance.java

Example 4: train

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
public void train(Collection<Tree> trees) {
  Numberer tagNumberer = Numberer.getGlobalNumberer("tags");
  lex.train(trees);
  ClassicCounter<String> initial = new ClassicCounter<String>();
  GeneralizedCounter ruleCounter = new GeneralizedCounter(2);
  for (Tree tree : trees) {
    List<Label> tags = tree.preTerminalYield();
    String last = null;
    for (Label tagLabel : tags) {
      String tag = tagLabel.value();
      tagNumberer.number(tag);
      if (last == null) {
        initial.incrementCount(tag);
      } else {
        ruleCounter.incrementCount2D(last, tag);
      }
      last = tag;
    }
  }
  int numTags = tagNumberer.total();
  POSes = new HashSet<String>(ErasureUtils.<Collection<String>>uncheckedCast(tagNumberer.objects()));
  initialPOSDist = Distribution.laplaceSmoothedDistribution(initial, numTags, 0.5);
  markovPOSDists = new HashMap<String, Distribution>();
  Set entries = ruleCounter.lowestLevelCounterEntrySet();
  for (Iterator iter = entries.iterator(); iter.hasNext();) {
    Map.Entry entry = (Map.Entry) iter.next();
    //      Map.Entry<List<String>, Counter> entry = (Map.Entry<List<String>, Counter>) iter.next();
    Distribution d = Distribution.laplaceSmoothedDistribution((ClassicCounter) entry.getValue(), numTags, 0.5);
    markovPOSDists.put(((List<String>) entry.getKey()).get(0), d);
  }
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 32, Source file: ChineseMarkovWordSegmenter.java

Example 5: computeInputPrior

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
protected Distribution<String> computeInputPrior(Map<String, List<List<String>>> allTrainPaths) {
  ClassicCounter<String> result = new ClassicCounter<String>();
  for (Iterator<List<List<String>>> catI = allTrainPaths.values().iterator(); catI.hasNext();) {
    List<List<String>> pathList = catI.next();
    for (List<String> path : pathList) {
      for (String input : path) {
        result.incrementCount(input);
      }
    }
  }
  return Distribution.laplaceSmoothedDistribution(result, result.size() * 2, 0.5);
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 13, Source file: GrammarCompactor.java

Example 6: createGraphFromPaths

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
/**
 * If markovOrder is zero, we always transition back to the start state
 * If markovOrder is negative, we assume that it is infinite
 */
public static TransducerGraph createGraphFromPaths(List paths, int markovOrder) {
  ClassicCounter pathCounter = new ClassicCounter();
  for (Object o : paths) {
    pathCounter.incrementCount(o);
  }
  return createGraphFromPaths(pathCounter, markovOrder);
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 12, Source file: TransducerGraph.java

Example 7: svmLightLineToRVFDatum

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
public static RVFDatum<String, String> svmLightLineToRVFDatum(String l) {
  l = l.replaceFirst("#.*$", ""); // remove any trailing comments
  String[] line = l.split("\\s+");
  ClassicCounter<String> features = new ClassicCounter<String>();
  for (int i = 1; i < line.length; i++) {
    String[] f = line[i].split(":");
    if (f.length != 2) {
      throw new IllegalArgumentException("Bad data format: " + l);
    }
    double val = Double.parseDouble(f[1]);
    features.incrementCount(f[0], val);
  }
  return new RVFDatum<String, String>(features, line[0]);
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines: 15, Source file: RVFDataset.java
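
For context, the method above parses one SVMlight-style line: the first whitespace-separated token is the label, the remaining tokens are feature:value pairs, and anything after # is discarded. A hedged usage sketch follows; the class name, label, and feature ids here are invented for illustration.

import edu.stanford.nlp.classify.RVFDataset;
import edu.stanford.nlp.ling.RVFDatum;

public class SvmLightLineDemo {
  public static void main(String[] args) {
    // Hypothetical SVMlight-style line: label, then feature:value pairs; the trailing "#..." comment is stripped.
    String line = "spam 101:0.75 205:1.5 # illustrative comment";
    RVFDatum<String, String> datum = RVFDataset.svmLightLineToRVFDatum(line);
    System.out.println(datum.label());                             // spam
    System.out.println(datum.asFeaturesCounter().getCount("101")); // 0.75
  }
}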

Example 8: main

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
public static void main(String[] args) {
  RVFDataset<String, String> data = new RVFDataset<String, String>();
  ClassicCounter<String> c1 = new ClassicCounter<String>();
  c1.incrementCount("fever", 3.5);
  c1.incrementCount("cough", 1.1);
  c1.incrementCount("congestion", 4.2);

  ClassicCounter<String> c2 = new ClassicCounter<String>();
  c2.incrementCount("fever", 1.5);
  c2.incrementCount("cough", 2.1);
  c2.incrementCount("nausea", 3.2);

  ClassicCounter<String> c3 = new ClassicCounter<String>();
  c3.incrementCount("cough", 2.5);
  c3.incrementCount("congestion", 3.2);

  data.add(new RVFDatum<String, String>(c1, "cold"));
  data.add(new RVFDatum<String, String>(c2, "flu"));
  data.add(new RVFDatum<String, String>(c3, "cold"));
  data.summaryStatistics();

  LinearClassifierFactory<String, String> factory = new LinearClassifierFactory<String, String>();
  factory.useQuasiNewton();

  LinearClassifier<String, String> c = factory.trainClassifier(data);

  ClassicCounter<String> c4 = new ClassicCounter<String>();
  c4.incrementCount("cough", 2.3);
  c4.incrementCount("fever", 1.3);

  RVFDatum<String, String> datum = new RVFDatum<String, String>(c4);

  c.justificationOf((Datum<String, String>) datum);
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines: 35, Source file: RVFDataset.java

Example 9: computeInputPrior

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
protected static Distribution<String> computeInputPrior(Map<String, List<List<String>>> allTrainPaths) {
  ClassicCounter<String> result = new ClassicCounter<String>();
  for (List<List<String>> pathList : allTrainPaths.values()) {
    for (List<String> path : pathList) {
      for (String input : path) {
        result.incrementCount(input);
      }
    }
  }
  return Distribution.laplaceSmoothedDistribution(result, result.size() * 2, 0.5);
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 12, Source file: GrammarCompactor.java

Example 10: getRVFDatum

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
/**
 * @return the index-ed datum
 *
 *         Note, this returns a new RVFDatum object, not the original RVFDatum
 *         that was added to the dataset.
 */
@Override
public RVFDatum<L, F> getRVFDatum(int index) {
  ClassicCounter<F> c = new ClassicCounter<F>();
  for (int i = 0; i < data[index].length; i++) {
    c.incrementCount(featureIndex.get(data[index][i]), values[index][i]);
  }
  return new RVFDatum<L, F>(c, labelIndex.get(labels[index]));
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 15, Source file: RVFDataset.java

Example 11: getRVFDatum

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
/**
 * @return the index-ed datum
 */
@Override
public RVFDatum<L, F> getRVFDatum(int index) {
  ClassicCounter<F> c = new ClassicCounter<F>();
  for (F key : featureIndex.objects(data[index])) {
    c.incrementCount(key);
  }
  return new RVFDatum<L, F>(c, labelIndex.get(labels[index]));
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 12, Source file: Dataset.java

Example 12: main

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
public static void main(String[] args) {
  System.out.println("Testing unknown matching");
  String s = "\u5218\u00b7\u9769\u547d";
  if (s.matches(properNameMatch)) {
    System.out.println("hooray names!");
  } else {
    System.out.println("Uh-oh names!");
  }
  String s1 = "\uff13\uff10\uff10\uff10";
  if (s1.matches(numberMatch)) {
    System.out.println("hooray numbers!");
  } else {
    System.out.println("Uh-oh numbers!");
  }
  String s11 = "\u767e\u5206\u4e4b\u56db\u5341\u4e09\u70b9\u4e8c";
  if (s11.matches(numberMatch)) {
    System.out.println("hooray numbers!");
  } else {
    System.out.println("Uh-oh numbers!");
  }
  String s12 = "\u767e\u5206\u4e4b\u4e09\u5341\u516b\u70b9\u516d";
  if (s12.matches(numberMatch)) {
    System.out.println("hooray numbers!");
  } else {
    System.out.println("Uh-oh numbers!");
  }
  String s2 = "\u4e09\u6708";
  if (s2.matches(dateMatch)) {
    System.out.println("hooray dates!");
  } else {
    System.out.println("Uh-oh dates!");
  }

  System.out.println("Testing tagged word");
  ClassicCounter<TaggedWord> c = new ClassicCounter<TaggedWord>();
  TaggedWord tw1 = new TaggedWord("w", "t");
  c.incrementCount(tw1);
  TaggedWord tw2 = new TaggedWord("w", "t2");
  System.out.println(c.containsKey(tw2));
  System.out.println(tw1.equals(tw2));

  WordTag wt1 = toWordTag(tw1);
  WordTag wt2 = toWordTag(tw2);
  WordTag wt3 = new WordTag("w", "t2");
  System.out.println(wt1.equals(wt2));
  System.out.println(wt2.equals(wt3));
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 48, Source file: ChineseUnknownWordModel.java

Example 13: processTreeHelper

import edu.stanford.nlp.stats.ClassicCounter; // import the package/class the method depends on
public void processTreeHelper(String gP, String p, Tree t) {
  if (!t.isLeaf() && (doTags || !t.isPreTerminal())) { // stop at words/tags
    Map<String,ClassicCounter<List<String>>> nr;
    Map<List<String>,ClassicCounter<List<String>>> pr;
    Map<List<String>,ClassicCounter<List<String>>> gpr;
    if (t.isPreTerminal()) {
      nr = tagNodeRules;
      pr = tagPRules;
      gpr = tagGPRules;
    } else {
      nr = nodeRules;
      pr = pRules;
      gpr = gPRules;
    }
    String n = t.label().value();
    if (tlp != null) {
      p = tlp.basicCategory(p);
      gP = tlp.basicCategory(gP);
    }
    List<String> kidn = kidLabels(t);
    ClassicCounter<List<String>> cntr = nr.get(n);
    if (cntr == null) {
      cntr = new ClassicCounter<List<String>>();
      nr.put(n, cntr);
    }
    cntr.incrementCount(kidn);
    List<String> pairStr = new ArrayList<String>(2);
    pairStr.add(n);
    pairStr.add(p);
    cntr = pr.get(pairStr);
    if (cntr == null) {
      cntr = new ClassicCounter<List<String>>();
      pr.put(pairStr, cntr);
    }
    cntr.incrementCount(kidn);
    List<String> tripleStr = new ArrayList<String>(3);
    tripleStr.add(n);
    tripleStr.add(p);
    tripleStr.add(gP);
    cntr = gpr.get(tripleStr);
    if (cntr == null) {
      cntr = new ClassicCounter<List<String>>();
      gpr.put(tripleStr, cntr);
    }
    cntr.incrementCount(kidn);
    Tree[] kids = t.children();
    for (Tree kid : kids) {
      processTreeHelper(p, n, kid);
    }
  }
}
 
Developer: FabianFriedrich, Project: Text2Process, Lines: 52, Source file: ParentAnnotationStats.java


Note: The edu.stanford.nlp.stats.ClassicCounter.incrementCount examples in this article were compiled from open-source code hosted on platforms such as GitHub. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with the original authors; when reusing or redistributing this code, please follow the license of the corresponding project. Do not republish this compilation without permission.