本文整理匯總了Java中edu.stanford.nlp.trees.Tree.indexLeaves方法的典型用法代碼示例。如果您正苦於以下問題:Java Tree.indexLeaves方法的具體用法?Java Tree.indexLeaves怎麽用?Java Tree.indexLeaves使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類edu.stanford.nlp.trees.Tree
的用法示例。
在下文中一共展示了Tree.indexLeaves方法的2個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: complexityOf
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* Syntactic complexity as defined by Lin (1996).
*
* @param tree
* @return
*/
/**
 * Syntactic complexity as defined by Lin (1996): the sum over all
 * dependencies of the absolute distance between the governor's and the
 * dependent's leaf indices.
 *
 * NOTE(review): mutates the argument — leaf indices and head annotations
 * are written onto {@code tree} before dependencies are extracted.
 *
 * @param tree the parse tree to measure
 * @return the total governor-dependent index distance
 * @throws RuntimeException if any extracted dependency is not an
 *         {@code UnnamedConcreteDependency} (no indices to compare)
 */
private static int complexityOf(Tree tree) {
  // Annotate the tree so each dependency carries concrete leaf indices.
  tree.indexLeaves();
  tree.percolateHeads(new CollinsHeadFinder());
  tree.percolateHeadIndices();
  int totalDistance = 0;
  for (Dependency<Label,Label,Object> dependency : tree.dependencies()) {
    if (!(dependency instanceof UnnamedConcreteDependency)) {
      throw new RuntimeException("Cannot measure syntactic complexity.");
    }
    UnnamedConcreteDependency concrete = (UnnamedConcreteDependency) dependency;
    int governorIndex = concrete.getGovernorIndex();
    int dependentIndex = concrete.getDependentIndex();
    totalDistance += Math.abs(governorIndex - dependentIndex);
  }
  return totalDistance;
}
示例2: main
import edu.stanford.nlp.trees.Tree; //導入方法依賴的package包/類
/**
* Process an English text file.
*
* @param args
* @throws IOException
*/
/**
 * Process an English text file: run CoreNLP over each line, align the
 * CoreNLP tokenization against the MT preprocessor's tokenization, and
 * emit the annotated segments as a JSON document on stdout.
 *
 * NOTE(review): relies on a {@code properties} field declared elsewhere
 * in this class for the CoreNLP configuration — confirm it is initialized
 * before this entry point runs.
 *
 * @param args input file path, then an optional InputProperties string
 * @throws IOException if the input file cannot be read
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    System.err.printf("Usage: java %s file [inputproperties_str] > json_output%n", CoreNLPToJSON.class.getName());
    System.exit(-1);
  }
  final String textFile = args[0];
  final InputProperties inputProperties = args.length > 1 ?
      InputProperties.fromString(args[1]) : new InputProperties();
  final StanfordCoreNLP coreNLP = new StanfordCoreNLP(properties);
  // Tokenizer configured to match the MT system's preprocessing.
  final EnglishPreprocessor preprocessor = new EnglishPreprocessor(true);
  // TreeMap keeps keys sorted so the JSON output is ordered by segmentId.
  final Map<Integer,SourceSegment> annotations = new TreeMap<Integer,SourceSegment>();
  final LineNumberReader reader = IOTools.getReaderFromFile(textFile);
  String line;
  while ((line = reader.readLine()) != null) {
    final Annotation annotation = coreNLP.process(line);
    final List<CoreMap> sentences = annotation.get(SentencesAnnotation.class);
    // Each input line must yield exactly one sentence.
    if (sentences.size() != 1) {
      throw new RuntimeException("Sentence splitting on line: " + String.valueOf(reader.getLineNumber()));
    }
    final CoreMap sentence = sentences.get(0);
    final Tree parseTree = sentence.get(TreeAnnotation.class);
    parseTree.indexLeaves();
    final int[] chunkVector = getChunkVector(parseTree);
    final List<CoreLabel> sentenceTokens = sentence.get(TokensAnnotation.class);
    final int tokenCount = sentenceTokens.size();
    final SymmetricalWordAlignment wordAlignment = preprocessor.processAndAlign(line);
    // Both tokenizers must agree on the token count for alignment to hold.
    if (wordAlignment.e().size() != tokenCount) {
      throw new RuntimeException(String.format("Tokenizer configurations differ: %d/%d", wordAlignment.e().size(), tokenCount));
    }
    final SourceSegment segment = new SourceSegment(tokenCount);
    segment.layoutSpec.addAll(makeLayoutSpec(wordAlignment));
    segment.inputProperties = inputProperties.toString();
    for (int tokenIndex = 0; tokenIndex < tokenCount; ++tokenIndex) {
      final CoreLabel token = sentenceTokens.get(tokenIndex);
      segment.tokens.add(unescape(token.get(TextAnnotation.class)));
      segment.pos.add(mapPOS(token.get(PartOfSpeechAnnotation.class)));
      segment.ner.add(token.get(NamedEntityTagAnnotation.class));
      segment.chunkVector[tokenIndex] = chunkVector[tokenIndex];
    }
    // Line numbers are 1-based; segment ids are 0-based.
    annotations.put(reader.getLineNumber() - 1, segment);
  }
  reader.close();
  System.err.printf("Processed %d sentences%n", reader.getLineNumber());
  final SourceDocument jsonDocument = new SourceDocument(textFile, annotations);
  // Serialize the whole document to JSON on stdout.
  final Gson gson = new Gson();
  System.out.println(gson.toJson(jsonDocument));
}