This article collects typical usage examples of the Java class edu.stanford.nlp.trees.LabeledScoredTreeFactory. If you are unsure what LabeledScoredTreeFactory does or how to use it, the selected code examples below should help.
The LabeledScoredTreeFactory class belongs to the edu.stanford.nlp.trees package. Fifteen code examples are shown below, ordered roughly by popularity.
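Before the individual examples, here is a minimal, self-contained sketch of the pattern that recurs throughout this listing: passing a LabeledScoredTreeFactory to a PennTreeReader to turn a Penn Treebank bracketing into a Tree. The class name and the bracketed sentence are illustrative only.
import java.io.StringReader;
import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.PennTreeReader;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeFactory;
public class LabeledScoredTreeFactoryDemo {
  public static void main(String[] args) throws Exception {
    // The factory determines which concrete Tree/Label implementations the reader builds.
    TreeFactory tf = new LabeledScoredTreeFactory();
    String ptb = "(ROOT (S (NP (DT The) (NN cat)) (VP (VBD slept)) (. .)))";
    Tree tree = new PennTreeReader(new StringReader(ptb), tf).readTree();
    tree.pennPrint();  // prints the bracketed tree back out
  }
}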
Example 1: ParsedToDep
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
ParsedToDep(String[] args) {
HandleParameters params = new HandleParameters(args);
if( params.hasFlag("-output") )
_outputDir = params.get("-output");
if( params.hasFlag("-type") )
_inputType = params.get("-type").toLowerCase();
_dataPath = args[args.length - 1];
System.out.println("outputdir= " + _outputDir);
System.out.println("inputtype= " + _inputType);
_tlp = new PennTreebankLanguagePack();
_gsf = _tlp.grammaticalStructureFactory();
_tf = new LabeledScoredTreeFactory();
}
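The constructor above only wires up the language pack, the grammatical-structure factory, and the tree factory; the actual conversion from a constituency parse to dependencies is not shown in this listing. Below is a hedged, standalone sketch of how these objects are typically combined in the Stanford API; the class name, input string, and printing are assumptions, not the original ParsedToDep code.
import java.io.StringReader;
import java.util.Collection;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.PennTreeReader;
import edu.stanford.nlp.trees.PennTreebankLanguagePack;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreebankLanguagePack;
import edu.stanford.nlp.trees.TypedDependency;
public class ConstituencyToDependenciesSketch {
  public static void main(String[] args) throws Exception {
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    String penn = "(ROOT (S (NP (DT The) (NN dog)) (VP (VBD barked)) (. .)))";
    // Same reading pattern as the other examples in this listing.
    Tree tree = new PennTreeReader(new StringReader(penn), new LabeledScoredTreeFactory()).readTree();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    Collection<TypedDependency> deps = gs.typedDependenciesCCprocessed();
    System.out.println(deps);
  }
}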
Example 2: main
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public static void main(String[] args) throws IOException {
TreeJPanel tjp = new TreeJPanel();
// String ptbTreeString1 = "(ROOT (S (NP (DT This)) (VP (VBZ is) (NP (DT a) (NN test))) (. .)))";
String ptbTreeString = "(ROOT (S (NP (NNP Interactive_Tregex)) (VP (VBZ works)) (PP (IN for) (PRP me)) (. !)))";
if (args.length > 0) {
ptbTreeString = args[0];
}
Tree tree = (new PennTreeReader(new StringReader(ptbTreeString), new LabeledScoredTreeFactory(new StringLabelFactory()))).readTree();
tjp.setTree(tree);
tjp.setBackground(Color.white);
JFrame frame = new JFrame();
frame.getContentPane().add(tjp, BorderLayout.CENTER);
frame.addWindowListener(new WindowAdapter() {
@Override
public void windowClosing(WindowEvent e) {
System.exit(0);
}
});
frame.pack();
frame.setVisible(true);
}
Example 3: ExhaustivePCFGParser
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public ExhaustivePCFGParser(BinaryGrammar bg, UnaryGrammar ug, Lexicon lex, Options op) {
// System.out.println("ExhaustivePCFGParser constructor called.");
this.op = op;
this.tlp = op.langpack();
goalStr = tlp.startSymbol();
this.stateSpace = bg.stateSpace();
stateNumberer = Numberer.getGlobalNumberer(stateSpace);
this.bg = bg;
this.ug = ug;
this.lex = lex;
tf = new LabeledScoredTreeFactory(new StringLabelFactory());
numStates = stateNumberer.total();
isTag = new boolean[numStates];
for (int state = 0; state < numStates; state++) {
isTag[state] = tagNumberer.hasSeen(stateNumberer.object(state));
}
}
Example 4: ExhaustivePCFGParser
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public ExhaustivePCFGParser(BinaryGrammar bg, UnaryGrammar ug, Lexicon lex, Options op, Index<String> stateIndex, Index<String> wordIndex, Index<String> tagIndex) {
// System.out.println("ExhaustivePCFGParser constructor called.");
this.bg = bg;
this.ug = ug;
this.lex = lex;
this.op = op;
this.tlp = op.langpack();
goalStr = tlp.startSymbol();
this.stateIndex = stateIndex;
this.wordIndex = wordIndex;
this.tagIndex = tagIndex;
tf = new LabeledScoredTreeFactory();
numStates = stateIndex.size();
isTag = new boolean[numStates];
// tag index is smaller, so we fill by iterating over the tag index
// rather than over the state index
for (String tag : tagIndex.objectsList()) {
int state = stateIndex.indexOf(tag);
if (state < 0) {
continue;
}
isTag[state] = true;
}
}
Example 5: init
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
private void init() {
// Setup the IDF maps.
_idf = new IDFMap(75000);
_idfLemmas = new IDFMap(75000);
_treeFactory = new LabeledScoredTreeFactory();
}
Example 6: DataSimplifier
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public DataSimplifier() {
_tf = new LabeledScoredTreeFactory();
System.out.println("Loading Wordnet from: " + WordNet.findWordnetPath());
_wordnet = new WordNet(WordNet.findWordnetPath());
System.out.println("Loading IDF from: " + IDFMap.findIDFPath());
if( IDFMap.findIDFPath() == null )
System.out.println("ERROR: no path to the IDF file found in IDFMap.java findIDFPath()");
generalIDF = new IDFMap(IDFMap.findIDFPath());
}
Example 7: stringsToTrees
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
/**
* Build Tree objects from a collection of parses in string form.
*/
public static List<Tree> stringsToTrees(Collection<String> strings) {
if( strings != null ) {
List<Tree> trees = new ArrayList<Tree>();
TreeFactory tf = new LabeledScoredTreeFactory();
for( String str : strings )
trees.add(stringToTree(str, tf));
return trees;
}
else return null;
}
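The stringToTree helper called above is not reproduced in this listing. Judging from how the later examples read individual parses (Examples 9 and 10), it is most likely a thin wrapper around PennTreeReader such as the sketch below; the exact signature and error handling are assumptions.
// Hedged reconstruction of the helper referenced above; not copied from the original TreeOperator source.
// Requires java.io.BufferedReader, java.io.IOException, java.io.StringReader in addition to the Stanford classes.
public static Tree stringToTree(String str, TreeFactory tf) {
  try {
    PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(str)), tf);
    return ptr.readTree();
  } catch( IOException ex ) {
    ex.printStackTrace();
    return null;
  }
}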
Example 8: main
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
/**
* @param args
*/
public static void main(String[] args) {
TreeFactory tf = new LabeledScoredTreeFactory();
List<Tree> trees = new ArrayList<Tree>();
trees.add(TreeOperator.stringToTree("(TOP (S (NP (person (NNP Dave))) (VP (VBD left) (NP (NP (DT the) (NN job) (JJ first) (NN thing)) (PP (IN in) (NP (DT the) (NN morning))))) (. .)) )", tf));
trees.add(TreeOperator.stringToTree("(TOP (S (NP (PRP He)) (VP (VP (VBD drank) (NP (NP (NNS lots)) (PP (IN of) (NP (NN coffee))))) (CC and) (VP (VBD picked) (NP (PRP her)) (PRT (RP up)))) (. .)) )", tf));
CorefStanford coref = new CorefStanford();
coref.processParses(trees);
}
Example 9: analyzeParsesNoCoref
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
/**
* @desc Process parses for verbs
* @param parses The list of parses in String form
*/
private void analyzeParsesNoCoref( Vector<String> parses ) {
TreeFactory tf = new LabeledScoredTreeFactory();
Vector<String> verbs = new Vector<String>();
// Save the verbs in each parse
int sentence = 0;
for( String parse : parses ) {
try {
// Read the parse
PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
Tree ansTree = ptr.readTree();
// Look for the verbs in the tree
if( ansTree != null ) {
Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(ansTree);
for( Tree verb : parseVerbs )
// verbs.add(verb.firstChild().firstChild().value());
verbs.add(verb.firstChild().value());
}
} catch( Exception ex ) { ex.printStackTrace(); }
sentence++;
}
// do the pair counts
countPairs(verbs);
}
Example 10: analyzeParses
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
private void analyzeParses( GigaDoc doc, Collection<String> parses, Collection<EntityMention> entities ) {
if( entities != null ) {
TreeFactory tf = new LabeledScoredTreeFactory();
// Read in all the parse trees
Tree trees[] = new Tree[parses.size()];
int i = 0;
for( String parse : parses ) {
try {
PennTreeReader ptr = new PennTreeReader(new BufferedReader(new StringReader(parse)), tf);
trees[i] = ptr.readTree();
} catch( Exception ex ) { ex.printStackTrace(); }
i++;
}
// Convert all entity spans from character spans to word-based
for( EntityMention mention : entities ) {
mention.convertCharSpanToIndex(TreeOperator.toRaw(trees[mention.sentenceID()-1]));
doc.addEntity(mention);
// mentions[mention.sentenceID()-1].add(mention);
}
// Save the verbs in each parse
int sid = 0, eid = 0;
for( Tree tree : trees ) {
if( tree != null ) {
// Look for the verbs in the tree
Vector<Tree> parseVerbs = TreeOperator.verbTreesFromTree(tree);
for( Tree verb : parseVerbs ) {
// System.out.println(" verb: " + verb + " index: " + (TreeOperator.wordIndex(tree,verb)+1));
doc.addEvent(new WordEvent(eid, verb.firstChild().value(), TreeOperator.wordIndex(tree,verb)+1, sid+1));
eid++;
}
sid++;
}
}
}
}
Example 11: GigaExtractor
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public GigaExtractor() {
_tf = new LabeledScoredTreeFactory();
System.out.println("Loading Wordnet from: " + WordNet.findWordnetPath());
_wordnet = new WordNet(WordNet.findWordnetPath());
System.out.println("Loading IDF from: " + IDFMap.findIDFPath());
generalIDF = new IDFMap(IDFMap.findIDFPath());
System.out.println("Loading duplicate story names from: duplicates");
duplicates = GigawordDuplicates.fromFile("duplicates");
}
Example 12: getStanfordContituencyTree
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public Tree getStanfordContituencyTree() {
TreeFactory tf = new LabeledScoredTreeFactory();
StringReader r = new StringReader(getParseText());
TreeReader tr = new PennTreeReader(r, tf);
try {
return tr.readTree();
} catch (IOException e) {
throw new RuntimeException("Error: IOException should not be thrown by StringReader");
}
}
Example 13: load
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public void load(String penn) throws IOException
{
Reader in = new StringReader(penn);
PennTreeReader tr = new PennTreeReader(in, new LabeledScoredTreeFactory(),
new NPTmpRetainingTreeNormalizer());
bufferTree = tr.readTree();
}
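Unlike the plain readers in the earlier examples, this one passes an explicit TreeNormalizer: NPTmpRetainingTreeNormalizer normalizes the tree while keeping temporal function tags such as -TMP on noun phrases, which stricter normalizers discard. A minimal usage sketch follows; the loader variable and tree string are illustrative only.
// Hypothetical caller of the load(...) method above.
loader.load("(ROOT (S (NP (PRP He)) (VP (VBD left) (NP-TMP (NN yesterday))) (. .)))");
// The NP-TMP label survives into bufferTree because of NPTmpRetainingTreeNormalizer.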
Example 14: depScoreTree
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
/** Use the DependencyGrammar to score the tree.
*
* @param tr A binarized tree (as returned by the PCFG parser)
* @return The score for the tree according to the grammar
*/
private double depScoreTree(Tree tr) {
// System.err.println("Here's our tree:");
// tr.pennPrint();
// System.err.println(Trees.toDebugStructureString(tr));
Tree cwtTree = tr.deeperCopy(new LabeledScoredTreeFactory(), new CategoryWordTagFactory());
cwtTree.percolateHeads(binHeadFinder);
// System.err.println("Here's what it went to:");
// cwtTree.pennPrint();
List<IntDependency> deps = MLEDependencyGrammar.treeToDependencyList(cwtTree);
// System.err.println("Here's the deps:\n" + deps);
return dg.scoreAll(deps);
}
Example 15: ExhaustiveDependencyParser
import edu.stanford.nlp.trees.LabeledScoredTreeFactory; // import the required package/class
public ExhaustiveDependencyParser(DependencyGrammar dg, Lexicon lex, Options op) {
this.dg = dg;
this.lex = lex;
this.op = op;
this.tlp = op.langpack();
tf = new LabeledScoredTreeFactory(new CategoryWordTagFactory());
}