This article collects typical usage examples of the Java class edu.berkeley.nlp.PCFGLA.StateSetTreeList. If you are unsure what StateSetTreeList is for, how to use it, or want to see it in context, the curated class code examples below may help.
The StateSetTreeList class belongs to the edu.berkeley.nlp.PCFGLA package. Eight code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
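Before the individual examples, here is a minimal sketch of the pattern most of them share: wrapping binarized Tree<String> training trees in a StateSetTreeList and iterating over the resulting Tree<StateSet> trees. The helper name buildStateSetTrees and its parameters are hypothetical and shown only for orientation; the constructor call and the iteration mirror Examples 1, 4, 5, and 8 below.
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class

// Hypothetical helper, for illustration only; the arguments would come from
// earlier training steps (e.g. countSymbols and Numberer.getGlobalNumberer("tags")).
private StateSetTreeList buildStateSetTrees(List<Tree<String>> binarizedTrees,
        short[] numSubStates, Numberer tagNumberer) {
    // Convert the string trees into StateSet trees, one substate count per tag;
    // the boolean flag matches the "false" passed in the examples below.
    StateSetTreeList stateSetTrees = new StateSetTreeList(binarizedTrees,
            numSubStates, false, tagNumberer);
    // The list is iterable as Tree<StateSet>, e.g. to tally rule counts into a
    // Grammar (see Examples 4 and 5).
    for (Tree<StateSet> stateSetTree : stateSetTrees) {
        // process stateSetTree here, e.g. setScores(...) followed by
        // grammar.tallyStateSetTree(stateSetTree, grammar);
    }
    return stateSetTrees;
}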
Example 1: labelTrees
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
private void labelTrees(Grammar grammar, List<Tree<String>> trainTrees,
        List<List<String>> goldPOStags) {
    List<Tree<String>> trainTreesNoGF = stripOffGF(trainTrees);
    // Convert the string trees into StateSet trees with the grammar's substate counts.
    StateSetTreeList stateSetTrees = new StateSetTreeList(trainTreesNoGF,
            grammar.numSubStates, false, tagNumberer);
    int index = 0;
    for (Tree<StateSet> stateSetTree : stateSetTrees) {
        List<String> goldPOS = goldPOStags.get(index++);
        Tree<String> labeledTree = guessGF(stateSetTree, grammar, goldPOS);
        // Debinarize: splice out the @-nodes introduced by binarization.
        Tree<String> debinarizedTree = Trees.spliceNodes(labeledTree,
                new Filter<String>() {
                    public boolean accept(String s) {
                        return s.startsWith("@");
                    }
                });
        System.out.println(debinarizedTree + "\n");
    }
}
Example 2: Calculator
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
Calculator(StateSetTreeList myT, String consN, int i, Grammar gr,
        Lexicon lex, SpanPredictor sp, int dimension, boolean notProject) {
    // this.nGrWeights = nGrWeights;
    this.nCounts = dimension;
    this.consName = consN;
    this.myTrees = myT;
    this.doNotProjectConstraints = notProject;
    this.myID = i;
    gParser = new ArrayParser(gr, lex);
    eParser = newEParser(gr, lex, sp);
}
Example 3: Merger
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
Merger(StateSetTreeList myT, String consN, int i, Grammar gr,
        Lexicon lex, double[][] mergeWeights) {
    this.consName = consN;
    this.myTrees = myT;
    this.myID = i;
    this.mergeWeights = mergeWeights;
    gParser = new ArrayParser(gr, lex);
    eParser = new ConstrainedTwoChartsParser(gr, lex, null);
}
Example 4: extractGrammar
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
public Grammar extractGrammar(List<Tree<String>> trainTrees) {
    tagNumberer = Numberer.getGlobalNumberer("tags");
    substateNumberers = new ArrayList<Numberer>();
    short[] numSubStates = countSymbols(trainTrees);
    List<Tree<String>> trainTreesNoGF = stripOffGF(trainTrees);
    // Wrap the training trees as StateSet trees before estimating the grammar.
    StateSetTreeList stateSetTrees = new StateSetTreeList(trainTreesNoGF,
            numSubStates, false, tagNumberer);
    Grammar grammar = createGrammar(stateSetTrees, trainTrees, numSubStates);
    return grammar;
}
Example 5: createGrammar
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
private Grammar createGrammar(StateSetTreeList stateSetTrees,
        List<Tree<String>> trainTrees, short[] numSubStates) {
    Grammar grammar = new Grammar(numSubStates, false, new NoSmoothing(),
            null, -1);
    int index = 0;
    // Tally rule counts from every StateSet tree, then re-estimate the grammar.
    for (Tree<StateSet> stateSetTree : stateSetTrees) {
        Tree<String> tree = trainTrees.get(index++);
        setScores(stateSetTree, tree);
        grammar.tallyStateSetTree(stateSetTree, grammar);
    }
    grammar.optimize(0); // M Step
    return grammar;
}
Example 6: ParsingObjectiveFunction
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
public ParsingObjectiveFunction(Linearizer linearizer,
        StateSetTreeList trainTrees, double sigma, int regularization,
        String consName, int nProc, String outName,
        boolean doNotProjectConstraints, boolean combinedLexicon) {
    this.sigma = sigma;
    this.myRegularization = regularization;
    this.grammar = linearizer.getGrammar();// .copyGrammar();
    this.lexicon = linearizer.getLexicon();// .copyLexicon();
    this.spanPredictor = linearizer.getSpanPredictor();
    this.linearizer = linearizer;
    this.outFileName = outName;
    this.dimension = linearizer.dimension();
    nGrammarWeights = linearizer.getNGrammarWeights();
    nLexiconWeights = linearizer.getNLexiconWeights();
    nSpanWeights = linearizer.getNSpanWeights();
    if (spanPredictor != null)
        this.spanGoldCounts = spanPredictor
                .countGoldSpanFeatures(trainTrees);
    int nTreesPerBlock = trainTrees.size() / nProc;
    this.consBaseName = consName;
    boolean[][][][][] tmp = edu.berkeley.nlp.PCFGLA.ParserConstrainer
            .loadData(consName + "-0.data");
    if (tmp != null)
        nTreesPerBlock = tmp.length;
    // split the trees into chunks
    this.nProcesses = nProc;
    trainingTrees = new StateSetTreeList[nProcesses];
    // allowedStates = new ArrayList[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        trainingTrees[i] = new StateSetTreeList();
        // allowedStates[i] = new ArrayList<boolean[][][][]>();
    }
    int block = -1;
    int inBlock = 0;
    for (int i = 0; i < trainTrees.size(); i++) {
        if (i % nTreesPerBlock == 0) {
            block++;
            // System.out.println(inBlock);
            inBlock = 0;
        }
        // assign whole blocks of trees to the worker lists in round-robin order
        trainingTrees[block % nProcesses].add(trainTrees.get(i));
        inBlock++;
        // if (cons!=null)
        // allowedStates[i%nProcesses].add(ArrayUtil.clone(cons[i]));
    }
    for (int i = 0; i < nProcesses; i++) {
        System.out.println("Process " + i + " has "
                + trainingTrees[i].size() + " trees.");
    }
    trainTrees = null;
    pool = Executors.newFixedThreadPool(nProcesses);// CachedThreadPool();
    tasks = new Calculator[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        tasks[i] = newCalculator(doNotProjectConstraints, i);
    }
    this.bestObjectiveSoFar = Double.POSITIVE_INFINITY;
}
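The block-splitting loop above (repeated in Example 7) distributes the training trees over the worker threads: whole blocks of nTreesPerBlock trees are assigned to the per-process StateSetTreeLists in round-robin order. A condensed sketch of just that step, with the helper name splitIntoBlocks invented for illustration, might look like this:
// Hypothetical helper distilled from the constructor above: splits the trees
// into nProcesses lists, assigning whole blocks round-robin to the workers.
private static StateSetTreeList[] splitIntoBlocks(StateSetTreeList trees,
        int nProcesses, int nTreesPerBlock) {
    StateSetTreeList[] chunks = new StateSetTreeList[nProcesses];
    for (int p = 0; p < nProcesses; p++) {
        chunks[p] = new StateSetTreeList();
    }
    int block = -1;
    for (int i = 0; i < trees.size(); i++) {
        if (i % nTreesPerBlock == 0) {
            block++; // start a new block
        }
        chunks[block % nProcesses].add(trees.get(i));
    }
    return chunks;
}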
Example 7: ConditionalMerger
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
/**
 * @param processes
 * @param consBaseName
 * @param trainTrees
 */
public ConditionalMerger(int processes, String consBaseName,
        StateSetTreeList trainTrees, Grammar gr, Lexicon lex,
        double mergingPercentage, String outFileName) {
    this.nProcesses = processes;
    this.consBaseName = consBaseName;
    this.grammar = gr;// .copyGrammar();
    this.lexicon = lex;// .copyLexicon();
    this.mergingPercentage = mergingPercentage;
    this.outFileName = outFileName;
    int nTreesPerBlock = trainTrees.size() / processes;
    this.consBaseName = consBaseName;
    boolean[][][][][] tmp = edu.berkeley.nlp.PCFGLA.ParserConstrainer
            .loadData(consBaseName + "-0.data");
    if (tmp != null)
        nTreesPerBlock = tmp.length;
    // first compute the generative merging criterion
    mergeWeights = GrammarMerger.computeMergeWeights(grammar, lexicon,
            trainTrees);
    double[][][] deltas = GrammarMerger.computeDeltas(grammar, lexicon,
            mergeWeights, trainTrees);
    boolean[][][] mergeThesePairs = GrammarMerger.determineMergePairs(
            deltas, false, mergingPercentage, grammar);
    Grammar tmpGrammar = grammar.copyGrammar(true);
    Lexicon tmpLexicon = lexicon.copyLexicon();
    tmpGrammar = GrammarMerger.doTheMerges(tmpGrammar, tmpLexicon,
            mergeThesePairs, mergeWeights);
    System.out.println("Generative merging criterion gives:");
    GrammarMerger.printMergingStatistics(grammar, tmpGrammar);
    mergeWeights = GrammarMerger.computeMergeWeights(grammar, lexicon,
            trainTrees);
    // split the trees into chunks
    trainingTrees = new StateSetTreeList[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        trainingTrees[i] = new StateSetTreeList();
    }
    int block = -1;
    int inBlock = 0;
    for (int i = 0; i < trainTrees.size(); i++) {
        if (i % nTreesPerBlock == 0) {
            block++;
            System.out.println(inBlock);
            inBlock = 0;
        }
        trainingTrees[block % nProcesses].add(trainTrees.get(i));
        inBlock++;
    }
    trainTrees = null;
    pool = Executors.newFixedThreadPool(nProcesses);// CachedThreadPool();
    tasks = new Merger[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        tasks[i] = new Merger(trainingTrees[i], consBaseName, i, grammar,
                lexicon, mergeWeights);
    }
}
Example 8: createGrammar
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the required package/class
private static ParserData createGrammar(List<Tree<String>> trainTrees,
        boolean smooth) {
    tagNumberer = Numberer.getGlobalNumberer("tags");
    substateNumberers = new ArrayList<Numberer>();
    short[] numSubStates = countSymbols(trainTrees);
    List<Tree<String>> trainTreesNoAnnotation = stripOffAnnotation(trainTrees);
    StateSetTreeList stateSetTrees = new StateSetTreeList(
            trainTreesNoAnnotation, numSubStates, false, tagNumberer);
    Grammar grammar = new Grammar(numSubStates, false, new NoSmoothing(),
            null, -1);
    Lexicon lexicon = new SophisticatedLexicon(numSubStates,
            SophisticatedLexicon.DEFAULT_SMOOTHING_CUTOFF, new double[] {
                    0.5, 0.1 }, new NoSmoothing(), 0);
    if (smooth) {
        System.out.println("Will smooth the grammar.");
        Smoother grSmoother = new SmoothAcrossParentSubstate(0.01);
        Smoother lexSmoother = new SmoothAcrossParentSubstate(0.1);
        grammar.setSmoother(grSmoother);
        lexicon.setSmoother(lexSmoother);
    }
    System.out.print("Creating grammar...");
    int index = 0;
    boolean secondHalf = false;
    int nTrees = trainTrees.size();
    // Train lexicon and grammar from the StateSet trees, then re-estimate both.
    for (Tree<StateSet> stateSetTree : stateSetTrees) {
        Tree<String> tree = trainTrees.get(index++);
        secondHalf = (index > nTrees / 2.0);
        setScores(stateSetTree, tree);
        lexicon.trainTree(stateSetTree, 0, null, secondHalf, false, 4);
        grammar.tallyStateSetTree(stateSetTree, grammar);
    }
    lexicon.optimize();
    grammar.optimize(0);
    System.out.println("done.");
    ParserData pData = new ParserData(lexicon, grammar, null,
            Numberer.getNumberers(), numSubStates, 1, 0, Binarization.RIGHT);
    return pData;
}