This article collects typical usage examples of the Java method edu.berkeley.nlp.PCFGLA.StateSetTreeList.size. If you have been wondering what StateSetTreeList.size does, how to call it, or what it looks like in real code, the curated examples here should help. You can also explore further usage examples of its enclosing class, edu.berkeley.nlp.PCFGLA.StateSetTreeList.
Two code examples of the StateSetTreeList.size method are shown below, sorted by popularity by default.
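Before the full examples, a minimal sketch of the call itself may help. It assumes the Berkeley parser jar is on the classpath; the class and variable names (StateSetTreeListSizeDemo, trainTrees, nProc) are illustrative and not taken from the examples below.

import edu.berkeley.nlp.PCFGLA.StateSetTreeList;

public class StateSetTreeListSizeDemo {
    public static void main(String[] args) {
        // Empty list here; in real use it would hold the binarized training trees.
        StateSetTreeList trainTrees = new StateSetTreeList();
        int nProc = 4; // illustrative worker count
        // size() returns the number of trees; both constructors below use it
        // to derive how many trees each worker thread should receive.
        int nTrees = trainTrees.size();
        int nTreesPerBlock = (nTrees == 0) ? 0 : Math.max(1, nTrees / nProc);
        System.out.println(nTrees + " trees, " + nTreesPerBlock + " per block");
    }
}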
Example 1: ParsingObjectiveFunction
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the package/class that the method depends on
public ParsingObjectiveFunction(Linearizer linearizer,
        StateSetTreeList trainTrees, double sigma, int regularization,
        String consName, int nProc, String outName,
        boolean doNotProjectConstraints, boolean combinedLexicon) {
    this.sigma = sigma;
    this.myRegularization = regularization;
    this.grammar = linearizer.getGrammar();// .copyGrammar();
    this.lexicon = linearizer.getLexicon();// .copyLexicon();
    this.spanPredictor = linearizer.getSpanPredictor();
    this.linearizer = linearizer;
    this.outFileName = outName;
    this.dimension = linearizer.dimension();
    nGrammarWeights = linearizer.getNGrammarWeights();
    nLexiconWeights = linearizer.getNLexiconWeights();
    nSpanWeights = linearizer.getNSpanWeights();
    if (spanPredictor != null)
        this.spanGoldCounts = spanPredictor
                .countGoldSpanFeatures(trainTrees);
    int nTreesPerBlock = trainTrees.size() / nProc;
    this.consBaseName = consName;
    boolean[][][][][] tmp = edu.berkeley.nlp.PCFGLA.ParserConstrainer
            .loadData(consName + "-0.data");
    if (tmp != null)
        nTreesPerBlock = tmp.length;
    // split the trees into chunks
    this.nProcesses = nProc;
    trainingTrees = new StateSetTreeList[nProcesses];
    // allowedStates = new ArrayList[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        trainingTrees[i] = new StateSetTreeList();
        // allowedStates[i] = new ArrayList<boolean[][][][]>();
    }
    int block = -1;
    int inBlock = 0;
    for (int i = 0; i < trainTrees.size(); i++) {
        if (i % nTreesPerBlock == 0) {
            block++;
            // System.out.println(inBlock);
            inBlock = 0;
        }
        trainingTrees[block % nProcesses].add(trainTrees.get(i));
        inBlock++;
        // if (cons!=null)
        // allowedStates[i%nProcesses].add(ArrayUtil.clone(cons[i]));
    }
    for (int i = 0; i < nProcesses; i++) {
        System.out.println("Process " + i + " has "
                + trainingTrees[i].size() + " trees.");
    }
    trainTrees = null;
    pool = Executors.newFixedThreadPool(nProcesses);// CachedThreadPool();
    tasks = new Calculator[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        tasks[i] = newCalculator(doNotProjectConstraints, i);
    }
    this.bestObjectiveSoFar = Double.POSITIVE_INFINITY;
}
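Both examples rely on the same round-robin chunking idiom: trainTrees.size() fixes a block size, and blocks are dealt out to workers with block % nProcesses. The sketch below isolates that arithmetic with plain integers so the wrap-around behavior is visible; the names (nTrees, nProcesses, nTreesPerBlock) mirror the example, but the code itself is illustrative and not part of the Berkeley parser.

// Illustrative only: shows which worker each tree index lands on under the
// block-splitting scheme used above (block size derived from size()).
public class BlockSplitDemo {
    public static void main(String[] args) {
        int nTrees = 10;     // stand-in for trainTrees.size()
        int nProcesses = 3;  // stand-in for nProc
        int nTreesPerBlock = nTrees / nProcesses; // 3 trees per block here
        int block = -1;
        for (int i = 0; i < nTrees; i++) {
            if (i % nTreesPerBlock == 0) {
                block++; // start a new block; assignment wraps around workers
            }
            int worker = block % nProcesses;
            System.out.println("tree " + i + " -> worker " + worker);
        }
    }
}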
Example 2: ConditionalMerger
import edu.berkeley.nlp.PCFGLA.StateSetTreeList; // import the package/class that the method depends on
/**
* @param processes
* @param consBaseName
* @param trainingTrees
*/
public ConditionalMerger(int processes, String consBaseName,
        StateSetTreeList trainTrees, Grammar gr, Lexicon lex,
        double mergingPercentage, String outFileName) {
    this.nProcesses = processes;
    this.consBaseName = consBaseName;
    this.grammar = gr;// .copyGrammar();
    this.lexicon = lex;// .copyLexicon();
    this.mergingPercentage = mergingPercentage;
    this.outFileName = outFileName;
    int nTreesPerBlock = trainTrees.size() / processes;
    this.consBaseName = consBaseName;
    boolean[][][][][] tmp = edu.berkeley.nlp.PCFGLA.ParserConstrainer
            .loadData(consBaseName + "-0.data");
    if (tmp != null)
        nTreesPerBlock = tmp.length;
    // first compute the generative merging criterion
    mergeWeights = GrammarMerger.computeMergeWeights(grammar, lexicon,
            trainTrees);
    double[][][] deltas = GrammarMerger.computeDeltas(grammar, lexicon,
            mergeWeights, trainTrees);
    boolean[][][] mergeThesePairs = GrammarMerger.determineMergePairs(
            deltas, false, mergingPercentage, grammar);
    Grammar tmpGrammar = grammar.copyGrammar(true);
    Lexicon tmpLexicon = lexicon.copyLexicon();
    tmpGrammar = GrammarMerger.doTheMerges(tmpGrammar, tmpLexicon,
            mergeThesePairs, mergeWeights);
    System.out.println("Generative merging criterion gives:");
    GrammarMerger.printMergingStatistics(grammar, tmpGrammar);
    mergeWeights = GrammarMerger.computeMergeWeights(grammar, lexicon,
            trainTrees);
    // split the trees into chunks
    trainingTrees = new StateSetTreeList[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        trainingTrees[i] = new StateSetTreeList();
    }
    int block = -1;
    int inBlock = 0;
    for (int i = 0; i < trainTrees.size(); i++) {
        if (i % nTreesPerBlock == 0) {
            block++;
            System.out.println(inBlock);
            inBlock = 0;
        }
        trainingTrees[block % nProcesses].add(trainTrees.get(i));
        inBlock++;
    }
    trainTrees = null;
    pool = Executors.newFixedThreadPool(nProcesses);// CachedThreadPool();
    tasks = new Merger[nProcesses];
    for (int i = 0; i < nProcesses; i++) {
        tasks[i] = new Merger(trainingTrees[i], consBaseName, i, grammar,
                lexicon, mergeWeights);
    }
}
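After partitioning, both constructors create a fixed thread pool sized to nProcesses and one task per chunk (Calculator in Example 1, Merger in Example 2). Below is a generic sketch of that fan-out pattern using only the standard library, with a trivial Callable standing in for those task classes; the class name and task body are purely illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FixedPoolFanOutDemo {
    public static void main(String[] args) throws Exception {
        int nProcesses = 3;
        ExecutorService pool = Executors.newFixedThreadPool(nProcesses);
        List<Future<String>> futures = new ArrayList<>();
        for (int i = 0; i < nProcesses; i++) {
            final int chunk = i;
            // Stand-in for the per-chunk Calculator / Merger tasks above.
            Callable<String> task = () -> "chunk " + chunk + " processed";
            futures.add(pool.submit(task));
        }
        for (Future<String> f : futures) {
            System.out.println(f.get()); // block until each worker finishes
        }
        pool.shutdown();
    }
}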