This article collects typical usage examples of the Java method edu.stanford.nlp.util.ReflectionLoading.loadByReflection. If you are wondering exactly what ReflectionLoading.loadByReflection does and how to use it, the curated code samples below should help. You can also explore further usage examples of the containing class, edu.stanford.nlp.util.ReflectionLoading.
The following shows 11 code examples of the ReflectionLoading.loadByReflection method, sorted by popularity by default.
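Before diving into the examples, here is a minimal, self-contained sketch of the call pattern, assuming the standard CoreNLP signature <T> T loadByReflection(String className, Object... args); the class name and constructor arguments below are hypothetical placeholders:

import edu.stanford.nlp.util.ReflectionLoading;

public class LoadByReflectionSketch {
  public static void main(String[] args) {
    try {
      // Hypothetical class name and constructor arguments: the varargs
      // after the class name are forwarded to a matching constructor,
      // and the freshly constructed instance is returned.
      Object segmenter =
        ReflectionLoading.loadByReflection("com.example.MySegmenter", "dict.txt", 42);
      System.out.println("Loaded " + segmenter.getClass().getName());
    } catch (ReflectionLoading.ReflectionLoadingException e) {
      // Thrown (unchecked) when the class cannot be found or no
      // constructor matches the supplied arguments.
      System.err.println("Could not instantiate: " + e.getMessage());
    }
  }
}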
Example 1: initializeTraining
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
@Override
public void initializeTraining(double numTrees) {
  this.uwModelTrainer =
    ReflectionLoading.loadByReflection(uwModelTrainerClass);
  uwModelTrainer.initializeTraining(op, this, wordIndex, tagIndex,
                                    numTrees);
}
Example 2: lex
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
/**
 * Returns a ChineseLexicon
 */
@Override
public Lexicon lex(Options op, Index<String> wordIndex, Index<String> tagIndex) {
  if (useCharacterBasedLexicon) {
    return lex = new ChineseCharacterBasedLexicon(this, wordIndex, tagIndex);
    // } else if (useMaxentLexicon) {
    //   return lex = new ChineseMaxentLexicon();
  }
  if (op.lexOptions.uwModelTrainer == null) {
    op.lexOptions.uwModelTrainer =
      "edu.stanford.nlp.parser.lexparser.ChineseUnknownWordModelTrainer";
  }
  ChineseLexicon clex = new ChineseLexicon(op, this, wordIndex, tagIndex);
  if (segmenterClass != null) {
    try {
      segmenter = ReflectionLoading.loadByReflection(segmenterClass, this,
                                                     wordIndex, tagIndex);
    } catch (ReflectionLoading.ReflectionLoadingException e) {
      segmenter = ReflectionLoading.loadByReflection(segmenterClass);
    }
  }
  if (segmenter != null) {
    lex = new ChineseLexiconAndWordSegmenter(clex, segmenter);
    ctlp.setTokenizerFactory(WordSegmentingTokenizer.factory(segmenter));
  } else {
    lex = clex;
  }
  return lex;
}
Example 3: lex
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
/**
 * Returns a ChineseLexicon
 */
@Override
public Lexicon lex(Options op, Index<String> wordIndex, Index<String> tagIndex) {
  if (useCharacterBasedLexicon) {
    return lex = new ChineseCharacterBasedLexicon(this, wordIndex, tagIndex);
    // } else if (useMaxentLexicon) {
    //   return lex = new ChineseMaxentLexicon();
  }
  if (op.lexOptions.uwModelTrainer == null) {
    op.lexOptions.uwModelTrainer =
      "edu.stanford.nlp.parser.lexparser.ChineseUnknownWordModelTrainer";
  }
  ChineseLexicon clex = new ChineseLexicon(op, this, wordIndex, tagIndex);
  if (segmenterClass != null) {
    try {
      segmenter = ReflectionLoading.loadByReflection(segmenterClass, this,
                                                     wordIndex, tagIndex);
    } catch (ReflectionLoading.ReflectionLoadingException e) {
      segmenter = ReflectionLoading.loadByReflection(segmenterClass);
    }
  }
  if (segmenter != null) {
    lex = new ChineseLexiconAndWordSegmenter(clex, segmenter);
  } else {
    lex = clex;
  }
  return lex;
}
Example 4: getParserFromTextFile
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
protected static LexicalizedParser getParserFromTextFile(String textFileOrUrl, Options op) {
  try {
    Timing tim = new Timing();
    System.err.print("Loading parser from text file " + textFileOrUrl + ' ');
    BufferedReader in = IOUtils.readerFromString(textFileOrUrl);
    Timing.startTime();
    String line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    op.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> stateIndex = HashIndex.loadFromReader(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> wordIndex = HashIndex.loadFromReader(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> tagIndex = HashIndex.loadFromReader(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Lexicon lex = op.tlpParams.lex(op, wordIndex, tagIndex);
    String uwmClazz = line.split(" +")[2];
    if (!uwmClazz.equals("null")) {
      UnknownWordModel model = ReflectionLoading.loadByReflection(uwmClazz, op, lex, wordIndex, tagIndex);
      lex.setUnknownWordModel(model);
    }
    lex.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    UnaryGrammar ug = new UnaryGrammar(stateIndex);
    ug.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    BinaryGrammar bg = new BinaryGrammar(stateIndex);
    bg.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    DependencyGrammar dg = new MLEDependencyGrammar(op.tlpParams, op.directional, op.distance, op.coarseDistance, op.trainOptions.basicCategoryTagsInDependencyGrammar, op, wordIndex, tagIndex);
    dg.readData(in);
    System.err.print(".");
    in.close();
    System.err.println(" done [" + tim.toSecondsString() + " sec].");
    return new LexicalizedParser(lex, bg, ug, dg, stateIndex, wordIndex, tagIndex, op);
  } catch (IOException e) {
    e.printStackTrace();
  }
  return null;
}
Example 5: init
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
void init(TaggerConfig config) {
  if (initted) return; // TODO: why not reinit?
  this.config = config;
  String lang, arch;
  String[] openClassTags, closedClassTags;
  if (config == null) {
    lang = "english";
    arch = "left3words";
    openClassTags = StringUtils.EMPTY_STRING_ARRAY;
    closedClassTags = StringUtils.EMPTY_STRING_ARRAY;
    wordFunction = null;
  } else {
    this.VERBOSE = config.getVerbose();
    lang = config.getLang();
    arch = config.getArch();
    openClassTags = config.getOpenClassTags();
    closedClassTags = config.getClosedClassTags();
    if (!config.getWordFunction().equals("")) {
      wordFunction =
        ReflectionLoading.loadByReflection(config.getWordFunction());
    }
    if (((openClassTags.length > 0) && !lang.equals("")) ||
        ((closedClassTags.length > 0) && !lang.equals("")) ||
        ((closedClassTags.length > 0) && (openClassTags.length > 0))) {
      throw new RuntimeException("At least two of lang (\"" + lang + "\"), openClassTags (length " + openClassTags.length + ": " + Arrays.toString(openClassTags) + "), " +
                                 "and closedClassTags (length " + closedClassTags.length + ": " + Arrays.toString(closedClassTags) + ") specified---you must choose one!");
    } else if ((openClassTags.length == 0) && lang.equals("") && (closedClassTags.length == 0) && !config.getLearnClosedClassTags()) {
      System.err.println("warning: no language set, no open-class tags specified, and no closed-class tags specified; assuming ALL tags are open class tags");
    }
  }
  if (openClassTags.length > 0) {
    tags = new TTags();
    tags.setOpenClassTags(openClassTags);
  } else if (closedClassTags.length > 0) {
    tags = new TTags();
    tags.setClosedClassTags(closedClassTags);
  } else {
    tags = new TTags(lang);
  }
  defaultScore = lang.equals("english") ? 1.0 : 0.0;
  if (config != null) {
    rareWordThresh = config.getRareWordThresh();
    minFeatureThresh = config.getMinFeatureThresh();
    curWordMinFeatureThresh = config.getCurWordMinFeatureThresh();
    rareWordMinFeatureThresh = config.getRareWordMinFeatureThresh();
    veryCommonWordThresh = config.getVeryCommonWordThresh();
    occurringTagsOnly = config.occurringTagsOnly();
    possibleTagsOnly = config.possibleTagsOnly();
    // System.err.println("occurringTagsOnly: " + occurringTagsOnly);
    // System.err.println("possibleTagsOnly: " + possibleTagsOnly);
    if (config.getDefaultScore() >= 0)
      defaultScore = config.getDefaultScore();
  }
  if (config == null || config.getMode() == TaggerConfig.Mode.TRAIN) {
    // initialize the extractors based on the arch variable
    // you only need to do this when training; otherwise they will be
    // restored from the serialized file
    extractors = new Extractors(ExtractorFrames.getExtractorFrames(arch));
    extractorsRare = new Extractors(ExtractorFramesRare.getExtractorFramesRare(arch, tags));
    setExtractorsGlobal();
  }
  ambClasses = new AmbiguityClasses(tags);
  initted = true;
}
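A note on the wordFunction load above: because loadByReflection is called with only the class name, the configured class needs a public no-argument constructor. As a hedged illustration (assuming, as recent CoreNLP releases do, that the expected type is java.util.function.Function<String, String>; older releases used CoreNLP's own Function interface), a hypothetical word function could look like this:

import java.util.function.Function;

// Hypothetical class for the tagger's wordFunction option; the name and
// behavior are illustrative, not part of CoreNLP.
public class LowercaseWordFunction implements Function<String, String> {
  @Override
  public String apply(String word) {
    return word.toLowerCase();
  }
}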
Example 6: ParserAnnotator
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
public ParserAnnotator(String annotatorName, Properties props) {
  String model = props.getProperty(annotatorName + ".model", LexicalizedParser.DEFAULT_PARSER_LOC);
  if (model == null) {
    throw new IllegalArgumentException("No model specified for " +
                                       "Parser annotator " +
                                       annotatorName);
  }
  this.VERBOSE = PropertiesUtils.getBool(props, annotatorName + ".debug", false);
  // will use DEFAULT_FLAGS if the flags are not set in the properties
  String[] flags = convertFlagsToArray(props.getProperty(annotatorName + ".flags"));
  this.parser = loadModel(model, VERBOSE, flags);
  this.maxSentenceLength = PropertiesUtils.getInt(props, annotatorName + ".maxlen", -1);
  String treeMapClass = props.getProperty(annotatorName + ".treemap");
  if (treeMapClass == null) {
    this.treeMap = null;
  } else {
    this.treeMap = ReflectionLoading.loadByReflection(treeMapClass, props);
  }
  this.maxParseTime = PropertiesUtils.getLong(props, annotatorName + ".maxtime", 0);
  String buildGraphsProperty = annotatorName + ".buildgraphs";
  if (!this.parser.getTLPParams().supportsBasicDependencies()) {
    if (props.getProperty(buildGraphsProperty) != null && PropertiesUtils.getBool(props, buildGraphsProperty)) {
      System.err.println("WARNING: " + buildGraphsProperty + " set to true, but " + this.parser.getTLPParams().getClass() + " does not support dependencies");
    }
    this.BUILD_GRAPHS = false;
  } else {
    this.BUILD_GRAPHS = PropertiesUtils.getBool(props, buildGraphsProperty, true);
  }
  if (this.BUILD_GRAPHS) {
    TreebankLanguagePack tlp = parser.getTLPParams().treebankLanguagePack();
    this.gsf = tlp.grammaticalStructureFactory(tlp.punctuationWordRejectFilter(), tlp.typedDependencyHeadFinder());
  } else {
    this.gsf = null;
  }
  this.nThreads = PropertiesUtils.getInt(props, annotatorName + ".nthreads", PropertiesUtils.getInt(props, "nthreads", 1));
}
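Note how the .treemap class above is constructed by reflection with the pipeline Properties as the single constructor argument. As a hedged sketch (assuming the annotator applies the loaded object as a Function<Tree, Tree>; verify against the ParserAnnotator version you use), a hypothetical tree map might look like this:

import java.util.Properties;
import java.util.function.Function;
import edu.stanford.nlp.trees.Tree;

// Hypothetical class for the parser annotator's ".treemap" property.
// loadByReflection(treeMapClass, props) requires a constructor that
// accepts the pipeline Properties.
public class IdentityTreeMap implements Function<Tree, Tree> {
  public IdentityTreeMap(Properties props) {
    // Read any custom options from props here.
  }

  @Override
  public Tree apply(Tree tree) {
    return tree; // no-op; a real transform would rewrite the tree
  }
}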
Example 7: init
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
void init(TaggerConfig config) {
  if (initted) return; // TODO: why not reinit?
  this.config = config;
  String lang, arch;
  String[] openClassTags, closedClassTags;
  if (config == null) {
    lang = "english";
    arch = "left3words";
    openClassTags = StringUtils.EMPTY_STRING_ARRAY;
    closedClassTags = StringUtils.EMPTY_STRING_ARRAY;
    wordFunction = null;
  } else {
    this.VERBOSE = config.getVerbose();
    lang = config.getLang();
    arch = config.getArch();
    openClassTags = config.getOpenClassTags();
    closedClassTags = config.getClosedClassTags();
    if (!config.getWordFunction().equals("")) {
      wordFunction =
        ReflectionLoading.loadByReflection(config.getWordFunction());
    }
    if (((openClassTags.length > 0) && !lang.equals("")) ||
        ((closedClassTags.length > 0) && !lang.equals("")) ||
        ((closedClassTags.length > 0) && (openClassTags.length > 0))) {
      throw new RuntimeException("At least two of lang (\"" + lang + "\"), openClassTags (length " + openClassTags.length + ": " + Arrays.toString(openClassTags) + "), " +
                                 "and closedClassTags (length " + closedClassTags.length + ": " + Arrays.toString(closedClassTags) + ") specified---you must choose one!");
    } else if ((openClassTags.length == 0) && lang.equals("") && (closedClassTags.length == 0) && !config.getLearnClosedClassTags()) {
      System.err.println("warning: no language set, no open-class tags specified, and no closed-class tags specified; assuming ALL tags are open class tags");
    }
  }
  if (openClassTags.length > 0) {
    tags = new TTags();
    tags.setOpenClassTags(openClassTags);
  } else if (closedClassTags.length > 0) {
    tags = new TTags();
    tags.setClosedClassTags(closedClassTags);
  } else {
    tags = new TTags(lang);
  }
  defaultScore = lang.equals("english") ? 1.0 : 0.0;
  if (config != null) {
    rareWordThresh = config.getRareWordThresh();
    minFeatureThresh = config.getMinFeatureThresh();
    curWordMinFeatureThresh = config.getCurWordMinFeatureThresh();
    rareWordMinFeatureThresh = config.getRareWordMinFeatureThresh();
    veryCommonWordThresh = config.getVeryCommonWordThresh();
    occurringTagsOnly = config.occurringTagsOnly();
    possibleTagsOnly = config.possibleTagsOnly();
    // System.err.println("occurringTagsOnly: " + occurringTagsOnly);
    // System.err.println("possibleTagsOnly: " + possibleTagsOnly);
    if (config.getDefaultScore() >= 0)
      defaultScore = config.getDefaultScore();
  }
  // just in case, reset the defaultScores array so it will be
  // recached later when needed. can't initialize it now in case we
  // don't know ysize yet
  defaultScores = null;
  if (config == null || config.getMode() == TaggerConfig.Mode.TRAIN) {
    // initialize the extractors based on the arch variable
    // you only need to do this when training; otherwise they will be
    // restored from the serialized file
    extractors = new Extractors(ExtractorFrames.getExtractorFrames(arch));
    extractorsRare = new Extractors(ExtractorFramesRare.getExtractorFramesRare(arch, tags));
    setExtractorsGlobal();
  }
  ambClasses = new AmbiguityClasses(tags);
  initted = true;
}
Example 8: getParserFromTextFile
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
protected static LexicalizedParser getParserFromTextFile(String textFileOrUrl, Options op) {
  try {
    Timing tim = new Timing();
    System.err.print("Loading parser from text file " + textFileOrUrl + ' ');
    BufferedReader in = IOUtils.readReaderFromString(textFileOrUrl);
    Timing.startTime();
    String line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    op.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> stateIndex = HashIndex.loadFromReader(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> wordIndex = HashIndex.loadFromReader(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> tagIndex = HashIndex.loadFromReader(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Lexicon lex = op.tlpParams.lex(op, wordIndex, tagIndex);
    String uwmClazz = line.split(" +")[2];
    if (!uwmClazz.equals("null")) {
      UnknownWordModel model = ReflectionLoading.loadByReflection(uwmClazz, op, lex, wordIndex, tagIndex);
      lex.setUnknownWordModel(model);
    }
    lex.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    UnaryGrammar ug = new UnaryGrammar(stateIndex);
    ug.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    BinaryGrammar bg = new BinaryGrammar(stateIndex);
    bg.readData(in);
    System.err.print(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    DependencyGrammar dg = new MLEDependencyGrammar(op.tlpParams, op.directional, op.distance, op.coarseDistance, op.trainOptions.basicCategoryTagsInDependencyGrammar, op, wordIndex, tagIndex);
    dg.readData(in);
    System.err.print(".");
    in.close();
    System.err.println(" done [" + tim.toSecondsString() + " sec].");
    return new LexicalizedParser(lex, bg, ug, dg, stateIndex, wordIndex, tagIndex, op);
  } catch (IOException e) {
    e.printStackTrace();
  }
  return null;
}
Example 9: getParserFromTextFile
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
protected static LexicalizedParser getParserFromTextFile(String textFileOrUrl, Options op) {
  try {
    Timing tim = new Timing();
    logger.trace("Loading parser from text file " + textFileOrUrl + ' ');
    BufferedReader in = IOUtils.readReaderFromString(textFileOrUrl);
    Timing.startTime();
    String line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    op.readData(in);
    logger.trace(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> stateIndex = HashIndex.loadFromReader(in);
    logger.trace(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> wordIndex = HashIndex.loadFromReader(in);
    logger.trace(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Index<String> tagIndex = HashIndex.loadFromReader(in);
    logger.trace(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    Lexicon lex = op.tlpParams.lex(op, wordIndex, tagIndex);
    String uwmClazz = line.split(" +")[2];
    if (!uwmClazz.equals("null")) {
      UnknownWordModel model = ReflectionLoading.loadByReflection(uwmClazz, op, lex, wordIndex, tagIndex);
      lex.setUnknownWordModel(model);
    }
    lex.readData(in);
    logger.trace(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    UnaryGrammar ug = new UnaryGrammar(stateIndex);
    ug.readData(in);
    logger.trace(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    BinaryGrammar bg = new BinaryGrammar(stateIndex);
    bg.readData(in);
    logger.trace(".");
    line = in.readLine();
    confirmBeginBlock(textFileOrUrl, line);
    DependencyGrammar dg = new MLEDependencyGrammar(op.tlpParams, op.directional, op.distance, op.coarseDistance, op.trainOptions.basicCategoryTagsInDependencyGrammar, op, wordIndex, tagIndex);
    dg.readData(in);
    logger.trace(".");
    in.close();
    logger.trace(" done [" + tim.toSecondsString() + " sec].");
    return new LexicalizedParser(lex, bg, ug, dg, stateIndex, wordIndex, tagIndex, op);
  } catch (IOException e) {
    e.printStackTrace();
  }
  return null;
}
Example 10: solveL1
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
/**
 * Solves the problem using OWLQN. The solution is stored in the
 * <code>lambda</code> array of <code>prob</code>. Note that the
 * likelihood function will be a penalized L2 likelihood function unless you
 * have turned this off via setting the priorSigmaS to 0.0.
 *
 * @param weight Controls the sparseness/regularization of the L1 solution.
 *               The bigger the number the sparser the solution. Weights between
 *               0.01 and 1.0 typically give good performance.
 */
public void solveL1(double weight) {
  LikelihoodFunction df = new LikelihoodFunction(prob, tol, useGaussianPrior, priorSigmaS, sigmaSquareds);
  Minimizer<DiffFunction> owl = ReflectionLoading.loadByReflection("edu.stanford.nlp.optimization.OWLQNMinimizer", weight);
  double[] result = owl.minimize(df, tol, new double[df.domainDimension()]);
  prob.lambda = result;
  System.err.println("after optimization value is " + df.valueAt(result));
}
Example 11: solveL1
import edu.stanford.nlp.util.ReflectionLoading; // import the package/class this method depends on
/**
 * Solves the problem using OWLQN. The solution
 * is stored in the {@code lambda} array of {@code prob}. Note that the
 * likelihood function will be a penalized L2 likelihood function unless you
 * have turned this off via setting the priorSigmaS to 0.0.
 *
 * @param weight Controls the sparseness/regularization of the L1 solution.
 *               The bigger the number the sparser the solution. Weights between
 *               0.01 and 1.0 typically give good performance.
 */
public void solveL1(double weight) {
  LikelihoodFunction df = new LikelihoodFunction(prob, tol, useGaussianPrior, priorSigmaS, sigmaSquareds);
  Minimizer<DiffFunction> owl = ReflectionLoading.loadByReflection("edu.stanford.nlp.optimization.OWLQNMinimizer", weight);
  prob.lambda = owl.minimize(df, tol, new double[df.domainDimension()]);
  printOptimizationResults(df, null);
}
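Examples 10 and 11 load OWLQNMinimizer reflectively instead of referencing it directly, which lets the calling code compile even when the OWLQN classes are not on the classpath. A guarded variant of that pattern might look like the following sketch; the fallback to CoreNLP's QNMinimizer (plain L-BFGS, i.e. no L1 penalty) is an assumption for illustration, not part of the original examples:

import edu.stanford.nlp.optimization.DiffFunction;
import edu.stanford.nlp.optimization.Minimizer;
import edu.stanford.nlp.optimization.QNMinimizer;
import edu.stanford.nlp.util.ReflectionLoading;

public class OwlqnLoader {
  // Try the OWLQN (L1) minimizer first; fall back to L-BFGS if the
  // class is absent. The constructor argument is the L1 regularization
  // weight, matching the examples above.
  static Minimizer<DiffFunction> owlqnOrLbfgs(double weight) {
    try {
      return ReflectionLoading.loadByReflection(
          "edu.stanford.nlp.optimization.OWLQNMinimizer", weight);
    } catch (ReflectionLoading.ReflectionLoadingException e) {
      return new QNMinimizer();
    }
  }
}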