本文整理汇总了Java中edu.stanford.nlp.util.PropertiesUtils.getBool方法的典型用法代码示例。如果您正苦于以下问题:Java PropertiesUtils.getBool方法的具体用法?Java PropertiesUtils.getBool怎么用?Java PropertiesUtils.getBool使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类edu.stanford.nlp.util.PropertiesUtils
的用法示例。
在下文中一共展示了PropertiesUtils.getBool方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: ArabicSegmentorAnnotator
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Builds an Arabic segmentor annotator from the properties scoped under
 * "{name}.". The "{name}.model" property names the model; every other
 * "{name}.*" property is forwarded (prefix-stripped) to the segmenter.
 */
public ArabicSegmentorAnnotator(String name, Properties props) {
  // We are only interested in {name}.* properties
  final String prefix = name + '.';
  final Properties segProps = new Properties();
  String model = null;
  for (String key : props.stringPropertyNames()) {
    if (!key.startsWith(prefix)) {
      continue;
    }
    // skip past name and the subsequent "."
    String strippedKey = key.substring(prefix.length());
    String value = props.getProperty(key);
    if ("model".equals(strippedKey)) {
      model = value;
    } else {
      segProps.setProperty(strippedKey, value);
    }
  }
  this.VERBOSE = PropertiesUtils.getBool(props, name + ".verbose", true);
  init(model, segProps);
}
示例2: ChineseSegmenterAnnotator
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Builds a Chinese segmenter annotator from the properties scoped under
 * "{name}.". Requires "{name}.model"; all other "{name}.*" properties are
 * forwarded (prefix-stripped) to the segmenter model.
 *
 * @throws RuntimeException if the "{name}.model" property is missing
 */
public ChineseSegmenterAnnotator(String name, Properties props) {
  // Hoist the loop-invariant prefix instead of rebuilding "name + '.'"
  // on every iteration (also matches the ArabicSegmentorAnnotator idiom).
  String prefix = name + '.';
  String model = null;
  // Keep only the properties that apply to this annotator
  Properties modelProps = new Properties();
  for (String key : props.stringPropertyNames()) {
    if (key.startsWith(prefix)) {
      // skip past name and the subsequent "."
      String modelKey = key.substring(prefix.length());
      if (modelKey.equals("model")) {
        model = props.getProperty(key);
      } else {
        modelProps.setProperty(modelKey, props.getProperty(key));
      }
    }
  }
  this.VERBOSE = PropertiesUtils.getBool(props, prefix + "verbose", true);
  if (model == null) {
    throw new RuntimeException("Expected a property " + name + ".model");
  }
  loadModel(model, modelProps);
}
示例3: ArabicLexer
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Builds an Arabic lexer over {@code r}, reading its tokenization and
 * normalization switches from {@code props}. Every switch defaults to false
 * when absent.
 */
public ArabicLexer(Reader r, LexedTokenFactory<?> tf, Properties props) {
  this(r);
  this.tokenFactory = tf;
  tokenizeNL = boolOption(props, "tokenizeNLs");
  useUTF8Ellipsis = boolOption(props, "useUTF8Ellipsis");
  invertible = boolOption(props, "invertible");
  normArDigits = boolOption(props, "normArDigits");
  normArPunc = boolOption(props, "normArPunc");
  normAlif = boolOption(props, "normAlif");
  normYa = boolOption(props, "normYa");
  removeDiacritics = boolOption(props, "removeDiacritics");
  removeTatweel = boolOption(props, "removeTatweel");
  removeQuranChars = boolOption(props, "removeQuranChars");
  removeProMarker = boolOption(props, "removeProMarker");
  removeSegMarker = boolOption(props, "removeSegMarker");
  removeMorphMarker = boolOption(props, "removeMorphMarker");
  atbEscaping = boolOption(props, "atbEscaping");
  setupNormalizationMap();
}

/** Reads a boolean option from {@code props}, defaulting to false when absent. */
private static boolean boolOption(Properties props, String key) {
  return PropertiesUtils.getBool(props, key, false);
}
示例4: JMWEAnnotator
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Annotator to capture Multi-Word Expressions (MWE).
 *
 * Required properties: customAnnotatorClass.jmwe.underscoreReplacement,
 * customAnnotatorClass.jmwe.indexData and customAnnotatorClass.jmwe.detector.
 *
 * @param name
 *          annotator name
 * @param props
 *          the properties
 * @throws RuntimeException if a required property is missing, the replacement
 *           string contains an underscore, or the index file does not exist
 */
public JMWEAnnotator(String name, Properties props) {
  // set verbosity
  this.verbose = PropertiesUtils.getBool(props, "customAnnotatorClass.jmwe.verbose", false);
  // set underscoreSpaceReplacement
  underscoreSpaceReplacement = requireProperty(props, "customAnnotatorClass.jmwe.underscoreReplacement");
  // The replacement is substituted for "_" in MWE tokens, so it must not
  // itself contain an underscore.
  if (underscoreSpaceReplacement.contains("_")) {
    throw new RuntimeException("The underscoreReplacement contains an underscore character");
  }
  // set index
  File indexFile = new File(requireProperty(props, "customAnnotatorClass.jmwe.indexData"));
  if (!indexFile.exists()) {
    throw new RuntimeException("index file " + indexFile.getAbsoluteFile() + " does not exist");
  }
  this.index = new MWEIndex(indexFile);
  // set detector
  this.detectorName = requireProperty(props, "customAnnotatorClass.jmwe.detector");
  if (this.verbose) {
    System.out.println("verbose: " + this.verbose);
    System.out.println("underscoreReplacement: " + this.underscoreSpaceReplacement);
    System.out.println("indexData: " + this.index);
    System.out.println("detectorName: " + this.detectorName);
  }
}

/**
 * Returns the value of {@code key}, failing with the original error message
 * when the property is absent. Uses the type-safe getProperty instead of the
 * original's raw props.get(key) cast.
 */
private static String requireProperty(Properties props, String key) {
  if (!PropertiesUtils.hasProperty(props, key)) {
    throw new RuntimeException("No " + key + " key in properties found");
  }
  return props.getProperty(key);
}
示例5: main
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Loads serialized CoreNLP annotations and prints the dependencies of each
 * cached sentence; "-changepreps" toggles preposition rewriting.
 */
public static void main(String[] args) {
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  String annotations = PropertiesUtils.get(options, "annotations", null, String.class);
  boolean changepreps = PropertiesUtils.getBool(options, "changepreps", false);
  int sentenceCount = CoreNLPCache.loadSerialized(annotations);
  for (int i = 0; i < sentenceCount; i++) {
    try {
      CoreMap sentence = CoreNLPCache.get(i);
      if (sentence != null) {
        printDependencies(sentence, changepreps);
        //System.err.println("---------------------------");
      } else {
        // Keep stdout line-aligned with the input even for empty sentences.
        System.out.println();
        System.err.println("Empty sentence #" + i);
      }
    } catch (Exception e) {
      // Abort on the first failure, reporting which sentence broke.
      System.err.println("SourceSentence #" + i);
      e.printStackTrace();
      return;
    }
  }
}
示例6: main
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Reads translations from stdin and prints one metric score per segment
 * (default metric: bleu) against the reference files given as bare
 * command-line arguments.
 *
 * @param args command-line options plus reference filenames
 * @throws IOException if stdin cannot be read
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    System.err.print(usage());
    System.exit(-1);
  }
  Properties options = StringUtils.argsToProperties(args, argDefs());
  int ngramOrder = PropertiesUtils.getInt(options, "order", BLEUMetric.DEFAULT_MAX_NGRAM_ORDER);
  boolean disableTokenization = PropertiesUtils.getBool(options, "no-nist", false);
  String metric = options.getProperty("metric", "bleu");
  // Bare arguments (stored under the "" key) are the reference files.
  String[] refs = options.getProperty("").split("\\s+");
  List<List<Sequence<IString>>> referencesList = MetricUtils.readReferences(refs, ! disableTokenization);
  System.err.printf("Metric: %s with %d references%n", metric, referencesList.get(0).size());
  LineNumberReader reader = new LineNumberReader(new InputStreamReader(System.in));
  int sourceInputId = 0;
  String line;
  while ((line = reader.readLine()) != null) {
    if (!disableTokenization) {
      line = NISTTokenizer.tokenize(line);
    }
    Sequence<IString> translation = IStrings.tokenize(line);
    double score = getScore(translation, referencesList.get(sourceInputId), ngramOrder, metric);
    System.out.printf("%.4f%n", score);
    ++sourceInputId;
  }
  System.err.printf("Scored %d input segments%n", sourceInputId);
}
示例7: NGramLanguageModelFeaturizer
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Constructor called by Phrasal when NGramLanguageModelFeaturizer appears in
 * <code>Phrasal.LANGUAGE_MODEL_OPT</code>.
 *
 * The first argument is always the language model filename and the second
 * argument is always the feature name. Any further arguments are named
 * parameters ("classBased", "classMap").
 */
public NGramLanguageModelFeaturizer(String...args) throws IOException {
  if (args.length < 2) {
    throw new RuntimeException(
        "At least two arguments are needed: LM file name and LM feature name");
  }
  // Load the LM and cache its sentence-boundary tokens.
  this.lm = LanguageModelFactory.load(args[0]);
  this.startToken = lm.getStartToken();
  this.endToken = lm.getEndToken();
  // Set the feature name
  this.featureName = args[1];
  // Named parameters
  Properties options = FeatureUtils.argsToProperties(args);
  this.isClassBased = PropertiesUtils.getBool(options, "classBased", false);
  if (!isClassBased) {
    this.targetClassMap = null;
  } else if (options.containsKey("classMap")) {
    // A local class map that differs from the one specified by Phrasal.TARGET_CLASS_MAP
    LocalTargetMap localMap = new LocalTargetMap();
    localMap.load(options.getProperty("classMap"));
    this.targetClassMap = localMap;
  } else {
    this.targetClassMap = TargetClassMap.getInstance();
  }
}
示例8: LexicalReorderingFeaturizer
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Constructor for reflection loading discriminative lexicalized reordering.
 *
 * @param args named parameters; recognized keys are "dynamic", "classes",
 *             "useClasses", "conditionOnConstellations", "countFeatureIndex"
 *             and "lexicalCutoff"
 */
public LexicalReorderingFeaturizer(String...args) {
  Properties options = FeatureUtils.argsToProperties(args);
  // Dynamic mode bypasses the discriminative feature machinery entirely:
  // only the fixed msd-bidirectional feature tags are prepared.
  this.dynamic = PropertiesUtils.getBool(options, "dynamic", false);
  if (dynamic) {
    this.discriminativeSet = null;
    this.mlrt = null;
    // One tag per msd-bidirectional orientation, formatted "FEATURE_PREFIX:m".
    this.featureTags = Arrays.stream(LexicalReorderingTable.msdBidirectionalPositionMapping).map(m ->
        String.format("%s:%s", FEATURE_PREFIX, m)).toArray(String[]::new);
    this.useAlignmentConstellations = false;
    this.useClasses = false;
    this.countFeatureIndex = -1;
    this.lexicalCutoff = 0;
  } else {
    // Discriminative mode: start from the full set of reordering types;
    // "classes" below may replace it with an explicit subset.
    this.discriminativeSet = new ArrayList<>(Arrays.asList(LexicalReorderingTable.ReorderingTypes.values()));
    this.useAlignmentConstellations = options.containsKey("conditionOnConstellations");
    this.countFeatureIndex = PropertiesUtils.getInt(options, "countFeatureIndex", -1);
    // Which reordering classes to extract
    if (options.containsKey("classes")) {
      // "classes" is a '-'-separated list of ReorderingTypes enum names.
      String[] typeStrings = options.getProperty("classes").split("-");
      discriminativeSet = new ArrayList<>();
      for (String type : typeStrings) {
        discriminativeSet.add(LexicalReorderingTable.ReorderingTypes.valueOf(type));
      }
    }
    // Use class-based feature representations
    this.useClasses = options.containsKey("useClasses");
    if (useClasses) {
      sourceMap = SourceClassMap.getInstance();
      targetMap = TargetClassMap.getInstance();
    }
    this.mlrt = null;
    this.featureTags = null;
    this.lexicalCutoff = PropertiesUtils.getInt(options, "lexicalCutoff", 0);
  }
}
示例9: getTokenizer
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Builds a word-segmenting tokenizer over {@code r}. A non-null
 * {@code extraOptions} string may override the factory's newline handling
 * via the "tokenizeNLs" key.
 */
public Tokenizer<HasWord> getTokenizer(Reader r, String extraOptions) {
  boolean splitNewlines = this.tokenizeNLs;
  if (extraOptions != null) {
    Properties parsed = StringUtils.stringToProperties(extraOptions);
    splitNewlines = PropertiesUtils.getBool(parsed, "tokenizeNLs", this.tokenizeNLs);
  }
  return new WordSegmentingTokenizer(segmenter,
      WhitespaceTokenizer.newCoreLabelWhitespaceTokenizer(r, splitNewlines));
}
示例10: StanbolDeterministicCorefAnnotator
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
public StanbolDeterministicCorefAnnotator(Properties props) {
  try {
    // Core coref machinery: the sieve system plus a mention extractor built
    // from its dictionaries and semantics.
    corefSystem = new SieveCoreferenceSystem(props);
    mentionExtractor = new MentionExtractor(corefSystem.dictionaries(), corefSystem.semantics());
    // "oldCorefFormat" selects the legacy output representation; defaults to false.
    OLD_FORMAT = Boolean.parseBoolean(props.getProperty("oldCorefFormat", "false"));
    allowReparsing = PropertiesUtils.getBool(props, Constants.ALLOW_REPARSING_PROP, Constants.ALLOW_REPARSING);
  } catch (Exception e) {
    // Construction failure is fatal for this annotator: report it, then
    // rethrow as unchecked with the original cause preserved.
    System.err.println("ERROR: cannot create DeterministicCorefAnnotator!");
    e.printStackTrace();
    throw new RuntimeException(e);
  }
}
示例11: POSTaggerAnnotator
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
public POSTaggerAnnotator(String annotatorName, Properties props) {
String posLoc = props.getProperty(annotatorName + ".model");
if (posLoc == null) {
posLoc = DefaultPaths.DEFAULT_POS_MODEL;
}
boolean verbose = PropertiesUtils.getBool(props, annotatorName + ".verbose", false);
this.pos = loadModel(posLoc, verbose);
this.maxSentenceLength = PropertiesUtils.getInt(props, annotatorName + ".maxlen", Integer.MAX_VALUE);
this.nThreads = PropertiesUtils.getInt(props, annotatorName + ".nthreads", PropertiesUtils.getInt(props, "nthreads", 1));
}
示例12: NERClassifierCombiner
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
public NERClassifierCombiner(Properties props)
  throws FileNotFoundException
{
  super(props);
  // Whether to run the rule-based numeric classifiers on top of the base combiner.
  applyNumericClassifiers = PropertiesUtils.getBool(props, APPLY_NUMERIC_CLASSIFIERS_PROPERTY, APPLY_NUMERIC_CLASSIFIERS_DEFAULT);
  // Must be read before constructing nsc below, which consumes the flag.
  useSUTime = PropertiesUtils.getBool(props, NumberSequenceClassifier.USE_SUTIME_PROPERTY, NumberSequenceClassifier.USE_SUTIME_DEFAULT);
  nsc = new NumberSequenceClassifier(new Properties(), useSUTime, props);
}
示例13: getTokenizer
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Builds a whitespace tokenizer over {@code r}; the "tokenizeNLs" key in
 * {@code extraOptions} overrides the factory's newline-tokenization default.
 */
public Tokenizer<T> getTokenizer(Reader r, String extraOptions) {
  Properties parsed = StringUtils.stringToProperties(extraOptions);
  boolean splitNewlines = PropertiesUtils.getBool(parsed, "tokenizeNLs", this.tokenizeNLs);
  return new WhitespaceTokenizer<T>(factory, r, splitNewlines);
}
示例14: main
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Computes corpus BLEU for translations read from stdin against the
 * reference files named as bare command-line arguments, then prints the
 * score, per-order n-gram precisions, brevity penalty, and precision counts.
 *
 * @param args command-line options plus reference filenames
 * @throws IOException if stdin cannot be read
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    System.err.print(usage());
    System.exit(-1);
  }
  Properties options = StringUtils.argsToProperties(args, argDefs());
  int BLEUOrder = PropertiesUtils.getInt(options, "order", BLEUMetric.DEFAULT_MAX_NGRAM_ORDER);
  boolean doSmooth = PropertiesUtils.getBool(options, "smooth", false);
  boolean disableTokenization = PropertiesUtils.getBool(options, "no-nist", false);
  boolean doCased = PropertiesUtils.getBool(options, "cased", false);

  // Setup the metric tokenization scheme. Applies to both the references and
  // hypotheses
  if (doCased) {
    NISTTokenizer.lowercase(false);
  }
  NISTTokenizer.normalize( ! disableTokenization);

  // Load the references (bare arguments are stored under the "" key)
  String[] refs = options.getProperty("").split("\\s+");
  System.out.printf("Metric: BLEU-%d with %d references%n", BLEUOrder, refs.length);
  List<List<Sequence<IString>>> referencesList = MetricUtils.readReferences(refs, true);

  // For backwards compatibility
  doSmooth |= System.getProperty("smoothBLEU") != null;

  BLEUMetric<IString, String> bleu =
      new BLEUMetric<IString, String>(referencesList, BLEUOrder, doSmooth);
  BLEUMetric<IString, String>.BLEUIncrementalMetric incMetric = bleu.getIncrementalMetric();

  LineNumberReader reader = new LineNumberReader(new InputStreamReader(System.in));
  String line;
  while ((line = reader.readLine()) != null) {
    line = NISTTokenizer.tokenize(line);
    Sequence<IString> translation = IStrings.tokenize(line);
    ScoredFeaturizedTranslation<IString, String> tran =
        new ScoredFeaturizedTranslation<IString, String>(translation, null, 0);
    incMetric.add(tran);
  }

  // Check for an incomplete set of translations
  if (reader.getLineNumber() < referencesList.size()) {
    System.err.printf("WARNING: Translation candidate file is shorter than references (%d/%d)%n",
        reader.getLineNumber(), referencesList.size());
  }
  reader.close();

  double[] ngramPrecisions = incMetric.ngramPrecisions();
  System.out.printf("BLEU = %.3f, ", 100 * incMetric.score());
  for (int i = 0; i < ngramPrecisions.length; i++) {
    if (i > 0) {
      System.out.print("/");
    }
    System.out.printf("%.3f", ngramPrecisions[i] * 100);
  }
  System.out.printf(" (BP=%.3f, ratio=%.3f %d/%d)%n",
      incMetric.brevityPenalty(),
      ((1.0 * incMetric.candidateLength()) / incMetric.effectiveReferenceLength()),
      incMetric.candidateLength(),
      incMetric.effectiveReferenceLength());
  System.out.printf("%nPrecision Details:%n");
  double[][] precCounts = incMetric.ngramPrecisionCounts();
  for (int i = 0; i < ngramPrecisions.length; i++) {
    System.out.printf("\t%d:%d/%d%n", i, (int) precCounts[i][0], (int) precCounts[i][1]);
  }
}
示例15: main
import edu.stanford.nlp.util.PropertiesUtils; //导入方法依赖的package包/类
/**
 * Generates dependency language-model training data from a CoNLL dependency
 * file, writing events to "deplm.data" and non-events to "deplm.nonevents"
 * in the output directory.
 *
 * Options: -input, -outdir, -alignment, -sourceTokens, -targetTokens,
 * -classMap, -headClasses.
 *
 * @param args command-line arguments
 * @throws IOException if any input or output file cannot be accessed
 */
public static void main(String[] args) throws IOException {
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  String dependenciesFilename = PropertiesUtils.get(options, "input", null, String.class);
  String outdirPath = PropertiesUtils.get(options, "outdir", ".", String.class);
  String alignmentFilename = PropertiesUtils.get(options, "alignment", null, String.class);
  String sourceTokensFilename = PropertiesUtils.get(options, "sourceTokens", null, String.class);
  String targetTokensFilename = PropertiesUtils.get(options, "targetTokens", null, String.class);
  String rightDepLMFilename = outdirPath + File.separator + "deplm.nonevents";
  String leftDepLMFilename = outdirPath + File.separator + "deplm.data";
  String classMapFilename = PropertiesUtils.get(options, "classMap", null, String.class);
  useHeadClasses = PropertiesUtils.getBool(options, "headClasses", false);

  if (classMapFilename != null) {
    System.err.println("Loading word class mapping from " + classMapFilename);
    classMap = new LocalWordClassMap();
    classMap.load(classMapFilename);
  } else {
    classMap = null;
  }

  /* Include alignment information and generate a "FRAG" tuple for each unaligned word instead of the real one. */
  boolean includeAlignment = (alignmentFilename != null && sourceTokensFilename != null);
  LineNumberReader alignmentReader = null;
  LineNumberReader sourceTokensReader = null;
  LineNumberReader targetTokensReader = null;
  if (includeAlignment) {
    alignmentReader = IOTools.getReaderFromFile(alignmentFilename);
    sourceTokensReader = IOTools.getReaderFromFile(sourceTokensFilename);
    targetTokensReader = IOTools.getReaderFromFile(targetTokensFilename);
  }

  // FileWriter creates missing files itself, so the original
  // exists()/createNewFile() calls were redundant and have been dropped.
  File leftDepLMFile = new File(leftDepLMFilename);
  File rightDepLMFile = new File(rightDepLMFilename);
  lmWriter = new BufferedWriter(new FileWriter(leftDepLMFile.getAbsoluteFile()));
  noEventWriter = new BufferedWriter(new FileWriter(rightDepLMFile.getAbsoluteFile()));

  LineNumberReader inputReader = IOTools.getReaderFromFile(dependenciesFilename);
  HashMap<Integer, Pair<IndexedWord, List<Integer>>> dependencies = null;
  while ((dependencies = DependencyUtils.getDependenciesFromCoNLLFileReader(inputReader, false, true)) != null) {
    SymmetricalWordAlignment alignment = null;
    if (includeAlignment) {
      // One alignment/source/target line per dependency graph; the three
      // files are assumed line-parallel with the CoNLL input — TODO confirm.
      alignment = new SymmetricalWordAlignment(sourceTokensReader.readLine(), targetTokensReader.readLine(), alignmentReader.readLine());
    }
    updateCounts(dependencies, alignment);
  }

  // BUGFIX: close the alignment/source/target readers, which the original leaked.
  if (includeAlignment) {
    alignmentReader.close();
    sourceTokensReader.close();
    targetTokensReader.close();
  }
  inputReader.close();
  lmWriter.close();
  noEventWriter.close();
  //headLmWriter.close();
}