This article collects and organizes typical usage examples of the Java method edu.stanford.nlp.util.PropertiesUtils.getInt. If you are wondering what PropertiesUtils.getInt does, how to call it, or what real-world uses of it look like, the curated code samples below should help. You can also explore further usage examples of its containing class, edu.stanford.nlp.util.PropertiesUtils.
Below are 15 code examples of the PropertiesUtils.getInt method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
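Before the examples, here is a minimal, self-contained sketch of the method's contract as used throughout this page: getInt(props, key, defaultValue) reads the named property, parses it as an int, and returns the supplied default when the key is absent. The class name and property keys in this sketch are made up for illustration.

import java.util.Properties;
import edu.stanford.nlp.util.PropertiesUtils;

public class GetIntDemo {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("nthreads", "4");
    // Key present: the string "4" is parsed and 4 is returned
    int nThreads = PropertiesUtils.getInt(props, "nthreads", 1);
    // Key absent: the supplied default, 10, is returned instead
    int order = PropertiesUtils.getInt(props, "order", 10);
    System.out.printf("nthreads=%d order=%d%n", nThreads, order);
  }
}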
Example 1: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * A main method for training and evaluating the postprocessor.
 *
 * @param args
 */
public static void main(String[] args) {
  // Strips off hyphens
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  if (options.containsKey("help") || args.length == 0) {
    System.err.println(usage(GermanPostprocessor.class.getName()));
    System.exit(-1);
  }
  int nThreads = PropertiesUtils.getInt(options, "nthreads", 1);
  GermanPreprocessor preProcessor = new GermanPreprocessor();
  GermanPostprocessor postProcessor = new GermanPostprocessor(options);
  CRFPostprocessor.setup(postProcessor, preProcessor, options);
  CRFPostprocessor.execute(nThreads, preProcessor, postProcessor);
}
Example 2: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * A main method for training and evaluating the postprocessor.
 *
 * @param args
 */
public static void main(String[] args) {
  // Strips off hyphens
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  if (options.containsKey("help") || args.length == 0) {
    System.err.println(usage(FrenchPostprocessor.class.getName()));
    System.exit(-1);
  }
  int nThreads = PropertiesUtils.getInt(options, "nthreads", 1);
  FrenchPreprocessor preProcessor = new FrenchPreprocessor();
  FrenchPostprocessor postProcessor = new FrenchPostprocessor(options);
  CRFPostprocessor.setup(postProcessor, preProcessor, options);
  CRFPostprocessor.execute(nThreads, preProcessor, postProcessor);
}
Example 3: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * A main method for training and evaluating the postprocessor.
 *
 * @param args
 */
public static void main(String[] args) {
  // Strips off hyphens
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  if (options.containsKey("help") || args.length == 0) {
    System.err.println(usage(EnglishPostprocessor.class.getName()));
    System.exit(-1);
  }
  int nThreads = PropertiesUtils.getInt(options, "nthreads", 1);
  EnglishPreprocessor preProcessor = new EnglishPreprocessor();
  EnglishPostprocessor postProcessor = new EnglishPostprocessor(options);
  CRFPostprocessor.setup(postProcessor, preProcessor, options);
  CRFPostprocessor.execute(nThreads, preProcessor, postProcessor);
}
Example 4: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * A main method for training and evaluating the postprocessor.
 *
 * @param args
 */
public static void main(String[] args) {
  // Strips off hyphens
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  if (options.containsKey("help") || args.length == 0) {
    System.err.println(usage(SpanishPostprocessor.class.getName()));
    System.exit(-1);
  }
  int nThreads = PropertiesUtils.getInt(options, "nthreads", 1);
  SpanishPreprocessor preProcessor = new SpanishPreprocessor();
  SpanishPostprocessor postProcessor = new SpanishPostprocessor(options);
  CRFPostprocessor.setup(postProcessor, preProcessor, options);
  CRFPostprocessor.execute(nThreads, preProcessor, postProcessor);
}
Example 5: TargetFunctionWordInsertion
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * Constructor.
 *
 * @param args
 */
public TargetFunctionWordInsertion(String... args) {
  Properties options = FeatureUtils.argsToProperties(args);
  if (args.length < 2) {
    throw new RuntimeException("Must specify source and target unigram counts files");
  }
  System.err.println("Loading TargetFunctionWordInsertion template...");
  String sourceFilename = options.getProperty("sourceFile");
  String targetFilename = options.getProperty("targetFile");
  this.rankCutoff = PropertiesUtils.getInt(options, "rankCutoff", DEFAULT_RANK_CUTOFF);
  System.err.println("Source words:");
  sourceFunctionWordSet = loadCountsFile(sourceFilename);
  System.err.println("Target words:");
  targetFunctionWordSet = loadCountsFile(targetFilename);
}
Example 6: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    System.err.print(usage());
    System.exit(-1);
  }
  Properties options = StringUtils.argsToProperties(args, argDefs());
  int ngramOrder = PropertiesUtils.getInt(options, "order", BLEUMetric.DEFAULT_MAX_NGRAM_ORDER);
  boolean disableTokenization = PropertiesUtils.getBool(options, "no-nist", false);
  String metric = options.getProperty("metric", "bleu");
  String[] refs = options.getProperty("").split("\\s+");
  List<List<Sequence<IString>>> referencesList = MetricUtils.readReferences(refs, !disableTokenization);
  System.err.printf("Metric: %s with %d references%n", metric, referencesList.get(0).size());
  LineNumberReader reader = new LineNumberReader(new InputStreamReader(System.in));
  int sourceInputId = 0;
  for (String line; (line = reader.readLine()) != null; ++sourceInputId) {
    line = disableTokenization ? line : NISTTokenizer.tokenize(line);
    Sequence<IString> translation = IStrings.tokenize(line);
    double score = getScore(translation, referencesList.get(sourceInputId), ngramOrder, metric);
    System.out.printf("%.4f%n", score);
  }
  System.err.printf("Scored %d input segments%n", sourceInputId);
}
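A note on the options.getProperty("") call above: StringUtils.argsToProperties, as used here, appears to gather the positional (non-flag) command-line arguments under the empty-string property key, which is why the reference file names are recovered by reading that key and splitting on whitespace. The same idiom shows up again in Examples 11, 12, and 13.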
Example 7: LexicalReorderingFeaturizer
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * Constructor for reflection loading discriminative lexicalized reordering.
 *
 * @param args
 */
public LexicalReorderingFeaturizer(String... args) {
  Properties options = FeatureUtils.argsToProperties(args);
  this.dynamic = PropertiesUtils.getBool(options, "dynamic", false);
  if (dynamic) {
    this.discriminativeSet = null;
    this.mlrt = null;
    this.featureTags = Arrays.stream(LexicalReorderingTable.msdBidirectionalPositionMapping)
        .map(m -> String.format("%s:%s", FEATURE_PREFIX, m)).toArray(String[]::new);
    this.useAlignmentConstellations = false;
    this.useClasses = false;
    this.countFeatureIndex = -1;
    this.lexicalCutoff = 0;
  } else {
    this.discriminativeSet = new ArrayList<>(Arrays.asList(LexicalReorderingTable.ReorderingTypes.values()));
    this.useAlignmentConstellations = options.containsKey("conditionOnConstellations");
    this.countFeatureIndex = PropertiesUtils.getInt(options, "countFeatureIndex", -1);
    // Which reordering classes to extract
    if (options.containsKey("classes")) {
      String[] typeStrings = options.getProperty("classes").split("-");
      discriminativeSet = new ArrayList<>();
      for (String type : typeStrings) {
        discriminativeSet.add(LexicalReorderingTable.ReorderingTypes.valueOf(type));
      }
    }
    // Use class-based feature representations
    this.useClasses = options.containsKey("useClasses");
    if (useClasses) {
      sourceMap = SourceClassMap.getInstance();
      targetMap = TargetClassMap.getInstance();
    }
    this.mlrt = null;
    this.featureTags = null;
    this.lexicalCutoff = PropertiesUtils.getInt(options, "lexicalCutoff", 0);
  }
}
Example 8: RuleIndicator
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * Constructor for reflection loading.
 *
 * @param args
 */
public RuleIndicator(String... args) {
  Properties options = FeatureUtils.argsToProperties(args);
  this.addLexicalizedRule = options.containsKey("addLexicalized");
  this.addClassBasedRule = options.containsKey("addClassBased");
  this.countFeatureIndex = PropertiesUtils.getInt(options, "countFeatureIndex", -1);
  if (addClassBasedRule) {
    sourceMap = SourceClassMap.getInstance();
    targetMap = TargetClassMap.getInstance();
  }
  this.lexicalCutoff = PropertiesUtils.getInt(options, "lexicalCutoff", 0);
}
Example 9: validateCommandLine
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
private static boolean validateCommandLine(String[] args) {
  // Command line parsing
  Properties options = StringUtils.argsToProperties(args, argDefs());
  VERBOSE = options.containsKey("v");
  SRC_FILE = options.getProperty("s", null);
  OPTS_FILE = options.getProperty("o", null);
  XSD_FILE = options.getProperty("x", null);
  FIRST_ID = PropertiesUtils.getInt(options, "f", Integer.MIN_VALUE);
  LAST_ID = PropertiesUtils.getInt(options, "l", Integer.MAX_VALUE);
  return true;
}
Example 10: POSTaggerAnnotator
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public POSTaggerAnnotator(String annotatorName, Properties props) {
  String posLoc = props.getProperty(annotatorName + ".model");
  if (posLoc == null) {
    posLoc = DefaultPaths.DEFAULT_POS_MODEL;
  }
  boolean verbose = PropertiesUtils.getBool(props, annotatorName + ".verbose", false);
  this.pos = loadModel(posLoc, verbose);
  this.maxSentenceLength = PropertiesUtils.getInt(props, annotatorName + ".maxlen", Integer.MAX_VALUE);
  this.nThreads = PropertiesUtils.getInt(props, annotatorName + ".nthreads", PropertiesUtils.getInt(props, "nthreads", 1));
}
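The nested getInt call on the last line implements a two-level default: the annotator-specific key wins; failing that, the global key; failing that, a literal fallback. A minimal sketch of the lookup order, with hypothetical property keys:

Properties props = new Properties();
props.setProperty("nthreads", "4"); // only the global setting is present
// "pos.nthreads" is absent, so the inner call supplies the default and
// this resolves to the global value 4; with neither key set, it would be 1.
int nThreads = PropertiesUtils.getInt(props, "pos.nthreads",
    PropertiesUtils.getInt(props, "nthreads", 1));

The same layered-default idiom appears again in Examples 14 and 15.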
Example 11: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    System.err.print(usage());
    System.exit(-1);
  }
  Properties options = StringUtils.argsToProperties(args, argDefs());
  int BLEUOrder = PropertiesUtils.getInt(options, "order", BLEUMetric.DEFAULT_MAX_NGRAM_ORDER);
  boolean doSmooth = PropertiesUtils.getBool(options, "smooth", false);
  boolean disableTokenization = PropertiesUtils.getBool(options, "no-nist", false);
  boolean doCased = PropertiesUtils.getBool(options, "cased", false);
  // Set up the metric tokenization scheme. Applies to both the references
  // and the hypotheses.
  if (doCased) NISTTokenizer.lowercase(false);
  NISTTokenizer.normalize(!disableTokenization);
  // Load the references
  String[] refs = options.getProperty("").split("\\s+");
  System.out.printf("Metric: BLEU-%d with %d references%n", BLEUOrder, refs.length);
  List<List<Sequence<IString>>> referencesList = MetricUtils.readReferences(refs, true);
  // For backwards compatibility
  doSmooth |= System.getProperty("smoothBLEU") != null;
  BLEUMetric<IString, String> bleu = new BLEUMetric<IString, String>(referencesList, BLEUOrder, doSmooth);
  BLEUMetric<IString, String>.BLEUIncrementalMetric incMetric = bleu.getIncrementalMetric();
  LineNumberReader reader = new LineNumberReader(new InputStreamReader(System.in));
  for (String line; (line = reader.readLine()) != null; ) {
    line = NISTTokenizer.tokenize(line);
    Sequence<IString> translation = IStrings.tokenize(line);
    ScoredFeaturizedTranslation<IString, String> tran = new ScoredFeaturizedTranslation<IString, String>(
        translation, null, 0);
    incMetric.add(tran);
  }
  // Check for an incomplete set of translations
  if (reader.getLineNumber() < referencesList.size()) {
    System.err.printf("WARNING: Translation candidate file is shorter than references (%d/%d)%n",
        reader.getLineNumber(), referencesList.size());
  }
  reader.close();
  double[] ngramPrecisions = incMetric.ngramPrecisions();
  System.out.printf("BLEU = %.3f, ", 100 * incMetric.score());
  for (int i = 0; i < ngramPrecisions.length; i++) {
    if (i != 0) {
      System.out.print("/");
    }
    System.out.printf("%.3f", ngramPrecisions[i] * 100);
  }
  System.out.printf(" (BP=%.3f, ratio=%.3f %d/%d)%n", incMetric.brevityPenalty(),
      ((1.0 * incMetric.candidateLength()) / incMetric.effectiveReferenceLength()),
      incMetric.candidateLength(), incMetric.effectiveReferenceLength());
  System.out.printf("%nPrecision Details:%n");
  double[][] precCounts = incMetric.ngramPrecisionCounts();
  for (int i = 0; i < ngramPrecisions.length; i++) {
    System.out.printf("\t%d:%d/%d%n", i, (int) precCounts[i][0], (int) precCounts[i][1]);
  }
}
Example 12: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 2) {
    System.err.print(usage());
    System.exit(-1);
  }
  Properties options = StringUtils.argsToProperties(args, argDefs());
  int BLEUOrder = PropertiesUtils.getInt(options, "order", BLEUMetric.DEFAULT_MAX_NGRAM_ORDER);
  boolean doCased = PropertiesUtils.getBool(options, "cased", false);
  // Set up the metric tokenization scheme. Applies to both the references
  // and the hypotheses.
  if (doCased) NISTTokenizer.lowercase(false);
  // Load the references
  String[] parameters = options.getProperty("").split("\\s+");
  String[] refs = new String[parameters.length - 1];
  System.arraycopy(parameters, 1, refs, 0, refs.length);
  List<InputProperties> inputProperties = InputProperties.parse(new File(parameters[0]));
  List<List<Sequence<IString>>> referencesList = MetricUtils.readReferences(refs, true);
  Map<String, BLEUMetric<IString, String>.BLEUIncrementalMetric> metrics =
      BLEUGenreEvaluator.run(referencesList, inputProperties, BLEUOrder, System.in);
  for (Map.Entry<String, BLEUMetric<IString, String>.BLEUIncrementalMetric> entry : metrics.entrySet()) {
    String genre = entry.getKey();
    BLEUMetric<IString, String>.BLEUIncrementalMetric incMetric = entry.getValue();
    System.out.printf("Genre: %s%n", genre);
    double[] ngramPrecisions = incMetric.ngramPrecisions();
    System.out.printf("BLEU = %.3f, ", 100 * incMetric.score());
    for (int i = 0; i < ngramPrecisions.length; i++) {
      if (i != 0) {
        System.out.print("/");
      }
      System.out.printf("%.3f", ngramPrecisions[i] * 100);
    }
    System.out.printf(" (BP=%.3f, ratio=%.3f %d/%d)%n", incMetric.brevityPenalty(),
        ((1.0 * incMetric.candidateLength()) / incMetric.effectiveReferenceLength()),
        incMetric.candidateLength(), incMetric.effectiveReferenceLength());
    System.out.printf("%nPrecision Details:%n");
    double[][] precCounts = incMetric.ngramPrecisionCounts();
    for (int i = 0; i < ngramPrecisions.length; i++) {
      System.out.printf("\t%d:%d/%d%n", i, (int) precCounts[i][0], (int) precCounts[i][1]);
    }
    System.out.println();
  }
}
Example 13: main
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * Start the service.
 *
 * @param args
 */
public static void main(String[] args) {
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  int port = PropertiesUtils.getInt(options, "p", DEFAULT_HTTP_PORT);
  boolean loadMockServlet = PropertiesUtils.getBool(options, "m", false);
  boolean localHost = PropertiesUtils.getBool(options, "l", false);
  String uiFile = options.getProperty("u", "debug.html");
  String resourcePath = options.getProperty("r", ".");
  // Parse arguments
  String argList = options.getProperty("", null);
  String[] parsedArgs = argList == null ? null : argList.split("\\s+");
  if (parsedArgs == null || parsedArgs.length != 1) {
    System.out.println(usage());
    System.exit(-1);
  }
  String phrasalIniFile = parsedArgs[0];
  // Set up the Jetty server
  Server server = new Server();
  // Jetty 8 way of configuring the server:
  //   Connector connector = new SelectChannelConnector();
  //   connector.setPort(port);
  //   server.addConnector(connector);
  // Jetty 9 way of configuring the server:
  ServerConnector connector = new ServerConnector(server);
  connector.setPort(port);
  server.addConnector(connector);
  if (localHost) {
    connector.setHost(DEBUG_URL);
  }
  // Set up the servlet context
  ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
  context.setContextPath("/");
  // Add the Phrasal servlet
  PhrasalServlet servlet = loadMockServlet ? new PhrasalServlet() : new PhrasalServlet(phrasalIniFile);
  context.addServlet(new ServletHolder(servlet), SERVLET_ROOT);
  // TODO(spenceg): gzip compression causes an encoding problem for unicode characters
  // on the client. Not sure if the compression or decompression is the problem.
  //   EnumSet<DispatcherType> dispatches = EnumSet.of(DispatcherType.REQUEST, DispatcherType.ASYNC);
  //   context.addFilter(new FilterHolder(new IncludableGzipFilter()), "/t", dispatches);
  // Add the debugging web page
  ResourceHandler resourceHandler = new ResourceHandler();
  resourceHandler.setWelcomeFiles(new String[]{ uiFile });
  resourceHandler.setResourceBase(resourcePath);
  HandlerList handlers = new HandlerList();
  handlers.setHandlers(new Handler[] { resourceHandler, context });
  server.setHandler(handlers);
  // Start the service
  try {
    logger.info("Starting PhrasalService on port: " + String.valueOf(port));
    server.start();
    server.join();
  } catch (Exception e) {
    logger.error("Servlet crashed. Service shutting down.");
    e.printStackTrace();
  }
}
Example 14: NERCustomAnnotator
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public NERCustomAnnotator(String name, Properties properties) {
  this(NERClassifierCombiner.createNERClassifierCombiner(name, properties), false,
      PropertiesUtils.getInt(properties, name + ".nthreads", PropertiesUtils.getInt(properties, "nthreads", 1)),
      PropertiesUtils.getLong(properties, name + ".maxtime", -1),
      PropertiesUtils.getInt(properties, name + ".maxlength", Integer.MAX_VALUE));
}
Example 15: OwnPOSTaggerAnnotator
import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public OwnPOSTaggerAnnotator(String annotatorName, Properties props) {
  boolean verbose = PropertiesUtils.getBool(props, annotatorName + ".verbose", false);
  this.pos = loadModel(annotatorName, verbose);
  this.maxSentenceLength = PropertiesUtils.getInt(props, annotatorName + ".maxlen", Integer.MAX_VALUE);
  this.nThreads = PropertiesUtils.getInt(props, annotatorName + ".nthreads", PropertiesUtils.getInt(props, "nthreads", 1));
}