

Java PropertiesUtils.getBool Method Code Examples

This article collects representative usage examples of the Java method edu.stanford.nlp.util.PropertiesUtils.getBool. If you are wondering what PropertiesUtils.getBool does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore other usages of the enclosing class, edu.stanford.nlp.util.PropertiesUtils.


The sections below present 15 code examples of the PropertiesUtils.getBool method, sorted by popularity by default.
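
The signature used throughout the examples below is PropertiesUtils.getBool(Properties props, String key, boolean defaultValue): it looks up key in the given java.util.Properties, parses the value as a boolean, and falls back to defaultValue when the key is absent. As a quick orientation before the real-world examples, here is a minimal, self-contained sketch; the property names and values are invented for illustration.

import java.util.Properties;
import edu.stanford.nlp.util.PropertiesUtils;

public class GetBoolDemo {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("segment.verbose", "false");   // hypothetical property name/value

    // Key present: the property value is parsed as a boolean
    boolean verbose = PropertiesUtils.getBool(props, "segment.verbose", true);   // false

    // Key absent: the supplied default is returned
    boolean debug = PropertiesUtils.getBool(props, "segment.debug", false);      // false

    System.out.println("verbose=" + verbose + ", debug=" + debug);
  }
}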

Example 1: ArabicSegmentorAnnotator

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public ArabicSegmentorAnnotator(String name, Properties props) {
    // We are only interested in {name}.* properties
    String prefix = name + '.';
    String model = null;
    Properties segProps = new Properties();
    for (String key : props.stringPropertyNames()) {
        if (key.startsWith(prefix)) {
            // skip past name and the subsequent "."
            String modelKey = key.substring(prefix.length());
            if (modelKey.equals("model")) {
                model = props.getProperty(key);
            } else {
                segProps.setProperty(modelKey, props.getProperty(key));
            }
        }
    }
    this.VERBOSE = PropertiesUtils.getBool(props, name + ".verbose", true);
    init(model, segProps);
}
 
Developer: westei, Project: stanbol-stanfordnlp, Lines: 21, Source: ArabicSegmentorAnnotator.java

Example 2: ChineseSegmenterAnnotator

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public ChineseSegmenterAnnotator(String name, Properties props) {
  String model = null;
  // Keep only the properties that apply to this annotator
  Properties modelProps = new Properties();
  for (String key : props.stringPropertyNames()) {
    if (key.startsWith(name + ".")) {
      // skip past name and the subsequent "."
      String modelKey = key.substring(name.length() + 1);
      if (modelKey.equals("model")) {
        model = props.getProperty(key);
      } else {
        modelProps.setProperty(modelKey, props.getProperty(key));
      }
    }
  }
  this.VERBOSE = PropertiesUtils.getBool(props, name + ".verbose", true);
  if (model == null) {
    throw new RuntimeException("Expected a property " + name + ".model");
  }
  loadModel(model, modelProps);
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 22, Source: ChineseSegmenterAnnotator.java
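
Examples 1 and 2 follow the same configuration pattern: the annotator consumes only the properties that start with its own name, strips that prefix before forwarding them to the underlying segmenter, and reads its verbose flag via PropertiesUtils.getBool with a default of true. A hedged sketch of the Properties that would drive such a constructor is shown below; the annotator name "segment", the model path, the extra key, and the edu.stanford.nlp.pipeline import path are assumptions made for illustration.

import java.util.Properties;
import edu.stanford.nlp.pipeline.ChineseSegmenterAnnotator;

public class SegmenterConfigDemo {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("segment.model", "/path/to/segmenter-model.ser.gz");  // hypothetical model path
    props.setProperty("segment.verbose", "false");            // read via getBool; defaults to true when omitted
    props.setProperty("segment.someSegmenterOption", "42");   // hypothetical key, forwarded to the segmenter as "someSegmenterOption"

    // "segment" is passed as the annotator name, so only "segment.*" keys are considered by the constructor.
    ChineseSegmenterAnnotator annotator = new ChineseSegmenterAnnotator("segment", props);
  }
}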

Example 3: ArabicLexer

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public ArabicLexer(Reader r, LexedTokenFactory<?> tf, Properties props) {
  this(r);
  this.tokenFactory = tf;
  
  tokenizeNL = PropertiesUtils.getBool(props, "tokenizeNLs", false);
  useUTF8Ellipsis = PropertiesUtils.getBool(props, "useUTF8Ellipsis", false);
  invertible = PropertiesUtils.getBool(props, "invertible", false);
  normArDigits = PropertiesUtils.getBool(props, "normArDigits", false);
  normArPunc = PropertiesUtils.getBool(props, "normArPunc", false);
  normAlif = PropertiesUtils.getBool(props, "normAlif", false);
  normYa = PropertiesUtils.getBool(props, "normYa", false);
  removeDiacritics = PropertiesUtils.getBool(props, "removeDiacritics", false);
  removeTatweel = PropertiesUtils.getBool(props, "removeTatweel", false);
  removeQuranChars = PropertiesUtils.getBool(props, "removeQuranChars", false);
  removeProMarker = PropertiesUtils.getBool(props, "removeProMarker", false);
  removeSegMarker = PropertiesUtils.getBool(props, "removeSegMarker", false);
  removeMorphMarker = PropertiesUtils.getBool(props, "removeMorphMarker", false);
  atbEscaping = PropertiesUtils.getBool(props, "atbEscaping", false);

  setupNormalizationMap();
}
 
Developer: chbrown, Project: stanford-parser, Lines: 22, Source: ArabicLexer.java

Example 4: JMWEAnnotator

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * Annotator to capture Multi-Word Expressions (MWE).
 * @param name
 *            annotator name
 * @param props
 *            the properties
 */
public JMWEAnnotator(String name, Properties props) {
    // set verbosity
    this.verbose = PropertiesUtils.getBool(props, "customAnnotatorClass.jmwe.verbose", false);
    // set underscoreSpaceReplacement
    if (!PropertiesUtils.hasProperty(props, "customAnnotatorClass.jmwe.underscoreReplacement")) {
        throw new RuntimeException("No customAnnotatorClass.jmwe.underscoreReplacement key in properties found");
    }
    underscoreSpaceReplacement = (String) props.get("customAnnotatorClass.jmwe.underscoreReplacement");
    if (underscoreSpaceReplacement.contains("_")) {
        throw new RuntimeException("The underscoreReplacement contains an underscore character");
    }
    // set index
    if (!PropertiesUtils.hasProperty(props, "customAnnotatorClass.jmwe.indexData")) {
        throw new RuntimeException("No customAnnotatorClass.jmwe.indexData key in properties found");
    }
    File indexFile = new File((String) props.get("customAnnotatorClass.jmwe.indexData"));
    if (!indexFile.exists()) {
        throw new RuntimeException("index file " + indexFile.getAbsoluteFile() + " does not exist");
    }

    this.index = new MWEIndex(indexFile);
    // set detector
    if (!PropertiesUtils.hasProperty(props, "customAnnotatorClass.jmwe.detector")) {
        throw new RuntimeException("No customAnnotatorClass.jmwe.detector key in properties found");
    }
    this.detectorName = (String) props.get("customAnnotatorClass.jmwe.detector");

    if (this.verbose) {
        System.out.println("verbose: " + this.verbose);
        System.out.println("underscoreReplacement: " + this.underscoreSpaceReplacement);
        System.out.println("indexData: " + this.index);
        System.out.println("detectorName: " + this.detectorName);
    }
}
 
Developer: toliwa, Project: CoreNLP-jMWE, Lines: 42, Source: JMWEAnnotator.java

Example 5: main

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public static void main(String[] args) {

    Properties options = StringUtils.argsToProperties(args, optionArgDefs());
    String annotations = PropertiesUtils.get(options, "annotations", null, String.class);
    
    boolean changepreps = PropertiesUtils.getBool(options, "changepreps", false);
    
    int sentenceCount = CoreNLPCache.loadSerialized(annotations);

    CoreMap sentence;
    for (int i = 0; i < sentenceCount; i++) {
      try {  
        sentence = CoreNLPCache.get(i);
        if (sentence == null) {
          System.out.println();
          System.err.println("Empty sentence #" + i);
          continue;
        }
        printDependencies(sentence, changepreps);
        //System.err.println("---------------------------");
      } catch (Exception e) {
        System.err.println("SourceSentence #" + i);
        e.printStackTrace();
        return;
      }
    }
  }
 
Developer: stanfordnlp, Project: phrasal, Lines: 29, Source: SerializedDependencyToCoNLL.java

Example 6: main

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * 
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    System.err.print(usage());
    System.exit(-1);
  }
  
  Properties options = StringUtils.argsToProperties(args, argDefs());
  int ngramOrder = PropertiesUtils.getInt(options, "order", BLEUMetric.DEFAULT_MAX_NGRAM_ORDER);
  boolean disableTokenization = PropertiesUtils.getBool(options, "no-nist", false);
  String metric = options.getProperty("metric", "bleu");

  String[] refs = options.getProperty("").split("\\s+");
  List<List<Sequence<IString>>> referencesList = MetricUtils.readReferences(refs, ! disableTokenization);
  System.err.printf("Metric: %s with %d references%n", metric, referencesList.get(0).size());
  
  LineNumberReader reader = new LineNumberReader(new InputStreamReader(
      System.in));
  int sourceInputId = 0;
  for (String line; (line = reader.readLine()) != null; ++sourceInputId) {
    line = disableTokenization ? line : NISTTokenizer.tokenize(line);
    Sequence<IString> translation = IStrings.tokenize(line);
    double score = getScore(translation, referencesList.get(sourceInputId), ngramOrder, metric);
    System.out.printf("%.4f%n", score);
  }
  System.err.printf("Scored %d input segments%n", sourceInputId);
}
 
Developer: stanfordnlp, Project: phrasal, Lines: 32, Source: SentenceLevelEvaluation.java

Example 7: NGramLanguageModelFeaturizer

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * Constructor called by Phrasal when NGramLanguageModelFeaturizer appears in
 * <code>Phrasal.LANGUAGE_MODEL_OPT</code>.
 * 
 * The first argument is always the language model filename and the second
 * argument is always the feature name.
 * 
 * Additional arguments are named parameters.
 */
public NGramLanguageModelFeaturizer(String...args) throws IOException {
  if (args.length < 2) {
    throw new RuntimeException(
        "At least two arguments are needed: LM file name and LM feature name");
  }
  // Load the LM
  this.lm = LanguageModelFactory.load(args[0]);
  this.startToken = lm.getStartToken();
  this.endToken = lm.getEndToken();

  // Set the feature name
  this.featureName = args[1];

  // Named parameters
  Properties options = FeatureUtils.argsToProperties(args);
  this.isClassBased = PropertiesUtils.getBool(options, "classBased", false);
  if (isClassBased && options.containsKey("classMap")) {
    // A local class map that differs from the one specified by Phrasal.TARGET_CLASS_MAP
    this.targetClassMap = new LocalTargetMap();
    this.targetClassMap.load(options.getProperty("classMap"));
  } else if (isClassBased) {
    this.targetClassMap = TargetClassMap.getInstance();
  } else {
    this.targetClassMap = null;
  }    
}
 
Developer: stanfordnlp, Project: phrasal, Lines: 36, Source: NGramLanguageModelFeaturizer.java

Example 8: LexicalReorderingFeaturizer

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
/**
 * Constructor for reflection loading discriminative lexicalized reordering.
 * 
 * @param args
 */
public LexicalReorderingFeaturizer(String...args) {
  Properties options = FeatureUtils.argsToProperties(args);
  this.dynamic = PropertiesUtils.getBool(options, "dynamic", false);
  if (dynamic) {
    this.discriminativeSet = null;
    this.mlrt = null;
    this.featureTags = Arrays.stream(LexicalReorderingTable.msdBidirectionalPositionMapping)
        .map(m -> String.format("%s:%s", FEATURE_PREFIX, m))
        .toArray(String[]::new);
    this.useAlignmentConstellations = false;
    this.useClasses = false;
    this.countFeatureIndex = -1;
    this.lexicalCutoff = 0;

  } else {
    this.discriminativeSet = new ArrayList<>(Arrays.asList(LexicalReorderingTable.ReorderingTypes.values()));
    this.useAlignmentConstellations = options.containsKey("conditionOnConstellations");
    this.countFeatureIndex = PropertiesUtils.getInt(options, "countFeatureIndex", -1);
    // Which reordering classes to extract
    if (options.containsKey("classes")) {
      String[] typeStrings = options.getProperty("classes").split("-");
      discriminativeSet = new ArrayList<>();
      for (String type : typeStrings) {
        discriminativeSet.add(LexicalReorderingTable.ReorderingTypes.valueOf(type));
      }
    }
    // Use class-based feature representations
    this.useClasses = options.containsKey("useClasses");
    if (useClasses) {
      sourceMap = SourceClassMap.getInstance();
      targetMap = TargetClassMap.getInstance();
    }
    this.mlrt = null;
    this.featureTags = null;
    this.lexicalCutoff = PropertiesUtils.getInt(options, "lexicalCutoff", 0);
  }
}
 
Developer: stanfordnlp, Project: phrasal, Lines: 42, Source: LexicalReorderingFeaturizer.java

Example 9: getTokenizer

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public Tokenizer<HasWord> getTokenizer(Reader r, String extraOptions) {
  boolean tokenizeNewlines = this.tokenizeNLs;
  if (extraOptions != null) {
    Properties prop = StringUtils.stringToProperties(extraOptions);
    tokenizeNewlines = PropertiesUtils.getBool(prop, "tokenizeNLs", this.tokenizeNLs);
  }

  return new WordSegmentingTokenizer(segmenter, WhitespaceTokenizer.newCoreLabelWhitespaceTokenizer(r, tokenizeNewlines));
}
 
Developer: paulirwin, Project: Stanford.NER.Net, Lines: 10, Source: WordSegmentingTokenizer.java

Example 10: StanbolDeterministicCorefAnnotator

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public StanbolDeterministicCorefAnnotator(Properties props) {
  try {
    corefSystem = new SieveCoreferenceSystem(props);
    mentionExtractor = new MentionExtractor(corefSystem.dictionaries(), corefSystem.semantics());
    OLD_FORMAT = Boolean.parseBoolean(props.getProperty("oldCorefFormat", "false"));
    allowReparsing = PropertiesUtils.getBool(props, Constants.ALLOW_REPARSING_PROP, Constants.ALLOW_REPARSING);
  } catch (Exception e) {
    System.err.println("ERROR: cannot create DeterministicCorefAnnotator!");
    e.printStackTrace();
    throw new RuntimeException(e);
  }
}
 
Developer: westei, Project: stanbol-stanfordnlp, Lines: 13, Source: StanbolDeterministicCorefAnnotator.java

Example 11: POSTaggerAnnotator

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public POSTaggerAnnotator(String annotatorName, Properties props) {
  String posLoc = props.getProperty(annotatorName + ".model");
  if (posLoc == null) {
    posLoc = DefaultPaths.DEFAULT_POS_MODEL;
  }
  boolean verbose = PropertiesUtils.getBool(props, annotatorName + ".verbose", false);
  this.pos = loadModel(posLoc, verbose);
  this.maxSentenceLength = PropertiesUtils.getInt(props, annotatorName + ".maxlen", Integer.MAX_VALUE);
  this.nThreads = PropertiesUtils.getInt(props, annotatorName + ".nthreads", PropertiesUtils.getInt(props, "nthreads", 1));
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 11, Source: POSTaggerAnnotator.java
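
Example 11 also illustrates a layered-default idiom: the annotator-specific key is consulted first, and the default passed to PropertiesUtils.getInt is itself another lookup against a global key, so a pipeline-wide setting applies unless the annotator overrides it. The short sketch below walks through that fallback chain; the annotator name "pos" and the property values are assumptions made for illustration.

import java.util.Properties;
import edu.stanford.nlp.util.PropertiesUtils;

public class LayeredDefaultsDemo {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("nthreads", "4");  // global setting only; no annotator-specific "pos.nthreads"

    // Annotator-specific key wins if present, otherwise the global key, otherwise the literal default 1.
    int nThreads = PropertiesUtils.getInt(props, "pos.nthreads",
        PropertiesUtils.getInt(props, "nthreads", 1));                       // -> 4

    boolean verbose = PropertiesUtils.getBool(props, "pos.verbose", false);  // -> false (key absent)

    System.out.println("nThreads=" + nThreads + ", verbose=" + verbose);
  }
}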

Example 12: NERClassifierCombiner

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public NERClassifierCombiner(Properties props)
  throws FileNotFoundException
{
  super(props);
  applyNumericClassifiers = PropertiesUtils.getBool(props, APPLY_NUMERIC_CLASSIFIERS_PROPERTY, APPLY_NUMERIC_CLASSIFIERS_DEFAULT);
  useSUTime = PropertiesUtils.getBool(props, NumberSequenceClassifier.USE_SUTIME_PROPERTY, NumberSequenceClassifier.USE_SUTIME_DEFAULT);
  nsc = new NumberSequenceClassifier(new Properties(), useSUTime, props);
}
 
Developer: benblamey, Project: stanford-nlp, Lines: 9, Source: NERClassifierCombiner.java

Example 13: getTokenizer

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public Tokenizer<T> getTokenizer(Reader r, String extraOptions) {
  Properties prop = StringUtils.stringToProperties(extraOptions);
  boolean tokenizeNewlines = 
    PropertiesUtils.getBool(prop, "tokenizeNLs", this.tokenizeNLs);

  return new WhitespaceTokenizer<T>(factory, r, tokenizeNewlines);
}
 
Developer: amark-india, Project: eventspotter, Lines: 8, Source: WhitespaceTokenizer.java

Example 14: main

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    System.err.print(usage());
    System.exit(-1);
  }

  Properties options = StringUtils.argsToProperties(args, argDefs());
  int BLEUOrder = PropertiesUtils.getInt(options, "order", BLEUMetric.DEFAULT_MAX_NGRAM_ORDER);
  boolean doSmooth = PropertiesUtils.getBool(options, "smooth", false);
  boolean disableTokenization = PropertiesUtils.getBool(options, "no-nist", false);
  boolean doCased = PropertiesUtils.getBool(options, "cased", false);

  // Setup the metric tokenization scheme. Applies to both the references and
  // hypotheses
  if (doCased) NISTTokenizer.lowercase(false);
  NISTTokenizer.normalize( ! disableTokenization);

  // Load the references
  String[] refs = options.getProperty("").split("\\s+");
  System.out.printf("Metric: BLEU-%d with %d references%n", BLEUOrder, refs.length);
  List<List<Sequence<IString>>> referencesList = MetricUtils.readReferences(refs, true);

  // For backwards compatibility
  doSmooth |= System.getProperty("smoothBLEU") != null;

  BLEUMetric<IString, String> bleu = new BLEUMetric<IString, String>(referencesList, BLEUOrder,
        doSmooth);
  BLEUMetric<IString, String>.BLEUIncrementalMetric incMetric = bleu
      .getIncrementalMetric();

  LineNumberReader reader = new LineNumberReader(new InputStreamReader(
      System.in));
  for (String line; (line = reader.readLine()) != null; ) {
    line = NISTTokenizer.tokenize(line);
    Sequence<IString> translation = IStrings.tokenize(line);
    ScoredFeaturizedTranslation<IString, String> tran = new ScoredFeaturizedTranslation<IString, String>(
        translation, null, 0);
    incMetric.add(tran);
  }
  // Check for an incomplete set of translations
  if (reader.getLineNumber() < referencesList.size()) {
    System.err.printf("WARNING: Translation candidate file is shorter than references (%d/%d)%n", 
        reader.getLineNumber(), referencesList.size());
  }
  reader.close();

  double[] ngramPrecisions = incMetric.ngramPrecisions();
  System.out.printf("BLEU = %.3f, ", 100 * incMetric.score());
  for (int i = 0; i < ngramPrecisions.length; i++) {
    if (i != 0) {
      System.out.print("/");
    }
    System.out.printf("%.3f", ngramPrecisions[i] * 100);
  }
  System.out.printf(" (BP=%.3f, ratio=%.3f %d/%d)%n", incMetric
      .brevityPenalty(), ((1.0 * incMetric.candidateLength()) / incMetric
      .effectiveReferenceLength()), incMetric.candidateLength(), incMetric
      .effectiveReferenceLength());

  System.out.printf("%nPrecision Details:%n");
  double[][] precCounts = incMetric.ngramPrecisionCounts();
  for (int i = 0; i < ngramPrecisions.length; i++) {
    System.out.printf("\t%d:%d/%d%n", i, (int) precCounts[i][0], (int) precCounts[i][1]);
  }
}
 
Developer: stanfordnlp, Project: phrasal, Lines: 66, Source: BLEUMetric.java

Example 15: main

import edu.stanford.nlp.util.PropertiesUtils; // import the package/class the method depends on
public static void main(String[] args) throws IOException {
  Properties options = StringUtils.argsToProperties(args, optionArgDefs());
  String dependenciesFilename = PropertiesUtils.get(options, "input", null, String.class);
  String outdirPath = PropertiesUtils.get(options, "outdir", ".", String.class);
  String alignmentFilename = PropertiesUtils.get(options, "alignment", null, String.class);
  String sourceTokensFilename = PropertiesUtils.get(options, "sourceTokens", null, String.class);
  String targetTokensFilename = PropertiesUtils.get(options, "targetTokens", null, String.class);
  String rightDepLMFilename = outdirPath + File.separator + "deplm.nonevents";
  String leftDepLMFilename = outdirPath + File.separator + "deplm.data";
  String classMapFilename = PropertiesUtils.get(options, "classMap", null, String.class);

  useHeadClasses = PropertiesUtils.getBool(options, "headClasses", false);

  if (classMapFilename != null) {
    System.err.println("Loading word class mapping from " + classMapFilename);
    classMap = new LocalWordClassMap();
    classMap.load(classMapFilename);
  } else {
    classMap = null;
  }

  /* Include alignment information and generate a "FRAG" tuple for each unaligned word instead of the real one. */
  boolean includeAlignment = (alignmentFilename != null && sourceTokensFilename != null);

  LineNumberReader alignmentReader = null;
  LineNumberReader sourceTokensReader = null;
  LineNumberReader targetTokensReader = null;

  if (includeAlignment) {
    alignmentReader = IOTools.getReaderFromFile(alignmentFilename);
    sourceTokensReader = IOTools.getReaderFromFile(sourceTokensFilename);
    targetTokensReader = IOTools.getReaderFromFile(targetTokensFilename);
  }

  File leftDepLMFile = new File(leftDepLMFilename);
  if (!leftDepLMFile.exists())
    leftDepLMFile.createNewFile();

  File rightDepLMFile = new File(rightDepLMFilename);
  if (!rightDepLMFile.exists())
    rightDepLMFile.createNewFile();

  FileWriter leftFW = new FileWriter(leftDepLMFile.getAbsoluteFile());
  FileWriter rightFW = new FileWriter(rightDepLMFile.getAbsoluteFile());

  lmWriter = new BufferedWriter(leftFW);
  noEventWriter = new BufferedWriter(rightFW);

  LineNumberReader inputReader = IOTools.getReaderFromFile(dependenciesFilename);

  HashMap<Integer, Pair<IndexedWord, List<Integer>>> dependencies = null;
  while ((dependencies = DependencyUtils.getDependenciesFromCoNLLFileReader(inputReader, false, true)) != null) {

    SymmetricalWordAlignment alignment = null;

    if (includeAlignment) {
      alignment = new SymmetricalWordAlignment(sourceTokensReader.readLine(), targetTokensReader.readLine(), alignmentReader.readLine());
    }

    updateCounts(dependencies, alignment);
  }

  inputReader.close();
  lmWriter.close();
  noEventWriter.close();
  //headLmWriter.close();
}
 
Developer: stanfordnlp, Project: phrasal, Lines: 78, Source: BuildDependencyLMData2.java


Note: The edu.stanford.nlp.util.PropertiesUtils.getBool examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. For redistribution and use, refer to the license of the corresponding project; do not republish without permission.