This article collects typical usage examples of the Java method edu.stanford.nlp.util.StringUtils.joinWithOriginalWhiteSpace. If you are unsure what StringUtils.joinWithOriginalWhiteSpace does or how to use it, the curated examples below should help. You can also read more about its containing class, edu.stanford.nlp.util.StringUtils.
The following shows 5 code examples of StringUtils.joinWithOriginalWhiteSpace, sorted by popularity by default.
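Before the examples, here is a minimal, self-contained sketch (not taken from the examples below; the class name JoinDemo and the sample sentence are illustrative) of what joinWithOriginalWhiteSpace does: given a list of CoreLabel tokens, for example from a CoreNLP tokenize,ssplit pipeline, it joins the tokens back together using the whitespace of the original text rather than a fixed separator, so a subList of tokens yields the exact surface form of that span.

import java.util.List;
import java.util.Properties;

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;
import edu.stanford.nlp.util.StringUtils;

public class JoinDemo {
    public static void main(String[] args) {
        // a tokenizer and sentence splitter are enough for this demo
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize,ssplit");
        StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

        Annotation annotation = new Annotation("Winnipeg Jets trade Evander Kane .");
        pipeline.annotate(annotation);

        for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
            List<CoreLabel> tokens = sentence.get(TokensAnnotation.class);
            // rebuilds the sentence with the spacing of the source text
            System.out.println(StringUtils.joinWithOriginalWhiteSpace(tokens));
            // a subList gives the surface form of a token span (here the first two tokens)
            System.out.println(StringUtils.joinWithOriginalWhiteSpace(tokens.subList(0, 2)));
        }
    }
}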
Example 1: parse
import edu.stanford.nlp.util.StringUtils; // import the package/class the method depends on
public static void parse(FigerSystem sys, int lineId, String text) {
    Annotation annotation = new Annotation(text);
    Preprocessing.pipeline.annotate(annotation);
    // for each sentence
    int sentId = 0;
    for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
        // (debug) print the tokenized sentence:
        // System.out.println("[l" + lineId + "][s" + sentId + "]tokenized sentence="
        //         + StringUtils.joinWithOriginalWhiteSpace(sentence.get(TokensAnnotation.class)));
        List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
        for (Pair<Integer, Integer> offset : entityMentionOffsets) {
            String label = sys.predict(annotation, sentId, offset.first, offset.second);
            // rebuild the mention's surface form from the token span, keeping the original spacing
            String mention = StringUtils.joinWithOriginalWhiteSpace(
                    sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
            System.out.println("[l" + lineId + "][s" + sentId + "]mention(" + offset.first + ","
                    + offset.second + ") = " + mention + ", pred = " + label);
        }
        sentId++;
    }
}
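For context, Example 3 below sets these helpers up via instance() and Preprocessing.initPipeline(). Under that assumption, a hypothetical caller of this parse method might look like the following sketch; the line id 0, the qualified FigerSystem.instance() call, and the sample sentence are placeholders, not part of the original examples.

// hypothetical driver code; FigerSystem.instance() and Preprocessing.initPipeline()
// are assumed to behave as in Example 3 below
FigerSystem sys = FigerSystem.instance();
Preprocessing.initPipeline();   // builds the Preprocessing.pipeline used inside parse
parse(sys, 0, "Winnipeg Jets trade Evander Kane to Buffalo Sabres .");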
Example 2: parse
import edu.stanford.nlp.util.StringUtils; // import the package/class the method depends on
public static void parse(ParseStanfordFigerReverb sys, int lineId, String text) {
    Annotation annotation = new Annotation(text);
    Preprocessing.pipeline.annotate(annotation);
    // for each sentence
    int sentId = 0;
    for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
        // (debug) print the tokenized sentence:
        // System.out.println("[l" + lineId + "][s" + sentId + "]tokenized sentence="
        //         + StringUtils.joinWithOriginalWhiteSpace(sentence.get(TokensAnnotation.class)));
        List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
        for (Pair<Integer, Integer> offset : entityMentionOffsets) {
            String label = sys.predict(annotation, sentId, offset.first, offset.second);
            // rebuild the mention's surface form from the token span, keeping the original spacing
            String mention = StringUtils.joinWithOriginalWhiteSpace(
                    sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
            System.out.println("[l" + lineId + "][s" + sentId + "]mention(" + offset.first + ","
                    + offset.second + ") = " + mention + ", pred = " + label);
        }
        sentId++;
    }
}
Example 3: main
import edu.stanford.nlp.util.StringUtils; // import the package/class the method depends on
public static void main(String[] args) {
    System.out.println("figer system");
    String textFile = null;
    FigerSystem sys = instance();
    Preprocessing.initPipeline();
    String text = "Winnipeg Jets trade Evander Kane to Buffalo Sabres in blockbuster deal that involves Tyler Myers , Drew Stafford .";
    Annotation annotation = new Annotation(text);
    Preprocessing.pipeline.annotate(annotation);
    BinaryExtractionNormalizer normalizer = new BinaryExtractionNormalizer();
    HeadNounExtractor headnoun_extractor = new HeadNounExtractor();
    // for each sentence
    int sentId = 0;
    CorenlpParsedArticle pa = new CorenlpParsedArticle();
    int sectionId = 0;
    pa.sectionId = sectionId;
    List<CoreMap> sentences = annotation.get(SentencesAnnotation.class);
    pa.numSentence = sentences.size();
    int i = 0;
    for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
        ParsedSentence ps = new ParsedSentence(pa.sectionId, i++, sentence, Preprocessing.gsf);
        pa.parsedsentence.add(ps);
        List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
        for (Pair<Integer, Integer> offset : entityMentionOffsets) {
            String label = sys.predict(annotation, sentId, offset.first, offset.second);
            // rebuild the mention's surface form from the token span, keeping the original spacing
            String mention = StringUtils.joinWithOriginalWhiteSpace(
                    sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
            System.out.println("[l" + 0 + "][s" + sentId + "]mention(" + offset.first + ","
                    + offset.second + ") = " + mention + ", pred = " + label);
        }
        sentId++;
    }
}
Example 4: main2
import edu.stanford.nlp.util.StringUtils; // import the package/class the method depends on
public static void main2(String[] args) {
    String textFile = null;
    if (args.length == 1) {
        textFile = args[0];
    } else if (args.length == 2) {
        configFile = args[0];
        textFile = args[1];
    } else {
        usage();
        System.exit(0);
    }
    // initialize the system
    ParseStanfordFigerReverb sys = instance();
    Preprocessing.initPipeline();
    // preprocess the text
    List<String> list = FileUtil.getLinesFromFile(textFile);
    for (int i = 0; i < list.size(); i++) {
        Annotation annotation = new Annotation(list.get(i));
        Preprocessing.pipeline.annotate(annotation);
        // for each sentence
        int sentId = 0;
        for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
            System.out.println("[l" + i + "][s" + sentId + "]tokenized sentence="
                    + StringUtils.joinWithOriginalWhiteSpace(sentence.get(TokensAnnotation.class)));
            List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
            for (Pair<Integer, Integer> offset : entityMentionOffsets) {
                String label = sys.predict(annotation, sentId, offset.first, offset.second);
                // rebuild the mention's surface form from the token span, keeping the original spacing
                String mention = StringUtils.joinWithOriginalWhiteSpace(
                        sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
                System.out.println("[l" + i + "][s" + sentId + "]mention(" + offset.first + ","
                        + offset.second + ") = " + mention + ", pred = " + label);
            }
            sentId++;
        }
    }
}
Example 5: main2
import edu.stanford.nlp.util.StringUtils; // import the package/class the method depends on
public static void main2(String[] args) {
    String textFile = null;
    if (args.length == 1) {
        textFile = args[0];
    } else if (args.length == 2) {
        configFile = args[0];
        textFile = args[1];
    } else {
        usage();
        System.exit(0);
    }
    // initialize the system
    FigerSystem sys = instance();
    Preprocessing.initPipeline();
    // preprocess the text
    List<String> list = FileUtil.getLinesFromFile(textFile);
    for (int i = 0; i < list.size(); i++) {
        Annotation annotation = new Annotation(list.get(i));
        Preprocessing.pipeline.annotate(annotation);
        // for each sentence
        int sentId = 0;
        for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
            System.out.println("[l" + i + "][s" + sentId + "]tokenized sentence="
                    + StringUtils.joinWithOriginalWhiteSpace(sentence.get(TokensAnnotation.class)));
            List<Pair<Integer, Integer>> entityMentionOffsets = getNamedEntityMentions(sentence);
            for (Pair<Integer, Integer> offset : entityMentionOffsets) {
                String label = sys.predict(annotation, sentId, offset.first, offset.second);
                // rebuild the mention's surface form from the token span, keeping the original spacing
                String mention = StringUtils.joinWithOriginalWhiteSpace(
                        sentence.get(TokensAnnotation.class).subList(offset.first, offset.second));
                System.out.println("[l" + i + "][s" + sentId + "]mention(" + offset.first + ","
                        + offset.second + ") = " + mention + ", pred = " + label);
            }
            sentId++;
        }
    }
}