本文整理匯總了Java中edu.stanford.nlp.tagger.maxent.MaxentTagger.tokenizeText方法的典型用法代碼示例。如果您正苦於以下問題:Java MaxentTagger.tokenizeText方法的具體用法?Java MaxentTagger.tokenizeText怎麽用?Java MaxentTagger.tokenizeText使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類edu.stanford.nlp.tagger.maxent.MaxentTagger
的用法示例。
在下文中一共展示了MaxentTagger.tokenizeText方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: doRun
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
@Override
protected List<Word> doRun(Language language, String sentence) {
    // Lazily construct and cache one tagger per language.
    MaxentTagger tagger = taggers.computeIfAbsent(language, lang -> {
        if (lang != EN) {
            throw new UnsupportedLanguageException(lang);
        }
        return new MaxentTagger("edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger");
    });
    PartOfSpeechSet posSet = PartOfSpeechSet.getPOSSet(language);
    List<Word> result = new ArrayList<>();
    // tokenizeText sentence-splits the input; tag each sentence and flatten.
    for (List<HasWord> tokens : MaxentTagger.tokenizeText(new StringReader(sentence))) {
        tagger.tagSentence(tokens).forEach(tw ->
            result.add(new Word(posSet.valueOf(tw.tag()), tw.value())));
    }
    return result;
}
示例2: tokenize
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
 * Tokenizes the input text into a matrix: one inner row per detected
 * sentence, each row holding that sentence's tokens in order.
 */
public ListMatrix<ListMatrix<String>> tokenize(String input) throws Exception {
    ListMatrix<ListMatrix<String>> sentenceMatrix = new DefaultListMatrix<ListMatrix<String>>();
    for (List<HasWord> sentence : MaxentTagger.tokenizeText(new StringReader(input))) {
        ListMatrix<String> row = new DefaultListMatrix<String>();
        for (HasWord token : sentence) {
            row.add(token.word());
        }
        sentenceMatrix.add(row);
    }
    return sentenceMatrix;
}
示例3: tokenize
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
* Splits the sentence into individual tokens.
*
* @param sentence Input sentence
* @return Array of tokens
*/
/**
 * Splits the sentence into individual tokens.
 *
 * @param sentence Input sentence
 * @return Array of tokens
 */
public static String[] tokenize(String sentence) {
    // Use the generic return type directly instead of the original raw List
    // and unchecked cast to the deprecated Sentence class.
    List<List<HasWord>> sentences = MaxentTagger.tokenizeText(new StringReader(sentence));
    List<String> tokens = new ArrayList<String>();
    for (List<HasWord> s : sentences) {
        for (HasWord w : s) {
            tokens.add(w.word());
        }
    }
    return tokens.toArray(new String[tokens.size()]);
}
示例4: main
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
* @param args
*/
/**
 * Command-line entry point: tags each sentence of a file with a given model.
 *
 * @param args args[0] = tagger model file, args[1] = text file to tag
 * @throws Exception if the model cannot be loaded or the input file is unreadable
 */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: java TaggerDemo modelFile fileToTag");
        return;
    }
    MaxentTagger tagger = new MaxentTagger(args[0]);
    // try-with-resources: the original never closed the FileReader.
    try (BufferedReader reader = new BufferedReader(new FileReader(args[1]))) {
        List<List<HasWord>> sentences = MaxentTagger.tokenizeText(reader);
        for (List<HasWord> sentence : sentences) {
            ArrayList<TaggedWord> tSentence = tagger.tagSentence(sentence);
            System.out.println(Sentence.listToString(tSentence, false));
        }
    }
}
示例5: tag
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
 * Tags every sentence found in the input and returns the tagged sentences
 * in document order.
 */
public Vector<ArrayList<TaggedWord>> tag(String input) {
    Vector<ArrayList<TaggedWord>> tagged = new Vector<ArrayList<TaggedWord>>();
    BufferedReader reader = new BufferedReader(new StringReader(input));
    for (List<? extends HasWord> sentence : MaxentTagger.tokenizeText(reader)) {
        tagged.add(tagger.tagSentence(sentence));
    }
    return tagged;
}
示例6: main
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
 * Tags each sentence of a file using the supplied model and prints the result.
 *
 * @param args args[0] = tagger model file, args[1] = text file to tag
 * @throws Exception if the model cannot be loaded or the input file is unreadable
 */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: java TaggerDemo modelFile fileToTag");
        return;
    }
    MaxentTagger tagger = new MaxentTagger(args[0]);
    // try-with-resources: the original never closed the FileReader.
    try (BufferedReader reader = new BufferedReader(new FileReader(args[1]))) {
        List<List<HasWord>> sentences = MaxentTagger.tokenizeText(reader);
        for (List<HasWord> sentence : sentences) {
            List<TaggedWord> tSentence = tagger.tagSentence(sentence);
            System.out.println(Sentence.listToString(tSentence, false));
        }
    }
}
示例7: tag
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
 * Tags the given text and returns all tagged sentences concatenated into
 * one string.
 *
 * @param fn the raw text to tag (despite the name, this is text, not a filename)
 * @return the tagged text, or {@code null} if tagging failed
 */
public String tag(String fn) {
    // StringBuilder avoids the O(n^2) cost of the original's repeated
    // String.concat inside the loop (and the pointless `new String()`).
    StringBuilder tagged = new StringBuilder();
    try {
        List<List<HasWord>> sentences = MaxentTagger.tokenizeText(new BufferedReader(new StringReader(fn)));
        for (List<HasWord> sentence : sentences) {
            ArrayList<TaggedWord> tSentence = (ArrayList<TaggedWord>) tagger.tagSentence(sentence);
            tagged.append(Sentence.listToString(tSentence, false));
        }
        return tagged.toString();
    } catch(Exception e) {
        // Preserve the original contract: report the failure and return null
        // instead of propagating the exception.
        e.printStackTrace();
        System.out.println("Error during the text tagging operation.");
    }
    return null;
}
示例8: tag
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
 * Tags the text and flattens the tagged tokens of every sentence into a
 * single list.
 */
private List<TaggedWord> tag(String text) {
    List<TaggedWord> allTagged = new ArrayList<TaggedWord>();
    for (List<HasWord> tokens : MaxentTagger.tokenizeText(new StringReader(text))) {
        allTagged.addAll(_tagger.tagSentence(tokens));
    }
    return allTagged;
}
示例9: main
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
 * Tags each sentence of a file using the supplied model and prints the result.
 *
 * @param args args[0] = tagger model file, args[1] = text file to tag
 * @throws Exception if the model cannot be loaded or the input file is unreadable
 */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: java TaggerDemo modelFile fileToTag");
        return;
    }
    MaxentTagger tagger = new MaxentTagger(args[0]);
    // try-with-resources: the original never closed the FileReader.
    try (BufferedReader reader = new BufferedReader(new FileReader(args[1]))) {
        List<List<HasWord>> sentences = MaxentTagger.tokenizeText(reader);
        for (List<HasWord> sentence : sentences) {
            ArrayList<TaggedWord> tSentence = tagger.tagSentence(sentence);
            System.out.println(Sentence.listToString(tSentence, false));
        }
    }
}
示例10: detect
import edu.stanford.nlp.tagger.maxent.MaxentTagger; //導入方法依賴的package包/類
/**
 * Sentence-splits and tokenizes the text; each inner list is one
 * tokenized sentence.
 */
public static List<List<HasWord>> detect(String text) {
    StringReader reader = new StringReader(text);
    return MaxentTagger.tokenizeText(reader);
}