本文整理匯總了Java中opennlp.tools.sentdetect.SentenceModel類的典型用法代碼示例。如果您正苦於以下問題:Java SentenceModel類的具體用法?Java SentenceModel怎麽用?Java SentenceModel使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
SentenceModel類屬於opennlp.tools.sentdetect包,在下文中一共展示了SentenceModel類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: sentenceDetect
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Splits {@code text} into sentences using a language-specific OpenNLP model
 * resolved under {@code <user.dir>/models/opennlp/}.
 *
 * @param text the text to segment
 * @return the detected sentences, or {@code null} if the model could not be
 *         loaded or the instance language is unsupported
 */
public String[] sentenceDetect(String text) {
    String[] sentences = null;
    File modelIn = null;
    File userDir = new File(System.getProperty("user.dir"));
    if (this.turNLPInstance.getLanguage().equals("en_US")) {
        modelIn = new File(userDir.getAbsolutePath().concat("/models/opennlp/en/en-sent.bin"));
    } else if (this.turNLPInstance.getLanguage().equals("pt_BR")) {
        modelIn = new File(userDir.getAbsolutePath().concat("/models/opennlp/pt/pt-sent.bin"));
    }
    if (modelIn == null) {
        // Unsupported language: the original passed a null File to SentenceModel
        // and crashed; preserve the documented "no result" contract instead.
        return null;
    }
    try {
        SentenceModel model = new SentenceModel(modelIn);
        SentenceDetectorME sentenceDetector = new SentenceDetectorME(model);
        sentences = sentenceDetector.sentDetect(text);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return sentences;
}
示例2: trainSentences
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Trains a maxent sentence-detection model ("en", 100 iterations, cutoff 0,
 * end-of-sentence characters {@code . ? !}) from a line-per-sentence classpath
 * resource and serializes it to {@code outFile}.
 *
 * @param inResource classpath resource with one sentence sample per line (UTF-8)
 * @param outFile    path of the model file to write
 * @throws IOException if the samples cannot be read, training fails, or the
 *                     model cannot be written
 */
public static void trainSentences(final String inResource, String outFile) throws IOException {
    InputStreamFactory inputStreamFactory = new InputStreamFactory() {
        @Override
        public InputStream createInputStream() throws IOException {
            return Trainer.class.getResourceAsStream(inResource);
        }
    };
    TrainingParameters trainingParameters = new TrainingParameters();
    trainingParameters.put(TrainingParameters.ALGORITHM_PARAM, ModelType.MAXENT.name());
    trainingParameters.put(TrainingParameters.ITERATIONS_PARAM, "100");
    trainingParameters.put(TrainingParameters.CUTOFF_PARAM, "0");
    SentenceDetectorFactory sentenceDetectorFactory =
            SentenceDetectorFactory.create(null, "en", true, null, ".?!".toCharArray());
    SentenceSampleStream samples = new SentenceSampleStream(
            new PlainTextByLineStream(inputStreamFactory, StandardCharsets.UTF_8));
    SentenceModel sentdetectModel;
    try {
        sentdetectModel = SentenceDetectorME.train("en", samples, sentenceDetectorFactory, trainingParameters);
    } finally {
        // Close the sample stream even when training throws (the original leaked it).
        samples.close();
    }
    // try-with-resources guarantees the output stream is closed on serialization failure.
    try (FileOutputStream out = new FileOutputStream(outFile)) {
        sentdetectModel.serialize(out);
    }
}
示例3: init
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Initialization method. Builds the OpenNLP sentence detector and tokenizer
 * from the given model streams and fills the stopword sets from the given
 * word-list streams (one word per line).
 *
 * @param sent   OpenNLP sentence-detector model stream (closed here)
 * @param token  OpenNLP tokenizer model stream (closed here)
 * @param stop   stopword list, one entry per line
 * @param exstop extended stopword list, one entry per line
 * @throws IOException if any model or list cannot be read
 */
private void init(InputStream sent, InputStream token, InputStream stop, InputStream exstop) throws IOException {
    // Sentence detector.
    SentenceModel sentModel = new SentenceModel(sent);
    sent.close();
    sdetector = new SentenceDetectorME(sentModel);
    // Tokenizer.
    TokenizerModel tokenModel = new TokenizerModel(token);
    token.close();
    tokenizer = new TokenizerME(tokenModel);
    // Stopword lists. try-with-resources closes the readers even when
    // readLine() throws (the original leaked them on failure).
    // NOTE(review): readers use the platform default charset, as before —
    // confirm whether the lists are required to be UTF-8.
    try (BufferedReader br = new BufferedReader(new InputStreamReader(stop))) {
        String line;
        while ((line = br.readLine()) != null) {
            stopwords.add(line);
        }
    }
    try (BufferedReader br = new BufferedReader(new InputStreamReader(exstop))) {
        String line;
        while ((line = br.readLine()) != null) {
            extendedStopwords.add(line);
        }
    }
}
示例4: doInitialize
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Loads the four English OpenNLP models (tokenizer, sentence detector, POS
 * tagger, chunker) from classpath resources next to this class, then builds
 * the corresponding OpenNLP tools from the shared models.
 *
 * @param aContext the UIMA context (unused here beyond the framework contract)
 * @throws ResourceInitializationException if a model fails to load or a tool
 *         cannot be constructed
 */
@Override
public void doInitialize(UimaContext aContext) throws ResourceInitializationException {
try {
// Populate the shared model holders from bundled resources.
tokensModel.loadModel(TokenizerModel.class, getClass().getResourceAsStream("en_token.bin"));
sentencesModel.loadModel(SentenceModel.class, getClass().getResourceAsStream("en_sent.bin"));
posModel.loadModel(POSModel.class, getClass().getResourceAsStream("en_pos_maxent.bin"));
chunkModel.loadModel(ChunkerModel.class, getClass().getResourceAsStream("en_chunker.bin"));
} catch (BaleenException be) {
getMonitor().error("Unable to load OpenNLP Language Models", be);
throw new ResourceInitializationException(be);
}
try {
// Build the tools from the now-loaded shared models; the casts mirror the
// model classes passed to loadModel() above.
sentenceDetector = new SentenceDetectorME((SentenceModel) sentencesModel.getModel());
wordTokenizer = new TokenizerME((TokenizerModel) tokensModel.getModel());
posTagger = new POSTaggerME((POSModel) posModel.getModel());
phraseChunker = new ChunkerME((ChunkerModel) chunkModel.getModel());
} catch (Exception e) {
getMonitor().error("Unable to create OpenNLP taggers", e);
throw new ResourceInitializationException(e);
}
}
示例5: testLoad
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Verifies that a SharedOpenNLPModel keeps the first model it loaded:
 * a later load of a different model type must not replace the resource.
 */
@Test
public void testLoad() throws Exception {
    SharedOpenNLPModel sharedModel = new SharedOpenNLPModel();
    // First load: an English tokenizer model.
    sharedModel.loadModel(TokenizerModel.class, OpenNLP.class.getResourceAsStream("en_token.bin"));
    BaseModel firstLoaded = sharedModel.getModel();
    assertNotNull(firstLoaded);
    assertTrue(firstLoaded instanceof TokenizerModel);
    assertEquals("en", firstLoaded.getLanguage());
    // Trying to load a different model shouldn't change the resource.
    sharedModel.loadModel(SentenceModel.class, OpenNLP.class.getResourceAsStream("en_sent.bin"));
    assertEquals(firstLoaded, sharedModel.getModel());
    sharedModel.doDestroy();
}
示例6: main
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Demo entry point: segments a quotation-heavy sample paragraph and prints
 * both the sentence strings and their character spans.
 */
public static void main(String[] strings) throws Exception {
    String text = "“But I don’t want to go among mad people,” Alice remarked. " +
            "“Oh, you can’t help that,” said the Cat: “we’re all mad here. I’m mad. You’re mad.” " +
            "“How do you know I’m mad?” said Alice. " +
            "“You must be,” said the Cat, “or you wouldn’t have come here.”";
    try (InputStream modelIn = new FileInputStream(NATURAL_LANGUAGE_PROCESSING_SRC_MAIN_RESOURCES_EN_SENT_BIN)) {
        SentenceDetectorME detector = new SentenceDetectorME(new SentenceModel(modelIn));
        String[] detected = detector.sentDetect(text);
        Span[] spans = detector.sentPosDetect(text);
        for (String sentence : detected) {
            System.out.println(sentence);
        }
        System.out.println(Arrays.deepToString(spans));
    }
}
示例7: segmentEssay
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Re-segments every raw sentence of the essay with the OpenNLP sentence
 * detector and stores the result via {@code setDetectedSentences}.
 * The model stream field {@code smodel} is always closed in the finally block.
 */
public void segmentEssay(Essay essay) {
    try {
        smodel = new FileInputStream(
                System.getProperty("user.dir")
                + "/Models/en-sent.bin"
        );
        SentenceDetectorME detector = new SentenceDetectorME(new SentenceModel(smodel));
        ArrayList<String> detected = new ArrayList<>();
        for (String raw : essay.getSentences()) {
            detected.addAll(asList(detector.sentDetect(raw)));
        }
        essay.setDetectedSentences(detected);
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (smodel != null) {
            try {
                smodel.close();
            } catch (IOException ignored) {
            }
        }
    }
}
示例8: startStage
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Configures field names from the stage configuration and loads the OpenNLP
 * models (sentence detector, tokenizer, person name finder, POS tagger).
 *
 * @param config stage configuration supplying optional field-name overrides
 */
@Override
public void startStage(StageConfiguration config) {
    // parse the config to map the params properly
    textField = config.getProperty("textField", textField);
    peopleField = config.getProperty("peopleField", peopleField);
    posTextField = config.getProperty("posTextField", posTextField);
    try {
        // try-with-resources closes each model file; the original leaked all four streams.
        // Sentence finder
        try (FileInputStream in = new FileInputStream(sentenceModelFile)) {
            sentenceDetector = new SentenceDetectorME(new SentenceModel(in));
        }
        // tokenizer
        try (FileInputStream in = new FileInputStream(tokenModelFile)) {
            tokenizer = new TokenizerME(new TokenizerModel(in));
        }
        // person name finder
        try (FileInputStream in = new FileInputStream(personModelFile)) {
            nameFinder = new NameFinderME(new TokenNameFinderModel(in));
        }
        // load the part of speech tagger.
        try (FileInputStream in = new FileInputStream(posModelFile)) {
            posTagger = new POSTaggerME(new POSModel(in));
        }
    } catch (IOException e) {
        log.info("Error loading up OpenNLP Models. {}", e.getLocalizedMessage());
        e.printStackTrace();
    }
}
示例9: exec
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Pig UDF body: splits the single string field of {@code input} into
 * sentences and returns them as a bag of one-field tuples.
 *
 * @param input a tuple with exactly one field holding the text
 * @return a bag with one tuple per detected sentence, or {@code null} for
 *         null/empty input
 * @throws IOException if the tuple is malformed or the model cannot be loaded
 */
public DataBag exec(Tuple input) throws IOException
{
    if (input.size() != 1) {
        throw new IOException("Expected a tuple with exactly 1 field, got " + input.size());
    }
    Object field = input.get(0);
    String inputString = (field == null) ? null : field.toString();
    // The original used `inputString == ""`, which compares identity and is
    // never true for a freshly built string; isEmpty() is the intended check.
    if (inputString == null || inputString.isEmpty()) {
        return null;
    }
    DataBag outBag = bf.newDefaultBag();
    if (sdetector == null) {
        // Lazily load the model once; try-with-resources closes the stream
        // (the original leaked it).
        String loadFile = CachedFile.getFileName(MODEL_FILE, this.modelPath);
        try (InputStream buffer = new BufferedInputStream(new FileInputStream(loadFile))) {
            this.sdetector = new SentenceDetectorME(new SentenceModel(buffer));
        }
    }
    String[] sentences = this.sdetector.sentDetect(inputString);
    for (String sentence : sentences) {
        Tuple outTuple = tf.newTuple(sentence);
        outBag.add(outTuple);
    }
    return outBag;
}
示例10: inform
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Builds the analysis components from configured resources: an optional
 * sentence detector, a mandatory tokenizer, and an optional paragraph
 * chunker instantiated by reflection.
 *
 * @param loader resource loader used to open the configured model files
 * @throws IOException if the tokenizer model is missing, a model cannot be
 *                     read, or the chunker class cannot be instantiated
 */
@Override
public void inform(ResourceLoader loader) throws IOException {
    // Sentence detection is optional; only build it when a model was configured.
    if (sentenceModelFile != null) {
        try (InputStream in = loader.openResource(sentenceModelFile)) {
            sentenceOp = new SentenceDetectorME(new SentenceModel(in));
        }
    }
    // A tokenizer model is mandatory. (Fixed message typo: "tokenizerModle".)
    if (tokenizerModelFile == null)
        throw new IOException("Parameter 'tokenizerModel' is required, but is invalid:" + tokenizerModelFile);
    try (InputStream in = loader.openResource(tokenizerModelFile)) {
        tokenizerOp = new TokenizerME(new TokenizerModel(in));
    }
    if (parChunkingClass != null) {
        try {
            // Parameterized Class plus the non-deprecated reflective
            // instantiation (newInstance() is deprecated since Java 9).
            Class<?> c = Class.forName(parChunkingClass);
            Object o = c.getDeclaredConstructor().newInstance();
            paragraphChunker = (ParagraphChunker) o;
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
}
示例11: initialize
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Loads the Portuguese POS-tagger, tokenizer, and sentence-detector models
 * (the variants trained with the extra VPP tag) and builds the shared tools.
 * Plain-model alternatives: pt.postagger.model / pt.tokenizer.model /
 * pt.sentdetect.model.
 *
 * @throws IOException if any model file cannot be read
 */
public static void initialize() throws IOException {
    /* with VPP tag */
    model = new POSModelLoader().load(new File(RESOURCES + "pt.postaggerVerbPP.model"));
    // try-with-resources closes the model files; the original leaked both streams.
    try (FileInputStream tIn = new FileInputStream(RESOURCES + "pt.tokenizerVerbPP.model")) {
        tModel = new TokenizerModel(tIn);
    }
    try (FileInputStream sIn = new FileInputStream(RESOURCES + "pt.sentDetectVerbPP.model")) {
        sModel = new SentenceModel(sIn);
    }
    tagger = new POSTaggerME(model);
    token = new TokenizerME(tModel);
    sent = new SentenceDetectorME(sModel);
}
示例12: splitToSentences
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Splits {@code text} into sentences with the classpath model {@code sentBin}.
 *
 * @param text the text to segment
 * @return the detected sentences; empty when the model cannot be loaded
 */
public List<String> splitToSentences(String text) {
    List<String> sentences = new ArrayList<String>();
    // try-with-resources closes the model stream even when SentenceModel's
    // constructor throws (the original only closed it on success).
    try (InputStream modelIn = getClass().getResourceAsStream(sentBin)) {
        SentenceModel model = new SentenceModel(modelIn);
        SentenceDetectorME sentenceDetector = new SentenceDetectorME(model);
        for (String snt : sentenceDetector.sentDetect(text)) {
            sentences.add(snt);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    return sentences;
}
示例13: loadSentenceDetector
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/** Load the sentence detector.
 *
 * Resolves {@code <modelDirectory>/<language>-sent.bin} and builds a
 * {@link SentenceDetectorME} into the {@code detector} field.
 *
 * @param language       language code used in the model file name (e.g. "en")
 * @param modelDirectory directory containing the OpenNLP model files
 * @throws IOException if the model file cannot be read
 */
protected void loadSentenceDetector(String language, String modelDirectory) throws IOException {
    String modelFile = modelDirectory +
            File.separatorChar + language + "-sent.bin";
    log.info("Loading sentence model {}", modelFile);
    // try-with-resources closes the stream; the original never closed it.
    try (InputStream modelStream = new FileInputStream(modelFile)) {
        SentenceModel model = new SentenceModel(modelStream);
        detector = new SentenceDetectorME(model);
    }
}
示例14: EnglishIndexer
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Builds the full English pipeline (sentence detector, tokenizer, POS tagger,
 * chunker, parser) from bundled OpenNLP 1.5 model resources.
 * try-with-resources closes every model stream even when construction throws;
 * the original only closed them on the success path.
 *
 * @throws Exception if any model resource cannot be loaded
 */
public EnglishIndexer() throws Exception {
    mDicts = new EnglishDictionaries();
    mBeamSize = ConfigurationManager.getConfiguration().getInt("BeamSize");
    try (InputStream in = getClass().getResourceAsStream("/opennlp15model-sa/en-sent.bin")) {
        mSentenceDetector = new SentenceDetectorME(new SentenceModel(in));
    }
    try (InputStream in = getClass().getResourceAsStream("/opennlp15model-sa/en-token.bin")) {
        mTokenizer = new EnglishTokenizer(in, mDicts);
    }
    // The parser model is about 15x the size of chunking model.
    // Keep this in mind when using Deep Parsing.
    try (InputStream in = getClass().getResourceAsStream("/opennlp15model-sa/en-pos-maxent.bin")) {
        POSModel posModel = new POSModel(in);
        mTagDictionary = posModel.getTagDictionary();
        mPosTagger = new POSTaggerME(posModel);
    }
    try (InputStream in = getClass().getResourceAsStream("/opennlp15model-sa/en-chunker.bin")) {
        mChunker = new ChunkerME(new ChunkerModel(in));
    }
    try (InputStream in = getClass().getResourceAsStream("/opennlp15model-sa/en-parser-chunking.bin")) {
        mParser = ParserFactory.create(new ParserModel(in));
    }
}
示例15: LanguageProcessor
import opennlp.tools.sentdetect.SentenceModel; //導入依賴的package包/類
/**
 * Loads the sentence model from {@code <user.dir>/en-sent.bin} and stores the
 * text to process. On I/O failure the error is printed and the fields stay
 * unset, matching the original behavior.
 *
 * @param text the text this processor will work on
 */
public LanguageProcessor(String text) {
    String dir = System.getProperty("user.dir");
    // try-with-resources closes the model file; the original never closed it.
    try (InputStream inputStream = new FileInputStream(dir + "/en-sent.bin")) {
        model = new SentenceModel(inputStream);
        this.text = text;
    } catch (IOException e) {
        e.printStackTrace();
    }
}