This page collects typical usage examples of the Java class org.apache.uima.fit.factory.ExternalResourceFactory. If you are wondering what ExternalResourceFactory is for and how to use it, the curated examples below should help.
The ExternalResourceFactory class belongs to the org.apache.uima.fit.factory package. Fifteen code examples of the class are shown below, ordered roughly by popularity.
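Before the examples, here is a minimal, self-contained sketch of the pattern they all follow (the SharedCounter and CountingAnnotator classes and the "counter" key are invented for illustration, and the uimaFIT 2.x API used throughout the examples is assumed): a shared resource implementation is described with ExternalResourceFactory.createExternalResourceDescription, and that description is then bound by key to an annotator's @ExternalResource field when the engine description is built with AnalysisEngineFactory.

import org.apache.uima.analysis_engine.AnalysisEngine;
import org.apache.uima.fit.component.JCasAnnotator_ImplBase;
import org.apache.uima.fit.component.Resource_ImplBase;
import org.apache.uima.fit.descriptor.ExternalResource;
import org.apache.uima.fit.factory.AnalysisEngineFactory;
import org.apache.uima.fit.factory.ExternalResourceFactory;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ExternalResourceDescription;

public class ExternalResourceFactorySketch {

    // Hypothetical shared resource; the examples below use project-specific classes
    // such as SharedOpenNLPModel or SharedFongoResource instead.
    public static class SharedCounter extends Resource_ImplBase {
        private int count;

        public synchronized int next() {
            return ++count;
        }
    }

    // Hypothetical annotator: uimaFIT injects the resource bound to KEY_COUNTER into this field.
    public static class CountingAnnotator extends JCasAnnotator_ImplBase {
        public static final String KEY_COUNTER = "counter";

        @ExternalResource(key = KEY_COUNTER)
        private SharedCounter counter;

        @Override
        public void process(JCas jCas) {
            counter.next(); // use the shared resource for each document processed
        }
    }

    public static void main(String[] args) throws Exception {
        // 1. Describe the resource implementation (configuration parameters may follow the class).
        ExternalResourceDescription counterDesc =
                ExternalResourceFactory.createExternalResourceDescription(SharedCounter.class);

        // 2. Bind the description to the annotator's key while building the engine description.
        AnalysisEngine ae = AnalysisEngineFactory.createEngine(
                AnalysisEngineFactory.createEngineDescription(
                        CountingAnnotator.class,
                        CountingAnnotator.KEY_COUNTER, counterDesc));

        // 3. Process a document; the resource was created and injected by the framework.
        ae.process(ae.newJCas());
    }
}

Reusing one description across several engine descriptions, as Example 1 does with its "lexica" resource, keeps the configuration in one place; whether the engines actually end up sharing a single resource instance depends on the resource manager they are run with.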
Example 1: beforeTest

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Override
public void beforeTest() throws UIMAException {
    super.beforeTest();

    // Shared ClearNLP lexica, bound under the key "lexica" to both the tokeniser and the parser
    final ExternalResourceDescription tokensDesc =
        ExternalResourceFactory.createExternalResourceDescription("lexica", ClearNlpLexica.class);

    final AnalysisEngineDescription tokeniserDesc =
        AnalysisEngineFactory.createEngineDescription(ClearNlpTokeniser.class, "lexica", tokensDesc);
    tokeniserAe = AnalysisEngineFactory.createEngine(tokeniserDesc);

    final AnalysisEngineDescription parserDesc =
        AnalysisEngineFactory.createEngineDescription(ClearNlpParser.class, "lexica", tokensDesc);
    ae = AnalysisEngineFactory.createEngine(parserDesc);
}
Example 2: createAnalysisEngines

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Override
protected AnalysisEngine[] createAnalysisEngines() throws ResourceInitializationException {
    // Use OpenNLP to generate the POS tags etc. for us
    final ExternalResourceDescription tokensDesc =
        ExternalResourceFactory.createExternalResourceDescription("tokens", SharedOpenNLPModel.class);
    final ExternalResourceDescription sentencesDesc =
        ExternalResourceFactory.createExternalResourceDescription("sentences", SharedOpenNLPModel.class);
    final ExternalResourceDescription posDesc =
        ExternalResourceFactory.createExternalResourceDescription("posTags", SharedOpenNLPModel.class);
    final ExternalResourceDescription chunksDesc =
        ExternalResourceFactory.createExternalResourceDescription("phraseChunks", SharedOpenNLPModel.class);

    return asArray(
        createAnalysisEngine(OpenNLP.class, "tokens", tokensDesc, "sentences", sentencesDesc,
            "posTags", posDesc, "phraseChunks", chunksDesc),
        createAnalysisEngine(MaltParser.class));
}
Example 3: setUp

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Before
public void setUp() throws ResourceInitializationException, ResourceAccessException {
    // Create a description of an external resource - a fongo instance, in the same way we would
    // have created a shared mongo resource
    final ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        SharedFongoResource.class, "fongo.collection", "test", "fongo.data", "[]");

    // Create the analysis engine
    final AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        MongoPatternSaver.class,
        MongoPatternSaver.KEY_MONGO, erd,
        "collection", "test");
    ae = AnalysisEngineFactory.createEngine(aed);
    ae.initialize(new CustomResourceSpecifier_impl(), Collections.emptyMap());

    // Retrieve the shared resource so the test can inspect what the annotator wrote
    sfr = (SharedFongoResource) ae.getUimaContext().getResourceObject(MongoPatternSaver.KEY_MONGO);
}
Example 4: setUp

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Before
public void setUp() throws Exception {
    daoDesc = ExternalResourceFactory.createExternalResourceDescription(
        XmiFileTreeCorpusDAOResource.class, corpusPathString);

    // Merge the corpus type system with the auto-detected and component type systems
    tsd = CasCreationUtils.mergeTypeSystems(Sets.newHashSet(
        XmiFileTreeCorpusDAO.getTypeSystem(corpusPathString),
        TypeSystemDescriptionFactory.createTypeSystemDescription(),
        TokenizerAPI.getTypeSystemDescription(),
        SentenceSplitterAPI.getTypeSystemDescription()));

    readerDesc = CollectionReaderFactory.createReaderDescription(
        CorpusDAOCollectionReader.class, tsd,
        CorpusDAOCollectionReader.CORPUS_DAO_KEY, daoDesc);

    CAS aCAS = CasCreationUtils.createCas(tsd, null, null, null);

    tokenizerSentenceSplitterDesc = AnalysisEngineFactory
        .createEngineDescription(Unitizer.createTokenizerSentenceSplitterAED());

    unitAnnotatorDesc = AnalysisEngineFactory.createEngineDescription(
        UnitAnnotator.class, UnitAnnotator.PARAM_UNIT_TYPE_NAMES, unitTypes);
}
Example 5: setUp

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Before
public void setUp() throws Exception {
    daoDesc = ExternalResourceFactory.createExternalResourceDescription(
        XmiFileTreeCorpusDAOResource.class, corpusPathString);

    tsd = CasCreationUtils.mergeTypeSystems(Sets.newHashSet(
        XmiFileTreeCorpusDAO.getTypeSystem(corpusPathString),
        TypeSystemDescriptionFactory.createTypeSystemDescription(),
        TokenizerAPI.getTypeSystemDescription(),
        SentenceSplitterAPI.getTypeSystemDescription()));

    reader = CollectionReaderFactory.createReaderDescription(
        CorpusDAOCollectionReader.class, tsd,
        CorpusDAOCollectionReader.CORPUS_DAO_KEY, daoDesc);

    CAS aCAS = CasCreationUtils.createCas(tsd, null, null, null);

    tokenizerSentenceSplitter = Unitizer.createTokenizerSentenceSplitterAED();

    unitAnnotator = AnalysisEngineFactory.createEngineDescription(
        UnitAnnotator.class, UnitAnnotator.PARAM_UNIT_TYPE_NAMES, unitTypes);

    unitClassifier = AnalysisEngineFactory.createEngineDescription(
        UnitClassifier.class, UnitClassifier.PARAM_CLASS_TYPE_NAMES, classTypes);
}
Example 6: testChunks

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testChunks() throws Exception {
    // Shared OpenNLP models for each stage of the pipeline
    ExternalResourceDescription tokensDesc =
        ExternalResourceFactory.createExternalResourceDescription("tokens", SharedOpenNLPModel.class);
    ExternalResourceDescription sentencesDesc =
        ExternalResourceFactory.createExternalResourceDescription("sentences", SharedOpenNLPModel.class);
    ExternalResourceDescription posDesc =
        ExternalResourceFactory.createExternalResourceDescription("posTags", SharedOpenNLPModel.class);
    ExternalResourceDescription chunksDesc =
        ExternalResourceFactory.createExternalResourceDescription("phraseChunks", SharedOpenNLPModel.class);

    AnalysisEngineDescription descNLP = AnalysisEngineFactory.createEngineDescription(
        OpenNLP.class, "tokens", tokensDesc, "sentences", sentencesDesc,
        "posTags", posDesc, "phraseChunks", chunksDesc);
    AnalysisEngine aeNLP = AnalysisEngineFactory.createEngine(descNLP);

    AnalysisEngine ae = AnalysisEngineFactory.createEngine(TestAnnotator.class);

    jCas.setDocumentText("PERSON JOHN SMITH WAS SEEN ENTERING THE WAREHOUSE");
    aeNLP.process(jCas);
    ae.process(jCas);

    assertEquals(1, JCasUtil.select(jCas, Person.class).size());
    assertEquals("JOHN SMITH", JCasUtil.selectByIndex(jCas, Person.class, 0).getValue());
}
Example 7: testProperty

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testProperty() throws Exception {
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        MONGO, SharedFongoResource.class, FONGO_COLLECTION, MONGO_COLL, FONGO_DATA, JSON.serialize(GAZ_DATA));
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        MongoRegex.class, MONGO, erd, COLLECTION, MONGO_COLL, TYPE, LOCATION, REGEX, LONDON_REGEX);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    jCas.setDocumentText(TEXT);
    ae.process(jCas);

    assertEquals(1, JCasUtil.select(jCas, Location.class).size());
    Location l = JCasUtil.selectByIndex(jCas, Location.class, 0);
    assertEquals("London", l.getValue());
    assertEquals("London", l.getCoveredText());
    assertEquals("Property_Test", l.getGeoJson());

    ae.destroy();
}
Example 8: test

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void test() throws Exception {
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    jCas.setDocumentText("Hello world, this is a test");
    ae.process(jCas);

    assertEquals(1, JCasUtil.select(jCas, Location.class).size());
    Location l = JCasUtil.selectByIndex(jCas, Location.class, 0);
    assertEquals(WORLD, l.getValue());
    assertEquals(WORLD, l.getCoveredText());

    ae.destroy();
}
Example 9: testmultipleHits

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testmultipleHits() throws Exception {
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    // the same search term appears multiple times in the text...
    jCas.setDocumentText("Hello world, and hello world again.");
    ae.process(jCas);

    assertEquals(2, JCasUtil.select(jCas, Location.class).size());
    Location l = JCasUtil.selectByIndex(jCas, Location.class, 0);
    assertEquals(WORLD, l.getValue());
    assertEquals(WORLD, l.getCoveredText());

    ae.destroy();
}
Example 10: testCaseSensitive

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testCaseSensitive() throws Exception {
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION,
        "caseSensitive", true);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    jCas.setDocumentText("This text mentions New York and Paris in upper case and new york in lower case");
    ae.process(jCas);

    // should match "Paris" and "new york", but not "New York"
    assertEquals(2, JCasUtil.select(jCas, Location.class).size());
    Location l1 = JCasUtil.selectByIndex(jCas, Location.class, 0);
    Location l2 = JCasUtil.selectByIndex(jCas, Location.class, 1);
    assertEquals("Paris", l1.getValue());
    assertEquals("new york", l2.getValue());

    ae.destroy();
}
Example 11: testPlurals

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testPlurals() throws Exception {
    // This test demonstrates plural matching of gazetteer entries
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION,
        "plural", true);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    jCas.setDocumentText("There may be many New Yorks and many Parises, but there's only one London.");
    ae.process(jCas);

    assertEquals(3, JCasUtil.select(jCas, Location.class).size());
    Location l1 = JCasUtil.selectByIndex(jCas, Location.class, 0);
    Location l2 = JCasUtil.selectByIndex(jCas, Location.class, 1);
    Location l3 = JCasUtil.selectByIndex(jCas, Location.class, 2);
    assertEquals("New Yorks", l1.getValue());
    assertEquals("Parises", l2.getValue());
    assertEquals("London", l3.getValue());

    ae.destroy();
}
Example 12: testWhitespaceExact

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testWhitespaceExact() throws Exception {
    // This test demonstrates the case where whitespace is preserved in gazetteer matching.
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    // words in the term to search for are separated by multiple spaces, a tab or a newline...
    jCas.setDocumentText("This text mentions New York, and New  York again, and New\tYork again, and New \nYork yet again");
    ae.process(jCas);

    // only one mention of "New York" has the two words separated by a single space (as in the gazetteer)
    assertEquals(1, JCasUtil.select(jCas, Location.class).size());
    Location l = JCasUtil.selectByIndex(jCas, Location.class, 0);
    assertEquals(NEW_YORK, l.getValue());

    ae.destroy();
}
Example 13: testWhitespaceNormalized

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testWhitespaceNormalized() throws Exception {
    // This test demonstrates the case where whitespace is normalised before gazetteer matching.
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION,
        "exactWhitespace", false);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    // words in the term to search for are separated by multiple spaces, a tab or a newline...
    jCas.setDocumentText("This text mentions New York, and New  York again, and New\tYork again, and New \nYork yet again");
    ae.process(jCas);

    // With exactWhitespace set to false, runs of spaces and tabs are reduced to a single space,
    // so three of the four mentions of "New York" match; the mention split across a newline does not.
    assertEquals(3, JCasUtil.select(jCas, Location.class).size());
    Location l = JCasUtil.selectByIndex(jCas, Location.class, 0);
    assertEquals(NEW_YORK, l.getValue());

    ae.destroy();
}
Example 14: testReference

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testReference() throws Exception {
    // This test demonstrates that aliases of the same gazetteer entry share a single ReferenceTarget.
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION,
        "exactWhitespace", false);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    // the document mentions New York and two of its nicknames
    jCas.setDocumentText("This text mentions New York (also known as NY and the Big Apple).");
    ae.process(jCas);

    // three Locations are found: "New York" plus its two nicknames...
    assertEquals(3, JCasUtil.select(jCas, Location.class).size());
    // ...but they all refer to the same entity, so there is only one ReferenceTarget
    assertEquals(1, JCasUtil.select(jCas, ReferenceTarget.class).size());
    Location l = JCasUtil.selectByIndex(jCas, Location.class, 0);
    assertEquals(NEW_YORK, l.getValue());

    ae.destroy();
}
Example 15: testmultipleHitsWithText

import org.apache.uima.fit.factory.ExternalResourceFactory; // import the required package/class

@Test
public void testmultipleHitsWithText() throws Exception {
    ExternalResourceDescription erd = ExternalResourceFactory.createExternalResourceDescription(
        FILE_GAZETTEER, SharedFileResource.class);
    AnalysisEngineDescription aed = AnalysisEngineFactory.createEngineDescription(
        File.class, FILE_GAZETTEER, erd,
        FILE_NAME, getClass().getResource(GAZETTEER_TXT).getPath(),
        TYPE, LOCATION);
    AnalysisEngine ae = AnalysisEngineFactory.createEngine(aed);

    // the same search term appears multiple times in the text...
    jCas.setDocumentText("Hello world, and hello world again.");
    // ...but we restrict processing to a subset of the document using a Text annotation
    // covering offset 10 to the end, so only the second occurrence is matched
    new Text(jCas, 10, jCas.getDocumentText().length()).addToIndexes();
    ae.process(jCas);

    assertEquals(1, JCasUtil.select(jCas, Location.class).size());
    Location l = JCasUtil.selectByIndex(jCas, Location.class, 0);
    assertEquals(WORLD, l.getValue());
    assertEquals(WORLD, l.getCoveredText());
    assertTrue(l.getBegin() > 10);

    ae.destroy();
}