本文整理匯總了Java中opennlp.tools.util.Span類的典型用法代碼示例。如果您正苦於以下問題:Java Span類的具體用法?Java Span怎麽用?Java Span使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
Span類屬於opennlp.tools.util包,在下文中一共展示了Span類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: interpret
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Interprets a natural-language query: categorizes it into an {@link Intent}
 * and attaches every named entity found by the configured name finders.
 */
public Intent interpret(String query) {
    final String[] tokens = WhitespaceTokenizer.INSTANCE.tokenize(query);
    final double[] outcome = categorizer.categorize(tokens);
    logger.debug(categorizer.getAllResults(outcome));

    // The best-scoring category becomes the intent label.
    final Intent intent = new Intent(categorizer.getBestCategory(outcome));
    for (final NameFinderME finder : nameFinderMEs) {
        final Span[] entitySpans = finder.find(tokens);
        final String[] surfaceForms = Span.spansToStrings(entitySpans, tokens);
        for (int idx = 0; idx < entitySpans.length; idx++) {
            // Keyed by span type; later finders/spans of the same type overwrite earlier ones.
            intent.getEntities().put(entitySpans[idx].getType(), surfaceForms[idx]);
        }
    }
    logger.debug(intent.toString());
    return intent;
}
示例2: incrementToken
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Emits one sentence span per call as a single token: copies the sentence
 * text into the term attribute and sets its character offsets.
 *
 * @return {@code false} once all sentences have been emitted
 */
@Override
public final boolean incrementToken() throws IOException {
    // Lazily split the input into sentence spans on the first call.
    if (sentences == null) {
        fillSentences();
    }
    if (tokenOffset >= sentences.length) {
        return false;  // exhausted
    }
    final Span current = sentences[tokenOffset++];
    clearAttributes();
    final int begin = current.getStart();
    final int finish = current.getEnd();
    termAtt.copyBuffer(inputSentence, begin, finish - begin);
    posIncrAtt.setPositionIncrement(1);
    offsetAtt.setOffset(begin, finish);
    return true;
}
示例3: addSpan
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Registers a labelled span. A zero-length span (start == end) is stored at a
 * single position that carries both the start and end labels; otherwise the
 * start and end positions are recorded separately.
 */
public void addSpan(Span span, CSList<Character> labels) throws Exception {
    final int start = span.getStart();
    final int end = span.getEnd();

    // Ordinary span: record the two endpoints independently.
    if (start != end) {
        addStart(start, labels);
        addEnd(end, labels);
        return;
    }

    // Degenerate span: one position acts as both start and end.
    LabelledPosition lpos = this.get(start);
    if (lpos == null) {
        lpos = new LabelledPosition();
        this.put(start, lpos);
        lpos.IsSingleSpan = true;
    } else {
        // Position already labelled by another span, so it is no longer "single".
        lpos.IsSingleSpan = false;
    }
    lpos.IsStart = true;
    lpos.IsEnd = true;
    LabelledPosition.addNewLabels(lpos.StartLabels, labels);
    LabelledPosition.addNewLabels(lpos.EndLabels, labels);
}
示例4: isGoodAsTopic
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Builds a single-token-per-word phrase from the tag text and asks
 * {@code isHighValueObject} whether it qualifies as a topic.
 *
 * @return the "good as topic" flag; the "good as tag" flag is computed but discarded
 */
public Boolean isGoodAsTopic(String tag) throws Exception {
    final String[] tokens = tokenizeSentence(tag);
    final String[] posTags = posTagTokens(tokens);

    final CSList<Parse> phrase = new CSList<Parse>();
    for (int i = 0; i < tokens.length; i++) {
        // NOTE(review): the span end is length() - 1 here, while OpenNLP spans are
        // normally end-exclusive (other snippets in this file use length()) — confirm.
        phrase.add(new Parse(tokens[i], new Span(0, tokens[i].length() - 1), posTags[i], 1.0, 1));
    }

    final RefSupport<Boolean> tagRef = new RefSupport<Boolean>();
    final RefSupport<Boolean> topicRef = new RefSupport<Boolean>();
    isHighValueObject(phrase, tagRef, topicRef);
    return topicRef.getValue();
}
示例5: getSentiment
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Populates sentiment vectors, indexed sentence strings, and span maps for
 * every sentence of every paragraph in the given content index (in place).
 */
public void getSentiment(ContentIndex contentindex) throws Exception {
    for (final ParagraphIndex pindex : contentindex.ParagraphIndexes) {
        final int sentenceCount = pindex.SentenceCount;
        pindex.SentenceSentiments = new FloatVector[sentenceCount];
        pindex.IndexedSentences = new String[sentenceCount];
        pindex.SpanMap = (HashMap<String, Span>[]) new HashMap[sentenceCount];

        for (int j = 0; j < sentenceCount; j++) {
            // If chunking were used instead of full parsing, shallow
            // accumulation breaks would apply here.
            final RefSupport<FloatVector> sentimentRef = new RefSupport<FloatVector>();
            getSentimentVector(pindex.SentenceParses[j],
                    pindex.SentenceFlags[j], contentindex.ContentParseDepth, sentimentRef);
            pindex.SentenceSentiments[j] = sentimentRef.getValue();

            final RefSupport<String> indexedRef = new RefSupport<String>();
            final RefSupport<HashMap<String, Span>> spanMapRef = new RefSupport<HashMap<String, Span>>();
            makeIndexedSentence(pindex.SentenceParses[j],
                    pindex.SentenceFlags[j], pindex.SentenceSentiments[j], indexedRef, spanMapRef);
            pindex.IndexedSentences[j] = indexedRef.getValue();
            pindex.SpanMap[j] = spanMapRef.getValue();
        }
    }
}
示例6: getConcepts
import opennlp.tools.util.Span; //導入依賴的package包/類
// Extracts concepts by sliding a window of chunk spans over the sentence and
// matching each window's chunk-type sequence against the configured pattern.
@Override
public List<Concept> getConcepts(JCas jcas) throws AnalysisEngineProcessException {
List<Token> tokens = TypeUtil.getOrderedTokens(jcas);
// Parallel arrays of surface text and POS tags, one entry per token.
String[] texts = tokens.stream().map(Token::getCoveredText).toArray(String[]::new);
String[] pos = tokens.stream().map(Token::getPartOfSpeech).toArray(String[]::new);
// Chunk spans with gaps filled so every token position is covered
// (presumably insertOutsideSpans adds "O" spans — confirm against its definition).
List<Span> spans = insertOutsideSpans(chunker.chunkAsSpans(texts, pos));
// Every window of |type| consecutive spans (rangeClosed ⇒ last window ends at spans.size()).
return IntStream.rangeClosed(0, spans.size() - type.size())
.mapToObj(i -> spans.subList(i, i + type.size()))
// Keep only windows whose chunk-type sequence equals the target pattern.
.filter(spansSublist -> type
.equals(spansSublist.stream().map(Span::getType).collect(toList())))
// Map the matched span window back to the covered token range.
.map(spansSublist -> tokens.subList(spansSublist.get(0).getStart(),
spansSublist.get(spansSublist.size() - 1).getEnd()))
.filter(toks -> toks.size() >= minLength)
// Build a mention over the token range, then wrap it in a Concept whose
// type name is the joined pattern, e.g. "opennlp:NP-PP-NP".
.map(toks -> TypeFactory.createConceptMention(jcas, getFirstTokenBegin(toks),
getLastTokenEnd(toks)))
.map(cmention -> TypeFactory.createConcept(jcas, cmention,
TypeFactory.createConceptType(jcas, "opennlp:" + String.join("-", type))))
.collect(toList());
}
示例7: score
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Scores linked spans by collecting every gazetteer entry they reference,
 * clustering those entries geographically, and scoring each cluster.
 */
@Override
public void score(List<LinkedSpan> linkedSpans, String docText, Span[] sentenceSpans, EntityLinkerProperties properties, AdminBoundaryContext additionalContext) {
    //Map<Double, Double> latLongs = new HashMap<Double, Double>();
    // Gather all gazetteer entry references reachable from the linked spans.
    final List<GazetteerEntry> gazEntries = new ArrayList<>();
    for (final LinkedSpan<BaseLink> linkedSpan : linkedSpans) {
        for (final BaseLink link : linkedSpan.getLinkedEntries()) {
            if (link instanceof GazetteerEntry) {
                gazEntries.add((GazetteerEntry) link);
            }
        }
    }
    // Point-cluster the entries and score each resulting cluster.
    final Map<String, List<GazetteerEntry>> clusters = CLUSTERER.cluster(gazEntries, PRECISION);
    CLUSTERER.scoreClusters(clusters);
}
示例8: addParsedAsAnnotations
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Recursively walks a parse tree, adding a PhraseChunk annotation (offset into
 * the document) for every node whose type is a recognized phrase type.
 */
private void addParsedAsAnnotations(final JCas jCas, final int offset, final Parse parsed) {
    final String nodeType = parsed.getType();
    if (OpenNLPParser.PHRASE_TYPES.contains(nodeType)) {
        final Span nodeSpan = parsed.getSpan();
        final PhraseChunk chunk = new PhraseChunk(jCas);
        // Spans are sentence-relative; offset rebases them onto the document.
        chunk.setBegin(offset + nodeSpan.getStart());
        chunk.setEnd(offset + nodeSpan.getEnd());
        chunk.setChunkType(nodeType);
        addToJCasIndex(chunk);
    }
    // Children are visited regardless of whether this node was a phrase.
    for (final Parse child : parsed.getChildren()) {
        addParsedAsAnnotations(jCas, offset, child);
    }
}
示例9: parseSentence
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Builds an OpenNLP parse skeleton for one sentence — a root node spanning the
 * whole text plus one token node per word (sentence-relative offsets) — and
 * runs the parser over it.
 */
private Parse parseSentence(final Sentence sentence, final Collection<WordToken> tokens) {
    final String text = sentence.getCoveredText();
    final int sentenceBegin = sentence.getBegin();

    // Root covers the entire sentence.
    final Parse root = new Parse(text, new Span(0, text.length()), AbstractBottomUpParser.INC_NODE, 1, 0);

    // Insert token nodes, rebasing document offsets to sentence-relative ones.
    int tokenIndex = 0;
    for (final WordToken token : tokens) {
        final Span tokenSpan = new Span(token.getBegin() - sentenceBegin, token.getEnd() - sentenceBegin);
        root.insert(new Parse(text, tokenSpan, AbstractBottomUpParser.TOK_NODE, 0, tokenIndex));
        tokenIndex++;
    }

    return parser.parse(root);
}
示例10: dropOverlappingSpans
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Sorts the spans, removes exact duplicates, and delegates the remaining
 * overlap resolution to {@code getTaggedSpans}.
 *
 * @param spans the spans
 * @return the tagged, de-duplicated spans
 */
public static Span[] dropOverlappingSpans(Span[] spans) {
    final List<Span> sorted = new ArrayList<Span>(spans.length);
    Collections.addAll(sorted, spans);
    Collections.sort(sorted);

    Span previous = null;
    for (final Iterator<Span> iter = sorted.iterator(); iter.hasNext(); ) {
        final Span current = iter.next();
        // After sorting, duplicates are adjacent — drop repeats of the previous span.
        if (current.equals(previous)) {
            iter.remove();
        }
        previous = current;
    }
    return getTaggedSpans(sorted);
}
示例11: singleTagging
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Resolves overlaps in a (sorted) span list by keeping the earlier span:
 * any span intersecting the retained anchor is removed in place.
 *
 * @param spans the spans (modified in place)
 * @return the surviving spans as an array
 */
private static Span[] singleTagging(List<Span> spans) {
    Span anchor = null;
    for (final Iterator<Span> iter = spans.iterator(); iter.hasNext(); ) {
        final Span current = iter.next();
        if (anchor != null && anchor.intersects(current)) {
            // Overlap: drop the current span; the anchor stays unchanged so that
            // a run of mutually overlapping spans collapses to the first one.
            iter.remove();
        } else {
            anchor = current;
        }
    }
    return spans.toArray(new Span[spans.size()]);
}
示例12: getNamedEntity
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Converts a token span into named entities: one entity per span, carrying its
 * surface text and the span's type split on {@code '|'} into multiple labels.
 *
 * @param tokenSpan the token span
 * @return the named entities
 */
public static NamedEntity[] getNamedEntity(TokenSpan tokenSpan) {
    final Span[] spans = tokenSpan.getSpans();
    final String[] tokens = tokenSpan.getTokens();
    final String[] surfaceForms = Span.spansToStrings(spans, tokens);

    final NamedEntity[] results = new NamedEntity[spans.length];
    for (int idx = 0; idx < results.length; idx++) {
        final NamedEntity named = new NamedEntity();
        named.setEntity(surfaceForms[idx]);
        // A span's type may encode several labels separated by '|'.
        named.setType(spans[idx].getType().split("\\|"));
        results[idx] = named;
    }
    return results;
}
示例13: find
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Runs the name finder model registered for {@code field} over the content and
 * returns the set of entity surface strings found.
 *
 * @throws ElasticsearchException if no model is registered for the field
 */
public Set<String> find(String content, String field) {
    try {
        if (!nameFinderModels.containsKey(field)) {
            throw new ElasticsearchException("Could not find field [{}], possible values {}", field, nameFinderModels.keySet());
        }
        final TokenNameFinderModel finderModel = nameFinderModels.get(field);
        // NOTE(review): the finally block removes this entry on every call, so the
        // thread-local cache never survives between calls — confirm that is intended.
        if (threadLocal.get() == null || !threadLocal.get().equals(finderModel)) {
            threadLocal.set(finderModel);
        }
        final String[] tokens = SimpleTokenizer.INSTANCE.tokenize(content);
        final Span[] spans = new NameFinderME(finderModel).find(tokens);
        return Sets.newHashSet(Span.spansToStrings(spans, tokens));
    } finally {
        threadLocal.remove();
    }
}
示例14: testPersonNER
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Verifies that the bundled person NER model extracts exactly the two person
 * names from a fixed sample sentence.
 */
@Test
public void testPersonNER()
throws Exception
{
    // Load the person model from the test classpath.
    final URL modelUrl = Thread.currentThread().getContextClassLoader()
        .getResource("models/en-ner-persons.bin");
    assertThat(modelUrl, is(notNullValue()));

    final TokenNameFinderModel model = new TokenNameFinderModel(modelUrl);
    assertThat(model, is(notNullValue()));
    final NameFinderME nameFinder = new NameFinderME(model);

    final String[] tokens = SimpleTokenizer.INSTANCE
        .tokenize("Mr. John Smith of New York, married Anne Green of London today.");
    assertThat(tokens.length, is(15));

    // Two person mentions are expected.
    final Span[] spans = nameFinder.find(tokens);
    assertThat(spans.length, is(2));
    final String[] names = Span.spansToStrings(spans, tokens);
    assertThat(names.length, is(2));
    assertThat(names[0], is("John Smith"));
    assertThat(names[1], is("Anne Green"));
}
示例15: testLocationNER
import opennlp.tools.util.Span; //導入依賴的package包/類
/**
 * Verifies that the bundled location NER model extracts exactly the two place
 * names from a fixed sample sentence.
 */
@Test
public void testLocationNER()
throws Exception
{
    // Load the location model from the test classpath.
    final URL modelUrl = Thread.currentThread().getContextClassLoader()
        .getResource("models/en-ner-locations.bin");
    assertThat(modelUrl, is(notNullValue()));

    final TokenNameFinderModel model = new TokenNameFinderModel(modelUrl);
    assertThat(model, is(notNullValue()));
    final NameFinderME nameFinder = new NameFinderME(model);

    final String[] tokens = SimpleTokenizer.INSTANCE
        .tokenize("Mr. John Smith of New York, married Anne Green of London today.");
    assertThat(tokens.length, is(15));

    // Two location mentions are expected.
    final Span[] spans = nameFinder.find(tokens);
    assertThat(spans.length, is(2));
    final String[] locations = Span.spansToStrings(spans, tokens);
    assertThat(locations.length, is(2));
    assertThat(locations[0], is("New York"));
    assertThat(locations[1], is("London"));
}