本文整理汇总了C#中Annotation.get方法的典型用法代码示例。如果您正苦于以下问题:C# Annotation.get方法的具体用法?C# Annotation.get怎么用?C# Annotation.get使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Annotation
的用法示例。
在下文中一共展示了Annotation.get方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Analyze
/// <summary>
/// Executes Sentiment and EntitiesMentioned analysis.
/// </summary>
/// <param name="pipeline">Configured CoreNLP pipeline used to annotate the text.</param>
/// <param name="text">Raw input text to analyze.</param>
/// <returns>A success outcome carrying the analysis result, or a failure when no sentences were detected.</returns>
public IOutcome<AnalysisResult> Analyze(StanfordCoreNLP pipeline, string text)
{
    // Wrap the raw text and run the full annotation pipeline over it.
    var document = new Annotation(text);
    pipeline.annotate(document);

    // Guard: without sentence annotations there is nothing to analyze.
    var sentenceAnnotations = document.get(typeof(CoreAnnotations.SentencesAnnotation));
    if (sentenceAnnotations == null)
    {
        return Outcomes.Outcomes
            .Failure<AnalysisResult>()
            .WithMessage("No sentences detected.");
    }

    var analysis = new AnalysisResult()
    {
        Sentiment = GetSentiment((ArrayList)sentenceAnnotations),
        MentionedEntities = GetMentions(document)
    };

    return Outcomes.Outcomes
        .Success<AnalysisResult>()
        .WithValue(analysis);
}
示例2: Main
/// <summary>
/// Demonstrates SUTime time-expression extraction: builds a tokenize/ssplit/pos
/// pipeline, adds the SUTime annotator, and prints every time expression found
/// in a sample sentence together with its token span and resolved temporal value.
/// </summary>
private static void Main()
{
    // Path to the folder with models extracted from `stanford-corenlp-3.6.0-models.jar`
    var jarRoot = @"..\..\..\..\paket-files\nlp.stanford.edu\stanford-corenlp-full-2015-12-09\models";
    var modelsDirectory = jarRoot + @"\edu\stanford\nlp\models";

    // Pipeline: tokenizer -> sentence splitter -> POS tagger.
    var annotationPipeline = new AnnotationPipeline();
    annotationPipeline.addAnnotator(new TokenizerAnnotator(false));
    annotationPipeline.addAnnotator(new WordsToSentencesAnnotator(false));

    var posTagger = new MaxentTagger(modelsDirectory +
                                     @"\pos-tagger\english-bidirectional\english-bidirectional-distsim.tagger");
    annotationPipeline.addAnnotator(new POSTaggerAnnotator(posTagger));

    // SUTime configuration: comma-separated rule files drive the recognizer.
    var sutimeRules = modelsDirectory + @"\sutime\defs.sutime.txt,"
                      + modelsDirectory + @"\sutime\english.holidays.sutime.txt,"
                      + modelsDirectory + @"\sutime\english.sutime.txt";
    var sutimeProperties = new Properties();
    sutimeProperties.setProperty("sutime.rules", sutimeRules);
    sutimeProperties.setProperty("sutime.binders", "0");
    annotationPipeline.addAnnotator(new TimeAnnotator("sutime", sutimeProperties));

    // Sample text for time expression extraction.
    var sampleText = "Three interesting dates are 18 Feb 1997, the 20th of july and 4 days from today.";
    var document = new Annotation(sampleText);
    // Pin the document date so relative expressions ("today", "4 days from today") resolve deterministically.
    document.set(new CoreAnnotations.DocDateAnnotation().getClass(), "2013-07-14");
    annotationPipeline.annotate(document);

    Console.WriteLine("{0}\n", document.get(new CoreAnnotations.TextAnnotation().getClass()));

    var timeMentions = document.get(new TimeAnnotations.TimexAnnotations().getClass()) as ArrayList;
    foreach (CoreMap timeMention in timeMentions)
    {
        var mentionTokens = timeMention.get(new CoreAnnotations.TokensAnnotation().getClass()) as List;
        var firstToken = mentionTokens.get(0);
        var lastToken = mentionTokens.get(mentionTokens.size() - 1);
        var timeExpression = timeMention.get(new TimeExpression.Annotation().getClass()) as TimeExpression;
        Console.WriteLine("{0} [from char offset {1} to {2}] --> {3}", timeMention, firstToken, lastToken, timeExpression.getTemporal());
    }
}
示例3: SUTimeDefautTest
/// <summary>
/// Exercises the SUTime annotator with its default rule files and prints each
/// detected time expression with its token span and temporal value.
/// </summary>
public void SUTimeDefautTest()
{
    // Tokenize, split into sentences, then POS-tag.
    var annotationPipeline = new AnnotationPipeline();
    annotationPipeline.addAnnotator(new PTBTokenizerAnnotator(false));
    annotationPipeline.addAnnotator(new WordsToSentencesAnnotator(false));

    var posTagger =
        new MaxentTagger(
            Config.GetModel(@"pos-tagger\english-bidirectional\english-bidirectional-distsim.tagger"));
    annotationPipeline.addAnnotator(new POSTaggerAnnotator(posTagger));

    // SUTime expects its rule files as one comma-separated property value.
    var ruleFiles = new[] {
        Config.GetModel(@"sutime\defs.sutime.txt"),
        Config.GetModel(@"sutime\english.holidays.sutime.txt"),
        Config.GetModel(@"sutime\english.sutime.txt")
    };
    var sutimeProperties = new Properties();
    sutimeProperties.setProperty("sutime.rules", String.Join(",", ruleFiles));
    sutimeProperties.setProperty("sutime.binders", "0");
    annotationPipeline.addAnnotator(new TimeAnnotator("sutime", sutimeProperties));

    const string text = "Three interesting dates are 18 Feb 1997, the 20th of july and 4 days from today.";
    var document = new Annotation(text);
    // Fixed reference date so relative expressions resolve the same way on every run.
    document.set(new CoreAnnotations.DocDateAnnotation().getClass(), "2013-07-14");
    annotationPipeline.annotate(document);

    Console.WriteLine(document.get(new CoreAnnotations.TextAnnotation().getClass())+"\n");

    var timeMentions = (ArrayList)document.get(new TimeAnnotations.TimexAnnotations().getClass());
    foreach (CoreMap timeMention in timeMentions)
    {
        var mentionTokens = (java.util.List)timeMention.get(new CoreAnnotations.TokensAnnotation().getClass());
        var firstToken = mentionTokens.get(0);
        var lastToken = mentionTokens.get(mentionTokens.size() - 1);
        var timeExpression = (TimeExpression)timeMention.get(new TimeExpression.Annotation().getClass());
        Console.WriteLine("{0} [from char offset '{1}' to '{2}'] --> {3}",
            timeMention, firstToken, lastToken, (timeExpression.getTemporal()));
    }
}
示例4: Analyze
/// <summary>
/// Prints record counts for each DB collection and the total number of
/// sentences the CoreNLP pipeline finds across all stored articles.
/// </summary>
public static void Analyze()
{
    // Declare-at-use instead of a C-style declaration block; behavior unchanged.
    StanfordCoreNLP pipe = new Notenizer(true).Pipeline;
    long notesCount = GetCount(DBConstants.NotesCollectionName);
    long rulesCount = GetCount(DBConstants.RulesCollectionName);
    long andRulesCount = GetCount(DBConstants.AndRulesCollectionName);
    long sentencesCount = GetCount(DBConstants.SentencesCollectionName);
    long structuresCount = GetCount(DBConstants.StructuresCollectionName);
    List<BsonDocument> articles = GetAll(DBConstants.ArticlesCollectionName);
    long articlesCount = articles.Count;
    long articlesSentencesCount = 0;

    Console.WriteLine(String.Format("Getting number of sentences in {0} articles...", articlesCount));
    for (int articleIndex = 0; articleIndex < articlesCount; articleIndex++)
    {
        Console.Write(String.Format("Parsing article no.{0} ... ", articleIndex + 1));
        // Annotate the article body and accumulate its sentence count.
        Annotation annotation = new Annotation(articles[articleIndex][DBConstants.TextFieldName].AsString);
        pipe.annotate(annotation);
        articlesSentencesCount += (annotation.get(typeof(CoreAnnotations.SentencesAnnotation)) as java.util.ArrayList).size();
        Console.WriteLine("Done.");
    }

    Console.WriteLine(String.Format("{0}{0}Analysis:", Environment.NewLine));
    Notify(DBConstants.NotesCollectionName, notesCount);
    Notify(DBConstants.SentencesCollectionName, sentencesCount);
    Notify(DBConstants.RulesCollectionName, rulesCount);
    Notify(DBConstants.AndRulesCollectionName, andRulesCount);
    Notify(DBConstants.StructuresCollectionName, structuresCount);
    Notify(DBConstants.ArticlesCollectionName, articlesCount);
    Console.WriteLine(String.Format("Number of sentences in articles: {0}", articlesSentencesCount));
}
示例5: TokenizeAndLemmatize
/// <summary>
/// Annotates the document text and returns the lemma of every token,
/// in document order. Returns an empty list when no tokens were produced.
/// </summary>
/// <param name="documentText">Raw text to tokenize and lemmatize.</param>
/// <returns>Lemmas of all tokens, or an empty list.</returns>
public List<string> TokenizeAndLemmatize(string documentText)
{
    var annotated = new Annotation(documentText);
    _pipeline.annotate(annotated);

    var lemmas = new List<string>();
    var tokensKey = ClassLiteral<CoreAnnotations.TokensAnnotation>.Value;
    var lemmaKey = ClassLiteral<CoreAnnotations.LemmaAnnotation>.Value;

    var tokens = annotated.get(tokensKey) as ArrayList;
    if (tokens == null)
    {
        return lemmas;
    }

    // Explicit loop in place of the original AddRange + LINQ Select chain.
    foreach (var coreLabel in tokens.OfType<CoreLabel>())
    {
        lemmas.Add((string)coreLabel.get(lemmaKey));
    }
    return lemmas;
}
示例6: CustomAnnotationPrint
/// <summary>
/// Dumps each sentence of the annotation to the console: every token with its
/// POS and NER tags, the Penn-style parse tree, and the collapsed dependencies.
/// </summary>
/// <param name="annotation">Fully annotated document to print.</param>
public void CustomAnnotationPrint(Annotation annotation)
{
    Console.WriteLine("-------------");
    Console.WriteLine("Custom print:");
    Console.WriteLine("-------------");

    var sentenceMaps = (ArrayList)annotation.get(new CoreAnnotations.SentencesAnnotation().getClass());
    foreach (CoreMap sentenceMap in sentenceMaps)
    {
        Console.WriteLine("\n\nSentence : '{0}'", sentenceMap);

        // Per-token surface form, part-of-speech tag and named-entity tag.
        var sentenceTokens = (ArrayList)sentenceMap.get(new CoreAnnotations.TokensAnnotation().getClass());
        foreach (CoreLabel sentenceToken in sentenceTokens)
        {
            var tokenText = sentenceToken.get(new CoreAnnotations.TextAnnotation().getClass());
            var tokenPos = sentenceToken.get(new CoreAnnotations.PartOfSpeechAnnotation().getClass());
            var tokenNer = sentenceToken.get(new CoreAnnotations.NamedEntityTagAnnotation().getClass());
            Console.WriteLine("{0} \t[pos={1}; ner={2}]", tokenText, tokenPos, tokenNer);
        }

        Console.WriteLine("\nTree:");
        var parseTree = (Tree)sentenceMap.get(new TreeCoreAnnotations.TreeAnnotation().getClass());
        // pennPrint writes to a Java stream; capture it into a string for the console.
        using (var treeBuffer = new ByteArrayOutputStream())
        {
            parseTree.pennPrint(new PrintWriter(treeBuffer));
            Console.WriteLine("The first sentence parsed is:\n {0}", treeBuffer.toString());
        }

        Console.WriteLine("\nDependencies:");
        var dependencyGraph = (SemanticGraph)sentenceMap.get(new SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation().getClass());
        foreach (SemanticGraphEdge graphEdge in dependencyGraph.edgeListSorted().toArray())
        {
            var governor = graphEdge.getGovernor();
            var dependent = graphEdge.getDependent();
            Console.WriteLine(
                "{0}({1}-{2},{3}-{4})", graphEdge.getRelation(),
                governor.word(), governor.index(), dependent.word(), dependent.index());
        }
    }
}
示例7: Annotation
/// <summary>
/// Gets the relevant terms, grouped by part-of-speech tag.
/// </summary>
/// <param name="text">The text.</param>
/// <returns>
/// IDictionary keyed by upper-cased POS tag; each value lists the token texts
/// found with that tag, in document order. Empty when no sentences were produced.
/// </returns>
IDictionary<string, IEnumerable<string>> INlpProvider.GetRelevantTerms(string text)
{
    IDictionary<string, IEnumerable<string>> returnDictionary = new Dictionary<string, IEnumerable<string>>();
    var annotation = new Annotation(text);
    NlpProvider.pipeline.annotate(annotation);

    // Guard the `as` cast: the original dereferenced it unchecked and would
    // throw NullReferenceException when the pipeline yields no sentences.
    var sentences = annotation.get(new CoreAnnotations.SentencesAnnotation().getClass()) as ArrayList;
    if (sentences == null)
    {
        return returnDictionary;
    }

    foreach (CoreMap sentence in sentences)
    {
        var tokens = sentence.get(new CoreAnnotations.TokensAnnotation().getClass()) as ArrayList;
        if (tokens == null)
        {
            continue;
        }
        foreach (CoreLabel token in tokens)
        {
            // POS tags are plain ASCII; ToUpperInvariant avoids locale-dependent
            // casing surprises (e.g. Turkish dotless-i) that ToUpper can introduce.
            string pos = token.get(new CoreAnnotations.PartOfSpeechAnnotation().getClass()).ToString().ToUpperInvariant();
            if (!NlpProvider.relevantPos.Contains(pos))
            {
                continue;
            }

            // Single lookup via TryGetValue instead of ContainsKey + indexer.
            IEnumerable<string> existing;
            List<string> listOfValues;
            if (returnDictionary.TryGetValue(pos, out existing))
            {
                listOfValues = existing as List<string>;
            }
            else
            {
                listOfValues = new List<string>();
                returnDictionary.Add(pos, listOfValues);
            }

            string word = token.get(new CoreAnnotations.TextAnnotation().getClass()).ToString();
            listOfValues.Add(word);
        }
    }
    return returnDictionary;
}
示例8: extractTime
public void extractTime(string text)
{
sentenceInput = text;
string presentDate = "2015-10-10";
string curr = Environment.CurrentDirectory;
var jarRoot = curr + @"\stanford-corenlp-3.5.2-models";
var modelsDirectory = jarRoot + @"\edu\stanford\nlp\models";
// Annotation pipeline configuration
var pipeline = new AnnotationPipeline();
pipeline.addAnnotator(new TokenizerAnnotator(false));
pipeline.addAnnotator(new WordsToSentencesAnnotator(false));
// SUTime configuration
var sutimeRules = modelsDirectory + @"\sutime\defs.sutime.txt,"
+ modelsDirectory + @"\sutime\english.holidays.sutime.txt,"
+ modelsDirectory + @"\sutime\english.sutime.txt";
var props = new Properties();
props.setProperty("sutime.rules", sutimeRules);
props.setProperty("sutime.binders", "0");
props.setProperty("sutime.markTimeRanges", "true");
props.setProperty("sutime.includeRange", "true");
pipeline.addAnnotator(new TimeAnnotator("sutime", props));
// Sample text for time expression extraction
var annotation = new Annotation(text);
annotation.set(new CoreAnnotations.DocDateAnnotation().getClass(), presentDate);
pipeline.annotate(annotation);
// Console.WriteLine("{0}\n", annotation.get(new CoreAnnotations.TextAnnotation().getClass()));
var timexAnnsAll = annotation.get(new TimeAnnotations.TimexAnnotations().getClass()) as ArrayList;
foreach (CoreMap cm in timexAnnsAll)
{
var time = cm.get(new TimeExpression.Annotation().getClass()) as TimeExpression;
string typeTimex = time.getTemporal().getTimexType().toString();
if (typeTimex.ToLower() == "duration")
{
typeTime = "tPeriod";
valueTime = time.getTemporal().toISOString();
Console.WriteLine(valueTime);
}
if (typeTimex.ToLower() == "time" || typeTimex.ToLower() == "date")
{
string textOftime = time.getText().ToString();
char[] delimiterChars = { ' ' };
string[] words = textOftime.Split(delimiterChars);
string mainword = words[0];
var tagger = new MaxentTagger(modelsDirectory + @"\pos-tagger\english-bidirectional\english-bidirectional-distsim.tagger");
var sentences = MaxentTagger.tokenizeText(new StringReader(text));
var first = sentences.get(0) as ArrayList;
int size = first.size();
int i = 0;
int index = -3;
while (i < size)
{
if (first.get(i).ToString() == mainword)
index = i;
i++;
}
var taggedSentence = tagger.tagSentence(first);
string checker = taggedSentence.get(index - 1).ToString();
if (checker.ToLower() == "after/in" || checker.ToLower() == "since/in")
{
typeTime = "tTrigger";
valueTime = "Start : " + time.getTemporal().toISOString();
Console.WriteLine(valueTime);
}
else if (checker.ToLower() == "before/in")
{
if (typeTimex == "TIME")
{
typeTime = "tTrigger";
valueTime = "End : " + time.getTemporal().toISOString();
Console.WriteLine(valueTime);
}
else
{
DateTime result = new DateTime();
DateTime current = DateTime.ParseExact(presentDate, "yyyy-MM-dd", System.Globalization.CultureInfo.InvariantCulture);
string dt = time.getTemporal().toString();
char[] delimiter = { '-', '-', '-' };
string[] partsOfDate = time.getTemporal().toISOString().Split(delimiter);
//.........这里部分代码省略.........
示例9: GetDepencencies
/// <summary>
/// Gets dependencies from a sentence annotation: extracts the parse tree,
/// derives the collapsed typed dependencies, and wraps each in a
/// NotenizerDependency. Nominal-subject relations are added twice, once per
/// token role (governor/dependent).
/// </summary>
/// <param name="annotation">Sentence annotation carrying a parse tree.</param>
/// <returns>The collected dependencies.</returns>
private NotenizerDependencies GetDepencencies(Annotation annotation)
{
    // Declare-at-use instead of the original up-front declaration block.
    Tree parseTree = annotation.get(typeof(TreeCoreAnnotations.TreeAnnotation)) as Tree;
    TreebankLanguagePack languagePack = new PennTreebankLanguagePack();
    GrammaticalStructureFactory structureFactory = languagePack.grammaticalStructureFactory();
    GrammaticalStructure grammaticalStructure = structureFactory.newGrammaticalStructure(parseTree);
    java.util.Collection collapsedDependencies = grammaticalStructure.typedDependenciesCollapsed();

    NotenizerDependencies result = new NotenizerDependencies();
    foreach (TypedDependency typedDependency in (collapsedDependencies as java.util.ArrayList))
    {
        NotenizerDependency dependency = new NotenizerDependency(typedDependency);
        result.Add(dependency);

        // Mirror nominal subjects with the opposite token role.
        if (dependency.Relation.IsNominalSubject())
        {
            NotenizerDependency mirrored = new NotenizerDependency(typedDependency);
            mirrored.TokenType = dependency.TokenType == TokenType.Dependent ? TokenType.Governor : TokenType.Dependent;
            result.Add(mirrored);
        }
    }
    return result;
}
示例10: GetMentions
/// <summary>
/// Convert our mentions into a list of strings.
/// </summary>
/// <param name="doc">Annotated document whose entity mentions are read.</param>
/// <returns>Sorted, de-duplicated text of all PERSON mentions.</returns>
private List<string> GetMentions(Annotation doc)
{
    var mentions = (ArrayList)doc.get(typeof(CoreAnnotations.MentionsAnnotation));
    // HashSet replaces the original List.Contains check inside the loop,
    // turning O(n^2) de-duplication into O(n). Sorting at the end keeps the
    // returned list identical to before.
    var people = new HashSet<string>();
    foreach (CoreMap mention in mentions)
    {
        var entityType = mention.get(typeof(CoreAnnotations.EntityTypeAnnotation));
        // Only get People! (Uses ToString consistently; the original mixed
        // .NET ToString() and Java toString(), which IKVM maps to the same call.)
        if (entityType.ToString() == "PERSON")
            people.Add(mention.ToString());
    }
    var results = new List<string>(people);
    results.Sort();
    return results;
}
示例11: Parse
/// <summary>
/// Parses the sentence.
/// Converts every sentence of the annotated article into a note: sentences that
/// match a stored rule are noted immediately via that rule; the rest are parsed
/// by the static parser and persisted to the DB in a second pass.
/// </summary>
/// <param name="annotation">Annotated article; its sentence annotations are iterated.</param>
/// <returns>All notes produced for the article (rule-matched plus newly parsed).</returns>
public List<NotenizerNote> Parse(Annotation annotation)
{
    List<NotenizerNote> sentencesNoted = new List<NotenizerNote>();
    List<NotenizerNote> notesToSave = new List<NotenizerNote>();
    // Article lookup is keyed by the trimmed full text of the annotation.
    Article article = GetArticle(annotation.ToString().Trim());
    // ================== REFACTORED PART HERE ======================
    foreach (Annotation sentenceLoop in annotation.get(typeof(CoreAnnotations.SentencesAnnotation)) as ArrayList)
    {
        NotenizerSentence sentence = new NotenizerSentence(sentenceLoop, article);
        Note matchedNote;
        NotenizerNoteRule rule = GetRuleForSentence(sentence, out matchedNote);
        // A stored rule with at least one dependency can be applied directly.
        if (rule != null && rule.Structure.Dependencies != null && rule.Structure.Dependencies.Count > 0)
        {
            NotenizerNote parsedNote = ApplyRule(sentence, rule);
            parsedNote.Note = matchedNote;
            // Attach the associated and-rule when the matched note references one.
            if (parsedNote.Note.AndRuleID != DBConstants.BsonNullValue)
                parsedNote.AndRule = GetAndRuleForSentence(rule, parsedNote.Note.AndRuleID);
            //Console.WriteLine("Parsed note: " + parsedNote.OriginalSentence + " ===> " + parsedNote.Text);
            sentencesNoted.Add(parsedNote);
            continue;
        }
        // No usable rule: fall back to the static parser; persistence happens below.
        NotenizerNote note = _staticParser.Parse(sentence);
        notesToSave.Add(note);
    }
    // inserting into DB AFTER ALL sentences from article were processed
    // to avoid processed sentence to affect processing other sentences from article
    foreach (NotenizerNote sentenceNotedLoop in notesToSave)
    {
        // save rule's structure
        // NOTE(review): each insert blocks on .Result; the insert order below is
        // load-bearing (later documents reference the IDs produced earlier).
        NotenizerNoteRule rule = sentenceNotedLoop.CreateRule();
        sentenceNotedLoop.CreateStructure();
        rule.Structure.Structure.ID = DB.InsertToCollection(DBConstants.StructuresCollectionName, DocumentCreator.CreateStructureDocument(rule)).Result;
        // save sentence's structure
        NotenizerStructure sentenceStructure = sentenceNotedLoop.OriginalSentence.Structure;
        sentenceStructure.Structure.ID = DB.InsertToCollection(DBConstants.StructuresCollectionName, DocumentCreator.CreateStructureDocument(sentenceStructure)).Result;
        // save rule
        rule.ID = DB.InsertToCollection(DBConstants.RulesCollectionName, DocumentCreator.CreateRuleDocument(rule)).Result;
        // save note
        Note note = sentenceNotedLoop.CreateNote();
        note.ID = DB.InsertToCollection(DBConstants.NotesCollectionName, DocumentCreator.CreateNoteDocument(
            sentenceNotedLoop,
            rule.ID,
            String.Empty)).Result;
        // save sentence (references the structure, article, rule and note IDs saved above)
        sentenceNotedLoop.OriginalSentence.Sentence.ID = DB.InsertToCollection(DBConstants.SentencesCollectionName, DocumentCreator.CreateSentenceDocument(
            sentenceNotedLoop.OriginalSentence,
            sentenceStructure.Structure.ID,
            article.ID,
            rule.ID,
            String.Empty,
            note.ID)).Result;
        Console.WriteLine("Parsed note: " + sentenceNotedLoop.OriginalSentence + " ===> " + sentenceNotedLoop.Text);
        sentencesNoted.Add(sentenceNotedLoop);
    }
    return sentencesNoted;
}