

Java CollapsedCCProcessedDependenciesAnnotation Class Code Examples

This article collects typical usage examples of the Java class edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation. If you are wondering what the CollapsedCCProcessedDependenciesAnnotation class does, how to use it, or what working code looks like, the curated examples below should help.


The CollapsedCCProcessedDependenciesAnnotation class belongs to the edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations package. This article presents 15 code examples of the class, sorted by popularity by default.
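Each example below assumes a pipeline field (a configured StanfordCoreNLP instance) created elsewhere in the surrounding class. As a minimal, self-contained sketch of that setup (the class name CollapsedCCDemo and the sample sentence are illustrative, not taken from the projects below), the following is enough to populate CollapsedCCProcessedDependenciesAnnotation; the parse annotator is what produces the SemanticGraph annotations:

import java.util.Properties;

import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation;
import edu.stanford.nlp.util.CoreMap;

public class CollapsedCCDemo {
	public static void main(String[] args) {
		// "parse" is the annotator that fills in the SemanticGraph annotations
		Properties props = new Properties();
		props.setProperty("annotators", "tokenize, ssplit, pos, lemma, parse");
		StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

		// annotate a document, then read back the collapsed-CC dependency graph per sentence
		Annotation document = new Annotation("The quick brown fox jumps over the lazy dog.");
		pipeline.annotate(document);
		for (CoreMap sentence : document.get(SentencesAnnotation.class)) {
			SemanticGraph deps = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
			System.out.println(deps.toList());
		}
	}
}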

Example 1: getPaser

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public String getPaser(String text) {

	// create an empty Annotation just with the given text
	Annotation document = new Annotation(text);
	String rst = "";

	// run all Annotators on this text
	pipeline.annotate(document);

	// these are all the sentences in this document
	// a CoreMap is essentially a Map that uses class objects as keys and
	// has values with custom types
	List<CoreMap> sentences = document.get(SentencesAnnotation.class);
	if (sentences.size() == 1) {
		CoreMap sentence = sentences.get(0);
		SemanticGraph dependencies = sentence
				.get(CollapsedCCProcessedDependenciesAnnotation.class);
		rst = dependencies.toList();
	}
	return rst;
}
 
Developer: hwang033, Project: tcvr, Lines: 22, Source: DependenciesPaser.java
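For reference, the SemanticGraph.toList() call used above renders the graph with one dependency per line in relation(governor-index, dependent-index) form, e.g. nsubj(jumps-5, fox-4).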

Example 2: getSemanticGraph

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public SemanticGraph getSemanticGraph(String text) {
	// create an empty Annotation just with the given text
	Annotation document = new Annotation(text);

	// run all Annotators on this text
	pipeline.annotate(document);

	// these are all the sentences in this document
	// a CoreMap is essentially a Map that uses class objects as keys and
	// has values with custom types
	List<CoreMap> sentences = document.get(SentencesAnnotation.class);
	if (sentences.size() == 1) {
		CoreMap sentence = sentences.get(0);
		return sentence
				.get(CollapsedCCProcessedDependenciesAnnotation.class);
	}
	return null;
}
 
Developer: hwang033, Project: tcvr, Lines: 20, Source: DependenciesPaser.java

Example 3: parse

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public static List<CoreMap> parse(String text) {

    // create an empty Annotation just with the given text
    Annotation document = new Annotation(text);

    // run all Annotators on this text
    pipeline.annotate(document);

    // these are all the sentences in this document
    // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    List<Tree> trees = new ArrayList<>();

    for (CoreMap sentence : sentences) {
        // this is the parse tree of the current sentence
        Tree t = sentence.get(TreeAnnotation.class);
        // the collapsed-CC dependency graph is retrieved the same way (not used further here)
        SemanticGraph graph = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
        trees.add(t);
    }
    return sentences;
}
 
Developer: SeanTater, Project: uncc2014watsonsim, Lines: 23, Source: Trees.java

Example 4: PreNERCoreMapWrapper

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
/**
 * Wraps a CoreMap sentence, capturing its parse tree and the three
 * dependency-graph variants (basic, collapsed, and collapsed-CC-processed).
 */
public PreNERCoreMapWrapper(final CoreMap cm, final HeadFinder hf, final AnalyticUUIDGenerator gen) {
  this.wrapper = new CoreMapWrapper(cm, gen);
  this.hf = hf;
  this.tree = Optional.ofNullable(cm.get(TreeAnnotation.class));
  this.basicDeps = Optional.ofNullable(cm.get(BasicDependenciesAnnotation.class));
  this.colDeps = Optional.ofNullable(cm.get(CollapsedDependenciesAnnotation.class));
  this.colCCDeps = Optional.ofNullable(cm.get(CollapsedCCProcessedDependenciesAnnotation.class));
  this.gen = gen;
}
 
Developer: hltcoe, Project: concrete-stanford-deprecated2, Lines: 13, Source: PreNERCoreMapWrapper.java
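Note the Optional.ofNullable wrapping: any of these annotations can be absent from the CoreMap (for instance, when the parse annotator was not run), so the wrapper records that absence explicitly rather than storing nulls.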

Example 5: getDependencies

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public List<SemanticGraph> getDependencies(String text) {

	Annotation document = runPipeline(text);
	List<CoreMap> sentences = document.get(SentencesAnnotation.class);
	List<SemanticGraph> depGraphs = new ArrayList<SemanticGraph>();
	for (CoreMap sentence : sentences) {
		depGraphs.add(sentence.get(CollapsedCCProcessedDependenciesAnnotation.class));
	}

	return depGraphs;
}
 
Developer: sunil3590, Project: artificial-guy, Lines: 12, Source: NLP.java

Example 6: toPhrases

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
/**
 * Transform a CoreMap instance into a list of Phrase instances
 * 
 * @param sentence a parsed sentence
 * @param text the original text from which the sentence was drawn
 * @return the Phrase instances extracted from the sentence's parse tree
 */
public static List<Phrase> toPhrases(CoreMap sentence, String text) {
	Tree root = sentence.get(TreeAnnotation.class);
	SemanticGraph graph = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
	ArrayList<Phrase> phrases = new ArrayList<Phrase>();
	for (Tree node : root.children())
		if (node.isPrePreTerminal() || node.isPreTerminal())
			phrases.add(toPhrase(node, graph, text));
		else if (node.isPhrasal())
			for (Phrase p : toPhrases(node, graph, text))
				phrases.add(p);
	return phrases;
}
 
Developer: hakchul77, Project: irnlp_toolkit, Lines: 20, Source: StanfordNlpWrapper.java

Example 7: findHeadBetween

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
/**
 * Find a head index between a beginning index and an ending index
 * 
 * @param sentence a parsed sentence
 * @param beginIndex
 *            beginning index (0-based, inclusive)
 * @param endIndex
 *            ending index (0-based, exclusive)
 * @return head index (0-based)
 */
public static Integer findHeadBetween(CoreMap sentence, int beginIndex, int endIndex) {
	SemanticGraph graph = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
	Integer headIndex = beginIndex; // 0-based index
	int minLevel = Integer.MAX_VALUE;
	for (int idx = beginIndex; idx < endIndex; idx++) {
		IndexedWord nodeByIdx = graph.getNodeByIndexSafe(idx + 1); // 0-based index to 1-based index
		int level = nodeByIdx != null ? graph.getPathToRoot(nodeByIdx).size() : Integer.MAX_VALUE;
		if (level < minLevel) {
			minLevel = level;
			headIndex = idx; // 0-based index
		}
	}
	return headIndex; // 0-based index
}
 
Developer: hakchul77, Project: irnlp_toolkit, Lines: 25, Source: StanfordNlpWrapper.java
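The heuristic here treats the token with the shortest path to the root of the collapsed-CC dependency graph as the head of the span; tokens missing from the graph (for example, prepositions that were collapsed into edge labels) receive an effectively infinite level and are never selected.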

Example 8: testStanfordNlpWrapperForUtility

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
/**
 * StanfordNlpWrapper Test for utility functions
 * 
 * @throws IOException
 */
public void testStanfordNlpWrapperForUtility() throws IOException {
	System.out.println("\n----- testStanfordNlpWrapperForUtility() ------------------------------");
	if (!TEST_UTILITY)
		return;

	String text = "Samsung Electronics is a South Korean multinational electronics company headquartered in Suwon, South Korea.";

	StanfordNlpWrapper nlp = new StanfordNlpWrapper(Env.STANFORDNLP_CFG);
	nlp.loadAll("tokenize, ssplit, pos, parse");
	assertTrue(nlp.annotator != null);

	Annotation annotation = nlp.annotate(text);
	CoreMap sentence = annotation.get(SentencesAnnotation.class).get(0);

	System.out.println("-toTokenStrings-----------------------------------------------------------------");
	List<String> toks = StanfordNlpWrapper.toTokenStrings(sentence);
	System.out.println(JString.join(" ", toks));

	System.out.println("-CollapsedCCProcessedDependenciesAnnotation-------------------------------------");
	System.out.println(sentence.get(CollapsedCCProcessedDependenciesAnnotation.class).toString().trim());

	System.out.println("[TEST] findHeadIndexBetween-----------------------------------------------------");
	int idx1 = toks.indexOf("Samsung");
	int idx2 = toks.indexOf("Electronics") + 1;
	int idx3 = toks.indexOf("company") + 1;

	Integer head1 = StanfordNlpWrapper.findHeadBetween(sentence, idx1, idx2);
	assertEquals("Electronics", toks.get(head1));
	System.out.printf("  <Head between [%d..%d)> = %d ==> %s\n", idx1, idx2, head1, toks.get(head1));

	Integer head2 = StanfordNlpWrapper.findHeadBetween(sentence, idx1, idx3);
	assertEquals("company", toks.get(head2));
	System.out.printf("  <Head between [%d..%d)> = %d ==> %s\n", idx1, idx3, head2, toks.get(head2));
}
 
Developer: hakchul77, Project: irnlp_toolkit, Lines: 40, Source: TestStanfordNLP.java

Example 9: _graphs

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
private static List<SemanticGraph> _graphs(Phrase p) {
	return p.memo(Phrase.sentences)
			.stream()
			.map(s -> s.get(CollapsedCCProcessedDependenciesAnnotation.class))
			.filter(Objects::nonNull)
			.collect(toList());
}
 
Developer: SeanTater, Project: uncc2014watsonsim, Lines: 8, Source: Phrase.java

Example 10: getTextDependencyTree

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public List<SemanticGraph> getTextDependencyTree(String text) {
	List<CoreMap> annotateResults = annotate(text);
	ArrayList<SemanticGraph> results = new ArrayList<SemanticGraph>(); 
	for(CoreMap sentence : annotateResults) {
		results.add(sentence.get(CollapsedCCProcessedDependenciesAnnotation.class));
	}
	return results;
}
 
Developer: NextCenturyCorporation, Project: EVEREST-TripletExtraction, Lines: 9, Source: CoreNlpParser.java

Example 11: addDepsCC

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
@SuppressWarnings("rawtypes")
static void addDepsCC(Map<String,Object> sent_info, CoreMap sentence) {
	SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
	List deps = jsonFriendlyDeps(dependencies);
	sent_info.put("deps_cc", deps);
}
 
Developer: UKPLab, Project: tac2015-event-detection, Lines: 7, Source: JsonPipeline.java

Example 12: runCoreNLP

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public static void runCoreNLP()
{
	Properties props = new Properties();
	// pos and parse are needed for the parse tree and dependency graph below;
	// ner and dcoref are needed for the NER tags and the coreference graph
	props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
	StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

	// read some text in the text variable
	String text = "Hello how are you Ramesh"; // Add your text here!

	// create an empty Annotation just with the given text
	Annotation document = new Annotation(text);

	// run all Annotators on this text
	pipeline.annotate(document);

	// these are all the sentences in this document
	// a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
	List<CoreMap> sentences = document.get(SentencesAnnotation.class);

	for(CoreMap sentence: sentences) {
		// traversing the words in the current sentence
		// a CoreLabel is a CoreMap with additional token-specific methods
		for (CoreLabel token: sentence.get(TokensAnnotation.class)) {
			// this is the text of the token
			//String word = token.get(TextAnnotation.class);
			// this is the POS tag of the token
			//String pos = token.get(PartOfSpeechAnnotation.class);
			// this is the NER label of the token
			//String ne = token.get(NamedEntityTagAnnotation.class);
			//System.out.println(token+"/"+ne);
		}

		// this is the parse tree of the current sentence
		Tree tree = sentence.get(TreeAnnotation.class);

		// this is the Stanford dependency graph of the current sentence
		SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
	}

	// This is the coreference link graph
	// Each chain stores a set of mentions that link to each other,
	// along with a method for getting the most representative mention
	// Both sentence and token offsets start at 1!
	Map<Integer, CorefChain> graph = 
			document.get(CorefChainAnnotation.class);
}
 
Developer: siddBanPsu, Project: WikiKreator, Lines: 47, Source: StanfordCoreNLPRunner.java

Example 13: matchSVO

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public HashSet<String> matchSVO(Query query, CoreMap sentence, HashMap<String, HashSet<CoreLabel>> matchedTokens, String tokenType) {
    SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
    String verb = query.getVerb();// TODO .get(LemmaAnnotation.class);
    HashSet<String> dependencyMatches = new HashSet<String>(); // will add SUBJECT, VERB, OBJECT, S_PRONOUN, O_PRONOUN, LOCATION
    for (CoreLabel token : matchedTokens.get(tokenType)) { 
        if (PRONOUN_REGEX.matcher(token.tag()).find()) {
            if (tokenType.equals("SUBJECT")) {
                dependencyMatches.add("S_PRONOUN");
            }
            else if (tokenType.equals("OBJECT")) {
                dependencyMatches.add("O_PRONOUN");
            }
        }
        else { // for nouns that are not pronouns
            dependencyMatches.add(tokenType);
        }
        
        // From the semantic graph, get all verbs which have the given noun as a dependency
        List<IndexedWord> verbNodes = getVerbNodes(token, dependencies);

        // For each verb found, see if the verb matches the query verb
        for (IndexedWord verbNode : verbNodes){
            // TODO: Not only verb to verb, but also verb to adj (e.g) die == was dead
            if (verbNode != null) {
                if (wnMetricLin.computeWordSimilarityNoPos(verbNode.lemma(), verb) > MIN_WORD_TO_WORD_THRESHOLD) {
                    dependencyMatches.add("VERB");
                }
                if (!dependencyMatches.contains("OBJECT") && matchedTokens.containsKey("OBJECT")){
                    dependencyMatches = recursiveSearchKeyword(verbNode, dependencies, tokenType, dependencyMatches, matchedTokens);
                }
            }
        }
    }
    
    // If the query specifies a location, search for the location as a string inside the sentence using regular expressions,
    // ignoring "in", "on", "at" preceding the location if the user has entered them.
    String queryLocation = "";
    if (query.getLocation() != null && query.getLocation().length() > 0) {
        queryLocation = query.getLocation();
        if (queryLocation.length() > 3) {
            String potentialPrep = queryLocation.substring(0, 3);
            if (potentialPrep.equals("in ") || potentialPrep.equals("on ") || potentialPrep.equals("at ")) {
                queryLocation = queryLocation.substring(3);
            }
        }
    }
    Pattern isLocationMatch = Pattern.compile("(^|[\\-\"' \t])" + queryLocation + "[$\\.!?\\-,;\"' \t]");
    if (!queryLocation.equals("") && isLocationMatch.matcher(sentence.toString()).find()) {
        dependencyMatches.add("LOCATION");
    }

    // If the sentence contains O_PRONOUN & OBJECT or S_PRONOUN & SUBJECT, remove the pronoun
    if (dependencyMatches.contains("O_PRONOUN") && dependencyMatches.contains("OBJECT")){
        dependencyMatches.remove("O_PRONOUN");
    }
    if (dependencyMatches.contains("S_PRONOUN") && dependencyMatches.contains("SUBJECT")){
        dependencyMatches.remove("S_PRONOUN");
    }
    return dependencyMatches;
}
 
Developer: beallej, Project: event-detection, Lines: 61, Source: SEMILARSemanticAnalysisValidator.java

Example 14: main

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
public static void main(String[] args) {
    // creates a StanfordCoreNLP object, with POS tagging, lemmatization, NER, parsing, and coreference resolution
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    
    // read some text in the text variable
    String text = "Ricardo Usbeck sits at his table. He is a researcher. entity is an extinct genus of sauropterygian type."; // Add your text here!
    
    // create an empty Annotation just with the given text
    Annotation document = new Annotation(text);
    
    // run all Annotators on this text
    pipeline.annotate(document);
    
    // these are all the sentences in this document
    // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);
    
    for(CoreMap sentence: sentences) {
      // traversing the words in the current sentence
      // a CoreLabel is a CoreMap with additional token-specific methods
      for (CoreLabel token: sentence.get(TokensAnnotation.class)) {
        // this is the text of the token
        String word = token.get(TextAnnotation.class);
        // this is the POS tag of the token
        String pos = token.get(PartOfSpeechAnnotation.class);
        // this is the NER label of the token
        String ne = token.get(NamedEntityTagAnnotation.class);
        System.out.println(word + "\t" + pos + "\t" + ne);
      }

      // this is the parse tree of the current sentence
      Tree tree = sentence.get(TreeAnnotation.class);
      System.out.println(tree.toString());

      // this is the Stanford dependency graph of the current sentence
      SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
      System.out.println(dependencies.toString());
    }

    // This is the coreference link graph
    // Each chain stores a set of mentions that link to each other,
    // along with a method for getting the most representative mention
    // Both sentence and token offsets start at 1!
    Map<Integer, CorefChain> graph = 
      document.get(CorefChainAnnotation.class);
    System.out.println(graph.toString());
}
 
Developer: dice-group, Project: Cetus, Lines: 50, Source: CorefTest.java

Example 15: testStanfordNlpWrapperForPipeline

import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation; // import the required package/class
/**
 * StanfordNlpWrapper Test for pipeline functions
 * 
 * @throws IOException
 */
public void testStanfordNlpWrapperForPipeline() throws IOException {
	System.out.println("\n----- testStanfordNlpWrapperForPipeline() ------------------------------");
	if (!TEST_PIPELINE)
		return;

	String text = "Samsung Electronics is a South Korean multinational electronics company headquartered in Suwon, South Korea.";
	text += " It is the flagship subsidiary of the Samsung Group.";

	StanfordNlpWrapper nlp = new StanfordNlpWrapper(Env.STANFORDNLP_CFG);
	nlp.loadAll("tokenize, ssplit, pos, lemma, ner, regexner, parse, dcoref");
	assertTrue(nlp.annotator != null);

	Annotation annotation = nlp.annotate(text);
	System.out.println("-toXml--------------------------------------------------------------------------");
	System.out.println(nlp.toXml(annotation));
	System.out.println("-toPrettyStr--------------------------------------------------------------------");
	System.out.println(nlp.toPrettyStr(annotation));

	assertEquals(2, annotation.get(SentencesAnnotation.class).size());
	for (CoreMap sentence : annotation.get(SentencesAnnotation.class)) {
		System.out.println("-TextAnnotation-----------------------------------------------------------------");
		System.out.println(sentence.get(TextAnnotation.class));

		System.out.println("-toTokens-----------------------------------------------------------------------");
		System.out.println(JString.join("\n", StanfordNlpWrapper.toTokens(sentence, text)));

		System.out.println("-toPhrases-----------------------------------------------------------------------");
		System.out.println(JString.join("\n", StanfordNlpWrapper.toPhrases(sentence, text)));

		System.out.println("-TreeAnnotation-----------------------------------------------------------------");
		System.out.println(sentence.get(TreeAnnotation.class).pennString().trim());

		System.out.println("-BasicDependenciesAnnotation----------------------------------------------------");
		System.out.println(sentence.get(BasicDependenciesAnnotation.class).toString().trim());

		System.out.println("-CollapsedDependenciesAnnotation------------------------------------------------");
		System.out.println(sentence.get(CollapsedDependenciesAnnotation.class).toString().trim());

		System.out.println("-CollapsedCCProcessedDependenciesAnnotation-------------------------------------");
		System.out.println(sentence.get(CollapsedCCProcessedDependenciesAnnotation.class).toString().trim());
	}

	System.out.println("-toCoreferenceMap---------------------------------------------------------------");
	assertEquals(5, StanfordNlpWrapper.toCoreferenceMap(annotation).entrySet().size());
	for (Entry<Integer, List<CorefMention>> e : StanfordNlpWrapper.toCoreferenceMap(annotation).entrySet())
		for (CorefMention m : e.getValue())
			System.out.printf("%d\t%s\t%s\t%d\t%d\n", e.getKey(), m.mentionType, m.mentionSpan, m.sentNum, m.headIndex);
}
 
Developer: hakchul77, Project: irnlp_toolkit, Lines: 54, Source: TestStanfordNLP.java


Note: the edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use must follow each project's License. Please do not repost without permission.