This article collects typical usage examples of the Java method edu.stanford.nlp.parser.lexparser.LexicalizedParser.loadModel. If you have been wondering what LexicalizedParser.loadModel is for, how to call it, or what working invocations look like, the curated code examples below may help. You can also browse the enclosing class edu.stanford.nlp.parser.lexparser.LexicalizedParser for further usage examples.

Shown below are 15 code examples of LexicalizedParser.loadModel, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
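Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: load the serialized English PCFG grammar from the Stanford models jar, tokenize a sentence with the PTB tokenizer, and parse it into a phrase-structure tree. The class name LoadModelDemo and the sample sentence are illustrative additions, not taken from any example below.

import java.io.StringReader;
import java.util.List;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.CoreLabelTokenFactory;
import edu.stanford.nlp.process.PTBTokenizer;
import edu.stanford.nlp.process.TokenizerFactory;
import edu.stanford.nlp.trees.Tree;

public class LoadModelDemo {
    public static void main(String[] args) {
        // load the model from the classpath (requires the stanford-corenlp models jar)
        LexicalizedParser lp =
                LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
        TokenizerFactory<CoreLabel> tokenizerFactory =
                PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
        List<CoreLabel> words =
                tokenizerFactory.getTokenizer(new StringReader("This is a test.")).tokenize();
        Tree tree = lp.apply(words); // parse the token list into a phrase-structure tree
        tree.pennPrint();            // print the parse in Penn Treebank bracket format
    }
}
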
Example 1: writeImage

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public static void writeImage(String sentence, String outFile, int scale) throws Exception {
    LexicalizedParser lp = null;
    try {
        lp = LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    } catch (Exception e) {
        System.err.println("Could not load file englishPCFG.ser.gz. Try placing this file in the same directory as Dependencee.jar");
        return;
    }
    lp.setOptionFlags(new String[]{"-maxLength", "500", "-retainTmpSubcategories"});
    TokenizerFactory<CoreLabel> tokenizerFactory =
            PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(sentence)).tokenize();
    Tree tree = lp.apply(wordList);
    writeImage(tree, outFile, scale);
}

Example 2: testWriteImage

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

/**
 * Test of writeImage method, of class Main.
 */
@Test
public void testWriteImage() throws Exception {
    String text = "A quick brown fox jumped over the lazy dog.";
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    LexicalizedParser lp = LexicalizedParser.loadModel();
    lp.setOptionFlags(new String[]{"-maxLength", "500", "-retainTmpSubcategories"});
    TokenizerFactory<CoreLabel> tokenizerFactory =
            PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(text)).tokenize();
    Tree tree = lp.apply(wordList);
    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    Collection<TypedDependency> tdl = gs.typedDependenciesCollapsed();
    Main.writeImage(tdl, "image.png", 3);
    assert (new File("image.png").exists());
}

Example 3: initLexResources

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

private void initLexResources() {
    try {
        options = new Options();
        options.testOptions.verbose = true;
        // Parser
        parser = LexicalizedParser.loadModel(_serializedGrammar);
        //parser = new LexicalizedParser(_serializedGrammar, options);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    // Dependency tree info
    TreebankLanguagePack tlp = new PennTreebankLanguagePack();
    gsf = tlp.grammaticalStructureFactory();
}

Example 4: main

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public static void main(String[] args) throws IOException {
    LexicalizedParser parser = LexicalizedParser.loadModel();
    // fileDir and s (used below) are String fields defined elsewhere in the original class
    File[] files = new File(fileDir).listFiles();
    int num = 0;
    double score = 0.0;
    for (File f : files) {
        if (f.isDirectory())
            continue;
        BufferedReader br = new BufferedReader(new FileReader(f.getAbsolutePath()));
        String line = "";
        while ((line = br.readLine()) != null) {
            // StringTokenizer takes a set of delimiter characters, not a regex,
            // so split on '.' rather than the regex-style "\\."
            StringTokenizer st = new StringTokenizer(line, ".");
            while (st.hasMoreTokens()) {
                score = score + parser.parse(st.nextToken()).score();
                num++;
            }
        }
        System.out.println(score + " for " + f.getName());
        br.close();
    }
    System.out.println(score + "/" + num);
    System.out.println(parser.parse(s).score());
}

Example 5: main

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

/**
 * The main method demonstrates the easiest way to load a parser. Simply
 * call loadModel and specify the path of a serialized grammar model, which
 * can be a file, a resource on the classpath, or even a URL. For example,
 * this demonstrates loading from the models jar file, which you therefore
 * need to include in the classpath for ParserDemo to work.
 */
public static void main(String[] args) {
    LexicalizedParser lp =
            LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    if (args.length > 0) {
        demoDP(lp, args[0]);
    } else {
        demoAPI(lp);
    }
}

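A variant worth noting (used in Examples 7 and 14 below): loadModel also accepts option flags as trailing varargs, so a separate setOptionFlags call can be folded into loading. A minimal sketch, with the path and flags taken from the examples on this page:

LexicalizedParser lp = LexicalizedParser.loadModel(
        "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
        "-maxLength", "80", "-retainTmpSubcategories"); // flags are applied at load time
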
Example 6: main

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public static void main(String[] args) {
    LexicalizedParser lp =
            LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    if (args.length > 0) {
        String contentfilename = args[0];
        String authorfilename = args[1];
        String dependencyfilename = args[2];
        DependencyParser dependencyParser = new DependencyParser();
        ArrayList<ArrayList<String>> ret = dependencyParser
                .getDependencyByLine(lp, contentfilename, authorfilename);
        try {
            BufferedWriter bw = new BufferedWriter(new FileWriter(dependencyfilename));
            for (ArrayList<String> arr : ret) {
                bw.write(arr.get(0) + "\t" + arr.get(1) + "\t" + arr.get(2)
                        + "\t" + arr.get(3) + "\t" + arr.get(4) + "\t"
                        + arr.get(5) + "\t" + arr.get(6) + "\n");
            }
            bw.flush();
            bw.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    } else {
        System.out.println("java -jar GenerateDependency.jar contentfilename authorfilename dependencyfilename");
    }
}

Example 7: init

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public void init() {
    this.lp = LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
            "-maxLength", "80", "-retainTmpSubcategories");
    this.tlp = new PennTreebankLanguagePack();
    this.gsf = this.tlp.grammaticalStructureFactory();
    //this.parsedTree = new ArrayList<DependencyTree>();
    //this.trees = new ArrayList<Tree>();
}

Example 8: loadModel

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

private static LexicalizedParser loadModel(String parserLoc,
                                           boolean verbose,
                                           String[] flags) {
    if (verbose) {
        System.err.println("Loading Parser Model [" + parserLoc + "] ...");
    }
    LexicalizedParser result = LexicalizedParser.loadModel(parserLoc, flags);
    // lp.setOptionFlags(new String[]{"-outputFormat", "penn,typedDependenciesCollapsed", "-retainTmpSubcategories"});
    // treePrint = lp.getTreePrint();
    return result;
}

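A hypothetical call site for the helper above (the model path and flag are taken from other examples on this page, not from the original source):

LexicalizedParser parser = loadModel(
        "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
        true,                                     // print the loading message to stderr
        new String[]{"-retainTmpSubcategories"}); // forwarded to LexicalizedParser.loadModel
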
Example 9: getGraph

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public static Graph getGraph(String sentence) throws Exception {
    LexicalizedParser lp = LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    lp.setOptionFlags(new String[]{"-maxLength", "500", "-retainTmpSubcategories"});
    TokenizerFactory<CoreLabel> tokenizerFactory =
            PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    List<CoreLabel> wordList = tokenizerFactory.getTokenizer(new StringReader(sentence)).tokenize();
    Tree tree = lp.apply(wordList);
    // gsf is a GrammaticalStructureFactory field defined elsewhere in the original class
    GrammaticalStructure gs = gsf.newGrammaticalStructure(tree);
    Collection<TypedDependency> tdl = gs.typedDependencies();
    return getGraph(tree, tdl);
}

Example 10: loadModel

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

private void loadModel(String modelFile) {
    // check for null before calling equals() to avoid a NullPointerException
    if (modelFile == null || modelFile.equals("")) {
        parser = LexicalizedParser.loadModel(DefaultPaths.DEFAULT_PARSER_MODEL, new String[]{});
    } else {
        parser = LexicalizedParser.loadModel(modelFile, new String[]{});
    }
}

Example 11: run

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

@Override
public void run() {
    try {
        if (zipFilename != null) {
            parser = LexicalizedParser.loadModelFromZip(zipFilename, filename);
        } else {
            parser = LexicalizedParser.loadModel(filename);
        }
    } catch (Exception ex) {
        JOptionPane.showMessageDialog(ParserPanel.this, "Error loading parser: " + filename, null, JOptionPane.ERROR_MESSAGE);
        setStatus("Error loading parser");
        parser = null;
    } catch (OutOfMemoryError e) {
        JOptionPane.showMessageDialog(ParserPanel.this, "Could not load parser. Out of memory.", null, JOptionPane.ERROR_MESSAGE);
        setStatus("Error loading parser");
        parser = null;
    }
    stopProgressMonitor();
    if (parser != null) {
        setStatus("Loaded parser.");
        parserFileLabel.setText("Parser: " + filename);
        parseButton.setEnabled(true);
        parseNextButton.setEnabled(true);
        saveOutputButton.setEnabled(true);
        tlp = parser.getOp().langpack();
        encoding = tlp.getEncoding();
    }
}

Example 12: main

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public static void main(String[] args) {
    LexicalizedParser lp = LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
    if (args.length > 0) {
        demoDP(lp, args[0]);
    } else {
        demoAPI(lp);
    }
}

Example 13: createParser

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public static LexicalizedParser createParser(String grammarPath) {
    return LexicalizedParser.loadModel(grammarPath);
}

Example 14: clusterSentencesByEvents

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

/**
 * Cluster the sentences in the texts according to the event clustering results.
 *
 * @throws IOException
 */
public void clusterSentencesByEvents() throws IOException {
    final File clusterResultDir = new File(this.resultDir);
    // collect all event clustering result files (.read)
    final String[] filenames_cluster_read = clusterResultDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File file, String name) {
            return name.endsWith(".read");
        }
    });
    // load the POS tagging model
    OpenNlpPOSTagger.getInstance(this.moduleFilePath);
    // load the dependency parsing model
    final String grammar = "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz";
    final String[] options = { "-maxLength", "80", "-retainTmpSubcategories" };
    final LexicalizedParser lp = LexicalizedParser.loadModel(grammar, options);
    //TreebankLanguagePack tlp = lp.getOp().langpack();
    //GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    final ExecutorService executorService = Executors.newFixedThreadPool(this.threadNum);
    final List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    for (final String filename_cluster_read : filenames_cluster_read) {
        // add the task to the task list
        tasks.add(new SentenceExtractThread(
                this.resultDir, filename_cluster_read, this.extractedSentencesSaveDir, this.textDir, lp, this.dictPath));
    }
    if (tasks.size() > 0) {
        try {
            // run the whole task group; the main thread blocks until every task has finished
            final List<Future<Boolean>> futures = executorService.invokeAll(tasks);
            executorService.shutdown();
            if (futures != null) {
                for (final Future<Boolean> future : futures) {
                    future.get();
                }
            }
        } catch (InterruptedException | ExecutionException e) {
            this.log.error("Failed to execute the task group!", e);
            //e.printStackTrace();
        }
    }
}

Example 15: main

import edu.stanford.nlp.parser.lexparser.LexicalizedParser; // import the package/class the method depends on

public static void main(String[] args) throws IOException {
    final File clusterResultDir = new File("src/tmp");
    // collect all event clustering result files (.read)
    final String[] filenames_cluster_read = clusterResultDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File file, String name) {
            return name.endsWith(".read");
        }
    });
    // load the POS tagging model
    OpenNlpPOSTagger.getInstance("src/en-pos-maxent.bin");
    // load the dependency parsing model
    final String grammar = "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz";
    final String[] options = { "-maxLength", "80", "-retainTmpSubcategories" };
    final LexicalizedParser lp = LexicalizedParser.loadModel(grammar, options);
    //TreebankLanguagePack tlp = lp.getOp().langpack();
    //GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    final ExecutorService executorService = Executors.newFixedThreadPool(2);
    final List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    for (final String filename_cluster_read : filenames_cluster_read) {
        // add the task to the task list
        tasks.add(new SentenceExtractThread(
                "src/tmp", filename_cluster_read, "src/tmp/extract_sent", "src/tmp/text_dir", lp, "D:/WordNet/2.1/dict"));
    }
    if (tasks.size() > 0) {
        try {
            // run the whole task group; the main thread blocks until every task has finished
            final List<Future<Boolean>> futures = executorService.invokeAll(tasks);
            executorService.shutdown();
            if (futures != null) {
                for (final Future<Boolean> future : futures) {
                    future.get();
                }
            }
        } catch (InterruptedException | ExecutionException e) {
            //log.error("Failed to execute the task group!", e);
            e.printStackTrace();
        }
    }
}