本文整理汇总了Java中com.hp.hpl.jena.tdb.TDBFactory类的典型用法代码示例。如果您正苦于以下问题:Java TDBFactory类的具体用法?Java TDBFactory怎么用?Java TDBFactory使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
TDBFactory类属于com.hp.hpl.jena.tdb包,在下文中一共展示了TDBFactory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: copyToTdb
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Best-effort export of the repository contents into the TDB store at {@code tdbdir}.
 * Does nothing when there are no unsaved changes or no TDB directory is configured.
 * Export failures are logged and swallowed rather than rethrown.
 *
 * @throws RepositoryException propagated from repository access outside the export itself
 */
private void copyToTdb() throws RepositoryException {
// Nothing to persist, or nowhere to persist it.
if ( !needsSave || null == tdbdir ) {
return;
}
final Dataset dataset = TDBFactory.createDataset( tdbdir.getAbsolutePath() );
try {
// TdbExporter receives each exported statement and writes it into the dataset.
rc.export( new TdbExporter( dataset ) );
}
catch ( RepositoryException | RDFHandlerException e ) {
// Deliberate best-effort: log and roll back instead of failing the caller.
log.error( "Problem exporting data to TDB", e );
// NOTE(review): abort() is called without a visible begin() here — presumably
// TdbExporter opens the write transaction; confirm, since abort() outside a
// transaction is an error in TDB.
dataset.abort();
}
finally {
// Always release the TDB dataset handle.
dataset.close();
}
}
示例2: loadOntology
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Swing action handler: lets the user pick a TDB directory and opens it as the
 * current ontology dataset. Shows an error dialog when the chooser is cancelled.
 */
private void loadOntology(java.awt.event.ActionEvent evt) {// GEN-FIRST:event_loadOntology
// Release any transaction still open on the previously loaded dataset.
if (dataset != null) {
dataset.end();
}
final JFileChooser chooser = new JFileChooser();
chooser.setCurrentDirectory(new File(System.getProperty("user.dir")));
// TDB stores are directories, not files.
chooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY);
if (chooser.showOpenDialog(this) != JFileChooser.APPROVE_OPTION) {
JOptionPane.showMessageDialog(this, "Loading ontology failed");
return;
}
final File chosen = chooser.getSelectedFile();
dataset = TDBFactory.createDataset(chosen.toString());
ontologyNameField.setText(chosen.getName());
ontoPath = chosen.toPath();
}
示例3: testSomeMethod2
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
@Test
public void testSomeMethod2() throws Exception {
Dataset ds = TDBFactory.createDataset("/scratch/WORK2/jena/dataset2/");
OntModel model1 = ModelFactory.createOntologyModel(OntModelSpec.OWL_MEM, ds.getNamedModel("vijaym1"));
OntModel model2 = ModelFactory.createOntologyModel(OntModelSpec.OWL_MEM, ds.getNamedModel("vijaym2"));
OntClass thing = model1.createClass("http://www.w3.org/2002/07/owl#Thing");
model1.createIndividual("http://example.com/onto1#VijayRaj", thing);
model2.createIndividual("http://example.;cegilovcom/onto2#VijayRaj", thing);
Model m = model1.union(model2);
FileWriter fw = new FileWriter("/scratch/WORK2/jena/testModels/mergetestds.xml");
RDFDataMgr.write(fw, ds, RDFFormat.NQUADS_UTF8);
}
示例4: createPersistentDatasetFromCode
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Builds a persistent, text-indexed Jena dataset: a TDB store joined with a
 * Lucene index over {@code rdfs:label}, both created fresh on disk.
 *
 * @return the combined text dataset
 * @throws IOException if the Lucene index directory cannot be opened
 */
public Dataset createPersistentDatasetFromCode() throws IOException{
log.info("Construct a persistent Jena data set with lucene index using code") ;
TextQuery.init();
// Start from a clean slate: wipe and recreate both storage folders.
deleteFiles(JENA_TDB_TEMP_FOLDER);
deleteFiles(LUCENE_INDEX_TEMP_FOLDER);
JENA_TDB_TEMP_FOLDER.mkdirs();
LUCENE_INDEX_TEMP_FOLDER.mkdirs();
// Persistent TDB store plus a persistent Lucene directory.
final Dataset tdbDataset = TDBFactory.createDataset(JENA_TDB_TEMP_FOLDER.getAbsolutePath());
final Directory luceneDir = FSDirectory.open(LUCENE_INDEX_TEMP_FOLDER);
// Index mapping: entity URI field, indexed text field, keyed on rdfs:label.
final EntityDefinition definition = new EntityDefinition("uri", "text", RDFS.label.asNode());
// Join store and index into a single queryable dataset.
return TextDatasetFactory.createLucene(tdbDataset, luceneDir, definition);
}
示例5: TDBloading
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Opens the Jena TDB store at {@code tdbDirectory} inside a read transaction
 * and keeps its default model; falls back to loading {@code datasetFile} into
 * TDB when no model was obtained.
 */
private void TDBloading(){
logger.info("TDB loading");
// create model from tdb
Dataset dataset = TDBFactory.createDataset(tdbDirectory);
// assume we want the default model, or we could get a named model here
dataset.begin(ReadWrite.READ);
model = dataset.getDefaultModel();
dataset.end() ;
// if model is null load local dataset into jena TDB
// NOTE(review): getDefaultModel() is not documented to return null for an
// empty store, so this branch may never fire — confirm whether
// model.isEmpty() was the intended emptiness check.
if(model == null)
TDBloading(datasetFile);
}
示例6: demoOfUsingADirectory
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Demonstrates read/write transactions against a directory-backed TDB store:
 * read, write, then read again to show the write persisted. Each phase opens
 * and closes its own dataset handle.
 */
static void demoOfUsingADirectory() {
// Make a TDB-backed dataset
String directory = TDB_DIR;
// read something
runReadDemo(directory);
// write something
runWriteDemo(directory);
// read again
runReadDemo(directory);
}

/** Opens the store, runs the read-transaction demo, and always closes the handle. */
private static void runReadDemo(String directory) {
Dataset dataset = TDBFactory.createDataset(directory);
try {
logger.debug("read tx start!!!");
demoOfReadTransaction(dataset);
logger.debug("read tx end!!!");
} finally {
// Original leaked the dataset if the demo threw.
dataset.close();
}
}

/** Opens the store, runs the write-transaction demo, and always closes the handle. */
private static void runWriteDemo(String directory) {
Dataset dataset = TDBFactory.createDataset(directory);
try {
logger.debug("write tx start!!!");
demoOfWriteTransaction(dataset);
logger.debug("write tx end!!!");
} finally {
dataset.close();
}
}
示例7: addModel
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Verifies that DatasetPopulator.addModel copies every statement of the given
 * model into the dataset's default model.
 */
@Test
public void addModel() throws Exception {
final Dataset dataset = TDBFactory.createDataset();
final DatasetPopulator populator = new DatasetPopulator(dataset);
// One anonymous-subject / anonymous-object triple with a named property.
final Model input = ModelFactory.createDefaultModel();
final Property predicate = input.createProperty("urn:example:prop", "foo");
input.add(input.createResource(), predicate, input.createResource());
populator.addModel(input);
// Check inside a read transaction that everything arrived.
dataset.begin(ReadWrite.READ);
try {
assertTrue(dataset.getDefaultModel().containsAll(input));
} finally {
dataset.end();
}
}
示例8: inferMissingPropertyNames
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Loads the property-name inference fixture and checks that the populated
 * dataset contains all statements the expected model prescribes.
 */
@Test
public void inferMissingPropertyNames() throws Exception {
final Dataset dataset = TDBFactory.createDataset();
final DatasetPopulator populator = new DatasetPopulator(dataset);
populator.addModel(loadModel("infer-property-names/data.ttl"));
final Model expected = loadModel("infer-property-names/expected.ttl");
// Read back under a transaction and compare against the expectation.
dataset.begin(ReadWrite.READ);
try {
assertTrue(dataset.getDefaultModel().containsAll(expected));
} finally {
dataset.end();
}
}
示例9: test_rdfcreation_fb
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * End-to-end check: parsing the FB 2012-12-31 filing and running RdfFactory
 * must leave a non-empty default model in the TDB store.
 */
@Test
public void test_rdfcreation_fb() throws SAXException, IOException, ParserConfigurationException, Exception {
Document dataDoc = parser.parse(RdfFactoryTest.class.getResourceAsStream(
"/data/fb-20121231.xml"), -1);
RdfFactory factory = new RdfFactory(new RunConfig(domain));
factory.createRdfs(dataDoc, testTdbDir);
Dataset dataset = TDBFactory.createDataset(testTdbDir);
dataset.begin(ReadWrite.READ);
try {
Model model = dataset.getDefaultModel();
Assert.assertFalse("No RDF was generated. TDB directory: " + testTdbDir, model.isEmpty());
} finally {
// Original skipped end() (and never closed) when the assertion failed,
// leaking the read transaction and the dataset handle.
dataset.end();
dataset.close();
}
}
示例10: test_rdfcreation_msft
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * End-to-end check: parsing the MSFT 2013-06-30 filing and running RdfFactory
 * must leave a non-empty default model in the TDB store.
 */
@Test
public void test_rdfcreation_msft() throws SAXException, IOException, ParserConfigurationException, Exception {
Document dataDoc = parser.parse(RdfFactoryTest.class.getResourceAsStream(
"/data/msft-20130630.xml"), -1);
RdfFactory factory = new RdfFactory(new RunConfig(domain));
factory.createRdfs(dataDoc, testTdbDir);
Dataset dataset = TDBFactory.createDataset(testTdbDir);
dataset.begin(ReadWrite.READ);
try {
Model model = dataset.getDefaultModel();
Assert.assertFalse("No RDF was generated. TDB directory: " + testTdbDir, model.isEmpty());
} finally {
// Original skipped end() (and never closed) when the assertion failed,
// leaking the read transaction and the dataset handle.
dataset.end();
dataset.close();
}
}
示例11: Environment
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Create a (possibly) shared NLP environment. The given data directory
 * must be created (usually from a downloaded zipfile, check the README).
 * Expect many open files and many reads. Network filesystems are known to
 * perform poorly as data directories. Strive to use a local directory if
 * possible, or at least the Lucene indices otherwise.
 *
 * config.properties can be either in the data directory or the working
 * directory. This is to allow sharing (read-only) indices while still
 * allowing separate development configurations.
 */
public Environment() {
// Now do some per-thread setup
db = new Database(this);
rdf = TDBFactory.assembleDataset(
pathMustExist("rdf/jena-lucene.ttl"));
// Lucene indexes have huge overhead so avoid re-instantiating by putting them in the Environment
IndexReader reader;
try {
reader = DirectoryReader.open(new MMapDirectory(Paths.get(getConfOrDie("lucene_index"))));
} catch (IOException e) {
// Chain the cause instead of printStackTrace(): the stack trace stays
// attached to the exception the caller actually observes.
throw new RuntimeException("The candidate-answer Lucene index failed to open.", e);
}
lucene = new IndexSearcher(reader);
//lucene.setSimilarity(new BM25Similarity());
}
示例12: readSemTrig
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Scans every .trig file under {@code trigFolder}, and for statements in the
 * instance graph feeds them to updateTokenMap to build a token-to-id map for
 * the given event identifiers.
 *
 * @param eventIdentifierArray event identifiers to match against statements
 * @param trigFolder root folder searched recursively for .trig files
 * @return map populated by updateTokenMap from matching statements
 */
static public HashMap<String, String> readSemTrig (ArrayList<String> eventIdentifierArray,String trigFolder) {
HashMap<String, String> tokenIdMap = new HashMap<String, String>();
ArrayList<File> trigFiles = Util.makeRecursiveFileList(new File(trigFolder), ".trig");
for (File file : trigFiles) {
// Load directly: the original first built an empty in-memory dataset via
// TDBFactory.createDataset() and immediately discarded it by reassignment.
Dataset dataset = RDFDataMgr.loadDataset(file.getAbsolutePath());
try {
Iterator<String> it = dataset.listNames();
while (it.hasNext()) {
String name = it.next();
// Only the designated instance graph contributes statements.
if (name.equals(instanceGraph)) {
StmtIterator siter = dataset.getNamedModel(name).listStatements();
while (siter.hasNext()) {
updateTokenMap(eventIdentifierArray, tokenIdMap, siter.nextStatement());
}
}
}
} finally {
// Original skipped close() when iteration threw.
dataset.close();
}
}
return tokenIdMap;
}
示例13: save
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Method to save the nanopub as N-Quads.
 * @param f Receives the target file path.
 * @throws Exception when no quads exist yet (createNanoPub() not called or
 *         produced nothing), or when writing fails.
 */
public void save(String f) throws Exception {
this.quads = this.getAllQuads();
if (quads == null) {
throw new Exception(
"Quad list is null. Do you call createNanoPub() first?");
}
if (quads.size() == 0) {
throw new Exception("Quad list is empty.");
}
// Copy every quad into a fresh in-memory dataset graph.
Dataset ds = TDBFactory.createDataset();
DatasetGraph dsg = ds.asDatasetGraph();
for (int i = 0; i < quads.size(); i++) {
dsg.add(quads.get(i));
}
// try-with-resources: the original never closed the stream (resource leak).
try (FileOutputStream out = new FileOutputStream(new File(f))) {
RDFDataMgr.write(out, dsg, RDFFormat.NQUADS);
}
}
示例14: mainNodeId
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Loads the query and data into an in-memory TDB dataset, then runs the query
 * once under the Quack predicate-object executor with explain logging on.
 * The commented-out sections are alternative executor configurations kept for
 * benchmarking experiments.
 *
 * @param datafile RDF data file read into the in-memory dataset
 * @param queryFile file containing the SPARQL query to execute
 */
public static void mainNodeId(String datafile, String queryFile) {
Quack.init() ;
Query query = QueryFactory.read(queryFile) ;
// In-memory TDB dataset (no directory argument).
Dataset dsMem = TDBFactory.createDataset() ;
RDFDataMgr.read(dsMem, datafile) ;
// // TDB current execution.
// Quack.setOpExecutorFactory(dsMem, OpExecutorQuackTDB.factoryTDB1) ;
// doOne("TDB", dsMem, query) ;
//ARQ.setExecutionLogging(InfoLevel.ALL) ;
Quack.explain(true) ;
Quack.setOpExecutorFactory(dsMem, OpExecutorQuackTDB.factoryPredicateObject) ;
doOne("Quack/PredObj", dsMem, query) ;
// Flush so the explain output appears before any later stderr logging.
System.out.flush() ;
// Quack.setOpExecutorFactory(dsMem, OpExecutorQuackTDB.factorySubstitute) ;
// doOne("Quack/Plain", dsMem, query) ;
// System.out.flush() ;
// try {
// StepPredicateObjectList.UseNaiveExecution = true ;
// doOne("QuackPredObj[simple]", dsMem, query) ;
// } finally { StepPredicateObjectList.UseNaiveExecution = false ; }
}
示例15: perspectiveRelationsToTrig
import com.hp.hpl.jena.tdb.TDBFactory; //导入依赖的package包/类
/**
 * Serializes the given perspective objects as TriG to the given file path.
 * Builds an in-memory dataset, applies the GAF prefixes to its default model,
 * attaches the perspective objects, and pretty-prints the result.
 *
 * @param pathToTrigFile output file; also used as the attribution base URI prefix
 * @param perspectiveObjects perspective objects to serialize
 */
public static void perspectiveRelationsToTrig (String pathToTrigFile, ArrayList<PerspectiveObject> perspectiveObjects) {
// try-with-resources: the original closed fos inside the try block, so the
// stream leaked whenever any earlier call threw.
try (OutputStream fos = new FileOutputStream(pathToTrigFile)) {
Dataset ds = TDBFactory.createDataset();
Model defaultModel = ds.getDefaultModel();
ResourcesUri.prefixModelGaf(defaultModel);
String attrBase = pathToTrigFile+"_";
JenaSerialization.addJenaPerspectiveObjects(attrBase, ResourcesUri.grasp, "wasAttributedTo", perspectiveObjects, 1);
RDFDataMgr.write(fos, ds, RDFFormat.TRIG_PRETTY);
} catch (IOException e) {
e.printStackTrace();
}
}