This article collects typical usage examples of the Java method com.hp.hpl.jena.rdf.model.Model.listStatements. If you are unsure what Model.listStatements does, how to use it, or want to see concrete examples, the curated code samples below may help. You can also read further about the enclosing class, com.hp.hpl.jena.rdf.model.Model.
The following shows 14 code examples of Model.listStatements, ordered by popularity by default.
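Before the project examples, here is a minimal, self-contained sketch of the two most common forms of listStatements: the no-argument call that iterates over every statement in a Model, and the (subject, predicate, object) selector form where null acts as a wildcard. The example URIs are placeholders invented for illustration and are not taken from any of the projects below.

import com.hp.hpl.jena.rdf.model.*;
import com.hp.hpl.jena.vocabulary.RDF;

public class ListStatementsSketch {
    public static void main(String[] args) {
        Model model = ModelFactory.createDefaultModel();
        Resource subject = model.createResource("http://example.org/thing");       // placeholder URI
        model.add(subject, RDF.type, model.createResource("http://example.org/Thing"));

        // Form 1: iterate over every statement in the model.
        StmtIterator all = model.listStatements();
        while (all.hasNext()) {
            System.out.println(all.nextStatement());
        }

        // Form 2: match a (subject, predicate, object) pattern; null is a wildcard.
        StmtIterator typed = model.listStatements(null, RDF.type, (RDFNode) null);
        while (typed.hasNext()) {
            System.out.println(typed.nextStatement().getSubject());
        }
    }
}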
Example 1: getUndefinedResources
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
public Collection<Resource> getUndefinedResources(Model model) {
    Set<Resource> result = new HashSet<Resource>();
    StmtIterator it = model.listStatements();
    while (it.hasNext()) {
        Statement stmt = it.nextStatement();
        if (stmt.getSubject().isURIResource()
                && stmt.getSubject().getURI().startsWith(namespace)
                && !resources.contains(stmt.getSubject())) {
            result.add(stmt.getSubject());
        }
        if (stmt.getPredicate().equals(RDF.type)) continue;
        if (stmt.getObject().isURIResource()
                && stmt.getResource().getURI().startsWith(namespace)
                && !resources.contains(stmt.getResource())) {
            result.add(stmt.getResource());
        }
    }
    return result;
}
Example 2: runQueryOnInstance
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
/**
 * Runs a given Jena Query on a given instance and adds the inferred triples
 * to a given Model.
 * @param queryWrapper the wrapper of the CONSTRUCT query to execute
 * @param queryModel the query Model
 * @param newTriples the Model to write the triples to
 * @param instance the instance to run the inferences on
 * @param checkContains true to only call add if a Triple wasn't there yet
 * @return true if changes were done (only meaningful if checkContains == true)
 */
public static boolean runQueryOnInstance(QueryWrapper queryWrapper, Model queryModel, Model newTriples, Resource instance, boolean checkContains) {
    boolean changed = false;
    QueryExecution qexec = ARQFactory.get().createQueryExecution(queryWrapper.getQuery(), queryModel);
    QuerySolutionMap bindings = new QuerySolutionMap();
    bindings.add(SPIN.THIS_VAR_NAME, instance);
    Map<String,RDFNode> initialBindings = queryWrapper.getTemplateBinding();
    if (initialBindings != null) {
        for (String varName : initialBindings.keySet()) {
            RDFNode value = initialBindings.get(varName);
            bindings.add(varName, value);
        }
    }
    qexec.setInitialBinding(bindings);
    Model cm = qexec.execConstruct();
    StmtIterator cit = cm.listStatements();
    while (cit.hasNext()) {
        Statement s = cit.nextStatement();
        if (!checkContains || !queryModel.contains(s)) {
            changed = true;
            newTriples.add(s);
        }
    }
    return changed;
}
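The same pattern can be reproduced with plain ARQ when the SPIN wrapper classes (QueryWrapper, ARQFactory, SPIN) are not on the classpath. The sketch below is a simplified assumption, not the SPIN library's own API: the query string, the variable name this, and the method name are made up for illustration, and template bindings are omitted.

import com.hp.hpl.jena.query.*;
import com.hp.hpl.jena.rdf.model.*;

public class ConstructOnInstanceSketch {
    // Runs a CONSTRUCT query with ?this pre-bound to the given instance and
    // copies any result triples not already present in queryModel into newTriples.
    static boolean run(String constructQuery, Model queryModel, Model newTriples, Resource instance) {
        boolean changed = false;
        QueryExecution qexec = QueryExecutionFactory.create(constructQuery, queryModel);
        QuerySolutionMap bindings = new QuerySolutionMap();
        bindings.add("this", instance);              // mirrors SPIN's ?this convention
        qexec.setInitialBinding(bindings);
        StmtIterator it = qexec.execConstruct().listStatements();
        while (it.hasNext()) {
            Statement s = it.nextStatement();
            if (!queryModel.contains(s)) {
                newTriples.add(s);
                changed = true;
            }
        }
        qexec.close();
        return changed;
    }
}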
Example 3: bulkUpdate
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
/**
 * Method called to chunk the triples into N-sized batches and post to VIVO.
 * This is designed to work around / handle errors when posting sets of triples
 * over 10,000 to the API.
 *
 * @param namedGraph String with named graph.
 * @param changeModel Jena model with set of changes to sync to store.
 * @param changeType Either add or remove.
 * @return Boolean true if update was successful.
 * @throws IOException
 */
private Boolean bulkUpdate(String namedGraph, Model changeModel, String changeType) throws IOException {
    // Temporary model to hold the current batch.
    Model tmpModel = ModelFactory.createDefaultModel();
    Integer bSize = Integer.parseInt(batchSize);
    // Use an integer to count triples rather than calling size on the model
    // during each loop.
    Integer size = 0;
    StmtIterator iter = changeModel.listStatements();
    while (iter.hasNext()) {
        Statement stmt = iter.nextStatement(); // get next statement
        tmpModel.add(stmt);
        size++;
        if (size >= bSize) {
            // Submit
            log.info("Submitting " + size + " triples to " + namedGraph);
            submitBatch(tmpModel, namedGraph, changeType);
            // Reset the tmp model.
            tmpModel.removeAll();
            // Reset the counter.
            size = 0;
        }
    }
    log.info("model size:" + tmpModel.size());
    // Submit the remaining statements, if any.
    if (tmpModel.size() > 0) {
        submitBatch(tmpModel, namedGraph, changeType);
    }
    return true;
}
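submitBatch is internal to that project and its implementation is not shown here. Purely as an assumption-laden sketch of what such a helper might do, the snippet below serializes a batch Model as N-Triples and wraps it in a SPARQL 1.1 update string targeting the named graph; the class and method names are invented for illustration, and the actual HTTP call to the VIVO API is deliberately omitted.

import java.io.StringWriter;
import com.hp.hpl.jena.rdf.model.Model;

public class SparqlBatchSketch {
    // Builds a SPARQL 1.1 update from the batch model: "add" maps to INSERT DATA,
    // anything else to DELETE DATA. Posting the string to the store is left out.
    static String buildUpdate(Model batch, String namedGraph, String changeType) {
        StringWriter triples = new StringWriter();
        batch.write(triples, "N-TRIPLE");
        String verb = "add".equals(changeType) ? "INSERT DATA" : "DELETE DATA";
        return verb + " { GRAPH <" + namedGraph + "> {\n" + triples + "} }";
    }
}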
Example 4: getUndefinedClasses
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
public Collection<Resource> getUndefinedClasses(Model model) {
    Set<Resource> result = new HashSet<Resource>();
    StmtIterator it = model.listStatements(null, RDF.type, (RDFNode) null);
    while (it.hasNext()) {
        Statement stmt = it.nextStatement();
        if (stmt.getObject().isURIResource()
                && stmt.getResource().getURI().startsWith(namespace)
                && !classes.contains(stmt.getObject())) {
            result.add(stmt.getResource());
        }
    }
    return result;
}
Example 5: getUndefinedProperties
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
public Collection<Property> getUndefinedProperties(Model model) {
    Set<Property> result = new HashSet<Property>();
    StmtIterator it = model.listStatements();
    while (it.hasNext()) {
        Statement stmt = it.nextStatement();
        if (stmt.getPredicate().getURI().startsWith(namespace)
                && !properties.contains(stmt.getPredicate())) {
            result.add(stmt.getPredicate());
        }
    }
    return result;
}
Example 6: usesVocabulary
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
public boolean usesVocabulary(Model model) {
    StmtIterator it = model.listStatements();
    while (it.hasNext()) {
        Statement stmt = it.nextStatement();
        if (stmt.getPredicate().getURI().startsWith(namespace)) {
            return true;
        }
        // Guard against blank-node type objects before dereferencing the URI.
        if (stmt.getPredicate().equals(RDF.type)
                && stmt.getObject().isURIResource()
                && stmt.getResource().getURI().startsWith(namespace)) {
            return true;
        }
    }
    return false;
}
Example 7: triplesInvolvingVocabulary
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
public Model triplesInvolvingVocabulary(Model model) {
    Model result = ModelFactory.createDefaultModel();
    // getNsPrefixMap() returns a copy, so copy the prefixes with setNsPrefixes instead.
    result.setNsPrefixes(model.getNsPrefixMap());
    StmtIterator it = model.listStatements();
    while (it.hasNext()) {
        Statement stmt = it.next();
        if (properties.contains(stmt.getPredicate())
                || (stmt.getPredicate().equals(RDF.type) && classes.contains(stmt.getObject()))) {
            result.add(stmt);
        }
    }
    return result;
}
Example 8: collectProperties
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
private Collection<Property> collectProperties(Model m, Resource r) {
    Collection<Property> result = new TreeSet<Property>();
    StmtIterator it = r.listProperties();
    while (it.hasNext()) {
        result.add(new Property(it.nextStatement(), false));
    }
    it = m.listStatements(null, null, r);
    while (it.hasNext()) {
        result.add(new Property(it.nextStatement(), true));
    }
    return result;
}
Example 9: main
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
public static void main(String[] args) throws IOException, WktParseException {
    InputStream in = ConvexHullTest.class.getClassLoader().getResourceAsStream("lifeline_geometry.ttl");
    Model model = ModelFactory.createDefaultModel();
    model.read(in, null, "TTL");
    StmtIterator stmts = model.listStatements(null, GEOM.hasGeometry, (RDFNode) null);
    while (stmts.hasNext()) {
        Statement stmt = stmts.next();
        System.out.print(stmt.getSubject().getLocalName() + " : ");
        String ewkt = stmt.getObject().asResource().getProperty(GEOM.asBody).getObject().asLiteral().getString();
        EwktReader reader = new EwktReader(ewkt);
        Geometry geometry = reader.readGeometry();
        if (geometry.geometryTypeId() == GeometryType.TYPE_POLYHEDRALSURFACE) {
            PolyhedralSurface ps = new ConvexHull().buildConvexHull((PolyhedralSurface) geometry);
            Polyhedron ph = new Polyhedron(ps);
            MVBB obb = new MVBB(ph);
            obb.computeMinBB();
            Box box = obb.getBox();
            System.out.println(box.getVolume() / 1000000000);
            System.out.print(box.getOrientation().getN1());
            System.out.print(box.getOrientation().getN2());
            System.out.println(box.getOrientation().getN3());
            // System.out.println(box.getMin());
            // System.out.println(box.getMax());
        }
    }
}
Example 10: collapseAnnotations
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
private List<Statement> collapseAnnotations(Property property, Model model) {
    List<Statement> stmts = new ArrayList<Statement>();
    // Both map levels are keyed by language tag: one statement is kept per language.
    Map<String, Map<String,Statement>> literalMap = new HashMap<String, Map<String,Statement>>();
    StmtIterator stmtIt = model.listStatements(null, property, (RDFNode) null);
    stmtIt.forEachRemaining(stmt -> {
        RDFNode obj = stmt.getObject();
        if (obj.isLiteral()) {
            Literal lit = obj.asLiteral();
            String lang = lit.getLanguage();
            if (lang == null) {
                lang = "null";
            }
            Map<String, Statement> stmtMap = literalMap.get(lang);
            if (stmtMap == null) {
                stmtMap = new HashMap<String, Statement>();
                literalMap.put(lang, stmtMap);
            }
            Statement statement = stmtMap.get(lang);
            boolean insert = false;
            if (statement == null) insert = true;
            else {
                String statementLF = ((Literal) statement.getObject()).getLexicalForm();
                String lexicalForm = lit.getLexicalForm();
                if (lexicalForm.length() > statementLF.length()) insert = true;
            }
            // Store the current statement (not the previously stored one) when it wins.
            if (insert) stmtMap.put(lang, stmt);
        }
    });
    model.removeAll(null, property, (RDFNode) null);
    literalMap.forEach((lang, stmtMap) -> {
        stmtMap.forEach((language, stmt) -> {
            model.add(stmt);
        });
    });
    return stmts;
}
Example 11: keepMaxLengthAnnotationsOnly
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
private List<Statement> keepMaxLengthAnnotationsOnly(Property property, Model model) {
    List<Statement> stmts = new ArrayList<Statement>();
    Map<Resource, Map<String,Statement>> literalMap = new HashMap<Resource, Map<String,Statement>>();
    StmtIterator stmtIt = model.listStatements(null, property, (RDFNode) null);
    stmtIt.forEachRemaining(stmt -> {
        Resource subj = stmt.getSubject();
        RDFNode obj = stmt.getObject();
        if (obj.isLiteral()) {
            Literal lit = obj.asLiteral();
            String lang = lit.getLanguage();
            if (lang == null) {
                lang = "null";
            }
            Map<String, Statement> stmtMap = literalMap.get(subj);
            if (stmtMap == null) {
                stmtMap = new HashMap<String, Statement>();
                literalMap.put(subj, stmtMap);
            }
            Statement statement = stmtMap.get(lang);
            boolean insert = false;
            if (statement == null) insert = true;
            else {
                String statementLF = ((Literal) statement.getObject()).getLexicalForm();
                String lexicalForm = lit.getLexicalForm();
                if (lexicalForm.length() > statementLF.length()) insert = true;
            }
            if (insert) stmtMap.put(lang, stmt);
        }
    });
    model.removeAll(null, property, (RDFNode) null);
    literalMap.forEach((subject, stmtMap) -> {
        stmtMap.forEach((lang, stmt) -> {
            model.add(stmt);
        });
    });
    return stmts;
}
Example 12: getSynonyms
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
private List<Statement> getSynonyms(Property property, Model model) {
    List<Statement> stmts = new ArrayList<Statement>();
    try {
        Site wnSite = siteManager.getSite(Constants.wordNetSiteID);
        FieldQuery fieldQuery = DefaultQueryFactory.getInstance().createFieldQuery();
        fieldQuery.addSelectedField(property.getURI());
        fieldQuery.setOffset(0);
        fieldQuery.setLimit(3);
        StmtIterator labelIt = model.listStatements(null, property, (RDFNode) null);
        labelIt.forEachRemaining(stmt -> {
            Resource subject = stmt.getSubject();
            RDFNode object = stmt.getObject();
            if (object.isLiteral()) {
                String label = ((Literal) object).getLexicalForm();
                String lang = ((Literal) object).getLanguage();
                if (lang == null || lang.equals("it")) {
                    label = lemmatize(label);
                    Constraint similarityConstraint = new SimilarityConstraint(label, null);
                    fieldQuery.setConstraint(property.getURI(), similarityConstraint);
                    /* Add label as synonym.
                     * But first we lemmatise the term in order to increase the recall.
                     */
                    Literal objLiteral = null;
                    if (lang != null) objLiteral = ResourceFactory.createLangLiteral(label, lang);
                    else objLiteral = ResourceFactory.createPlainLiteral(label);
                    stmts.add(new StatementImpl(subject, synonym, objLiteral));
                    final String lab = label;
                    QueryResultList<Representation> result = wnSite.find(fieldQuery);
                    result.forEach(representation -> {
                        float[] score = new float[]{0};
                        Iterator<Object> objIt = representation.get(Constants.entityHubScore);
                        if (objIt != null) {
                            objIt.forEachRemaining(obj -> {
                                score[0] = (float) obj;
                            });
                        }
                        if (score[0] > Constants.wordnetSynonymityConfidence) {
                            objIt = representation.get(property.getURI());
                            if (objIt != null) {
                                objIt.forEachRemaining(obj -> {
                                    Text text = (Text) obj;
                                    String value = text.getText();
                                    String language = text.getLanguage();
                                    // Add labels of synonyms
                                    log.debug("Syn {} ({}) - {} : {}", subject, lab, value, score[0]);
                                    Literal synLabel = ResourceFactory.createLangLiteral(lemmatize(value), language);
                                    stmts.add(new StatementImpl(subject, synonym, synLabel));
                                });
                            }
                        }
                    });
                }
            }
        });
    } catch (Exception e) {
        log.error(e.getMessage(), e);
    }
    return stmts;
}
Example 13: copyFromTdb
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
private void copyFromTdb( Dataset dataset ) throws RepositoryException {
    ValueFactory vf = rc.getValueFactory();
    if ( dataset.supportsTransactions() ) {
        dataset.begin( ReadWrite.READ );
    }
    // Get model inside the transaction
    Model model = dataset.getDefaultModel();
    StmtIterator si = model.listStatements();
    try {
        rc.begin();
        while ( si.hasNext() ) {
            Statement stmt = si.next();
            com.hp.hpl.jena.rdf.model.Resource rsr = stmt.getSubject();
            Property pred = stmt.getPredicate();
            RDFNode val = stmt.getObject();
            Node valnode = val.asNode();
            Resource sub;
            try {
                // Use the subject's own blank node label, not the object's.
                sub = ( rsr.isAnon()
                        ? vf.createBNode( rsr.asNode().getBlankNodeLabel() )
                        : vf.createURI( rsr.toString() ) );
            }
            catch ( UnsupportedOperationException uoo ) {
                log.warn( uoo, uoo );
                continue;
            }
            URI pred2 = vf.createURI( pred.toString() );
            Value val2;
            if ( val.isLiteral() ) {
                Literal lit = val.asLiteral();
                String dtstr = lit.getDatatypeURI();
                URI dt = ( null == dtstr ? null : vf.createURI( dtstr ) );
                String langstr = lit.getLanguage();
                // Copy the lexical form only; toString() would append language/datatype suffixes.
                if ( null == dt ) {
                    if ( langstr.isEmpty() ) {
                        val2 = vf.createLiteral( lit.getLexicalForm() );
                    }
                    else {
                        val2 = vf.createLiteral( lit.getLexicalForm(), langstr );
                    }
                }
                else {
                    val2 = vf.createLiteral( lit.getLexicalForm(), dt );
                }
            }
            else {
                if ( val.isAnon() ) {
                    val2 = vf.createBNode( valnode.getBlankNodeLabel() );
                }
                else {
                    val2 = vf.createURI( val.toString() );
                }
            }
            rc.add( sub, pred2, val2 );
        }
        rc.commit();
    }
    catch ( RepositoryException re ) {
        rc.rollback();
        throw re;
    }
    finally {
        if ( dataset.supportsTransactions() ) {
            dataset.end();
        }
    }
}
Example 14: extract
import com.hp.hpl.jena.rdf.model.Model; // import the package/class the method depends on
public static void extract(Model m, String name) {
    Property groups = m.createProperty("http://www.buildingsmart-tech.org/ifc/IFC4/final#groups");
    Property isGroupedBy = m.createProperty("http://www.buildingsmart-tech.org/ifc/IFC4/final#isGroupedBy");
    Resource SingleValue = m.createResource("http://www.buildingsmart-tech.org/ifc/IFC4/final#P_SINGLEVALUE");
    Model vocabulary = ModelFactory.createDefaultModel();
    StmtIterator stmt1 = m.listStatements(null, RDFS.comment, (String) null);
    StmtIterator stmt2 = m.listStatements(null, RDFS.domain, (RDFNode) null);
    StmtIterator stmt3 = m.listStatements(null, RDFS.range, (RDFNode) null);
    StmtIterator stmt4 = m.listStatements(null, RDF.type, RDF.Property);
    StmtIterator stmt6 = m.listStatements(null, RDFS.label, (String) null);
    StmtIterator stmt8 = m.listStatements(null, groups, (RDFNode) null);
    StmtIterator stmt9 = m.listStatements(null, isGroupedBy, (RDFNode) null);
    vocabulary.add(stmt1);
    vocabulary.add(stmt2);
    vocabulary.add(stmt3);
    vocabulary.add(stmt4);
    vocabulary.add(stmt6);
    vocabulary.add(stmt8);
    vocabulary.add(stmt9);
    m.remove(vocabulary);
    StmtIterator stmt7 = m.listStatements(null, RDF.type, (RDFNode) null);
    List<Statement> ss = new ArrayList<Statement>();
    while (stmt7.hasNext()) {
        Statement s = stmt7.next();
        // Skip literal or blank-node type objects before reading the URI.
        if (s.getObject().isURIResource()
                && s.getObject().asResource().getURI().startsWith("http://www.buildingsmart-tech.org/ifc/IFC4/final")) {
            ss.add(s);
        }
    }
    m.remove(ss);
    try {
        OutputStream voc = new FileOutputStream("C:\\users\\chi\\desktop\\output\\" + name + ".ttl");
        OutputStream rule = new FileOutputStream("C:\\users\\chi\\desktop\\output\\" + name + "_rule.ttl");
        vocabulary.setNsPrefixes(m.getNsPrefixMap());
        vocabulary.write(voc, "TTL");
        m.write(rule, "TTL");
    } catch (FileNotFoundException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}