本文整理汇总了Java中org.apache.jena.atlas.logging.Log.debug方法的典型用法代码示例。如果您正苦于以下问题:Java Log.debug方法的具体用法?Java Log.debug怎么用?Java Log.debug使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.jena.atlas.logging.Log
的用法示例。
在下文中一共展示了Log.debug方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: loadVocabularies
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
@Override
public void loadVocabularies() {
    // Bind ?linkset to this linkset so the query only returns its vocabularies.
    QuerySolutionMap binding = new QuerySolutionMap();
    binding.add("linkset", this.dataset);
    Query query = QueryFactory.create(linksetVocabularyQuery);
    QueryExecution qexec = QueryExecutionFactory.create(query, voidInstance.getVoidModel(),
            binding);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution soln = results.nextSolution();
            OntResource vocabulary = soln.getResource("vocabulary").as(
                    OntResource.class);
            vocabularies.add(vocabulary);
        }
    } catch (Exception e) {
        // BUG FIX: the previous second log call used e.getStackTrace().toString(),
        // which prints the array's identity ("[Ljava.lang.StackTraceElement;@...")
        // rather than the stack trace. Include the exception message instead.
        Log.debug(Linkset.class, "Failed linksetVocabularyQuery: " + e.getMessage());
    } finally {
        qexec.close();
    }
}
示例2: loadClassPartitionStatistics
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Reads class-partition statistics (class URI plus optional entity count)
 * for each dataset out of the supplied VoID model and records them on the
 * matching Dataset's partitions. Query failures are logged at debug level.
 */
public void loadClassPartitionStatistics(OntModel voidModel) {
    Query query = QueryFactory.create(classPartitionStatisticsQuery);
    QueryExecution qexec = QueryExecutionFactory.create(query, voidModel);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution solution = results.nextSolution();
            OntResource datasetResource = solution.getResource("Dataset").as(OntResource.class);
            OntResource classResource = solution.getResource("Class").as(OntResource.class);
            // ?Entities is optional in the result; record null when absent.
            Integer entityCount = (solution.getLiteral("Entities") != null)
                    ? solution.getLiteral("Entities").getInt()
                    : null;
            Dataset target = this.getDataset(datasetResource);
            if (target != null) {
                target.getPartitions().addClassPartition(classResource, entityCount);
            }
        }
    } catch (Exception e) {
        Log.debug(
                this,
                "Unable to execute classPartitionStatisticsQuery " + query);
    } finally {
        qexec.close();
    }
}
示例3: loadPropertyPartitionStatistics
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Reads property-partition statistics (property URI plus optional triple
 * count) for each dataset out of the supplied VoID model and records them
 * on the matching Dataset's partitions. Query failures are logged at debug
 * level.
 */
public void loadPropertyPartitionStatistics(OntModel voidModel) {
    Query query = QueryFactory.create(propertyPartitionStatisticsQuery);
    QueryExecution qexec = QueryExecutionFactory.create(query, voidModel);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution solution = results.nextSolution();
            OntResource datasetResource = solution.getResource("Dataset").as(OntResource.class);
            OntResource propertyResource = solution.getResource("Property").as(OntResource.class);
            // ?Triples is optional in the result; record null when absent.
            Integer tripleCount = (solution.getLiteral("Triples") != null)
                    ? solution.getLiteral("Triples").getInt()
                    : null;
            Dataset target = this.getDataset(datasetResource);
            if (target != null) {
                target.getPartitions().addPropertyPartition(propertyResource, tripleCount);
            }
        }
    } catch (Exception e) {
        Log.debug(
                this,
                "Unable to execute propertyPartitionStatisticsQuery " + query);
    } finally {
        qexec.close();
    }
}
示例4: queryPropertyPartitionStatistics
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Queries the configured SPARQL endpoint for per-property triple counts
 * and adds each (property, count) pair to this dataset's partitions.
 * Endpoint/connection failures are logged at debug level and otherwise
 * ignored (best effort).
 */
protected void queryPropertyPartitionStatistics() {
    Query query = QueryFactory.create(propertyPartitionStatisticsQuery);
    QueryExecution qexec = QueryExecutionFactory.sparqlService(
            this.sparqlEndPoint.toString(), query);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution solution = results.nextSolution();
            OntResource propertyResource = solution.getResource("p").as(OntResource.class);
            // ?count is optional in the result; record null when absent.
            Integer tripleCount = (solution.getLiteral("count") != null)
                    ? solution.getLiteral("count").getInt()
                    : null;
            partitions.addPropertyPartition(propertyResource, tripleCount);
        }
    } catch (Exception e) {
        Log.debug(
                Dataset.class,
                "Unable to connect to SPARQLEndpoint to execute propertyPartitionStatisticsQuery: "
                        + this.sparqlEndPoint.toString());
    } finally {
        qexec.close();
    }
}
示例5: loadVocabularies
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Loads the vocabularies declared for this dataset from the VoID model,
 * binding ?dataset to this dataset, and adds each result to the
 * {@code vocabularies} collection. Failures are logged at debug level.
 */
public void loadVocabularies() {
    QuerySolutionMap binding = new QuerySolutionMap();
    binding.add("dataset", dataset);
    Query query = QueryFactory.create(datasetVocabularyQuery);
    QueryExecution qexec = QueryExecutionFactory.create(query, voidInstance.getVoidModel(),
            binding);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution soln = results.nextSolution();
            OntResource vocabulary = soln.getResource("vocabulary").as(
                    OntResource.class);
            vocabularies.add(vocabulary);
        }
    } catch (Exception e) {
        // BUG FIX: the previous second log call used e.getStackTrace().toString(),
        // which prints the array's identity ("[Ljava.lang.StackTraceElement;@...")
        // rather than the stack trace. Include the exception message instead.
        Log.debug(Dataset.class, "Failed datasetVocabularyQuery: " + e.getMessage());
    } finally {
        qexec.close();
    }
}
示例6: updatePartitions
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Rebuilds the class and property partitions for this dataset. Every
 * declared vocabulary is read (with imports processed) into an
 * RDFS-inference ontology model; vocabularies that cannot be fetched are
 * logged and skipped. The populated model is then handed to the class and
 * property partition updaters.
 */
public void updatePartitions() {
    // OWL_MEM_RDFS_INF ensures all default classes and properties are also discovered.
    OntModelSpec spec = new OntModelSpec(OntModelSpec.OWL_MEM_RDFS_INF);
    spec.setDocumentManager(voidInstance.getVoidModel().getDocumentManager());
    spec.getDocumentManager().setProcessImports(true);
    OntModel partitionModel = ModelFactory.createOntologyModel(spec);
    for (OntResource vocabulary : this.getVocabularies()) {
        try {
            partitionModel.read(vocabulary.getURI());
        } catch (Exception e) {
            Log.debug(Void.class, "Failed to locate dataset vocabulary: "
                    + vocabulary + " " + e.getMessage());
        }
    }
    updateClassPartition(partitionModel);
    updatePropertyPartition(partitionModel);
}
示例7: updateClassPartition
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Populates the class partition from the vocabulary-enriched partition
 * model: every named (non-anonymous) class found by classPartitionQuery is
 * added with a null entity count, since counts are unknown at this stage.
 */
private void updateClassPartition(OntModel partitionModel) {
    Query query = QueryFactory.create(classPartitionQuery);
    QueryExecution qexec = QueryExecutionFactory.create(query, partitionModel);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution solution = results.nextSolution();
            OntResource classResource = solution.getResource("class").as(OntResource.class);
            // Skip blank-node classes; only named classes form partitions.
            if (!classResource.isAnon()) {
                partitions.addClassPartition(classResource, null);
            }
        }
    } catch (Exception e) {
        Log.debug(Dataset.class, "Failed to execute classPartitionQuery");
    } finally {
        qexec.close();
    }
}
示例8: queryClassPartitionStatistics
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Queries the configured SPARQL endpoint for per-class entity counts and
 * adds each (class, count) pair to this dataset's partitions.
 * Endpoint/connection failures are logged at debug level and otherwise
 * ignored (best effort).
 */
private void queryClassPartitionStatistics() {
    Query query = QueryFactory.create(classPartitionStatisticsQuery);
    QueryExecution qexec = QueryExecutionFactory.sparqlService(
            this.sparqlEndPoint.toString(), query);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution solution = results.nextSolution();
            OntResource classResource = solution.getResource("class").as(OntResource.class);
            // ?count is optional in the result; record null when absent.
            Integer entityCount = (solution.getLiteral("count") != null)
                    ? solution.getLiteral("count").getInt()
                    : null;
            partitions.addClassPartition(classResource, entityCount);
        }
    } catch (Exception e) {
        Log.debug(
                Dataset.class,
                "Unable to connect to SPARQLEndpoint to execute classPartitionStatisticsQuery: "
                        + this.sparqlEndPoint.toString());
    } finally {
        qexec.close();
    }
}
示例9: updatePropertyPartition
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Populates the property partition from the vocabulary-enriched partition
 * model: every property found by propertyPartitionQuery is added with a
 * null triple count, since counts are unknown at this stage.
 */
protected void updatePropertyPartition(OntModel partitionModel) {
    Query query = QueryFactory.create(propertyPartitionQuery);
    QueryExecution qexec = QueryExecutionFactory.create(query,
            partitionModel);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            QuerySolution soln = results.nextSolution();
            OntResource property = soln.getResource("property").as(
                    OntResource.class);
            partitions.addPropertyPartition(property, null);
        }
    } catch (Exception e) {
        // BUG FIX: the message previously read "Failed to execute to execute ..."
        // (duplicated words); corrected for readable log output.
        Log.debug(Dataset.class, "Failed to execute propertyPartitionQuery");
    } finally {
        qexec.close();
    }
}
示例10: join
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
@Override
public Table join(Table tableLeft, Table tableRight) {
    // Trace both operands when debug tracing is on.
    if (debug) {
        Log.debug(this, "Join");
        dump(tableLeft);
        dump(tableRight);
    }
    // Inner join: not a left join, no filter expressions.
    return joinWorker(tableLeft, tableRight, false, null);
}
示例11: leftJoin
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
@Override
public Table leftJoin(Table tableLeft, Table tableRight, ExprList exprs) {
    // Trace both operands (and any filter expressions) when debug tracing is on.
    if (debug) {
        Log.debug(this, "Left Join");
        dump(tableLeft);
        dump(tableRight);
        if (exprs != null) {
            Log.debug(this, exprs.toString());
        }
    }
    // Left join: keep unmatched left rows, applying the filter expressions.
    return joinWorker(tableLeft, tableRight, true, exprs);
}
示例12: diff
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
@Override
public Table diff(Table tableLeft, Table tableRight) {
    // Trace both operands when debug tracing is on.
    if (debug) {
        Log.debug(this, "Diff");
        dump(tableLeft);
        dump(tableRight);
    }
    return diffWorker(tableLeft, tableRight);
}
示例13: minus
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
@Override
public Table minus(Table tableLeft, Table tableRight) {
    // Trace both operands when debug tracing is on.
    if (debug) {
        Log.debug(this, "Minus");
        dump(tableLeft);
        dump(tableRight);
    }
    return minusWorker(tableLeft, tableRight);
}
示例14: queryDatasetStatistics
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
/**
 * Runs datasetStatisticsQuery against the configured SPARQL endpoint and
 * stores the returned VoID statistics (triples, entities, classes,
 * properties, distinct subjects/objects) in this dataset's fields. Any
 * statistic missing from the result row is recorded as null. Endpoint or
 * query failures are logged at debug level and otherwise ignored.
 */
private void queryDatasetStatistics() {
    Query query = QueryFactory.create(datasetStatisticsQuery);
    QueryExecution qexec = QueryExecutionFactory.sparqlService(
            this.sparqlEndPoint.toString(), query);
    try {
        ResultSet results = qexec.execSelect();
        while (results.hasNext()) {
            // ?triples ?entities ?classes ?predicates ?subjects ?objects
            QuerySolution soln = results.nextSolution();
            triples = (soln.getLiteral("triples") != null)
                    ? soln.getLiteral("triples").getInt() : null;
            entities = (soln.getLiteral("entities") != null)
                    ? soln.getLiteral("entities").getInt() : null;
            classes = (soln.getLiteral("classes") != null)
                    ? soln.getLiteral("classes").getInt() : null;
            properties = (soln.getLiteral("properties") != null)
                    ? soln.getLiteral("properties").getInt() : null;
            distinctSubjects = (soln.getLiteral("distinctSubjects") != null)
                    ? soln.getLiteral("distinctSubjects").getInt() : null;
            distinctObjects = (soln.getLiteral("distinctObjects") != null)
                    ? soln.getLiteral("distinctObjects").getInt() : null;
        }
    } catch (Exception e) {
        Log.debug(Dataset.class,
                "Unable to connect to SPARQLEndpoint to execute datasetStatisticsQuery: "
                        + this.sparqlEndPoint.toString());
    } finally {
        // BUG FIX: close() previously sat AFTER an empty finally block, so the
        // execution leaked if the catch handler itself threw. Closing inside
        // finally matches every sibling method and guarantees release.
        qexec.close();
    }
}
示例15: union
import org.apache.jena.atlas.logging.Log; //导入方法依赖的package包/类
@Override
public Table union(Table tableLeft, Table tableRight) {
    // Trace both operands when debug tracing is on.
    if (debug) {
        Log.debug(this, "Union");
        dump(tableLeft);
        dump(tableRight);
    }
    // Union is plain concatenation of the two tables' bindings.
    QueryIterConcat concatenated = new QueryIterConcat(execCxt);
    concatenated.add(tableLeft.iterator(execCxt));
    concatenated.add(tableRight.iterator(execCxt));
    return new TableN(concatenated);
}