This article collects typical usage examples of the Java class org.openrdf.model.vocabulary.DCTERMS. If you are wondering what the DCTERMS class does, how to use it, or where to find sample code, the hand-picked class examples below may help.
The DCTERMS class belongs to the org.openrdf.model.vocabulary package. Fifteen code examples of the DCTERMS class are shown below, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Java code examples.
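Before the examples, here is a minimal, hedged sketch (not taken from any of the examples below): the DCTERMS class simply exposes constant predicate URIs such as DCTERMS.TITLE and DCTERMS.CREATOR, plus DCTERMS.NAMESPACE and DCTERMS.PREFIX, which are typically paired with a ValueFactory when building statements. The subject URI and literal values here are invented for illustration only.

import org.openrdf.model.Statement;
import org.openrdf.model.URI;
import org.openrdf.model.ValueFactory;
import org.openrdf.model.impl.StatementImpl;
import org.openrdf.model.impl.ValueFactoryImpl;
import org.openrdf.model.vocabulary.DCTERMS;

// Minimal sketch: DCTERMS only supplies constant URIs; pair them with a ValueFactory
// to build statements. The subject and literal values below are made up.
ValueFactory vf = ValueFactoryImpl.getInstance();
URI doc = vf.createURI("http://example.org/doc/1");
Statement title = new StatementImpl(doc, DCTERMS.TITLE, vf.createLiteral("Example document"));
Statement creator = new StatementImpl(doc, DCTERMS.CREATOR, vf.createLiteral("Jane Doe"));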
Example 1: getMetadata
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
private Map<URI, String> getMetadata( CommandLine cmd ) {
    Map<URI, String> map = new HashMap<>();

    // map each command-line option name to the metadata predicate it populates
    Map<String, URI> mets = new HashMap<>();
    mets.put( "organization", DCTERMS.CREATOR );
    mets.put( "poc", DCTERMS.PUBLISHER );
    mets.put( "summary", DCTERMS.DESCRIPTION );
    mets.put( "title", RDFS.LABEL );

    for ( Map.Entry<String, URI> en : mets.entrySet() ) {
        if ( cmd.hasOption( en.getKey() ) ) {
            String rawval = cmd.getOptionValue( en.getKey() );
            map.put( en.getValue(), rawval );
        }
    }
    return map;
}
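A hedged usage sketch for this example, assuming the CommandLine comes from Apache Commons CLI (the option names mirror the keys the method looks up; the descriptions and the args array are invented):

// Hypothetical caller: declare the options this method inspects, then build the metadata map.
Options options = new Options();
options.addOption( "organization", true, "creator organization" );
options.addOption( "poc", true, "point of contact / publisher" );
options.addOption( "summary", true, "description of the database" );
options.addOption( "title", true, "label for the database" );
CommandLine cmd = new DefaultParser().parse( options, args ); // throws ParseException
Map<URI, String> metadata = getMetadata( cmd );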
Example 2: handleTuple
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
@Override
public void handleTuple( BindingSet set, ValueFactory fac ) {
    URI pred = fac.createURI( set.getValue( "p" ).stringValue() );
    Value val = set.getValue( "o" );

    // for the base URI we need the subject, not the object;
    // also, we use the VOID_DS as the key elsewhere in the code
    if ( RDF.TYPE.equals( pred ) ) {
        pred = SEMTOOL.Database;
        val = set.getValue( "db" );
    }
    else if ( pred.getNamespace().equals( DC.NAMESPACE ) ) {
        // silently handle the old DC namespace (ignore our DC-specific URIs)
        if ( !( MetadataConstants.DCT_CREATED.equals( pred )
                || MetadataConstants.DCT_MODIFIED.equals( pred ) ) ) {
            pred = fac.createURI( DCTERMS.NAMESPACE, pred.getLocalName() );
        }
    }

    result.put( pred, val );
}
Example 3: getPerspectiveStatements
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
protected static Model getPerspectiveStatements( Perspective p,
        ValueFactory vf, UriBuilder urib, User user ) {
    Model statements = new LinkedHashModel();
    URI pid = p.getId();
    Date now = new Date();

    statements.add( new StatementImpl( pid, RDF.TYPE, SEMPERS.Perspective ) );
    statements.add( new StatementImpl( pid, RDFS.LABEL,
            vf.createLiteral( p.getLabel() ) ) );

    if ( null != p.getDescription() ) {
        statements.add( new StatementImpl( pid, DCTERMS.DESCRIPTION,
                vf.createLiteral( p.getDescription() ) ) );
    }

    statements.add( new StatementImpl( pid, DCTERMS.CREATED,
            vf.createLiteral( now ) ) );
    statements.add( new StatementImpl( pid, DCTERMS.MODIFIED,
            vf.createLiteral( now ) ) );
    statements.add( new StatementImpl( pid, DCTERMS.CREATOR,
            vf.createLiteral( getAuthorInfo( user ) ) ) );

    return statements;
}
Example 4: getMockResource
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
/**
 * Creates a mock resource.
 *
 * @return a Record populated with fake data.
 */
public static Record getMockResource() {
    final GregorianCalendar calendar = new GregorianCalendar();
    calendar.set(2013, 9, 23); // 23 October 2013 (month is 0-based)

    final Record rep = Record.create();
    rep.setID(new URIImpl("ks:r15_rep"));
    rep.set(RDF.TYPE, KS.REPRESENTATION);
    rep.set(NFO.FILE_NAME, "r15.txt");
    rep.set(NFO.FILE_SIZE, 1533L);
    rep.set(NFO.FILE_CREATED, new Date());
    rep.set(NIE.MIME_TYPE, "text/plain");

    final Record resource = Record.create();
    resource.setID(new URIImpl("ks:r15"));
    resource.set(RDF.TYPE, KS.RESOURCE);
    resource.set(DCTERMS.TITLE, "This is the news title");
    resource.set(DCTERMS.ISSUED, calendar);
    resource.set(NIE.IS_STORED_AS, rep);
    return resource;
}
Example 5: getMockResource
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
private static Record getMockResource() {
    final GregorianCalendar calendar = new GregorianCalendar();
    calendar.set(2013, 9, 23);

    final Record rep = Record.create();
    rep.setID(new URIImpl("ks:r15_rep"));
    rep.set(RDF.TYPE, KS.REPRESENTATION);
    rep.set(NFO.FILE_NAME, "r15.txt");
    rep.set(NFO.FILE_SIZE, 1533L);
    rep.set(NFO.FILE_CREATED, new Date());
    rep.set(NIE.MIME_TYPE, "text/plain");

    final Record resource = Record.create();
    resource.setID(new URIImpl("ks:r15"));
    resource.set(RDF.TYPE, KS.RESOURCE);
    resource.set(DCTERMS.TITLE, "This is the news title");
    resource.set(DCTERMS.ISSUED, calendar);
    resource.set(NIE.IS_STORED_AS, rep);
    return resource;
}
Example 6: getMockResource
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
public static Record getMockResource() {
    final GregorianCalendar calendar = new GregorianCalendar();
    calendar.set(2013, 9, 23);

    final Record rep = Record.create();
    rep.setID(new URIImpl("ks:r15_rep"));
    rep.set(RDF.TYPE, KS.REPRESENTATION);
    rep.set(NFO.FILE_NAME, "r15.txt");
    rep.set(NFO.FILE_SIZE, 1533L);
    rep.set(NFO.FILE_CREATED, new Date());
    rep.set(NIE.MIME_TYPE, "text/plain");
    // multi-valued properties: several creators and issued values on the same record
    rep.set(DCTERMS.CREATOR, "John", "Steve", "Mark");
    rep.set(DCTERMS.ISSUED, 1000, 1005, 1007);

    final Record resource = Record.create();
    resource.setID(new URIImpl("ks:r15"));
    resource.set(RDF.TYPE, KS.RESOURCE);
    resource.set(DCTERMS.TITLE, "This is the news title");
    resource.set(DCTERMS.ISSUED, calendar);
    resource.set(NIE.IS_STORED_AS, rep);
    return resource;
}
Example 7: createRecordStream
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
static Stream<Record> createRecordStream() {
    return Stream.create(new AbstractIterator<Record>() {

        private int index = 0;

        @Override
        protected Record computeNext() {
            ++this.index;
            if (this.index > NUM_RECORDS) {
                return endOfData();
            }
            if (this.index % 100 == 0) {
                LOGGER.info("{} records generated", this.index);
            }
            final ValueFactory factory = Data.getValueFactory();
            final Record record = Record.create(factory.createURI("ex:resource" + this.index),
                    KS.RESOURCE);
            record.set(DCTERMS.TITLE, "Resource " + this.index);
            record.set(DCTERMS.CREATOR, "John Smith");
            record.set(DCTERMS.CREATED, new Date());
            // record.set(DCTERMS.ABSTRACT, Strings.repeat("... bla ...\n", 1000));
            return record;
        }
    });
}
Example 8: testEntityProperties
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
/**
 * Tests entity parsing and entity properties.
 */
private void testEntityProperties(Enhancements enhancements) {
    Assert.assertFalse(enhancements.getEntities().isEmpty());

    Entity paris = enhancements.getEntity("http://dbpedia.org/resource/Paris");
    Assert.assertNotNull(paris);
    Assert.assertFalse(paris.getProperties().isEmpty());

    // the entity has been added to the analysis result
    Assert.assertFalse(paris.getProperties().isEmpty());
    Assert.assertFalse(paris.getValues(RDFS.LABEL.toString()).isEmpty());
    Assert.assertEquals("Paris", paris.getValue(RDFS.LABEL.toString(), "en"));
    Assert.assertTrue(paris.getValues(RDF.TYPE.toString()).contains("http://dbpedia.org/ontology/Place"));
    // Assert.assertTrue(Float.parseFloat(paris.getFirstPropertyValue("http://stanbol.apache.org/ontology/entityhub/entityhub#entityRank")) > 0.5f);
    Assert.assertTrue(paris.getValues(DCTERMS.SUBJECT.toString()).contains("http://dbpedia.org/resource/Category:Capitals_in_Europe"));

    EntityAnnotation parisEa = enhancements.getEntityAnnotation(paris.getUri());
    Assert.assertEquals(parisEa.getEntityTypes(), Collections.singleton("http://dbpedia.org/ontology/Municipality"));
    Assert.assertEquals("Paris", parisEa.getEntityLabel());
    // Assert.assertEquals("dbpedia", parisEa.getDataset());
    Assert.assertEquals("en", parisEa.getEntityLabelLang());
}
Example 9: exportIteration
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
/**
 * Serializes a resource in LDP style.
 *
 * @param writer    the writer to serialize to
 * @param subject   the resource to serialize
 * @param iteration the iteration containing the data
 * @throws RDFHandlerException
 * @throws RepositoryException
 */
public static void exportIteration(RDFWriter writer, URI subject,
        CloseableIteration<Statement, RepositoryException> iteration)
        throws RDFHandlerException, RepositoryException {
    writer.startRDF();

    // declare the namespaces used in the output
    writer.handleNamespace(LDP.PREFIX, LDP.NAMESPACE);
    writer.handleNamespace(RDF.PREFIX, RDF.NAMESPACE);
    writer.handleNamespace(XSD.PREFIX, XSD.NAMESPACE);
    writer.handleNamespace(DCTERMS.PREFIX, DCTERMS.NAMESPACE);
    writer.handleNamespace("parent", subject.getNamespace());
    writer.handleNamespace("child", subject.stringValue().replaceFirst("/*$", "/"));
    writer.handleNamespace("this", subject.stringValue().replaceFirst("/*$", "#"));

    while (iteration.hasNext()) {
        writer.handleStatement(iteration.next());
    }
    writer.endRDF();
}
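A hedged usage sketch for calling exportIteration, assuming an open Sesame RepositoryConnection named conn and an OutputStream named out (the subject URI is invented; RepositoryResult implements the CloseableIteration the method expects):

// Hypothetical caller: serialize every statement about one subject in LDP style.
URI subject = ValueFactoryImpl.getInstance().createURI("http://example.org/container/");
RDFWriter writer = Rio.createWriter(RDFFormat.TURTLE, out);
exportIteration(writer, subject, conn.getStatements(subject, null, null, false));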
Example 10: loadPerspective
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
private static Perspective loadPerspective( URI perspectiveURI, RepositoryConnection rc,
        UriBuilder urib ) {
    try {
        Perspective perspective = new Perspective( perspectiveURI );
        Collection<Statement> stmts
                = Iterations.asList( rc.getStatements( perspectiveURI, null, null, false ) );

        for ( Statement s : stmts ) {
            URI pred = s.getPredicate();
            Value val = s.getObject();
            if ( val instanceof Literal ) {
                if ( RDFS.LABEL.equals( pred ) ) {
                    perspective.setLabel( val.stringValue() );
                }
                else if ( DCTERMS.DESCRIPTION.equals( pred ) ) {
                    perspective.setDescription( val.stringValue() );
                }
            }
        }

        perspective.setInsights( loadInsights( perspective, rc, urib ) );
        return perspective;
    }
    catch ( RepositoryException e ) {
        log.error( e, e );
    }

    throw new IllegalArgumentException( "unknown perspective: " + perspectiveURI );
}
Example 11: testCreateStatements
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
@Test
public void testCreateStatements() throws Exception {
    InsightManagerImpl imi = new InsightManagerImpl();
    EngineUtil2.createInsightStatements( SRCFILE, imi );
    Collection<Statement> stmts
            = InsightManagerImpl.getModel( imi, new LocalUserImpl() );

    if ( log.isTraceEnabled() ) {
        // dump the generated statements as Turtle for debugging
        File tmpdir = FileUtils.getTempDirectory();
        try ( Writer w = new BufferedWriter( new FileWriter( new File( tmpdir,
                SRCFILE.getName() ) ) ) ) {
            TurtleWriter tw = new TurtleWriter( w );
            tw.startRDF();
            tw.handleNamespace( SEMPERS.PREFIX, SEMPERS.NAMESPACE );
            tw.handleNamespace( SEMONTO.PREFIX, SEMONTO.NAMESPACE );
            tw.handleNamespace( SEMCORE.PREFIX, SEMCORE.NAMESPACE );
            tw.handleNamespace( SPIN.PREFIX, SPIN.NAMESPACE );
            tw.handleNamespace( SP.PREFIX, SP.NAMESPACE );
            tw.handleNamespace( UI.PREFIX, UI.NAMESPACE );
            tw.handleNamespace( RDFS.PREFIX, RDFS.NAMESPACE );
            tw.handleNamespace( RDF.PREFIX, RDF.NAMESPACE );
            tw.handleNamespace( OWL.PREFIX, OWL.NAMESPACE );
            tw.handleNamespace( OLO.PREFIX, OLO.NAMESPACE );
            tw.handleNamespace( DCTERMS.PREFIX, DCTERMS.NAMESPACE );
            tw.handleNamespace( XMLSchema.PREFIX, XMLSchema.NAMESPACE );
            for ( Statement s : stmts ) {
                tw.handleStatement( s );
            }
            tw.endRDF();
        }
    }

    assertEquals( 48, stmts.size() );
}
Example 12: readMetadata
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
@Nullable
private static void readMetadata(final Record metadata,
        final MultivaluedMap<String, String> headers) {

    // Read Content-Type header
    final String mime = headers.getFirst(HttpHeaders.CONTENT_TYPE);
    metadata.set(NIE.MIME_TYPE, mime != null ? mime : MediaType.APPLICATION_OCTET_STREAM);

    // Read Content-MD5 header, if available
    final String md5 = headers.getFirst("Content-MD5");
    if (md5 != null) {
        final Record hash = Record.create();
        hash.set(NFO.HASH_ALGORITHM, "MD5");
        hash.set(NFO.HASH_VALUE, md5);
        metadata.set(NFO.HAS_HASH, hash);
    }

    // Read Content-Language header, if possible
    final String language = headers.getFirst(HttpHeaders.CONTENT_LANGUAGE);
    try {
        metadata.set(DCTERMS.LANGUAGE, Data.languageCodeToURI(language));
    } catch (final Throwable ex) {
        LOGGER.warn("Invalid {}: {}", HttpHeaders.CONTENT_LANGUAGE, language);
    }

    // Read custom X-KS-Meta header
    final String encodedMeta = headers.getFirst(Protocol.HEADER_META);
    if (encodedMeta != null) {
        final InputStream in = new ByteArrayInputStream(encodedMeta.getBytes(Charsets.UTF_8));
        final Stream<Statement> statements = RDFUtil.readRDF(in, RDFFormat.TURTLE,
                Data.getNamespaceMap(), null, true);
        final Record record = Record.decode(statements,
                ImmutableSet.<URI>of(KS.REPRESENTATION), true).getUnique();
        metadata.setID(record.getID());
        for (final URI property : record.getProperties()) {
            metadata.set(property, record.get(property));
        }
    }
}
Example 13: test
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
@Test
public void test() {
    final Record r = Record.create();
    r.setID(Data.getValueFactory().createURI("test:r"));
    r.set(DCTERMS.TITLE, "this is the title");

    final Record r2 = Record.create();
    r2.setID(Data.getValueFactory().createURI("test:r2"));
    r2.set(DCTERMS.TITLE, "this is the title");

    final Record m = Record.create();
    m.setID(Data.getValueFactory().createURI("test:x"));
    m.set(NIF.END_INDEX, 15);
    m.set(RDFS.COMMENT, "first", "second", "third");
    m.set(KS.MENTION_OF, r, r2);

    System.out.println(m.toString(Data.getNamespaceMap(), true));
}
Example 14: processDocumentHeader
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
private void processDocumentHeader(BELStatement belStatement, NanopubCreator npCreator) {
    BELDocumentHeader h = belDoc.getDocumentHeader();
    npCreator.addProvenanceStatement(provWasDerivedFrom, belDocResource);

    if (h.getName() != null) {
        npCreator.addProvenanceStatement(belDocResource, DC.TITLE, vf.createLiteral(h.getName()));
    }
    if (h.getDescription() != null) {
        npCreator.addProvenanceStatement(belDocResource, DC.DESCRIPTION, vf.createLiteral(h.getDescription()));
    }
    if (h.getCopyright() != null) {
        npCreator.addProvenanceStatement(belDocResource, DC.RIGHTS, vf.createLiteral(h.getCopyright()));
    }
    if (h.getLicense() != null) {
        npCreator.addProvenanceStatement(belDocResource, DCTERMS.LICENSE, vf.createLiteral(h.getLicense()));
    }
    if (h.getVersion() != null) {
        npCreator.addProvenanceStatement(belDocResource, pavVersion, vf.createLiteral(h.getVersion()));
    }

    if (h.getAuthor() != null || h.getContactInfo() != null) {
        BNode author = newBNode();
        npCreator.addProvenanceStatement(belDocResource, NanopubVocab.PAV_AUTHOREDBY, author);
        if (h.getAuthor() != null) {
            npCreator.addProvenanceStatement(author, RDFS.LABEL, vf.createLiteral(h.getAuthor()));
        }
        if (h.getContactInfo() != null) {
            npCreator.addProvenanceStatement(author, RDFS.COMMENT, vf.createLiteral(h.getContactInfo()));
        }
    }
}
Example 15: datasetProvenance
import org.openrdf.model.vocabulary.DCTERMS; // import the required package/class
public Model datasetProvenance(String datasetId) {
    Model statements = new LinkedHashModel();
    URI dataset = generateDatasetUri(datasetId);
    URI record = generateRecordUri(datasetId);
    Literal now = ValueFactoryImpl.getInstance().createLiteral(new Date());

    statements.add(record, DCTERMS.MODIFIED, now);
    statements.add(record, DCTERMS.ISSUED, now);
    statements.add(record, RDF.TYPE, ODSVoc.DCAT_CATALOGRECORD);
    statements.add(record, ODSVoc.FOAF_PRIMARYTOPIC, dataset);
    statements.add(catalogUri, ODSVoc.DCAT_CAT_PROP_DATASET, dataset);
    statements.add(catalogUri, ODSVoc.DCAT_CAT_PROP_RECORD, record);
    statements.add(dataset, RDF.TYPE, ODSVoc.DCAT_DATASET);

    return statements;
}
}