This article collects typical usage examples of the Java class org.apache.clerezza.rdf.core.UriRef. If you are wondering what the UriRef class is for, how to use it, or are looking for concrete examples, the curated class examples below may help.
The UriRef class belongs to the org.apache.clerezza.rdf.core package. 15 code examples of the UriRef class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
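Before diving into the examples, here is a minimal standalone sketch of the UriRef basics the snippets below rely on: a UriRef is an immutable RDF URI node created from a URI string and read back with getUnicodeString(). The class and method names are those used in the examples on this page; the sample URI is purely illustrative.

import org.apache.clerezza.rdf.core.UriRef;

public class UriRefBasics {
    public static void main(String[] args) {
        // create an RDF URI node from a plain URI string
        UriRef pers = new UriRef("http://www.example.org/test#pers1");
        // read the URI back as a Unicode string
        System.out.println(pers.getUnicodeString());
        // UriRef equality is based on the URI string
        System.out.println(pers.equals(new UriRef("http://www.example.org/test#pers1"))); // true
    }
}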
Example 1: getResourceValue

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
private String getResourceValue(NonLiteral nl, Map<BNode, String> bNodeMap) {
    if (nl == null) {
        return null;
    } else if (nl instanceof UriRef) {
        return ((UriRef) nl).getUnicodeString();
    } else if (nl instanceof BNode) {
        String bNodeId = bNodeMap.get(nl);
        if (bNodeId == null) {
            bNodeId = Integer.toString(bNodeMap.size());
            bNodeMap.put((BNode) nl, bNodeId);
        }
        return new StringBuilder("_:b").append(bNodeId).toString();
    } else {
        throw new IllegalStateException("Unknown NonLiteral type " + nl.getClass().getName()
                + "!");
    }
}
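Since getResourceValue is private, a caller would live in the same class; the following hypothetical demo method (java.util.HashMap import assumed) illustrates the contract, with the expected return values shown as comments.

private void demoResourceValues() {
    Map<BNode, String> bNodeMap = new HashMap<BNode, String>();
    BNode bNode = new BNode();
    // a UriRef is rendered as its Unicode URI string
    System.out.println(getResourceValue(new UriRef("http://example.org/a"), bNodeMap)); // http://example.org/a
    // a BNode receives a label scoped to the parsed map ...
    System.out.println(getResourceValue(bNode, bNodeMap)); // _:b0
    // ... and the label is stable across repeated calls
    System.out.println(getResourceValue(bNode, bNodeMap)); // _:b0
    System.out.println(getResourceValue(null, bNodeMap)); // null
}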
Example 2: init

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
@BeforeClass
public static void init(){
    LiteralFactory lf = LiteralFactory.getInstance();
    UriRef pers1 = new UriRef("http://www.example.org/test#pers1");
    UriRef pers2 = new UriRef("http://www.example.org/test#pers2");
    MGraph data = new SimpleMGraph();
    //NOTE: This tests a literal with and without a language tag as
    //      well as an xsd:string typed literal, to check correct
    //      handling of RDF 1.1
    data.add(new TripleImpl(pers1, RDF.type, FOAF.Person));
    data.add(new TripleImpl(pers1, FOAF.name, new PlainLiteralImpl("Rupert Westenthaler",
            new Language("de"))));
    data.add(new TripleImpl(pers1, FOAF.nick, new PlainLiteralImpl("westei")));
    data.add(new TripleImpl(pers1, FOAF.mbox, lf.createTypedLiteral("[email protected]")));
    data.add(new TripleImpl(pers1, FOAF.age, lf.createTypedLiteral(38)));
    data.add(new TripleImpl(pers1, FOAF.knows, pers2));
    data.add(new TripleImpl(pers2, FOAF.name, new PlainLiteralImpl("Reto Bachmann-Gmür")));
    rdfData = data.getGraph();
}
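The NOTE in init() refers to the distinction the Clerezza API draws between plain and typed literals, which matters for RDF 1.1 (where a plain literal without a language tag and an xsd:string typed literal denote the same value). A small sketch of that difference, assuming the standard Clerezza core types PlainLiteral and TypedLiteral:

// a plain literal, optionally carrying a language tag
PlainLiteral plain = new PlainLiteralImpl("westei");
PlainLiteral tagged = new PlainLiteralImpl("Rupert Westenthaler", new Language("de"));
// a typed literal; for a Java String the datatype is xsd:string
TypedLiteral typed = LiteralFactory.getInstance().createTypedLiteral("westei");
System.out.println(typed.getDataType().getUnicodeString());
// prints: http://www.w3.org/2001/XMLSchema#string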
Example 3: transform

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Performs the actual transformation, mapping the data extracted from the OSM XML to a Clerezza graph.
 * @return the graph holding the transformed data
 */
public TripleCollection transform(){
    TripleCollection resultGraph = new SimpleMGraph();
    processXmlBinary();
    for(String wayId : osmWayNodeMap.keySet()) {
        OsmWay wayObj = osmWayNodeMap.get(wayId);
        UriRef wayUri = new UriRef("http://fusepoolp3.eu/osm/way/" + wayId);
        resultGraph.add(new TripleImpl(wayUri, RDF.type, new UriRef("http://schema.org/PostalAddress")));
        resultGraph.add(new TripleImpl(wayUri, new UriRef("http://schema.org/streetAddress"), new PlainLiteralImpl(wayObj.getTagName())));
        UriRef geometryUri = new UriRef("http://fusepoolp3.eu/osm/geometry/" + wayId);
        resultGraph.add(new TripleImpl(wayUri, new UriRef("http://www.opengis.net/ont/geosparql#geometry"), geometryUri));
        String linestring = getWktLineString(wayObj.getNodeReferenceList());
        resultGraph.add(new TripleImpl(geometryUri, new UriRef("http://www.opengis.net/ont/geosparql#asWKT"), new PlainLiteralImpl(linestring)));
    }
    return resultGraph;
}
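A graph produced by transform() can be written out with Clerezza's pluggable serializer. The sketch below assumes the standard org.apache.clerezza.rdf.core.serializedform.Serializer service is on the classpath and that Turtle is registered under the "text/turtle" format identifier; adjust both if your setup differs.

import org.apache.clerezza.rdf.core.TripleCollection;
import org.apache.clerezza.rdf.core.serializedform.Serializer;

// ... inside the transformer class
TripleCollection graph = transform();
// write the graph to System.out as Turtle
Serializer.getInstance().serialize(System.out, graph, "text/turtle");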
Example 4: transformTextAnnotation

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
private void transformTextAnnotation(Annotation anno) {
    //we need to distinguish different types of fise:TextAnnotations
    //(1) Language Annotations
    Set<UriRef> dcTypes = asSet(getReferences(anno.ctx.src, anno.enh, DC_TYPE));
    if (dcTypes.contains(DC_LINGUISTIC_SYSTEM)) { // this is a language annotation
        transformLanguageAnnotation(anno);
        return;
    }
    //(2) Sentiment Annotations
    //Sentiment Annotations use ?enh dct:type fise:Sentiment
    if(dcTypes.contains(FISE_SENTIMENT_TYPE)){
        transformSentimentAnnotation(anno);
        return;
    }
    //(3) Topic Annotations
    Iterator<Triple> relation = anno.ctx.src.filter(null, DC_RELATION, anno.enh);
    while (relation.hasNext()) {
        NonLiteral related = relation.next().getSubject();
        if(hasValue(anno.ctx.src, related, RDF_TYPE, null, ENHANCER_TOPICANNOTATION)){
            transformTopicClassification(anno);
            return;
        }
    }
    //(4) Entity Mention Annotations (all remaining)
    transformEntityMentionAnnotation(anno);
}
Example 5: transformSelection2Nif

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Transforms a <code>fise:TextAnnotation</code> with a selected area of a text
 * to a <a href="http://persistence.uni-leipzig.org/nlp2rdf/">NIF 2.0</a>
 * <code>nif:String</code>. This method expects the parsed selector URI to
 * be encoded using <a href="http://tools.ietf.org/html/rfc5147">RFC 5147</a>
 * as the encoding scheme.<p>
 * The implementation of this method follows the mapping specification
 * defined by the <a href="https://github.com/fusepoolP3/overall-architecture/blob/master/wp3/fp-anno-model/fp-anno-model.md#fisetextannotation-transformation">
 * <code>fise:TextAnnotation</code> transformation</a> section of the FAM model.
 * @param anno the annotation
 * @param selector the <a href="http://tools.ietf.org/html/rfc5147">RFC 5147</a>
 *        encoded URI for the selector (typically created by using
 *        {@link #createSelectorRessource(Annotation)})
 */
private void transformSelection2Nif(Annotation anno, UriRef selector){
    anno.ctx.tar.add(new TripleImpl(selector, RDF_TYPE, NIF_STRING));
    anno.ctx.tar.add(new TripleImpl(selector, RDF_TYPE, NIF_RFC5147STRING));
    copyValue(anno.ctx, anno.enh, ENHANCER_START, selector, NIF_BEGIN_INDEX);
    copyValue(anno.ctx, anno.enh, ENHANCER_END, selector, NIF_END_INDEX);
    copyValue(anno.ctx, anno.enh, ENHANCER_SELECTED_TEXT, selector, NIF_ANCHOR_OF);
    copyValue(anno.ctx, anno.enh, ENHANCER_SELECTION_HEAD, selector, NIF_HEAD);
    copyValue(anno.ctx, anno.enh, ENHANCER_SELECTION_TAIL, selector, NIF_TAIL);
    copyValue(anno.ctx, anno.enh, ENHANCER_SELECTION_PREFIX, selector, NIF_BEFORE);
    copyValue(anno.ctx, anno.enh, ENHANCER_SELECTION_SUFFIX, selector, NIF_AFTER);
    UriRef contextUri = createRFC5147URI(anno.getExtractedFrom(), null, null);
    if(anno.ctx.addNifContext(contextUri)){ //is this a new context?
        anno.ctx.tar.add(new TripleImpl(contextUri, RDF_TYPE, NIF_CONTEXT));
        anno.ctx.tar.add(new TripleImpl(contextUri, RDF_TYPE, NIF_RFC5147STRING));
        anno.ctx.tar.add(new TripleImpl(contextUri, NIF_SOURCE_URL, anno.getExtractedFrom()));
        //NOTE: this does not add the contents of the ContentItem (nif:isString)
    }
    anno.ctx.tar.add(new TripleImpl(selector, NIF_REFERENCE_CONTEXT, contextUri));
}
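createRFC5147URI itself is not shown on this page. RFC 5147 defines the #char=start,end fragment scheme the method name refers to, so a helper matching the call sites above could plausibly look like the following; the whole method is a hypothetical reconstruction, not the original code.

// hypothetical sketch; the original createRFC5147URI may differ
private UriRef createRFC5147URI(UriRef source, Integer start, Integer end) {
    StringBuilder sb = new StringBuilder(source.getUnicodeString());
    if (start != null && end != null) {
        // RFC 5147 character-range fragment, e.g. #char=0,17
        sb.append("#char=").append(start).append(',').append(end);
    }
    return new UriRef(sb.toString());
}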
Example 6: hasValue

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Checks if the parsed value is among the triples selected by the parsed
 * triple filter.
 * IMPORTANT: This method expects that exactly one of subject, predicate and
 * object is <code>null</code>
 * @param source the triple collection
 * @param sub the subject filter (<code>null</code> for wildcard)
 * @param pred the predicate filter (<code>null</code> for wildcard)
 * @param obj the object filter (<code>null</code> for wildcard)
 * @param value the value
 * @return <code>true</code> if the parsed value is part of the triples selected
 *         by the parsed triple pattern.
 */
public boolean hasValue(TripleCollection source, NonLiteral sub, UriRef pred, Resource obj, Resource value){
    if(value == null){
        return false;
    }
    Iterator<Triple> it = source.filter(sub, pred, obj);
    while(it.hasNext()){
        Triple t = it.next();
        Resource act = sub == null ? t.getSubject() : pred == null
                ? t.getPredicate() : t.getObject();
        if(act.equals(value)){
            return true;
        }
    }
    return false;
}
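For example, checking whether a resource carries a given rdf:type boils down to a call with a wildcard in the object position (graph and resource are placeholders standing in for values from the surrounding code):

// subject and predicate are fixed, the object (null) is the wildcard,
// so hasValue compares each matched object against FOAF.Person
boolean isPerson = hasValue(graph, resource, RDF.type, null, FOAF.Person);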
Example 7: assertOptValues

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
private <T extends Resource> Set<T> assertOptValues(TripleCollection graph,
        NonLiteral subject, UriRef property, Class<T> type) {
    Iterator<Triple> it = graph.filter(subject, property, null);
    if(!it.hasNext()){
        return Collections.emptySet();
    }
    Set<T> values = new HashSet<T>();
    while(it.hasNext()){
        Resource value = it.next().getObject();
        assertTrue(type.getSimpleName()+" expected but value "+ value +
                " had the type "+value.getClass().getSimpleName()+"!",
                type.isAssignableFrom(value.getClass()));
        values.add(type.cast(value));
    }
    return values;
}
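A test would use this helper, for instance, to read all foaf:knows values of a resource while asserting that each value is a resource reference (reusing the vocabulary and the rdfData/pers1 fields from Example 2):

// empty set if pers1 has no foaf:knows values; fails the test if a
// value is not a UriRef
Set<UriRef> known = assertOptValues(rdfData, pers1, FOAF.knows, UriRef.class);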
Example 8: testDefaultEnhancements

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
@Test
public void testDefaultEnhancements() throws EngineException, IOException, ParseException {
    //engine = new SpeechToTextEngine(ciFactory, MP);
    log.info(">>> Default Model Sphinx Testing WAV <<<");
    ContentItem ci = createContentItem("temp.wav", "audio/wav");
    assertFalse(engine.canEnhance(ci) == CANNOT_ENHANCE);
    //System.out.println("##################################################"+ci.getMetadata());
    System.out.println("##### Engine open ");
    engine.computeEnhancements(ci);
    System.out.println("##### Engine Close");
    Entry<UriRef,Blob> contentPart = ContentItemHelper.getBlob(ci, singleton("text/plain"));
    // String text = ContentItemHelper.getText(contentPart.getValue());
    //System.out.println("##################################################"+ci.getMetadata());
    assertNotNull(contentPart);
    Blob plainTextBlob = contentPart.getValue();
    //log.info("Recognised String: {}", ContentItemHelper.getText(plainTextBlob));
    assertNotNull(plainTextBlob);
}
Example 9: testCustomEnhancements

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
@Test
public void testCustomEnhancements() throws EngineException, IOException, ParseException {
    //engine = new SpeechToTextEngine(ciFactory, MP);
    log.info(">>> Custom Model Sphinx Testing WAV <<<");
    ContentItem ci = createContentItem("temp.wav", "audio/wav");
    assertFalse(engine.canEnhance(ci) == CANNOT_ENHANCE);
    //engine.config.setCustomLangModel("en-us.lm.dmp");
    //engine.config.setCustomDictModel("en-cmu.dict");
    String[] acousticResource = {"feat.params", "mdef", "means", "mixture_weights",
            "noisedict", "transition_matrices", "variances", "feature_transform"};
    for(String resourceName : acousticResource) {
        //engine.config.setCustomAcousticModel(resourceName);
    }
    // SphinxConfig.CUSTOM_MODEL_AVAILABLE=true;
    //System.out.println("##################################################"+ci.getMetadata());
    engine.computeEnhancements(ci);
    Entry<UriRef,Blob> contentPart = ContentItemHelper.getBlob(ci, singleton("text/plain"));
    // String text = ContentItemHelper.getText(contentPart.getValue());
    //System.out.println("##################################################"+ci.getMetadata());
    assertNotNull(contentPart);
    Blob plainTextBlob = contentPart.getValue();
    assertNotNull(plainTextBlob);
}
Example 10: generateRdf

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Gets the SIOC content from the parsed RDF as text and returns a graph
 * describing it.
 *
 * @param entity the request entity providing the Turtle data
 * @return the generated RDF
 * @throws IOException if reading the entity data fails
 */
@Override
protected TripleCollection generateRdf(HttpRequestEntity entity) throws IOException {
    String text = "";
    Graph graph = Parser.getInstance().parse(entity.getData(), "text/turtle");
    Iterator<Triple> triples = graph.filter(null, SIOC.content, null);
    if (triples.hasNext()) {
        Literal literal = (Literal) triples.next().getObject();
        text = literal.getLexicalForm();
    }
    final TripleCollection result = new SimpleMGraph();
    final Resource resource = entity.getContentLocation() == null
            ? new BNode()
            : new UriRef(entity.getContentLocation().toString());
    final GraphNode node = new GraphNode(resource, result);
    node.addProperty(RDF.type, TEXUAL_CONTENT);
    node.addPropertyValue(SIOC.content, text);
    node.addPropertyValue(new UriRef("http://example.org/ontology#textLength"), text.length());
    return result;
}
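The GraphNode utility used here wraps a resource together with a graph: addProperty links the resource to another resource, while addPropertyValue converts a plain Java value into an RDF literal. A minimal standalone sketch of the same pattern; the sioc:Post type URI is chosen only for illustration.

MGraph result = new SimpleMGraph();
GraphNode node = new GraphNode(new BNode(), result);
// link the resource to another resource
node.addProperty(RDF.type, new UriRef("http://rdfs.org/sioc/ns#Post"));
// convert plain Java values ("some text", 9) to RDF literals
node.addPropertyValue(SIOC.content, "some text");
node.addPropertyValue(new UriRef("http://example.org/ontology#textLength"), 9);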
Example 11: addOriginalElement

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Adds a new element to the dictionary without any modification.
 *
 * @param labelText the text of the label
 * @param labelType the type of the label
 * @param uri the URI of the concept the label belongs to
 */
public void addOriginalElement(String labelText, UriRef labelType, String uri) {
    Concept concept;
    if (keywords.containsKey(labelText)) {
        concept = keywords.get(labelText);
    } else {
        concept = new Concept(labelText, labelType, uri);
    }
    keywords.put(labelText, concept);
    if (concept.isPrefLabel()) {
        prefLabels.put(uri, labelText);
    }
}
Example 12: createBudgetLine

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Returns the budget line object id and populates trpList with the created triples.
 * @param trpList the graph the created triples are added to
 * @param name the name of the budget line
 * @param childrens the names of the child budget lines
 * @return the URI of the created budget line
 */
private UriRef createBudgetLine(MGraph trpList, String name, List<String> childrens){
    UriRef budgetLineRoot = Entities.generateNewEntity();
    //create the name
    //TODO : real language aware object creation
    trpList.add(new TripleImpl(budgetLineRoot, SKOS.prefLabel, lf.createTypedLiteral(name)));
    //add the type of the object
    trpList.add(new TripleImpl(budgetLineRoot, RDF.type, Onthology.budgetLineType.getUri()));
    //create a child budget line for each entry
    for(String child : childrens){
        UriRef childBudgetLine = createBudgetLine(trpList, child, Collections.<String> emptyList());
        trpList.add(new TripleImpl(budgetLineRoot, Onthology.hasBudgetLine.getUri(), childBudgetLine));
    }
    return budgetLineRoot;
}
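A hypothetical invocation that builds a two-level hierarchy (the graph and the label strings are made up for illustration; java.util.Arrays import assumed):

MGraph graph = new SimpleMGraph();
// creates one root and two leaf budget lines, linked via hasBudgetLine
UriRef root = createBudgetLine(graph, "Budget",
        Arrays.asList("Education", "Health"));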
Example 13: getSkosOnto

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
@Deprecated //use the model endpoint instead
@Path("/skosontology/all")
@GET
@Consumes(WILDCARD)
@Produces(RDF_XML)
//TODO : change the query parameter name from "skos" to "model"
//TODO : change the endpoint from skosontology to ontologiesmodel
//TODO : remove the "/all" and remove the /skosontology model
//TODO : without a model parameter, this endpoint should provide the list of available ontologies
//TODO : remove the "skos_" prefix from the file name as it does not really mean anything
public Response getSkosOnto(@QueryParam(value = "skos") String modelName, @Context HttpHeaders headers) throws JSONException {
    log.warn("This endpoint is deprecated, use the model endpoint instead");
    UriRef soURI = new UriRef("urn:x-onto-utils:skosOntology" + modelName);
    MGraph g;
    Set<UriRef> l = tcManager.listMGraphs();
    if(l.contains(soURI)){
        g = tcManager.getMGraph(soURI);
    } else {
        //initialisation: load the graph
        g = tcManager.createMGraph(soURI);
        g.addAll(parser.parse(this.getClass().getResourceAsStream("/skos_" + modelName + ".rdf"), "Application/rdf+xml"));
    }
    return okMGraphResponse(headers, g);
}
Example 14: getInputText

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Extracts the text from the content item.
 *
 * @param ci the content item
 * @return the plain text content
 * @throws IOException if reading the blob fails
 */
public static String getInputText(ContentItem ci) throws IOException {
    Map.Entry<UriRef, Blob> contentPart = ContentItemHelper.getBlob(ci, SUPPORTED_MIMTYPES);
    if (contentPart == null) {
        throw new IllegalStateException("No ContentPart with Mimetype '" + TEXT_PLAIN_MIMETYPE
                + "' found for ContentItem "
                + ci.getUri() + ": This is also checked in the canEnhance method! -> This "
                + "indicates a bug in the implementation of the " + "EnhancementJobManager!");
    }
    final String text = ContentItemHelper.getText(contentPart.getValue());
    if (text.trim().length() == 0) {
        throw new IllegalArgumentException(
                "No text contained in ContentPart {" + contentPart.getKey() +
                "} of ContentItem {" +
                ci.getUri() + "}"
        );
    }
    return text;
}
Example 15: createDbpediaResourceURI

import org.apache.clerezza.rdf.core.UriRef; // import of the featured package/class
/**
 * Creates language specific entity URIs for DBPedia. Those URIs follow
 * this pattern:<ul>
 * <li> for '<code>en</code>': <code>http://dbpedia.org/resource/{sensePage}</code>
 * <li> for all other languages: <code>http://{lang}.dbpedia.org/resource/{sensePage}</code>
 * </ul>
 * @param lang the language; if <code>null</code> English is assumed
 * @param sensePage the sense page (local name of the entity)
 * @param crosses the cross-language links used to look up the English sense page
 * @return the {@link UriRef} for the URI of the entity
 */
private UriRef createDbpediaResourceURI(String lang, String sensePage, Cross[] crosses){
    //if the language is English, directly create the entity URI based on the
    //sensePage string
    if(lang == null || "en".equalsIgnoreCase(lang)){
        return new UriRef(new StringBuilder(MLConstants.DBPEDIA_RESOURCE_PREFIX)
                .append(sensePage).toString());
    }
    //for other languages try to create the English resource URI based on the
    //crosses (NOTE: it would be better if this was a Map with the language
    //as key)
    if(crosses != null){
        for(Cross cross : crosses){
            if("en".equalsIgnoreCase(cross.getLang())){
                return new UriRef(new StringBuilder(MLConstants.DBPEDIA_RESOURCE_PREFIX)
                        .append(cross.getPage()).toString());
            }
        }
    }
    //fall back to the language specific entity URI
    return new UriRef(new StringBuilder("http://")
            .append(lang.toLowerCase(Locale.ROOT)).append('.')
            .append(DBPEDIA_RESOURCE_HOST_AND_PATH)
            .append(sensePage).toString());
}
Author: michelemostarda, Project: machinelinking-stanbol-enhancement-engine, Lines of code: 35, Source file: MLAnnotateEnhancementEngine.java
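To illustrate the three branches of createDbpediaResourceURI: assuming MLConstants.DBPEDIA_RESOURCE_PREFIX is http://dbpedia.org/resource/ and DBPEDIA_RESOURCE_HOST_AND_PATH is dbpedia.org/resource/ (both values are guesses from the constant names), and with crossesWithEnPage as a hypothetical helper producing a Cross array containing an English entry, the expected results are:

// English (or null) language: the generic DBpedia namespace
createDbpediaResourceURI("en", "Berlin", null);
//   -> http://dbpedia.org/resource/Berlin
// other language with an English cross-link: still the generic namespace
createDbpediaResourceURI("de", "Berlin", crossesWithEnPage("Berlin"));
//   -> http://dbpedia.org/resource/Berlin
// other language without an English cross-link: language specific namespace
createDbpediaResourceURI("de", "Berlin", null);
//   -> http://de.dbpedia.org/resource/Berlin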