本文整理汇总了Python中ckanext.dcat.processors.RDFParser类的典型用法代码示例。如果您正苦于以下问题:Python RDFParser类的具体用法?Python RDFParser怎么用?Python RDFParser使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了RDFParser类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test__datasets
def test__datasets(self):
    """_datasets() should yield one node reference per dataset in the graph."""
    parser = RDFParser()
    parser.g = _default_graph()
    dataset_refs = list(parser._datasets())
    eq_(len(dataset_refs), 3)
示例2: test_distribution_format_format_normalized
def test_distribution_format_format_normalized(self):
    """On CKAN >= 2.3 the distribution format is normalized (e.g. 'CSV');
    older versions keep the raw dct:format label."""
    graph = Graph()
    dataset_ref = URIRef("http://example.org/datasets/1")
    dist_ref = URIRef("http://example.org/datasets/1/ds/1")

    graph.add((dataset_ref, RDF.type, DCAT.Dataset))
    graph.add((dist_ref, RDF.type, DCAT.Distribution))
    graph.add((dist_ref, DCAT.mediaType, Literal('text/csv')))
    graph.add((dist_ref, DCT['format'], Literal('Comma Separated Values')))
    graph.add((dataset_ref, DCAT.distribution, dist_ref))

    parser = RDFParser(profiles=['euro_dcat_ap'])
    parser.g = graph

    parsed = list(parser.datasets())
    resource = parsed[0]['resources'][0]

    if toolkit.check_ckan_version(min_version='2.3'):
        eq_(resource['format'], u'CSV')
        eq_(resource['mimetype'], u'text/csv')
    else:
        eq_(resource['format'], u'Comma Separated Values')
示例3: test_spatial_one_dct_spatial_instance_no_uri
def test_spatial_one_dct_spatial_instance_no_uri(self):
    """A blank-node dct:Location yields spatial_text and spatial extras
    but no spatial_uri (there is no URI to record)."""
    graph = Graph()
    dataset_ref = URIRef('http://example.org/datasets/1')
    graph.add((dataset_ref, RDF.type, DCAT.Dataset))

    location = BNode()
    graph.add((dataset_ref, DCT.spatial, location))
    graph.add((location, RDF.type, DCT.Location))
    graph.add((location, SKOS.prefLabel, Literal('Newark')))
    graph.add((location,
               LOCN.geometry,
               Literal('{"type": "Point", "coordinates": [23, 45]}', datatype=GEOJSON_IMT)))

    parser = RDFParser(profiles=['euro_dcat_ap'])
    parser.g = graph

    parsed = list(parser.datasets())
    extras = self._extras(parsed[0])

    assert_true('spatial_uri' not in extras)
    eq_(extras['spatial_text'], 'Newark')
    eq_(extras['spatial'], '{"type": "Point", "coordinates": [23, 45]}')
示例4: test_dataset_json_ld_1
def test_dataset_json_ld_1(self):
    """Parsing a POD-style JSON-LD catalog yields one dataset with the
    expected title, contact/publisher extras and resources."""
    contents = self._get_file_contents('catalog_pod.jsonld')

    parser = RDFParser(profiles=['euro_dcat_ap'])
    parser.parse(contents, _format='json-ld')

    parsed = list(parser.datasets())
    eq_(len(parsed), 1)

    dataset = parsed[0]
    extras = {e['key']: e['value'] for e in dataset['extras']}

    eq_(dataset['title'], 'U.S. Widget Manufacturing Statistics')
    eq_(extras['contact_name'], 'Jane Doe')
    eq_(extras['contact_email'], 'mailto:[email protected]')
    eq_(extras['publisher_name'], 'Widget Services')
    eq_(extras['publisher_email'], '[email protected]')

    eq_(len(dataset['resources']), 4)
    resource = [r for r in dataset['resources'] if r['name'] == 'widgets.csv'][0]
    eq_(resource['name'], u'widgets.csv')
    eq_(resource['url'], u'https://data.agency.gov/datasets/widgets-statistics/widgets.csv')
    eq_(resource['download_url'], u'https://data.agency.gov/datasets/widgets-statistics/widgets.csv')
示例5: test_datasets_none_found
def test_datasets_none_found(self):
    """An empty graph must produce no datasets at all."""
    parser = RDFParser()
    parser.g = Graph()
    eq_(len(list(parser.datasets())), 0)
示例6: test_spatial_both_geojson_and_wkt
def test_spatial_both_geojson_and_wkt(self):
    """When a location carries both a GeoJSON and a WKT geometry, the
    GeoJSON literal is the one stored in the 'spatial' extra."""
    graph = Graph()
    dataset_ref = URIRef('http://example.org/datasets/1')
    location = URIRef('http://geonames/Newark')

    graph.add((dataset_ref, RDF.type, DCAT.Dataset))
    graph.add((dataset_ref, DCT.spatial, location))
    graph.add((location, RDF.type, DCT.Location))
    graph.add((location,
               LOCN.geometry,
               Literal('{"type": "Point", "coordinates": [23, 45]}', datatype=GEOJSON_IMT)))
    graph.add((location,
               LOCN.geometry,
               Literal('POINT (67 89)', datatype=GSP.wktLiteral)))

    parser = RDFParser(profiles=['euro_dcat_ap'])
    parser.g = graph

    parsed = list(parser.datasets())
    extras = self._extras(parsed[0])

    eq_(extras['spatial'], '{"type": "Point", "coordinates": [23, 45]}')
示例7: test_distribution_format_IMT_field
def test_distribution_format_IMT_field(self):
    """A dct:format pointing at a dct:IMT node maps its rdfs:label to the
    resource format and its rdf:value to the mimetype."""
    graph = Graph()
    dataset_ref = URIRef("http://example.org/datasets/1")
    dist_ref = URIRef("http://example.org/datasets/1/ds/1")

    imt_node = BNode()
    graph.add((imt_node, RDF.type, DCT.IMT))
    graph.add((imt_node, RDF.value, Literal('text/turtle')))
    graph.add((imt_node, RDFS.label, Literal('Turtle')))

    graph.add((dataset_ref, RDF.type, DCAT.Dataset))
    graph.add((dist_ref, RDF.type, DCAT.Distribution))
    graph.add((dist_ref, DCT['format'], imt_node))
    graph.add((dataset_ref, DCAT.distribution, dist_ref))

    parser = RDFParser(profiles=['euro_dcat_ap'])
    parser.g = graph

    parsed = list(parser.datasets())
    resource = parsed[0]['resources'][0]

    eq_(resource['format'], u'Turtle')
    eq_(resource['mimetype'], u'text/turtle')
示例8: test_spatial_wrong_geometries
def test_spatial_wrong_geometries(self):
    """Geometry literals that parse as neither GeoJSON nor WKT must be
    dropped, so no 'spatial' extra is produced."""
    graph = Graph()
    dataset_ref = URIRef('http://example.org/datasets/1')
    location = URIRef('http://geonames/Newark')

    graph.add((dataset_ref, RDF.type, DCAT.Dataset))
    graph.add((dataset_ref, DCT.spatial, location))
    graph.add((location, RDF.type, DCT.Location))
    graph.add((location,
               LOCN.geometry,
               Literal('Not GeoJSON', datatype=GEOJSON_IMT)))
    graph.add((location,
               LOCN.geometry,
               Literal('Not WKT', datatype=GSP.wktLiteral)))

    parser = RDFParser(profiles=['euro_dcat_ap'])
    parser.g = graph

    parsed = list(parser.datasets())
    extras = self._extras(parsed[0])

    assert_true('spatial' not in extras)
示例9: test_dataset_ttl
def test_dataset_ttl(self):
    """The dataset endpoint must serve Turtle that round-trips cleanly
    through the RDF parser."""
    dataset = factories.Dataset(
        notes='Test dataset'
    )
    url = url_for('dcat_dataset', _id=dataset['id'], _format='ttl')

    app = self._get_test_app()
    response = app.get(url)
    eq_(response.headers['Content-Type'], 'text/turtle')

    # Parse the contents to check it's an actual serialization
    parser = RDFParser()
    parser.parse(response.body, _format='turtle')

    parsed = list(parser.datasets())
    eq_(len(parsed), 1)

    round_tripped = parsed[0]
    eq_(round_tripped['title'], dataset['title'])
    eq_(round_tripped['notes'], dataset['notes'])
示例10: test_catalog_modified_date
def test_catalog_modified_date(self):
    """The catalog endpoint must honour ``modified_since`` and return
    only datasets modified strictly after the given timestamp."""
    # The first dataset only exists to have an OLDER modification date;
    # its return value is intentionally unused.
    factories.Dataset(title='First dataset')
    # Sleep so the two datasets get distinct metadata_modified timestamps.
    time.sleep(1)
    dataset2 = factories.Dataset(title='Second dataset')

    url = url_for('dcat_catalog',
                  _format='ttl',
                  modified_since=dataset2['metadata_modified'])
    app = self._get_test_app()
    response = app.get(url)

    p = RDFParser()
    p.parse(response.body, _format='turtle')

    # Only the second (newer) dataset should appear in the feed.
    dcat_datasets = [d for d in p.datasets()]
    eq_(len(dcat_datasets), 1)
    eq_(dcat_datasets[0]['title'], dataset2['title'])
示例11: test_spatial_multiple_dct_spatial_instances
def test_spatial_multiple_dct_spatial_instances(self):
    """With several dct:spatial values the parser merges the URI, the
    geometry and the label into the spatial_* extras.

    The original code reused one ``location_ref`` name for two distinct
    blank nodes; each node now has its own name so the triples added to
    each are unambiguous (the resulting graph is identical).
    """
    graph = Graph()
    dataset_ref = URIRef("http://example.org/datasets/1")
    graph.add((dataset_ref, RDF.type, DCAT.Dataset))

    # First spatial value: a plain URI reference (no Location triples).
    spatial_uri = URIRef("http://geonames/Newark")
    graph.add((dataset_ref, DCT.spatial, spatial_uri))

    # Second spatial value: a blank-node Location carrying the geometry.
    geometry_node = BNode()
    graph.add((geometry_node, RDF.type, DCT.Location))
    graph.add((dataset_ref, DCT.spatial, geometry_node))
    graph.add(
        (geometry_node, LOCN.geometry, Literal('{"type": "Point", "coordinates": [23, 45]}', datatype=GEOJSON_IMT))
    )

    # Third spatial value: a blank-node Location carrying the label.
    label_node = BNode()
    graph.add((label_node, RDF.type, DCT.Location))
    graph.add((dataset_ref, DCT.spatial, label_node))
    graph.add((label_node, SKOS.prefLabel, Literal("Newark")))

    p = RDFParser(profiles=["euro_dcat_ap"])
    p.g = graph

    datasets = [d for d in p.datasets()]
    extras = self._extras(datasets[0])

    eq_(extras["spatial_uri"], "http://geonames/Newark")
    eq_(extras["spatial_text"], "Newark")
    eq_(extras["spatial"], '{"type": "Point", "coordinates": [23, 45]}')
示例12: test_dataset_ttl
def test_dataset_ttl(self):
    """The dataset endpoint must serve Turtle that round-trips cleanly
    through the RDF parser."""
    dataset = factories.Dataset(notes="Test dataset")
    url = url_for("dcat_dataset", _id=dataset["id"], _format="ttl")

    app = self._get_test_app()
    response = app.get(url)
    eq_(response.headers["Content-Type"], "text/turtle")

    # Parse the contents to check it's an actual serialization
    parser = RDFParser()
    parser.parse(response.body, _format="turtle")

    parsed = list(parser.datasets())
    eq_(len(parsed), 1)

    round_tripped = parsed[0]
    eq_(round_tripped["title"], dataset["title"])
    eq_(round_tripped["notes"], dataset["notes"])
示例13: test_parse_subcatalog
def test_parse_subcatalog(self):
    """Serializing a dataset under a subcatalog and parsing the result
    back must expose the subcatalog homepage through the
    ``source_catalog_homepage`` extra.

    Fixes two name collisions in the original: the inner loop variable
    ``dataset`` clobbered the input dataset dict, and ``datasets`` was
    rebound from a triple generator to the parsed-dataset dict.
    """
    publisher = {'name': 'Publisher',
                 'email': '[email protected]',
                 'type': 'Publisher',
                 'uri': 'http://pub.lish.er'}
    dataset = {
        'id': '4b6fe9ca-dc77-4cec-92a4-55c6624a5bd6',
        'name': 'test-dataset',
        'title': 'test dataset',
        'extras': [
            {'key': 'source_catalog_title', 'value': 'Subcatalog example'},
            {'key': 'source_catalog_homepage', 'value': 'http://subcatalog.example'},
            {'key': 'source_catalog_description', 'value': 'Subcatalog example description'},
            {'key': 'source_catalog_language', 'value': 'http://publications.europa.eu/resource/authority/language/ITA'},
            {'key': 'source_catalog_modified', 'value': '2000-01-01'},
            {'key': 'source_catalog_publisher', 'value': json.dumps(publisher)}
        ]
    }
    catalog_dict = {
        'title': 'My Catalog',
        'description': 'An Open Data Catalog',
        'homepage': 'http://example.com',
        'language': 'de',
    }

    s = RDFSerializer()
    s.serialize_catalog(catalog_dict, dataset_dicts=[dataset])

    p = RDFParser(profiles=['euro_dcat_ap'])
    p.g = s.g

    # at least one subcatalog with hasPart
    subcatalogs = list(p.g.objects(None, DCT.hasPart))
    assert_true(subcatalogs)

    # at least one dataset node inside the subcatalogs
    subdatasets = []
    for subcatalog in subcatalogs:
        for subdataset_ref in p.g.objects(subcatalog, DCAT.dataset):
            subdatasets.append((subdataset_ref, subcatalog,))
    assert_true(subdatasets)

    # Index the parsed datasets by title so each subcatalog dataset node
    # can be matched back to its parsed dict.
    parsed_by_title = dict([(d['title'], d) for d in p.datasets()])
    for subdataset_ref, subcatalog in subdatasets:
        title = unicode(list(p.g.objects(subdataset_ref, DCT.title))[0])
        parsed_dataset = parsed_by_title[title]
        has_subcat = False
        for extra in parsed_dataset['extras']:
            if extra['key'] == 'source_catalog_homepage':
                has_subcat = True
                eq_(extra['value'], unicode(subcatalog))
        # check if we had subcatalog in extras
        assert_true(has_subcat)
示例14: test_profiles_are_called_on_datasets
def test_profiles_are_called_on_datasets(self):
    """Every configured profile must contribute fields to each parsed dataset."""
    parser = RDFParser()
    parser._profiles = [MockRDFProfile1, MockRDFProfile2]
    parser.g = _default_graph()
    for parsed in parser.datasets():
        assert parsed['profile_1']
        assert parsed['profile_2']
示例15: test_tags_with_commas
def test_tags_with_commas(self):
    """A comma-separated dcat:keyword literal must be split into one tag per word."""
    graph = Graph()
    dataset_ref = URIRef('http://example.org/datasets/1')
    graph.add((dataset_ref, RDF.type, DCAT.Dataset))
    graph.add((dataset_ref, DCAT.keyword, Literal('Tree, forest, shrub')))

    parser = RDFParser(profiles=['euro_dcat_ap'])
    parser.g = graph

    parsed = list(parser.datasets())
    eq_(len(parsed[0]['tags']), 3)