本文整理汇总了Python中openspending.model.Dataset类的典型用法代码示例。如果您正苦于以下问题:Python Dataset类的具体用法?Python Dataset怎么用?Python Dataset使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Dataset类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_browser_for_entity
def test_browser_for_entity(self):
    """The entity browser view lists entries linked to the entity."""
    h.skip_if_stubbed_solr()
    from openspending.model import Dataset, Entry

    dataset = Dataset(name='testdataset')
    Dataset.c.save(dataset, manipulate=True)
    dataset_ref = dataset.to_ref_dict()

    entity = self._make_one(name="Test Entity", label="Test Entity Label")
    entity_ref = entity.to_ref_dict()

    # A single entry pointing both from and to the same entity.
    Entry.c.save({
        'name': 'Test Entry',
        'label': 'Test Entry Label',
        'from': entity_ref,
        'to': entity_ref,
        'amount': 10.0,
        'dataset': dataset_ref,
    })
    h.clean_and_reindex_solr()

    view_url = url(controller='entity', id=str(entity['_id']),
                   slug='test-entity-label', action='view')
    response = self.app.get(view_url)
    h.assert_equal(response._status, '200 OK')
    h.assert_true('<b>1 entries</b> found.<br />' in response)
    h.assert_true('entries.json">' in response)
    h.assert_true('entries.csv">' in response)
示例2: load_from_databank
def load_from_databank(sourcejson, dataproviderjson, dry_run=False, overwrite=True, meta_only=False, file_dir = None):
    # Create or refresh a Dataset (and its owning DataOrg) from databank
    # JSON records.
    #
    # sourcejson       -- databank record describing the indicator/dataset
    # dataproviderjson -- databank record describing the data provider
    # NOTE(review): dry_run, overwrite, meta_only and file_dir are not used
    # in the visible portion of this function -- presumably consumed further
    # down; confirm against the full source.
    print "Working on ", sourcejson['fields']['indicator']
    # Get or create the data provider organisation.
    dataorg = DataOrg.by_name(dataproviderjson['fields']['title'])
    dataorgMeta = {
        'description': dataproviderjson['fields']['description'],
        'label': dataproviderjson['fields']['title']
    }
    if not dataorg:
        dataorg = DataOrg(dataorgMeta)
        db.session.add(dataorg)
    #dataorg will update with id here
    db.session.commit()
    #get or create dataset
    dataset = Dataset.by_label(sourcejson['fields']['indicator'])
    # Back-link to the databank admin page for this data connection.
    description = "http://databank.edip-maps.net/admin/etldata/dataconnection/" + str(sourcejson['pk']) + "/"
    modelDataset = {'dataset':
                    {
                        'label': sourcejson['fields']['indicator'],
                        'name': sourcejson['fields']['indicator'],
                        'description': description,
                        'dataType': sourcejson['fields']['data_type'],
                        'dataorg_id': dataorg.id
                    }
                    }
    if not dataset:
        #create one
        dataset = Dataset(modelDataset['dataset'])
        #dataset.ORoperations = dataproviderjson['fields'].get('ORoperations', {})
        #dataset.data = dataproviderjson['fields'].get('mapping',{})
        db.session.add(dataset)
    else:
        #dataset.ORoperations = dataproviderjson['fields'].get('ORoperations', {})
        #dataset.data = dataproviderjson['fields'].get('mapping',{})
        dataset.update(modelDataset['dataset'])
    db.session.commit()
    systemaccount = Account.by_id(1)
    # Drop any previously loaded source so it can be replaced; deletion is
    # best-effort and failures are only reported.
    if dataset.source:
        try:
            print "trying to delete source"
            print dataset.source
            dataset.source.delete()
        except Exception, e:
            print "could not delete source", e
示例3: create
def create(self):
    """
    Adds a new dataset dynamically through a POST request.

    Expects exactly two request params: 'metadata' (URL of a JSON model)
    and 'csv_file' (URL of the data file). Queues the source for loading
    and returns the dataset as JSONP. Aborts with 400 on any validation
    failure, and relies on `require.dataset.*` for authorization.
    """
    # User must be authenticated so we should have a user object in
    # c.account, if not abort with error message
    if not c.account:
        abort(status_code=400, detail='user not authenticated')

    # Check if the params are there ('metadata', 'csv_file')
    if len(request.params) != 2:
        abort(status_code=400, detail='incorrect number of params')

    metadata = request.params['metadata'] \
        if 'metadata' in request.params \
        else abort(status_code=400, detail='metadata is missing')

    csv_file = request.params['csv_file'] \
        if 'csv_file' in request.params \
        else abort(status_code=400, detail='csv_file is missing')

    # Fetch and parse the JSON model describing the dataset.
    try:
        model = json.load(urllib2.urlopen(metadata))
    except Exception:
        # Was a bare "except:" -- narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any parse/network error is still a 400.
        abort(status_code=400, detail='JSON model could not be parsed')

    try:
        log.info("Validating model")
        model = validate_model(model)
    except Invalid as i:
        log.error("Errors occured during model validation:")
        for field, error in i.asdict().items():
            log.error("%s: %s", field, error)
        abort(status_code=400, detail='Model is not well formed')

    dataset = Dataset.by_name(model['dataset']['name'])
    if dataset is None:
        # New dataset: creator becomes a manager, private by default.
        dataset = Dataset(model)
        require.dataset.create()
        dataset.managers.append(c.account)
        dataset.private = True  # Default value
        db.session.add(dataset)
    else:
        require.dataset.update(dataset)

    log.info("Dataset: %s", dataset.name)
    # Create a new source, but reuse an existing one with the same URL
    # if the dataset already has it.
    source = Source(dataset=dataset, creator=c.account, url=csv_file)
    log.info(source)
    for source_ in dataset.sources:
        if source_.url == csv_file:
            source = source_
            break
    db.session.add(source)
    db.session.commit()

    # Send loading of source into celery queue
    load_source.delay(source.id)

    return to_jsonp(dataset_apply_links(dataset.as_dict()))
示例4: test_views_update
def test_views_update(self):
    """Posting modified views JSON persists the change on the dataset."""
    views = Dataset.by_name('cra').data['views']
    views[0]['label'] = 'Banana'
    response = self.app.post(
        url(controller='editor', action='views_update', dataset='cra'),
        params={'views': json.dumps(views)},
        extra_environ={'REMOTE_USER': 'test'},
        expect_errors=True)
    assert '200' in response.status, response.status
    # Reload from the database and verify the new label stuck.
    reloaded = Dataset.by_name('cra')
    assert 'Banana' in repr(reloaded.data['views'])
示例5: test_retract
def test_retract(self):
    """Retracting makes a public dataset private; retracting twice is a 400."""
    dataset = Dataset.by_name('cra')
    assert dataset.private is False, dataset.private

    retract_url = url(controller='editor', action='retract', dataset='cra')
    self.app.post(retract_url, extra_environ={'REMOTE_USER': 'test'})
    dataset = Dataset.by_name('cra')
    assert dataset.private is True, dataset.private

    # A second retract on an already-private dataset must fail.
    response = self.app.post(retract_url,
                             extra_environ={'REMOTE_USER': 'test'},
                             expect_errors=True)
    assert '400' in response.status, response.status
示例6: test_publish
def test_publish(self):
    """Publishing makes a private dataset public; publishing twice is a 400."""
    dataset = Dataset.by_name('cra')
    dataset.private = True
    db.session.commit()

    publish_url = url(controller='editor', action='publish', dataset='cra')
    self.app.post(publish_url, extra_environ={'REMOTE_USER': 'test'})
    dataset = Dataset.by_name('cra')
    assert dataset.private is False, dataset.private

    # A second publish on an already-public dataset must fail.
    response = self.app.post(publish_url,
                             extra_environ={'REMOTE_USER': 'test'},
                             expect_errors=True)
    assert '400' in response.status, response.status
示例7: TestCompoundDimension
class TestCompoundDimension(DatabaseTestCase):
    # Tests for compound dimensions as defined by SIMPLE_MODEL: the 'to'
    # entity dimension and the 'function' classifier dimension.

    def setup(self):
        # Bind the test database and build the dataset under test.
        super(TestCompoundDimension, self).setup()
        self.engine = db.engine
        self.meta = db.metadata
        self.meta.bind = self.engine
        self.ds = Dataset(SIMPLE_MODEL)
        self.entity = self.ds['to']
        self.classifier = self.ds['function']

    def test_is_compound(self):
        h.assert_true(self.entity.is_compound)

    def test_basic_properties(self):
        assert self.entity.name=='to', self.entity.name
        assert self.classifier.name=='function', self.classifier.name

    def test_generated_tables(self):
        # A compound dimension exposes its own table and alias; the table
        # name is prefixed with the dataset name and taxonomy.
        #assert not hasattr(self.entity, 'table'), self.entity
        #self.ds.generate()
        assert hasattr(self.entity, 'table'), self.entity
        assert self.entity.table.name=='test__' + self.entity.taxonomy, self.entity.table.name
        assert hasattr(self.entity, 'alias')
        assert self.entity.alias.name==self.entity.name, self.entity.alias.name
        cols = self.entity.table.c
        assert 'id' in cols
        # Undeclared attributes must not become columns.
        assert_raises(KeyError, cols.__getitem__, 'field')

    def test_attributes_exist_on_object(self):
        assert len(self.entity.attributes)==2, self.entity.attributes
        assert_raises(KeyError, self.entity.__getitem__, 'field')
        assert self.entity['name'].name=='name'
        assert self.entity['name'].datatype=='id'

    def test_attributes_exist_on_table(self):
        assert hasattr(self.entity, 'table'), self.entity
        assert 'name' in self.entity.table.c, self.entity.table.c
        assert 'label' in self.entity.table.c, self.entity.table.c

    def test_members(self):
        # Load two members, then verify members() supports both unfiltered
        # and filtered (by alias column) listing.
        self.ds.generate()
        self.entity.load(self.ds.bind, {'name': 'one', 'label': 'Label One'})
        self.entity.load(self.ds.bind, {'name': 'two', 'label': 'Label Two'})
        members = list(self.entity.members())
        h.assert_equal(len(members), 2)
        members = list(self.entity.members(self.entity.alias.c.name == 'one'))
        h.assert_equal(len(members), 1)
示例8: csvimport_fixture
def csvimport_fixture(name):
    """Build and commit a Source for the named CSV import fixture."""
    model_fp = csvimport_fixture_file(name, 'model.json')
    mapping_fp = csvimport_fixture_file(name, 'mapping.json')
    model = json.load(model_fp)
    # An optional mapping.json overrides the model's mapping section.
    if mapping_fp:
        model['mapping'] = json.load(mapping_fp)

    dataset = Dataset(model)
    dataset.generate()
    db.session.add(dataset)

    source = Source(dataset,
                    h.make_account(),
                    csvimport_fixture_path(name, 'data.csv'))
    db.session.add(source)
    db.session.commit()
    return source
示例9: test_new_wrong_user
def test_new_wrong_user(self):
    """A dataset created by one user cannot be re-posted by another (403)."""
    create_url = url(controller='api/version2', action='create')
    params = {
        'metadata':
            'https://dl.dropbox.com/u/3250791/sample-openspending-model.json',
        'csv_file':
            'http://mk.ucant.org/info/data/sample-openspending-dataset.csv'
    }

    # First we add a Dataset with user 'test_new'
    owner = Account.by_name('test_new')
    assert owner.api_key == 'd0610659-627b-4403-8b7f-6e2820ebc95d'
    headers = {'Authorization': 'apikey {0}'.format(owner.api_key)}
    response = self.app.post(create_url, params, headers)
    #Dataset.by_name('openspending-example').private = False
    assert "200" in response.status
    assert Dataset.by_name('openspending-example')

    # After that we try to update the Dataset with user 'test_new2'
    intruder = Account.by_name('test_new2')
    assert intruder.api_key == 'c011c340-8dad-419c-8138-1c6ded86ead5'
    headers = {'Authorization': 'apikey {0}'.format(intruder.api_key)}
    response = self.app.post(create_url, params, headers,
                             expect_errors=True)
    assert '403' in response.status
示例10: remove_dataset
def remove_dataset(dataset_name):
    """Drop the named dataset's tables and delete its database record."""
    log.warn("Dropping dataset '%s'", dataset_name)
    from openspending.model import Dataset, meta as db

    dataset = Dataset.by_name(dataset_name)
    dataset.drop()
    db.session.delete(dataset)
    db.session.commit()
示例11: index
def index(self):
    # Home page: expose every dataset visible to the current account.
    # Get all of the datasets available to the account of the logged in
    # or an anonymous user (if c.account is None)
    c.datasets = Dataset.all_by_account(c.account)
    # Total entry count across all datasets (None = no dataset filter).
    c.num_entries = dataset_entries(None)
    return templating.render('home/index.html')
示例12: archive_one
def archive_one(dataset_name, archive_dir):
    """
    Find the dataset, create the archive directory and start archiving
    """
    dataset = Dataset.by_name(dataset_name)
    if dataset is None:
        exit_with_error("Dataset not found. Unable to archive it.")

    if not os.path.exists(archive_dir):
        # Target doesn't exist yet: try to create it.
        try:
            os.makedirs(archive_dir)
        except OSError:
            exit_with_error("Couldn't create archive directory.")
    else:
        # Target exists: the user must confirm overwriting, and we refuse
        # to overwrite a plain file (we need a directory).
        if not get_confirmation("%s exists. Do you want to overwrite?"
                                % archive_dir):
            sys.exit(0)
        if os.path.isfile(archive_dir):
            exit_with_error("Cannot overwrite a file (need a directory).")

    # Archive the model (dataset metadata), the visualisations, and
    # finally download all sources.
    archive_model(dataset, archive_dir)
    archive_visualisations(dataset, archive_dir)
    update(os.path.join(archive_dir, 'sources'), dataset)
示例13: index
def index():
    """Return all admin-visible datasets as a JSON response."""
    # Dataset.get_all_admin() returns a query; .all() materialises it.
    # The previous version copied the result list element by element into
    # a second list (a hand-rolled list() copy) -- the query result is
    # passed straight to jsonify instead.
    returnset = Dataset.get_all_admin().all()
    # TODO: Facets for territories and languages
    # TODO: filters on facet dimensions
    # TODO: maybe put the pager back in
    return jsonify(returnset)
示例14: permissions
def permissions(self):
    """
    Check a user's permissions for a given dataset. This could also be
    done via request to the user, but since we're not really doing a
    RESTful service we do this via the api instead.
    """
    # Check the parameters. Since we only use one parameter we check it
    # here instead of creating a specific parameter parser
    if len(request.params) != 1 or 'dataset' not in request.params:
        return to_jsonp({'error': 'Parameter dataset missing'})

    # Get the dataset we want to check permissions for
    dataset = Dataset.by_name(request.params['dataset'])

    # 'create' is only allowed when the name is free; the other rights
    # require an existing dataset.
    if dataset is None:
        return to_jsonp({
            "create": can.dataset.create(),
            "read": False,
            "update": False,
            "delete": False
        })
    return to_jsonp({
        "create": False,
        "read": can.dataset.read(dataset),
        "update": can.dataset.update(dataset),
        "delete": can.dataset.delete(dataset)
    })
示例15: test_delete
def test_delete(self):
    """Anonymous delete is rejected (403); an editor's delete removes the dataset."""
    dataset = Dataset.by_name('cra')
    assert len(dataset) == 36, len(dataset)

    # double-check authz: an unauthenticated POST must not delete anything
    delete_url = url(controller='editor', action='delete', dataset='cra')
    response = self.app.post(delete_url, expect_errors=True)
    assert '403' in response.status
    dataset = Dataset.by_name('cra')
    assert len(dataset) == 36, len(dataset)

    # An authorised user can delete; afterwards the dataset is gone.
    self.app.post(delete_url, extra_environ={'REMOTE_USER': 'test'})
    dataset = Dataset.by_name('cra')
    assert dataset is None, dataset