本文整理汇总了Python中openspending.model.dataset.Dataset.all_by_account方法的典型用法代码示例。如果您正苦于以下问题:Python Dataset.all_by_account方法的具体用法?Python Dataset.all_by_account怎么用?Python Dataset.all_by_account使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类openspending.model.dataset.Dataset
的用法示例。
在下文中一共展示了Dataset.all_by_account方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: index
# 需要导入模块: from openspending.model.dataset import Dataset [as 别名]
# 或者: from openspending.model.dataset.Dataset import all_by_account [as 别名]
def index(self):
    """Render the home page, listing the datasets the caller may see.

    Works for both logged-in and anonymous users: when ``c.account`` is
    None, ``all_by_account`` restricts the result to public datasets.
    """
    visible = Dataset.all_by_account(c.account)
    c.datasets = visible
    # Aggregate per-territory counts for the datasets shown on the page.
    c.territories = DatasetTerritory.dataset_counts(visible)
    # Total number of entries across all datasets (None = no filter).
    c.num_entries = dataset_entries(None)
    return templating.render('home/index.html')
示例2: dataset_index
# 需要导入模块: from openspending.model.dataset import Dataset [as 别名]
# 或者: from openspending.model.dataset.Dataset import all_by_account [as 别名]
def dataset_index(account, source=None):
    """Return the datasets visible to *account*, most recently updated first.

    :param account: account whose visible datasets are listed (may be None
        for anonymous access, in which case only public datasets match).
    :param source: optional source value; when truthy, results are limited
        to datasets with that exact source.
    :return: list of ``Dataset`` instances.
    """
    # Build the query unordered, then impose our own ordering by
    # last-updated timestamp, newest first.
    query = (Dataset.all_by_account(account, order=False)
             .order_by(Dataset.updated_at.desc()))
    if source:
        query = query.filter(Dataset.source == source)
    return list(query)
示例3: search
# 需要导入模块: from openspending.model.dataset import Dataset [as 别名]
# 或者: from openspending.model.dataset.Dataset import all_by_account [as 别名]
def search(self):
    """Search entries across datasets, returning JSON(P) or CSV.

    Parses search parameters from the request, resolves which datasets
    to search (explicit list or everything visible to the account),
    enforces read permission per dataset, and either streams oversized
    result pages or runs a regular Solr query.
    """
    param_parser = SearchParamParser(request.params)
    params, errors = param_parser.parse()
    if errors:
        response.status = 400
        return to_jsonp({'errors': errors})

    expand_facets = params.pop('expand_facet_dimensions')
    format = params.pop('format')
    # CSV output carries no stats or facet metadata.
    if format == 'csv':
        params['stats'] = False
        params['facet_field'] = None

    datasets = params.pop('dataset', None)
    if not datasets:
        # No explicit dataset list: fall back to everything the current
        # account is allowed to see, optionally narrowed by category.
        query = Dataset.all_by_account(c.account)
        category = params.pop('category') if params.get('category') else None
        if category:
            query = query.filter_by(category=category)
        datasets = query.all()
        # Facet expansion only makes sense for an explicit dataset choice.
        expand_facets = False

    if not datasets:
        return {'errors': ["No dataset available."]}

    # Require read access on every dataset and restrict the query to them.
    params['filter']['dataset'] = []
    for dataset in datasets:
        require.dataset.read(dataset)
        params['filter']['dataset'].append(dataset.name)

    # Cache headers: freshest dataset update drives Last-Modified/ETag.
    response.last_modified = max(d.updated_at for d in datasets)
    etag_cache_keygen(param_parser.key(), response.last_modified)

    if params['pagesize'] > parser.defaults['pagesize'] if False else \
            params['pagesize'] > param_parser.defaults['pagesize']:
        # Oversized pages are streamed rather than buffered.
        # http://wiki.nginx.org/X-accel#X-Accel-Buffering
        response.headers['X-Accel-Buffering'] = 'no'
        if format == 'csv':
            csv_headers(response, 'entries.csv')
            streamer = CSVStreamingResponse(
                datasets,
                params,
                pagesize=param_parser.defaults['pagesize']
            )
        else:
            json_headers(filename='entries.json')
            streamer = JSONStreamingResponse(
                datasets,
                params,
                pagesize=param_parser.defaults['pagesize'],
                expand_facets=util.expand_facets
                if expand_facets else None,
                callback=request.params.get('callback')
            )
        return streamer.response()

    solr_browser = Browser(**params)
    try:
        solr_browser.execute()
    except SolrException as e:
        return {'errors': [unicode(e)]}

    # Decorate each entry with HATEOAS-style links for itself and
    # its parent dataset.
    entries = []
    for dataset, entry in solr_browser.get_entries():
        entry = entry_apply_links(dataset.name, entry)
        entry['dataset'] = dataset_apply_links(dataset.as_dict())
        entries.append(entry)

    if format == 'csv':
        return write_csv(entries, response,
                         filename='entries.csv')

    # Expanded facets are only available for a single-dataset search.
    if expand_facets and len(datasets) == 1:
        facets = solr_browser.get_expanded_facets(datasets[0])
    else:
        facets = solr_browser.get_facets()

    return to_jsonp({
        'stats': solr_browser.get_stats(),
        'facets': facets,
        'results': entries
    })