本文整理汇总了Python中catalog.elastic_models.Declaration类的典型用法代码示例。如果您正苦于以下问题:Python Declaration类的具体用法?Python Declaration怎么用?Python Declaration使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Declaration类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sitemap
def sitemap(request):
    """Render sitemap.xml: static pages, live news, region/office facet
    pages and every declaration detail page."""
    # TODO: REFACTOR ME?
    urls = [
        reverse("wagtail_serve", args=[""]),
        reverse("wagtail_serve", args=["about/"]),
        reverse("wagtail_serve", args=["api/"]),
        reverse("wagtail_serve", args=["news/"]),
        reverse("regions_home"),
        reverse("business_intelligence"),
    ]

    # Each live news page contributes its own URL.
    urls.extend(news.url for news in NewsPage.objects.live())

    # Collect region names via a terms aggregation (count-only request).
    region_search = Declaration.search().params(search_type="count")
    region_search.aggs.bucket(
        'per_region', 'terms', field='general.post.region', size=0)
    for region_bucket in region_search.execute().aggregations.per_region.buckets:
        if region_bucket.key == "":
            # Skip declarations with no region set.
            continue
        urls.append(reverse("region", kwargs={"region_name": region_bucket.key}))

        # Offices within this region (empty office names excluded).
        office_search = Declaration.search()\
            .filter(
                Term(general__post__region=region_bucket.key) &
                Not(Term(general__post__office='')))\
            .params(search_type="count")
        office_search.aggs.bucket(
            'per_office', 'terms', field='general.post.office', size=0)
        for office_bucket in office_search.execute().aggregations.per_office.buckets:
            urls.append(reverse(
                "region_office",
                kwargs={"region_name": region_bucket.key,
                        "office_name": office_bucket.key}))

    # All offices, regardless of region.
    office_search = Declaration.search().params(search_type="count")
    office_search.aggs.bucket(
        'per_office', 'terms', field='general.post.office', size=0)
    for office_bucket in office_search.execute().aggregations.per_office.buckets:
        if office_bucket.key == "":
            continue
        urls.append(reverse("office", kwargs={"office_name": office_bucket.key}))

    # One detail URL per declaration; fields=[] fetches ids only.
    id_search = Declaration.search().extra(fields=[], size=100000)
    urls.extend(
        reverse("details", kwargs={"declaration_id": hit._id})
        for hit in id_search.execute())

    return render(request, "sitemap.jinja",
                  {"urls": urls}, content_type="application/xml")
示例2: handle
def handle(self, *args, **options):
    """Import declarations from a ';'-delimited CSV file into the
    Declaration index; first positional argument is the file path."""
    try:
        file_path = args[0]
    except IndexError:
        raise CommandError('First argument must be a source file')

    with open(file_path, 'r', newline='', encoding='utf-8') as source:
        rows = csv.DictReader(source, delimiter=";")
        Declaration.init()  # Apparently this is required to init mappings

        loaded = 0
        for record in rows:
            Declaration(**self.map_fields(record)).save()
            loaded += 1

        self.stdout.write(
            'Loaded {} items to persistence storage'.format(loaded))
示例3: office
def office(request, office_name):
    """List declarations filed under the given office, paginated."""
    office_results = Declaration.search().filter(
        'term', general__post__office=office_name)
    return {
        'query': office_name,
        'results': paginated_search(request, office_results)
    }
示例4: details
def details(request, declaration_id):
    """Show a single declaration; raise 404 for a non-numeric or
    unknown id."""
    try:
        found = Declaration.get(id=int(declaration_id))
    except (ValueError, NotFoundError):
        # Covers both malformed ids (int() fails) and missing documents.
        raise Http404("Таких не знаємо!")
    return {"declaration": found}
示例5: stats_processor
def stats_processor(request):
    """Context processor exposing site-wide declaration counters."""
    search = Declaration.search()
    # Distinct-person cardinality aggregation kept for reference:
    # res = search.params(search_type="count").aggs.metric(
    #     "distinct_names", "cardinality", field="full_name").execute()
    return {
        'total_declarations': search.count(),
        'total_persons': search.count()  # res.aggregations.distinct_names.value
    }
示例6: regions_home
def regions_home(request):
    """Region landing page: one facet bucket per region name."""
    region_search = Declaration.search().params(search_type="count")
    region_search.aggs.bucket(
        'per_region', 'terms', field='general.post.region', size=0)
    response = region_search.execute()
    return {'facets': response.aggregations.per_region.buckets}
示例7: sitemap
def sitemap(request):
    """Render sitemap.xml: static pages plus region, office and
    declaration detail URLs derived from the index."""
    # TODO: REFACTOR ME?
    urls = [
        reverse("home"),
        reverse("about"),
        reverse("regions_home"),
    ]

    # One URL per region, plus one per (region, office) pair.
    region_search = Declaration.search().params(search_type="count")
    region_search.aggs.bucket(
        'per_region', 'terms', field='general.post.region', size=0)
    for region_bucket in region_search.execute().aggregations.per_region.buckets:
        urls.append(reverse("region", kwargs={"region_name": region_bucket.key}))

        office_search = Declaration.search()\
            .filter(
                Term(general__post__region=region_bucket.key) &
                Not(Term(general__post__office='')))\
            .params(search_type="count")
        office_search.aggs.bucket(
            'per_office', 'terms', field='general.post.office', size=0)
        for office_bucket in office_search.execute().aggregations.per_office.buckets:
            urls.append(reverse(
                "region_office",
                kwargs={"region_name": region_bucket.key,
                        "office_name": office_bucket.key}))

    # One URL per office across all regions.
    office_search = Declaration.search().params(search_type="count")
    office_search.aggs.bucket(
        'per_office', 'terms', field='general.post.office', size=0)
    for office_bucket in office_search.execute().aggregations.per_office.buckets:
        urls.append(reverse("office", kwargs={"office_name": office_bucket.key}))

    # One detail URL per declaration; fields=[] fetches ids only.
    id_search = Declaration.search().extra(fields=[], size=100000)
    urls.extend(
        reverse("details", kwargs={"declaration_id": hit._id})
        for hit in id_search.execute())

    return render(request, "sitemap.jinja",
                  {"urls": urls}, content_type="application/xml")
示例8: search
def search(request):
    """Full-text search over all declaration fields via `_all`."""
    query = request.GET.get("q", "")
    results = Declaration.search()
    if query:
        results = results.query("match", _all=query)
    else:
        # Empty query: show everything.
        results = results.query('match_all')
    return {
        "query": query,
        "results": paginated_search(request, results)
    }
示例9: handle
def handle(self, *args, **options):
    """Load declarations from a JSON dump, skipping records that already
    exist (matched on last name, first name and declaration year, and on
    patronymic when present)."""
    try:
        file_path = args[0]
    except IndexError:
        raise CommandError('First argument must be a source file')

    with open(file_path, 'r', newline='', encoding='utf-8') as source:
        records = json.load(source)

        loaded = 0
        Declaration.init()  # Apparently this is required to init mappings

        for record in records:
            mapped = self.map_fields(record)

            # Names are lowercased and split on '-' to match the
            # analyzed terms in the index.
            dupe_query = Declaration.search().filter(
                Term(general__last_name=mapped[
                    'general']['last_name'].lower().split('-')) &
                Term(general__name=mapped[
                    'general']['name'].lower().split('-')) &
                Term(intro__declaration_year=mapped[
                    'intro']['declaration_year'])
            )
            if mapped['general']['patronymic']:
                dupe_query = dupe_query.filter(Term(
                    general__patronymic=mapped['general']['patronymic'].lower()))

            if not dupe_query.execute().hits:
                Declaration(**mapped).save()
                loaded += 1

        self.stdout.write(
            'Loaded {} items to persistence storage'.format(loaded))
示例10: search
def search(request):
    """Full-text search with a relaxed fallback.

    First tries an AND match on `_all`; when that yields nothing,
    retries with OR semantics requiring at least two matching terms.
    """
    query = request.GET.get("q", "")
    results = Declaration.search()
    if not query:
        results = results.query('match_all')
    else:
        results = results.query(
            "match", _all={"query": query, "operator": "and"})
        if not results.count():
            # Strict match found nothing — loosen to OR with a
            # minimum of two matching terms.
            results = Declaration.search().query(
                "match",
                _all={
                    "query": query,
                    "operator": "or",
                    "minimum_should_match": "2"
                }
            )
    return {
        "query": query,
        "results": paginated_search(request, results)
    }
示例11: region
def region(request, region_name):
    """Region page: facet buckets of non-empty offices in the region."""
    office_search = Declaration.search()\
        .filter(
            Term(general__post__region=region_name) &
            Not(Term(general__post__office='')))\
        .params(search_type="count")
    office_search.aggs.bucket(
        'per_office', 'terms', field='general.post.office', size=0)
    response = office_search.execute()
    return {
        'facets': response.aggregations.per_office.buckets,
        'region_name': region_name
    }
示例12: handle
def handle(self, *args, **options):
    """Import grouped CSV rows as declarations.

    Rows with an empty or 'Ок' status are grouped by a key column,
    each group merged into one declaration; a declaration that already
    exists (same names and year) is updated in place via its `_id`.
    Args: source file path, then an id prefix for new documents.
    """
    try:
        file_path = args[0]
        id_prefix = args[1]
    except IndexError:
        raise CommandError(
            'First argument must be a source file and second is a id prefix')

    groups = defaultdict(list)
    with open(file_path, 'r', newline='', encoding='utf-8') as source:
        reader = csv.DictReader(source, delimiter=',')
        valid_rows = 0
        for row in reader:
            # Header may be English or Ukrainian.
            status_col = 'Status' if 'Status' in row else 'Статус'
            if row[status_col] == '' or row[status_col] == 'Ок':
                groups[row[self._group_column(row)]].append(row)
                valid_rows += 1

        self.stdout.write('Read {} valid rows from the input file'.format(valid_rows))

        Declaration.init()  # Apparently this is required to init mappings

        loaded = 0
        for declaration in map(self.merge_group, groups.values()):
            mapped = self.map_fields(declaration, id_prefix)

            # Names are lowercased/split to match the analyzed terms.
            dupe_query = Declaration.search().filter(
                Term(general__last_name=mapped[
                    'general']['last_name'].lower().split('-')) &
                Term(general__name=mapped[
                    'general']['name'].lower().split('-')) &
                Term(intro__declaration_year=mapped[
                    'intro']['declaration_year'])
            )
            if mapped['general']['patronymic']:
                dupe_query = dupe_query.filter(Term(
                    general__patronymic=mapped['general']['patronymic'].lower()))

            matches = dupe_query.execute()
            if matches.hits:
                self.stdout.write(
                    "%s (%s) already exists" % (
                        mapped['general']['full_name'],
                        mapped['intro']['declaration_year']))
                # Reuse the existing document id so save() overwrites it.
                mapped['_id'] = matches.hits[0]._id

            Declaration(**mapped).save()
            loaded += 1

        self.stdout.write('Loaded {} items to persistence storage'.format(loaded))
示例13: handle
def handle(self, *args, **options):
    """Import grouped CSV rows as declarations (Terms-filter variant).

    Rows with an empty or 'Ок' status are grouped and merged; existing
    declarations (matched by name terms and year) are overwritten via
    their `_id`, new ones are created and logged as such.
    Args: source file path, then an id prefix for new documents.
    """
    try:
        file_path = args[0]
        id_prefix = args[1]
    except IndexError:
        raise CommandError("First argument must be a source file and second is a id prefix")

    groups = defaultdict(list)
    with open(file_path, "r", newline="", encoding="utf-8") as source:
        reader = csv.DictReader(source, delimiter=",")
        valid_rows = 0
        for row in reader:
            # Header may be English or Ukrainian.
            status_col = "Status" if "Status" in row else "Статус"
            if row[status_col] == "" or row[status_col] == "Ок":
                groups[row[self._group_column(row)]].append(row)
                valid_rows += 1

        self.stdout.write("Read {} valid rows from the input file".format(valid_rows))

        Declaration.init()  # Apparently this is required to init mappings

        loaded = 0
        for declaration in map(self.merge_group, groups.values()):
            mapped = self.map_fields(declaration, id_prefix)

            # Terms (plural) matches any of the hyphen-split name parts.
            dupe_query = Declaration.search().filter(
                Terms(general__last_name=mapped["general"]["last_name"].lower().split("-"))
                & Terms(general__name=mapped["general"]["name"].lower().split("-"))
                & Term(intro__declaration_year=mapped["intro"]["declaration_year"])
            )
            if mapped["general"]["patronymic"]:
                dupe_query = dupe_query.filter(
                    Term(general__patronymic=mapped["general"]["patronymic"].lower()))

            matches = dupe_query.execute()
            if matches.hits:
                self.stdout.write(
                    "%s (%s) already exists" % (mapped["general"]["full_name"], mapped["intro"]["declaration_year"])
                )
                # Reuse the existing document id so save() overwrites it.
                mapped["_id"] = matches.hits[0]._id
            else:
                self.stdout.write(
                    "%s (%s) created" % (mapped["general"]["full_name"], mapped["intro"]["declaration_year"])
                )

            Declaration(**mapped).save()
            loaded += 1

        self.stdout.write("Loaded {} items to persistence storage".format(loaded))
示例14: assume
def assume(q, fuzziness):
    """Return up to 10 fuzzy full-name completion suggestions for `q`.

    Runs a completion suggester against `general.full_name_suggest`
    with the given `fuzziness` and unicode-aware matching.

    Returns a list of suggestion strings, or an empty list when the
    suggest request does not succeed.
    """
    search = Declaration.search()\
        .suggest(
            'name',
            q,
            completion={
                'field': 'general.full_name_suggest',
                'size': 10,
                'fuzzy': {
                    'fuzziness': fuzziness,
                    'unicode_aware': 1
                }
            }
        )
    res = search.execute()
    if res.success():
        return [val['text'] for val in res.suggest['name'][0]['options']]
    # Bug fix: the original had a bare `[]` expression here (no
    # `return`), so the failure path silently returned None.
    return []
示例15: handle
def handle(self, *args, **options):
    """Build the analytics report from all declarations.

    With a positional file-path argument the full report is written to
    that file; otherwise an HTML fragment is published as (or into)
    the analytics Wagtail page.
    """
    dump_to_file = bool(args)

    all_decls = Declaration.search().query('match_all').scan()
    table = self._generate_table(all_decls)
    report = self._run_knitr(table, fragment_only=not dump_to_file)

    if dump_to_file:
        with open(args[0], 'w', encoding='utf-8') as f:
            f.write(report)
        return

    root_page = Site.objects.get(is_default_site=True).root_page
    try:
        analytics_page = root_page.get_children().get(
            slug=settings.ANALYTICS_SLUG).specific
    except Page.DoesNotExist:
        # First run: create the analytics page under the site root.
        page_instance = RawHTMLPage(
            owner=None,
            title=settings.ANALYTICS_TITLE,
            slug=settings.ANALYTICS_SLUG)
        analytics_page = root_page.add_child(instance=page_instance)

    analytics_page.body = '<div class="analytics-wrapper">' + report + '</div>'
    revision = analytics_page.save_revision(user=None)
    revision.publish()
    self.stdout.write('Analytics page "{}" has been published.'.format(analytics_page.url))