本文整理汇总了Python中models.Category.objects方法的典型用法代码示例。如果您正苦于以下问题:Python Category.objects方法的具体用法?Python Category.objects怎么用?Python Category.objects使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类models.Category
的用法示例。
在下文中一共展示了Category.objects方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: all_list
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
def all_list(request):
    """.. :py:method::

    Home-page view for webanan: renders each domain's share of all
    articles (rounded to two decimals) as the ``per_dict`` context.

    :param request: Django HTTP request object.
    :returns: rendered ``webanan_list.html`` response.
    """
    # Query the domain list once and reuse it (the original issued the
    # same Domain.objects().all() query twice).
    domains = Domain.objects().all()
    domains_list = [domain.name for domain in domains]
    title = u'各主机文章数量'
    per_dict = {}
    article_all = Article.objects().count()
    for domain in domains:
        # Total articles in this domain = sum over its categories.
        domain_count = 0
        for cate in Category.objects(belong_Domain=domain).all():
            domain_count += Article.objects(belong_cate=cate).count()
        # Guard against division by zero when there are no articles at all.
        if article_all:
            per_dict[domain.name] = float(u"%.2f" % (float(domain_count) / article_all))
        else:
            per_dict[domain.name] = 0.0
    # Sort ascending by share so the template gets a stable ordering.
    per_dict = OrderedDict(sorted(per_dict.items(), key=lambda t: t[1]))
    return render_to_response('webanan_list.html', {
        'title': title,
        'per_dict': per_dict,
        'domains': domains_list,
    })
示例2: sec_list
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
def sec_list(request, domain=None):
    """.. :py:method::

    Per-domain view for webanan: renders each category's share of the
    articles inside *domain* (rounded to two decimals).

    :param request: Django HTTP request object.
    :param domain: domain name to look up.
    :raises Http404: when no Domain with that name exists.
    :returns: rendered ``webanan_list.html`` response.
    """
    domains_list = [do.name for do in Domain.objects().all()]
    title = u'{}分类文章数量'.format(domain)
    per_dict = {}
    domain_obj = Domain.objects(name=domain).first()
    # Guard clause: unknown domain -> 404 (original used an if/else).
    if not domain_obj:
        raise Http404
    categories = Category.objects(belong_Domain=domain_obj).all()
    # Count each category's articles exactly once and reuse the result
    # (the original issued the same count() query twice per category).
    counts = {}
    for cate in categories:
        counts[cate.name] = Article.objects(belong_cate=cate).count()
    article_all = sum(counts.values())
    # Guard against division by zero when the domain holds no articles.
    if article_all:
        for name, num in counts.items():
            per_dict[name] = float(u"%.2f" % (float(num) / article_all))
    per_dict = OrderedDict(sorted(per_dict.items(), key=lambda t: t[1]))
    return render_to_response('webanan_list.html', {
        'title': title,
        'per_dict': per_dict,
        'domains': domains_list,
    })
示例3: category
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
def category(slug):
    """Category page controller.

    :param slug: URL slug identifying the category.
    :returns: template context with the category and a paginator over
        its published posts.
    :raises: 404 abort when no category matches *slug*.
    """
    category = Category.objects().filter_by(slug=slug).first()
    # Bug fix: the original tested the undefined name ``post`` here,
    # which raised NameError instead of aborting with 404.
    if not category:
        abort(404)
    paginator = paginate(request, Post.objects().filter_by(is_published=True,
                                                          category_id=category.id))
    return {'category': category, 'paginator': paginator}
示例4: post
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
def post(self):
    """Create a TodoItem from the POSTed ``content`` and ``category``
    arguments.

    On any save failure, responds with ``{"stat": "error"}`` as JSON.
    On success no body is written (unchanged from the original).
    """
    content = self.get_argument('content')
    # first() returns None when the category name is unknown; the item
    # is then saved without a category reference.
    category = Category.objects(name=self.get_argument('category')).first()
    print(category)  # debug trace kept from the original
    try:
        TodoItem(content=content, category=category).save()
    except Exception:
        # Fixed py3-incompatible ``except Exception, e:`` syntax; the
        # bound exception was unused. Still broad to mirror the
        # original best-effort error handling.
        return self.write(json.dumps({'stat': 'error'}))
示例5: crawl_category
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
def crawl_category(self, ctx='', **kwargs):
    """Scrape the site's top navigation and upsert one Category per
    department link, recording its listing URL and page count.

    :param ctx: sender tag passed through to the ``common_saved`` signal.
    :raises requests.exceptions.HTTPError: on any non-2xx response.
    """
    res = requests.get(HOST)
    res.raise_for_status()
    tree = lxml.html.fromstring(res.content)
    # Department links in the top navigation menu.
    dept_nodes = tree.cssselect('div#top-navigation ul.navigation li.menu-item a')
    for dept_node in dept_nodes:
        key = dept_node.text.strip()
        # Skip the "Brands" entry — it is not a product department.
        if 'brand' in key.lower():
            continue
        combine_url = dept_node.get('href')
        # Make relative hrefs absolute against HOST.
        match = re.search(r'https?://.+', combine_url)
        if not match:
            combine_url = '%s%s' % (HOST, combine_url)
        r = requests.get(combine_url)
        r.raise_for_status()
        t = lxml.html.fromstring(r.content)
        pagesize_node = None
        # Walk the pager; the last <li> before the "next" link holds
        # the highest page number (i.e. the total page count).
        link_nodes = t.cssselect('div.atg_store_filter ul.atg_store_pager li')
        for link_node in link_nodes:
            if link_node.get('class') and 'nextLink' in link_node.get('class'):
                break
            pagesize_node = link_node
        # Default to a single page when no pager is present.
        pagesize = int(pagesize_node.cssselect('a')[0].text.strip()) if pagesize_node else 1
        is_new = False; is_updated = False
        # Upsert: fetch by key, create when missing.
        category = Category.objects(key=key).first()
        if not category:
            is_new = True
            category = Category(key=key)
            category.is_leaf = True
        # Only flag is_updated when a tracked field actually changed.
        if combine_url and combine_url != category.combine_url:
            category.combine_url = combine_url
            is_updated = True
        if pagesize and pagesize != category.pagesize:
            category.pagesize = pagesize
            is_updated = True
        category.hit_time = datetime.utcnow()
        category.save()
        # Python-2 debug trace of what was stored.
        print category.key; print category.cats; print category.pagesize; print category.combine_url; print is_new; print is_updated; print;
        # Notify listeners; is_updated is suppressed for brand-new rows.
        common_saved.send(sender=ctx, obj_type='Category', key=category.key, url=category.combine_url, \
            is_new=is_new, is_updated=((not is_new) and is_updated) )
示例6: crawl_listing
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
def crawl_listing(self, url, ctx='', **kwargs):
res = requests.get(url)
res.raise_for_status()
tree = lxml.html.fromstring(res.content)
category = Category.objects(key=kwargs.get('key')).first()
if not category:
common_failed.send(sender=ctx, url=url, reason='category %s not found in db' % kwargs.get('key'))
return
product_nodes = tree.cssselect('div#searchResults a')
for product_node in product_nodes:
price = None; listprice = None
price = product_node.cssselect('.price-6pm')[0].text
listprice_node = product_node.cssselect('.discount')
listprice = ''.join(listprice_node[0].xpath('text()')) if listprice_node else None
# eliminate products of no discountIndexError:
if price is None or listprice is None:
# common_failed.send(sender=ctx, url=url, \
# reason='listing product %s.%s cannot crawl price info -> %s / %s' % (key, title, price, listprice))
continue
key = product_node.get('data-product-id')
if not key:
common_failed.send(sender=ctx, url=url, reason='listing product has no key')
continue
combine_url = product_node.get('href')
key = '%s_%s' % (key, combine_url.split('/')[-1])
match = re.search(r'https?://.+', combine_url)
if not match:
combine_url = '%s%s' % (HOST, combine_url)
brand = product_node.cssselect('.brandName')[0].text.strip()
title = product_node.cssselect('.productName')[0].text.strip()
is_new = False; is_updated = False
product = Product.objects(key=key).first()
if not product:
is_new = True
product = Product(key=key)
product.updated = False
product.event_type = False
if title and title != product.title:
product.title = title
is_updated = True
if brand and brand != product.brand:
product.brand = brand
is_updated = True
if combine_url and combine_url != product.combine_url:
product.combine_url = combine_url
is_updated = True
if price and price != product.price:
product.price = price
is_updated = True
if listprice and listprice != product.listprice:
product.listprice = listprice
is_updated = True
if category.cats and set(category.cats).difference(product.dept):
product.dept = list(set(category.cats) | set(product.dept or []))
is_updated = True
if category.key not in product.category_key:
product.category_key.append(category.key)
is_updated = True
if is_updated:
product.list_update_time = datetime.utcnow()
# To pick the product which fit our needs, such as a certain discount, brand, dept etc.
selected = Picker(site='6pm').pick(product)
if not selected:
continue
product.hit_time = datetime.utcnow()
product.save()
common_saved.send(sender=ctx, obj_type='Product', key=product.key, url=product.combine_url, \
is_new=is_new, is_updated=((not is_new) and is_updated) )
print product.key; print product.brand; print product.title; \
print product.price, ' / ', product.listprice; print product.combine_url; \
print product.dept; print
# Go to the next page to keep on crawling.
next_page = None
page_node = tree.cssselect('div.pagination')
if not page_node:
return
last_node =page_node[0].cssselect('.last')
if last_node:
#.........这里部分代码省略.........
示例7: Server
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
print product.updated
print
if __name__ == '__main__':
    # Alternative entry point: expose the crawler as a zerorpc service
    # (kept for reference, disabled).
    # import zerorpc
    # from settings import CRAWLER_PORT
    # server = zerorpc.Server(Server())
    # server.bind('tcp://0.0.0.0:{0}'.format(CRAWLER_PORT))
    # server.run()
    s = Server()
    # s.crawl_category()
    counter = 0
    # Crawl the listing pages of every known category, in sequence.
    categories = Category.objects()
    for category in categories:
        counter += 1
        print '~~~~~~~~~~', counter
        print category.cats
        print category.combine_url; print
        s.crawl_listing(category.combine_url, **{'key': category.key})
    # Follow-up pass over not-yet-updated products (disabled).
    # for product in Product.objects(updated=False):
    #     print product.combine_url
    #     try:
    #         s.crawl_product(product.combine_url, **{'key': product.key})
    #     except requests.exceptions.HTTPError:
    #         continue
    #     except:
    #         print traceback.format_exc()
示例8: wrapper
# 需要导入模块: from models import Category [as 别名]
# 或者: from models.Category import objects [as 别名]
def wrapper(*args, **kwargs):
    """Call the wrapped view and, when it returns a template-context
    dict, inject the full category list under the ``categories`` key.
    Non-dict return values pass through untouched."""
    result = func(*args, **kwargs)
    if isinstance(result, dict):
        result.update({'categories': Category.objects().all()})
    return result