

Python Post.published Method Code Examples

This article collects typical usage examples of the models.Post.published method in Python. If you are looking for what Post.published does, how to call it, or concrete examples of its use, the hand-picked code samples below may help. You can also explore further usage examples of the containing class, models.Post.


The following shows 4 code examples of the Post.published method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
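
Before diving in, note that Post.published is used in two different senses below: Examples 1 and 3 call it as a query-returning classmethod on a Google App Engine datastore model, while Examples 2 and 4 assign a publication date to a published field on SQLAlchemy and peewee models. As a rough orientation only, a minimal sketch of the classmethod variant might look like the following (the is_published flag and property names are assumptions, not taken from the source projects):

from google.appengine.ext import db

class Post(db.Model):
    title = db.StringProperty()
    slug = db.StringProperty()
    post_date = db.DateTimeProperty(auto_now_add=True)
    is_published = db.BooleanProperty(default=False)  # hypothetical flag; the real models may differ

    @classmethod
    def published(cls):
        # Return a datastore query restricted to published posts;
        # callers chain .order()/.filter() on the result, as in Examples 1 and 3.
        return cls.all().filter('is_published =', True)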

Example 1: get_context_data

# Required import: from models import Post [as alias]
# Or: from models.Post import published [as alias]
    def get_context_data(self, *args, **kwargs):

        ctx = super(PostList, self).get_context_data(*args, **kwargs)

        page = self.request.GET.get('p', 1)

        if self.request.user.is_authenticated():
            objects = Post.all().order("-post_date")
        else:
            objects = Post.published().order("-post_date")


        pager = Paginator(objects, PAGE_SIZE)

        try:
            page_obj = pager.page(page)
        except InvalidPageException:
            raise Http404

        ctx.update({
            'paginator': pager,
            'page_obj': page_obj,
        })

        return ctx
Developer: sleepyjames, Project: sleepyjames-aeblog, Lines: 27, Source file: views.py
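
InvalidPageException is not a name the Django paginator exports, so the project presumably defines or aliases it itself. With the stock paginator the same pattern is usually written against django.core.paginator.InvalidPage (the parent of PageNotAnInteger and EmptyPage). A standalone sketch of that pagination-plus-404 idiom, with PAGE_SIZE assumed to be a module-level constant:

from django.core.paginator import Paginator, InvalidPage
from django.http import Http404

PAGE_SIZE = 10  # assumed value; the source project defines its own constant

def paginate(objects, page_number):
    # Build a paginator over the queryset and resolve the requested page,
    # turning any invalid page number into a 404.
    paginator = Paginator(objects, PAGE_SIZE)
    try:
        page_obj = paginator.page(page_number)
    except InvalidPage:
        raise Http404("No such page: %s" % page_number)
    return paginator, page_obj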

Example 2: _get_posts

# Required import: from models import Post [as alias]
# Or: from models.Post import published [as alias]
def _get_posts(s, batch, county_id, n, p):
    url = 'http://api.arbetsformedlingen.se/af/v0/platsannonser/matchning?lanid=%s&antalrader=%s&sida=%s' % (county_id, n, p)
    response = s.get(url=url)

    try:
        obj = json.loads(response.text)
        for index, item in enumerate(obj["matchningslista"]["matchningdata"]):
            try:
                post = Post.query.filter_by(external_id=str(item["annonsid"])).first()
                if post:
                    app.logger.info('Post %s already exists, adding to new batch.' % post.external_id)
                else:
                    post = Post()
                    post.external_id = str(item["annonsid"])
                    post.published = parse(item["publiceraddatum"])
                    
                    # profession is mandatory
                    post.profession = Profession.query.filter_by(name=item["yrkesbenamning"]).one()

                    if "antalplatser" in item:
                        try:
                            post.num_jobs = int(item["antalplatser"])
                        except ValueError:
                            app.logger.warning('Could not parse number of jobs for %s' % post.external_id)

                    # municipality is optional
                    try:
                        post.municipality = Municipality.query.filter_by(id=int(item["kommunkod"])).one()
                    except (NoResultFound, KeyError):
                        app.logger.warning('No municipality match "%s", post annonsid=%s, saving with unspecified.' % (item["kommunkod"], post.external_id))
                        post.municipality = Municipality.query.filter_by(id="Ospecifierad arbetsort").one()

                    post.match_data = item
                    db_session.add(post)
                post.batches.append(batch)
                db_session.commit()
                db_session.flush()
            except KeyError as e:
                app.logger.error('Attribute "%s" missing from dict. Aborting.' % e)
                app.logger.debug(json.dumps(item, indent=4))
            except IntegrityError:
                app.logger.warning('Post already exists for %s' % item["annonsid"])
                db_session.rollback()
            except ValueError as e:
                app.logger.error(e)
                app.logger.debug(json.dumps(item, indent=4))
            except NoResultFound as e:
                app.logger.error('Could not find match for %s. Aborting.' % item["annonsid"])
                app.logger.debug(e)

        # fixme: this will abort parsing if server runs out of memory
        pages = obj["matchningslista"]["antal_sidor"]
        if p < pages:
            app.logger.debug('Retrieving page %s of %s' % (p + 1, pages))
            _get_posts(s, batch, county_id, n, p + 1)
    except ValueError:
        app.logger.error('Could not parse json response from API. Aborting.')
        app.logger.debug(response.text)
Developer: alexwall1, Project: ldstat, Lines: 60, Source file: batch.py
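
The function above is meant to be driven by a harvesting script: it expects a requests session s, a Batch row to attach posts to, a county id, a page size n and a starting page p, and it recurses through the remaining result pages itself. A hypothetical call site, assuming the project exposes a Batch model and the same db_session used above (these import paths are illustrative, not taken from the repository):

import requests

from models import Batch            # assumed model and import path
from database import db_session     # assumed import path for the shared session

session = requests.Session()
session.headers.update({'Accept': 'application/json'})

batch = Batch()                     # hypothetical: one batch row per harvesting run
db_session.add(batch)
db_session.commit()

# Fetch up to 100 ads per page for county id 1, starting at page 1;
# _get_posts follows the remaining pages recursively.
_get_posts(session, batch, county_id=1, n=100, p=1)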

Example 3: get_object

# Required import: from models import Post [as alias]
# Or: from models.Post import published [as alias]
    def get_object(self):
        slug = self.kwargs.get('slug', '')

        if self.request.user.is_authenticated():
            qs = Post.all()
        else:
            qs = Post.published()

        try:
            self.object = qs.filter('slug =', slug).fetch(1)[0]
        except IndexError:
            self.object = None
Developer: sleepyjames, Project: sleepyjames-aeblog, Lines: 14, Source file: views.py
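
As in Example 1, Post.all() and Post.published() here are App Engine datastore queries, and filter('slug =', slug).fetch(1) returns at most one entity, so self.object ends up as either a Post or None. A caller would typically turn the None case into a 404; a hedged sketch of a GET handler on the same class-based view (the real view in the project may handle this differently):

# assumed import: from django.http import Http404
    def get(self, request, *args, **kwargs):
        # Hypothetical handler on the same detail view: resolve the object
        # via get_object() above and return 404 when nothing matched.
        self.get_object()
        if self.object is None:
            raise Http404("No published post with that slug")
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)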

Example 4: rss_worker

# Required import: from models import Post [as alias]
# Or: from models.Post import published [as alias]
def rss_worker(f):
    """RSS gevent worker function"""
    logging.info("Starting reader process for feed %s", f.id)

    id = f.id
    error_count = f.errors

    # Check ETag, Modified: Attempt Conditional HTTP retrieval
    # to reduce excessive polling
    if f.etag:
        d = feedparser.parse(f.url, etag=f.etag)
    elif f.last_modified:
        d = feedparser.parse(f.url, modified=f.last_modified)
    else:
        d = feedparser.parse(f.url)

    # Check returned HTTP status code
    if 'status' in d and d.status < 400:
        # Site appears to be UP
        logging.info("Feed %s is UP, status %s", f.url, str(d.status))

        # Reset error counter on successful connect
        if error_count > 0:
            q = Feed.update(errors=0).where(Feed.id == id)
            q.execute()

        # Get RSS/ATOM version number
        logging.info("Feed version: %s", d.version)

        # Catch status 301 Moved Permanently, update feed address
        if d.status == 301:
            q = Feed.update(url=d.href).where(Feed.id == id)
            q.execute()

        # Conditional HTTP:
        # Check for Etag in result and write to DB
        if 'etag' in d:
            logging.info("Etag: %s", d.etag)
            q = Feed.update(etag=d.etag).where(Feed.id == id)
            q.execute()

        # Conditional HTTP
        # Check for Last-Modified in result and write to DB
        if 'modified' in d:
            logging.info("Modified %s", d.modified)
            q = Feed.update(last_modified=d.modified).where(Feed.id == id)
            q.execute()

        # Check for feed modification date, write to DB
        if 'published' in d:
            logging.info("Published: %s", d.published)

        if 'updated' in d:
            logging.info("Updated: %s", d.updated)

        # Check for 'not-modified' status code, skip updates if found
        if d.status == 304:
            logging.info("Feed %s -- no updates found, skipping", f.url)
            return

        # If post entries exist, process them
        for post in d.entries:

            post_content = ""
            post_title = post.get('title', 'No title')

            h = html.parser.HTMLParser()
            desc = post.get('description', '')
            desc = h.unescape(desc) # unescape HTML entities
            post_description = re.sub(r'<[^>]*?>', '', desc) # crudely strip HTML tags in description

            post_published = arrow.get(post.get('published_parsed')) or arrow.now()
            if 'content' in post:
                post_content = post.content[0].value
            post_link = post.get('link', '')

            # Get post checksum (title + description + link url)
            check_string = (post_title + post_description + post_link).encode('utf8')
            post_checksum = hashlib.sha224(check_string).hexdigest()

            # If post checksum not found in DB, add post
            if Post.select().where(Post.md5 == post_checksum).count() == 0:
                p = Post()
                p.title = post_title
                p.description = post_description
                p.published = post_published.datetime  # convert from Arrow to datetime for DB
                p.content = post_content
                p.link = post_link
                p.feed = id
                p.md5 = post_checksum
                p.save()

            # TODO: Filter text for dangerous content (e.g. XSRF?)
            # Feedparser already does this to some extent

            # TODO: Spawn websocket message with new posts for web client

    else:
        # Site appears to be down

#......... remaining code omitted .........
Developer: KyubiSystems, Project: Wisewolf, Lines: 103, Source file: server.py
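
The worker's "Conditional HTTP" sections rely on feedparser's built-in support for HTTP validators: passing the stored etag or modified value makes feedparser send If-None-Match / If-Modified-Since headers, and an unchanged feed comes back with status 304 and an empty entries list. A minimal standalone sketch of that round trip (the feed URL is a placeholder):

import feedparser

FEED_URL = 'https://example.com/feed.xml'  # placeholder feed address

# First fetch: remember whichever validators the server hands back.
first = feedparser.parse(FEED_URL)
etag = getattr(first, 'etag', None)
modified = getattr(first, 'modified', None)

# Later fetch: send the validators back; an unchanged feed answers 304
# with no entries, so there is nothing to re-process.
second = feedparser.parse(FEED_URL, etag=etag, modified=modified)
if getattr(second, 'status', None) == 304:
    print('Feed unchanged, skipping update')
else:
    print('%d entries to process' % len(second.entries))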


Note: The models.Post.published method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.