本文整理汇总了Python中MoinMoin.Page.Page.getParentPage方法的典型用法代码示例。如果您正苦于以下问题:Python Page.getParentPage方法的具体用法?Python Page.getParentPage怎么用?Python Page.getParentPage使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MoinMoin.Page.Page
的用法示例。
在下文中一共展示了Page.getParentPage方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: send_title
# Required import: from MoinMoin.Page import Page
# Alternatively: from MoinMoin.Page.Page import getParentPage
def send_title(request, text, **keywords):
    """
    Output the page header (and title).

    TODO: check all code that call us and add page keyword for the
    current page being rendered.

    @param request: the request object
    @param text: the title text
    @keyword link: URL for the title
    @keyword msg: additional message (after saving)
    @keyword pagename: 'PageName'
    @keyword page: the page instance that called us.
    @keyword print_mode: 1 (or 0)
    @keyword media: css media type, defaults to 'screen'
    @keyword allow_doubleclick: 1 (or 0)
    @keyword html_head: additional <head> code
    @keyword body_attr: additional <body> attributes
    @keyword body_onload: additional "onload" JavaScript code
    """
    # NOTE(review): this example was truncated by the source aggregator;
    # the tail of the function is not shown below the omission marker.
    from MoinMoin.Page import Page
    _ = request.getText
    # Prefer the already-instantiated page object when the caller passed one
    # (cheaper); otherwise build a Page from the given (possibly empty) name.
    if keywords.has_key('page'):
        page = keywords['page']
        pagename = page.page_name
    else:
        pagename = keywords.get('pagename', '')
        page = Page(request, pagename)
    scriptname = request.getScriptname()
    pagename_quoted = quoteWikinameURL(pagename)
    # get name of system pages
    page_front_page = getFrontPage(request).page_name
    page_help_contents = getSysPage(request, 'HelpContents').page_name
    page_title_index = getSysPage(request, 'TitleIndex').page_name
    page_word_index = getSysPage(request, 'WordIndex').page_name
    page_user_prefs = getSysPage(request, 'UserPreferences').page_name
    page_help_formatting = getSysPage(request, 'HelpOnFormatting').page_name
    page_find_page = getSysPage(request, 'FindPage').page_name
    # getattr guards against getHomePage()/getParentPage() returning None
    page_home_page = getattr(getHomePage(request), 'page_name', None)
    page_parent_page = getattr(page.getParentPage(), 'page_name', None)
    # Prepare the HTML <head> element
    user_head = [request.cfg.html_head]
    # include charset information - needed for moin_dump or any other case
    # when reading the html without a web server
    user_head.append('''<meta http-equiv="Content-Type" content="text/html;charset=%s">\n''' % config.charset)
    meta_keywords = request.getPragma('keywords')
    meta_desc = request.getPragma('description')
    if meta_keywords:
        # escape(..., 1) also quotes attribute-breaking characters
        user_head.append('<meta name="keywords" content="%s">\n' % escape(meta_keywords, 1))
    if meta_desc:
        user_head.append('<meta name="description" content="%s">\n' % escape(meta_desc, 1))
    # search engine precautions / optimization:
    # if it is an action or edit/search, send query headers (noindex,nofollow):
    if request.query_string:
        user_head.append(request.cfg.html_head_queries)
    elif request.request_method == 'POST':
        user_head.append(request.cfg.html_head_posts)
    # if it is a special page, index it and follow the links - we do it
    # for the original, English pages as well as for (the possibly
    # modified) frontpage:
    elif pagename in [page_front_page, request.cfg.page_front_page,
                      page_title_index, ]:
        user_head.append(request.cfg.html_head_index)
    # if it is a normal page, index it, but do not follow the links, because
    # there are a lot of illegal links (like actions) or duplicates:
    else:
        user_head.append(request.cfg.html_head_normal)
    if keywords.has_key('pi_refresh') and keywords['pi_refresh']:
        # pi_refresh is a dict with 'delay' and 'url' keys (from a #refresh PI)
        user_head.append('<meta http-equiv="refresh" content="%(delay)d;URL=%(url)s">' % keywords['pi_refresh'])
    # output buffering increases latency but increases throughput as well
    output = []
    # later: <html xmlns=\"http://www.w3.org/1999/xhtml\">
    output.append("""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
%s
%s
%s
""" % (
        ''.join(user_head),
        keywords.get('html_head', ''),
        request.theme.html_head({
            'title': escape(text),
            'sitename': escape(request.cfg.html_pagetitle or request.cfg.sitename),
            'print_mode': keywords.get('print_mode', False),
            'media': keywords.get('media', 'screen'),
        })
    ))
    # Links
    output.append('<link rel="Start" href="%s/%s">\n' % (scriptname, quoteWikinameURL(page_front_page)))
    #......... (remainder of this code example omitted) .........
示例2: send_title
# Required import: from MoinMoin.Page import Page
# Alternatively: from MoinMoin.Page.Page import getParentPage
def send_title(self, text, **keywords):
    """
    Output the page header (and title).

    @param text: the title text
    @keyword page: the page instance that called us - using this is more efficient than using pagename..
    @keyword pagename: 'PageName'
    @keyword print_mode: 1 (or 0)
    @keyword editor_mode: 1 (or 0)
    @keyword media: css media type, defaults to 'screen'
    @keyword allow_doubleclick: 1 (or 0)
    @keyword html_head: additional <head> code
    @keyword body_attr: additional <body> attributes
    @keyword body_onload: additional "onload" JavaScript code
    """
    # NOTE(review): this example was truncated by the source aggregator; the
    # triple-quoted HTML template opened below is never closed in the visible
    # text - the rest of the function (including the closing quotes and the
    # % substitution) lies past the omission marker.
    request = self.request
    _ = request.getText
    rev = request.rev
    # Prefer the already-instantiated page object when the caller passed one;
    # otherwise build a Page from the given (possibly empty) pagename.
    if keywords.has_key('page'):
        page = keywords['page']
        pagename = page.page_name
    else:
        pagename = keywords.get('pagename', '')
        page = Page(request, pagename)
    if keywords.get('msg', ''):
        raise DeprecationWarning("Using send_page(msg=) is deprecated! Use theme.add_msg() instead!")
    scriptname = request.script_root
    # get name of system pages
    page_front_page = wikiutil.getFrontPage(request).page_name
    page_help_contents = wikiutil.getLocalizedPage(request, 'HelpContents').page_name
    page_title_index = wikiutil.getLocalizedPage(request, 'TitleIndex').page_name
    page_site_navigation = wikiutil.getLocalizedPage(request, 'SiteNavigation').page_name
    page_word_index = wikiutil.getLocalizedPage(request, 'WordIndex').page_name
    page_help_formatting = wikiutil.getLocalizedPage(request, 'HelpOnFormatting').page_name
    page_find_page = wikiutil.getLocalizedPage(request, 'FindPage').page_name
    home_page = wikiutil.getInterwikiHomePage(request) # sorry theme API change!!! Either None or tuple (wikiname,pagename) now.
    # getattr guards against getParentPage() returning None (top-level page)
    page_parent_page = getattr(page.getParentPage(), 'page_name', None)
    # Prepare the HTML <head> element
    user_head = [request.cfg.html_head]
    # include charset information - needed for moin_dump or any other case
    # when reading the html without a web server
    user_head.append('''<meta http-equiv="Content-Type" content="%s;charset=%s">\n''' % (page.output_mimetype, page.output_charset))
    meta_keywords = request.getPragma('keywords')
    meta_desc = request.getPragma('description')
    if meta_keywords:
        # escape(..., 1) also quotes attribute-breaking characters
        user_head.append('<meta name="keywords" content="%s">\n' % wikiutil.escape(meta_keywords, 1))
    if meta_desc:
        user_head.append('<meta name="description" content="%s">\n' % wikiutil.escape(meta_desc, 1))
    # search engine precautions / optimization:
    # if it is an action or edit/search, send query headers (noindex,nofollow):
    if request.query_string:
        user_head.append(request.cfg.html_head_queries)
    elif request.method == 'POST':
        user_head.append(request.cfg.html_head_posts)
    # we don't want to have BadContent stuff indexed:
    elif pagename in ['BadContent', 'LocalBadContent', ]:
        user_head.append(request.cfg.html_head_posts)
    # if it is a special page, index it and follow the links - we do it
    # for the original, English pages as well as for (the possibly
    # modified) frontpage:
    elif pagename in [page_front_page, request.cfg.page_front_page,
                      page_title_index, 'TitleIndex',
                      page_find_page, 'FindPage',
                      page_site_navigation, 'SiteNavigation',
                      'RecentChanges', ]:
        user_head.append(request.cfg.html_head_index)
    # if it is a normal page, index it, but do not follow the links, because
    # there are a lot of illegal links (like actions) or duplicates:
    else:
        user_head.append(request.cfg.html_head_normal)
    if 'pi_refresh' in keywords and keywords['pi_refresh']:
        # pi_refresh is a (delay, url) tuple here (positional % formatting)
        user_head.append('<meta http-equiv="refresh" content="%d;URL=%s">' % keywords['pi_refresh'])
    # output buffering increases latency but increases throughput as well
    output = []
    # later: <html xmlns=\"http://www.w3.org/1999/xhtml\">
    output.append("""\
<!doctype html>
<!--[if lt IE 7]> <html class="no-js ie6 oldie" lang="en"> <![endif]-->
<!--[if IE 7]> <html class="no-js ie7 oldie" lang="en"> <![endif]-->
<!--[if IE 8]> <html class="no-js ie8 oldie" lang="en"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>%(title)s</title>
<meta name="description" content="">
<meta name="author" content="">
<meta name="viewport" content="width=device-width,initial-scale=1">
<link rel="stylesheet" href="%(static_url)scss/style.css">
<script src="%(static_url)sjs/libs/moin.common.js"></script>
<script type="text/javascript">
<!--
#.........这里部分代码省略.........
示例3: send_title
# Required import: from MoinMoin.Page import Page
# Alternatively: from MoinMoin.Page.Page import getParentPage
def send_title(self, text, **keywords):
    """ Override
    Output the page header (and title).

    @param text: the title text
    @keyword page: the page instance that called us - using this is more efficient than using pagename..
    @keyword pagename: 'PageName'
    @keyword print_mode: 1 (or 0)
    @keyword editor_mode: 1 (or 0)
    @keyword media: css media type, defaults to 'screen'
    @keyword allow_doubleclick: 1 (or 0)
    @keyword html_head: additional <head> code
    @keyword body_attr: additional <body> attributes
    @keyword body_onload: additional "onload" JavaScript code
    """
    # NOTE(review): this example was truncated by the source aggregator; the
    # triple-quoted HTML template opened below is never closed in the visible
    # text - the rest of the function (including the closing quotes and the
    # % substitution for the placeholders) lies past the omission marker.
    request = self.request
    _ = request.getText
    rev = request.rev
    # Prefer the already-instantiated page object when the caller passed one;
    # otherwise build a Page from the given (possibly empty) pagename.
    if keywords.has_key('page'):
        page = keywords['page']
        pagename = page.page_name
    else:
        pagename = keywords.get('pagename', '')
        page = Page(request, pagename)
    if keywords.get('msg', ''):
        raise DeprecationWarning("Using send_page(msg=) is deprecated! Use theme.add_msg() instead!")
    scriptname = request.script_root
    # get name of system pages
    page_front_page = wikiutil.getFrontPage(request).page_name
    page_help_contents = wikiutil.getLocalizedPage(request, 'HelpContents').page_name
    page_title_index = wikiutil.getLocalizedPage(request, 'TitleIndex').page_name
    page_site_navigation = wikiutil.getLocalizedPage(request, 'SiteNavigation').page_name
    page_word_index = wikiutil.getLocalizedPage(request, 'WordIndex').page_name
    page_help_formatting = wikiutil.getLocalizedPage(request, 'HelpOnFormatting').page_name
    page_find_page = wikiutil.getLocalizedPage(request, 'FindPage').page_name
    home_page = wikiutil.getInterwikiHomePage(request) # sorry theme API change!!! Either None or tuple (wikiname,pagename) now.
    # getattr guards against getParentPage() returning None (top-level page)
    page_parent_page = getattr(page.getParentPage(), 'page_name', None)
    # set content_type, including charset, so web server doesn't touch it:
    request.content_type = "text/html; charset=%s" % (config.charset, )
    # Prepare the HTML <head> element
    user_head = [request.cfg.html_head]
    # include charset information - needed for moin_dump or any other case
    # when reading the html without a web server
    user_head.append('''<meta charset="%s">\n''' % (page.output_charset))
    meta_keywords = request.getPragma('keywords')
    meta_desc = request.getPragma('description')
    if meta_keywords:
        # escape(..., 1) also quotes attribute-breaking characters
        user_head.append('<meta name="keywords" content="%s">\n' % wikiutil.escape(meta_keywords, 1))
    if meta_desc:
        user_head.append('<meta name="description" content="%s">\n' % wikiutil.escape(meta_desc, 1))
    # add meta statement if user has doubleclick on edit turned on or it is default
    if (pagename and keywords.get('allow_doubleclick', 0) and
        not keywords.get('print_mode', 0) and
        request.user.edit_on_doubleclick):
        if request.user.may.write(pagename): # separating this gains speed
            user_head.append('<meta name="edit_on_doubleclick" content="%s">\n' % (request.script_root or '/'))
    # search engine precautions / optimization:
    # if it is an action or edit/search, send query headers (noindex,nofollow):
    if request.query_string:
        user_head.append(request.cfg.html_head_queries)
    elif request.method == 'POST':
        user_head.append(request.cfg.html_head_posts)
    # we don't want to have BadContent stuff indexed:
    elif pagename in ['BadContent', 'LocalBadContent', ]:
        user_head.append(request.cfg.html_head_posts)
    # if it is a special page, index it and follow the links - we do it
    # for the original, English pages as well as for (the possibly
    # modified) frontpage:
    elif pagename in [page_front_page, request.cfg.page_front_page,
                      page_title_index, 'TitleIndex',
                      page_find_page, 'FindPage',
                      page_site_navigation, 'SiteNavigation',
                      'RecentChanges', ]:
        user_head.append(request.cfg.html_head_index)
    # if it is a normal page, index it, but do not follow the links, because
    # there are a lot of illegal links (like actions) or duplicates:
    else:
        user_head.append(request.cfg.html_head_normal)
    if 'pi_refresh' in keywords and keywords['pi_refresh']:
        # pi_refresh is a (delay, url) tuple here (positional % formatting)
        user_head.append('<meta http-equiv="refresh" content="%d;URL=%s">' % keywords['pi_refresh'])
    # output buffering increases latency but increases throughput as well
    output = []
    output.append("""\
<!DOCTYPE html>
<html lang="%s">
<head>
%s
<meta name="viewport" content="width=device-width, initial-scale=1.0">
%s
%s
#.........这里部分代码省略.........