本文整理汇总了Python中MoinMoin.Page.Page.getRevList方法的典型用法代码示例。如果您正苦于以下问题:Python Page.getRevList方法的具体用法?Python Page.getRevList怎么用?Python Page.getRevList使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MoinMoin.Page.Page
的用法示例。
在下文中一共展示了Page.getRevList方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: convert_editlog
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def convert_editlog(page, output = None, overwrite = False):
    """Convert a MoinMoin per-page edit-log into a '.changes' metadata file.

    Target format appears to be DokuWiki (dw.cleanID / dw.metaFN are used to
    build IDs and the output path) -- TODO confirm against the `dw` helper.

    @param page: MoinMoin Page object whose edit-log is converted
    @param output: output page name; defaults to the wiki name of the page dir
    @param overwrite: passed through to writefile() to allow clobbering
    """
    pagedir = page.getPagePath()
    pagename = wikiname(pagedir)
    if not output:
        output = pagename
    edit_log = editlog.EditLog(request, page.getPagePath('edit-log'))
    # changes is keyed by mtime (seconds, as a string) so later entries with
    # the same timestamp overwrite earlier ones.
    changes = {}
    for log in edit_log:
        # Attachment actions are not supported. perhaps add anyway?
        if log.action in ('ATTNEW', 'ATTDEL', 'ATTDRW'):
            continue
        # Sample edit-log line being translated:
        # 1201095949 192.168.2.23 E start [email protected]
        author = log.hostname
        if log.userid:
            # Prefer the registered user's name over the hostname.
            userdata = user.User(request, log.userid)
            if userdata.name:
                author = userdata.name
        try:
            # Map MoinMoin actions to single-letter change codes
            # (E=edit, C=create, R=revert); unknown actions pass through.
            action = {
                'SAVE' : 'E',
                'SAVENEW' : 'C',
                'SAVE/REVERT' : 'R',
            }[log.action]
        except KeyError:
            action = log.action
        mtime = str(log.ed_time_usecs / USEC)
        changes[mtime] = u"\t".join([mtime, log.addr, action, dw.cleanID(log.pagename), author, log.comment])
    # see if we have missing entries, try to recover: every on-disk revision
    # should have produced one edit-log line.
    page = Page(request, pagename)
    if len(page.getRevList()) != len(changes):
        print "RECOVERING edit-log, missing %d entries" % (len(page.getRevList()) - len(changes))
        for rev in page.getRevList():
            page = Page(request, pagename, rev = rev)
            mtime = page.mtime_usecs() / USEC
            if not mtime:
                # No usable mtime in page metadata; fall back to the
                # revision file's filesystem mtime.
                pagefile, realrev, exists = page.get_rev(rev = rev);
                if os.path.exists(pagefile):
                    mtime = int(os.path.getmtime(pagefile))
                    print "Recovered %s: %s" % (rev, mtime)
            mtime = str(mtime)
            if not changes.has_key(mtime):
                # Synthesize a placeholder entry for the missing revision.
                changes[mtime] = u"\t".join([mtime, '127.0.0.1', '?', dw.cleanID(pagename), 'root', 'recovered entry'])
                print "ADDING %s" % mtime
    # Sort chronologically (keys are numeric-string timestamps embedded in
    # the values themselves, so sorting the values orders by time).
    changes = sorted(changes.values())
    out_file = os.path.join(output_dir, 'meta', dw.metaFN(output, '.changes'))
    writefile(out_file, changes, overwrite = overwrite)
示例2: _do_queued_updates
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def _do_queued_updates(self, request, amount=5):
    """ Process up to `amount` queued page updates and removals.

    Assumes that the write lock is acquired.

    @param request: current request, used for Page construction and config
    @param amount: maximum number of queue entries to process per call
    """
    self.touch()
    writer = xapidx.Index(self.dir, True)
    writer.configure(self.prefixMap, self.indexValueMap)
    # do all page updates
    pages = self.update_queue.pages()[:amount]
    for name in pages:
        p = Page(request, name)
        if request.cfg.xapian_index_history:
            # Index every revision of the page, not just the current one.
            for rev in p.getRevList():
                self._index_page(writer, Page(request, name, rev=rev), mode='update')
        else:
            self._index_page(writer, p, mode='update')
        self.update_queue.remove([name])
    # do page/attachment removals; queue entries look like "pagename//attachment"
    items = self.remove_queue.pages()[:amount]
    for item in items:
        _item = item.split('//')
        p = Page(request, _item[0])
        self._remove_item(writer, p, _item[1])
        self.remove_queue.remove([item])
    writer.close()
示例3: _index_page
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def _index_page(self, request, connection, pagename, mode="update"):
    """ Index a page.
    Index all revisions (if wanted by configuration) and all attachments.
    @param request: request suitable for indexing
    @param connection: the Indexer connection object
    @param pagename: a page name
    @param mode: 'add' = just add, no checks
                 'update' = check if already in index and update if needed (mtime)
    """
    page = Page(request, pagename)
    revlist = page.getRevList() # recent revs first, does not include deleted revs
    logging.debug("indexing page %r, %d revs found" % (pagename, len(revlist)))
    if not revlist:
        # we have an empty revision list, that means the page is not there any more,
        # likely it (== all of its revisions, all of its attachments) got either renamed or nuked
        wikiname = request.cfg.interwikiname or u"Self"
        sc = self.get_search_connection()
        docs_to_delete = sc.get_all_documents_with_fields(wikiname=wikiname, pagename=pagename)
        # any page rev, any attachment
        sc.close()
        for doc in docs_to_delete:
            connection.delete(doc.id)
        logging.debug("page %s (all revs, all attachments) removed from xapian index" % pagename)
    else:
        # Decide which revisions to (re)index and which to purge from the index.
        if request.cfg.xapian_index_history:
            index_revs, remove_revs = revlist, []
        else:
            if page.exists(): # is current rev not deleted?
                index_revs, remove_revs = revlist[:1], revlist[1:]
            else:
                index_revs, remove_revs = [], revlist
        for revno in index_revs:
            updated = self._index_page_rev(request, connection, pagename, revno, mode=mode)
            logging.debug("updated page %r rev %d (updated==%r)" % (pagename, revno, updated))
            if not updated:
                # we reached the revisions that are already present in the index
                break
        for revno in remove_revs:
            # XXX remove_revs can be rather long for pages with many revs and
            # XXX most page revs usually will be already deleted. optimize?
            self._remove_page_rev(request, connection, pagename, revno)
            logging.debug("removed page %r rev %d" % (pagename, revno))
    # Attachments are indexed regardless of whether the page itself survived.
    from MoinMoin.action import AttachFile
    for attachmentname in AttachFile._get_files(request, pagename):
        self._index_attachment(request, connection, pagename, attachmentname, mode)
示例4: _getHits
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def _getHits(self, pages, matchSearchFunction):
    """ Get the hit tuples in pages through matchSearchFunction.

    @param pages: iterable of raw search hits (dicts, possibly wrapped in a
                  {'values': ..., 'uid': ...} envelope)
    @param matchSearchFunction: callable(page=..., uid=...) returning match
                                objects (or a falsy value for no match)
    @return: list of hit tuples; for this wiki the tuple is
             (wikiname, Page, attachment, matches), for other wikis it is
             (wikiname, pagename, attachment, None, revision)
    """
    logging.debug("_getHits searching in %d pages ..." % len(pages))
    hits = []
    # Maps pagename -> (revision, hit tuple) so that, outside history search,
    # only the newest matching revision of a page is kept in `hits`.
    revisionCache = {}
    fs_rootpage = self.fs_rootpage
    for hit in pages:
        # Hits may be wrapped in an envelope with 'values'/'uid' keys.
        if 'values' in hit:
            valuedict = hit['values']
            uid = hit['uid']
        else:
            valuedict = hit
            uid = None
        wikiname = valuedict['wikiname']
        pagename = valuedict['pagename']
        attachment = valuedict['attachment']
        logging.debug("_getHits processing %r %r %r" % (wikiname, pagename, attachment))
        if 'revision' in valuedict and valuedict['revision']:
            revision = int(valuedict['revision'])
        else:
            revision = 0
        if wikiname in (self.request.cfg.interwikiname, 'Self'): # THIS wiki
            page = Page(self.request, pagename, rev=revision)
            if not self.historysearch and revision:
                revlist = page.getRevList()
                # revlist can be empty if page was nuked/renamed since it was included in xapian index
                if not revlist or revlist[0] != revision:
                    # nothing there at all or not the current revision
                    continue
            if attachment:
                if pagename == fs_rootpage: # not really an attachment
                    page = Page(self.request, "%s/%s" % (fs_rootpage, attachment))
                    hits.append((wikiname, page, None, None))
                else:
                    matches = matchSearchFunction(page=None, uid=uid)
                    hits.append((wikiname, page, attachment, matches))
            else:
                matches = matchSearchFunction(page=page, uid=uid)
                logging.debug("matchSearchFunction %r returned %r" % (matchSearchFunction, matches))
                if matches:
                    # Keep only the newest matching revision per page: drop a
                    # previously recorded older hit before appending this one.
                    if not self.historysearch and \
                       pagename in revisionCache and \
                       revisionCache[pagename][0] < revision:
                        hits.remove(revisionCache[pagename][1])
                        del revisionCache[pagename]
                    hits.append((wikiname, page, attachment, matches))
                    revisionCache[pagename] = (revision, hits[-1])
        else: # other wiki
            hits.append((wikiname, pagename, attachment, None, revision))
    return hits
示例5: _getHits
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def _getHits(self, pages):
    """ Get the hit tuples in pages through _get_match.

    @param pages: iterable of raw search hit dicts with 'wikiname',
                  'pagename', 'attachment' and optional 'uid'/'revision' keys
    @return: list of (wikiname, page-or-pagename, attachment, matches,
             revision) tuples
    """
    logging.debug("_getHits searching in %d pages ..." % len(pages))
    hits = []
    # Maps pagename -> (revision, hit tuple) so that, outside history search,
    # only the newest matching revision of a page is kept in `hits`.
    revisionCache = {}
    fs_rootpage = self.fs_rootpage
    for hit in pages:
        uid = hit.get('uid')
        wikiname = hit['wikiname']
        pagename = hit['pagename']
        attachment = hit['attachment']
        revision = int(hit.get('revision', 0))
        logging.debug("_getHits processing %r %r %d %r" % (wikiname, pagename, revision, attachment))
        if wikiname in (self.request.cfg.interwikiname, 'Self'): # THIS wiki
            page = Page(self.request, pagename, rev=revision)
            if not self.historysearch and revision:
                revlist = page.getRevList()
                # revlist can be empty if page was nuked/renamed since it was included in xapian index
                if not revlist or revlist[0] != revision:
                    # nothing there at all or not the current revision
                    logging.debug("no history search, skipping non-current revision...")
                    continue
            if attachment:
                # revision currently is 0 ever
                if pagename == fs_rootpage: # not really an attachment
                    page = Page(self.request, "%s/%s" % (fs_rootpage, attachment))
                    hits.append((wikiname, page, None, None, revision))
                else:
                    matches = self._get_match(page=None, uid=uid)
                    hits.append((wikiname, page, attachment, matches, revision))
            else:
                matches = self._get_match(page=page, uid=uid)
                logging.debug("self._get_match %r" % matches)
                if matches:
                    # Keep only the newest matching revision per page: drop a
                    # previously recorded older hit before appending this one.
                    if not self.historysearch and pagename in revisionCache and revisionCache[pagename][0] < revision:
                        hits.remove(revisionCache[pagename][1])
                        del revisionCache[pagename]
                    hits.append((wikiname, page, attachment, matches, revision))
                    revisionCache[pagename] = (revision, hits[-1])
        else: # other wiki
            hits.append((wikiname, pagename, attachment, None, revision))
    logging.debug("_getHits returning %r." % hits)
    return hits
示例6: _index_pages
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def _index_pages(self, request, files=None, mode='update'):
    """ Index all pages (and all given files)

    This should be called from indexPages or indexPagesInNewThread only!
    This may take some time, depending on the size of the wiki and speed
    of the machine.

    When called in a new thread, lock is acquired before the call,
    and this method must release it when it finishes or fails.

    @param request: the current request
    @keyword files: an optional list of files to index
    @keyword mode: how to index the files, either 'add', 'update' or
                   'rebuild'
    """
    # rebuilding the DB: delete it and add everything
    if mode == 'rebuild':
        for f in os.listdir(self.dir):
            os.unlink(os.path.join(self.dir, f))
        mode = 'add'
    # Pre-bind writer so the finally clause cannot raise NameError when
    # self.touch() or xapidx.Index() fails before the assignment completes.
    writer = None
    try:
        self.touch()
        writer = xapidx.Index(self.dir, True)
        writer.configure(self.prefixMap, self.indexValueMap)
        pages = request.rootpage.getPageList(user='', exists=1)
        logging.debug("indexing all (%d) pages..." % len(pages))
        for pagename in pages:
            p = Page(request, pagename)
            request.page = p
            if request.cfg.xapian_index_history:
                # Index every revision of the page, not just the current one.
                for rev in p.getRevList():
                    self._index_page(writer,
                                     Page(request, pagename, rev=rev),
                                     mode)
            else:
                self._index_page(writer, p, mode)
        if files:
            logging.debug("indexing all files...")
            for fname in files:
                fname = fname.strip()
                self._index_file(request, writer, fname, mode)
        writer.close()
    finally:
        # Explicit __del__ mirrors the original cleanup contract of xapidx;
        # only call it if the writer was actually created.
        if writer is not None:
            writer.__del__()
示例7: execute
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
#.........这里部分代码省略.........
items_limit,
max_items,
unique,
diffs,
ddiffs,
)
# Feed envelope
page = Page(request, pagename)
f = output.cofeed(
ROOT(NS(u"", ATOM_NAMESPACE), NS(u"wiki", RSSWIKI_NAMESPACE), E_CURSOR((ATOM_NAMESPACE, u"feed")))
)
f.send(E((ATOM_NAMESPACE, u"id"), full_url(request, page).encode(config.charset))),
f.send(E((ATOM_NAMESPACE, u"title"), cfg.sitename.encode(config.charset))),
f.send(E((ATOM_NAMESPACE, u"link"), {u"href": request.url_root.encode(config.charset)})),
f.send(E((ATOM_NAMESPACE, u"summary"), ("RecentChanges at %s" % cfg.sitename).encode(config.charset))),
# Icon
# E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),
# if cfg.interwikiname:
# handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)
for item in logdata:
anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
page = Page(request, item.pagename)
# link = full_url(request, page, anchor=anchor)
if ddiffs:
link = full_url(request, page, querystr={"action": "diff"})
else:
link = full_url(request, page)
# description
desc_text = item.comment
if diffs:
# TODO: rewrite / extend wikiutil.pagediff
# searching for the matching pages doesn't really belong here
revisions = page.getRevList()
rl = len(revisions)
for idx in range(rl):
rev = revisions[idx]
if rev <= item.rev:
if idx + 1 < rl:
lines = wikiutil.pagediff(
request, item.pagename, revisions[idx + 1], item.pagename, 0, ignorews=1
)
if len(lines) > 20:
lines = lines[:20] + ["...\n"]
lines = "\n".join(lines)
lines = wikiutil.escape(lines)
desc_text = "%s\n<pre>\n%s\n</pre>\n" % (desc_text, lines)
break
# if desc_text:
# handler.simpleNode('description', desc_text)
# contributor
edattr = {}
# if cfg.show_hosts:
# edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
if item.editor[0] == "interwiki":
edname = "%s:%s" % item.editor[1]
##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
else: # 'ip'
edname = item.editor[1]
##edattr[(None, 'link')] = link + "?action=info"
history_link = full_url(request, page, querystr={"action": "info"})
f.send(
E(
(ATOM_NAMESPACE, u"entry"),
E((ATOM_NAMESPACE, u"id"), link.encode(config.charset)),
E((ATOM_NAMESPACE, u"title"), item.pagename.encode(config.charset)),
E((ATOM_NAMESPACE, u"updated"), timefuncs.W3CDate(item.time).encode(config.charset)),
E((ATOM_NAMESPACE, u"link"), {u"href": link.encode(config.charset)}),
E((ATOM_NAMESPACE, u"summary"), desc_text.encode(config.charset)),
E((ATOM_NAMESPACE, u"author"), E((ATOM_NAMESPACE, u"name"), edname.encode(config.charset))),
# E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
# wiki extensions
E((RSSWIKI_NAMESPACE, u"wiki:version"), ("%i" % (item.ed_time_usecs)).encode(config.charset)),
E((RSSWIKI_NAMESPACE, u"wiki:status"), (u"deleted", u"updated")[page.exists()]),
E((RSSWIKI_NAMESPACE, u"wiki:diff"), link.encode(config.charset)),
E((RSSWIKI_NAMESPACE, u"wiki:history"), history_link.encode(config.charset)),
# handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
# handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
)
)
# emit logo data
# if logo:
# handler.startNode('image', attr={
# (handler.xmlns['rdf'], 'about'): logo,
# })
# handler.simpleNode('title', cfg.sitename)
# handler.simpleNode('link', baseurl)
# handler.simpleNode('url', logo)
# handler.endNode('image')
f.close()
request.write(output.read())
示例8: execute
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
#.........这里部分代码省略.........
if cfg.interwikiname:
handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)
handler.startNode('items')
handler.startNode(('rdf', 'Seq'))
for item in logdata:
link = "%s%s#%04d%02d%02d%02d%02d%02d" % ((interwiki,
wikiutil.quoteWikinameURL(item.pagename),) + item.time[:6])
handler.simpleNode(('rdf', 'li'), None, attr={
(handler.xmlns['rdf'], 'resource'): link,
})
handler.endNode(('rdf', 'Seq'))
handler.endNode('items')
handler.endNode('channel')
# emit logo data
if logo:
handler.startNode('image', attr={
(handler.xmlns['rdf'], 'about'): logo,
})
handler.simpleNode('title', cfg.sitename)
handler.simpleNode('link', interwiki)
handler.simpleNode('url', logo)
handler.endNode('image')
# emit items
for item in logdata:
page = Page(request, item.pagename)
link = interwiki + wikiutil.quoteWikinameURL(item.pagename)
rdflink = "%s#%04d%02d%02d%02d%02d%02d" % ((link,) + item.time[:6])
handler.startNode('item', attr={
(handler.xmlns['rdf'], 'about'): rdflink,
})
# general attributes
handler.simpleNode('title', item.pagename)
if ddiffs:
handler.simpleNode('link', link+"?action=diff")
else:
handler.simpleNode('link', link)
handler.simpleNode(('dc', 'date'), util.W3CDate(item.time))
# description
desc_text = item.comment
if diffs:
# TODO: rewrite / extend wikiutil.pagediff
# searching for the matching pages doesn't really belong here
revisions = page.getRevList()
rl = len(revisions)
for idx in range(rl):
rev = revisions[idx]
if rev <= item.rev:
if idx+1 < rl:
lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
if len(lines) > 20: lines = lines[:20] + ['...\n']
desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, '\n'.join(lines))
break
if desc_text:
handler.simpleNode('description', desc_text)
# contributor
edattr = {}
if cfg.show_hosts:
edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
if isinstance(item.editor, Page):
edname = item.editor.page_name
##edattr[(None, 'link')] = interwiki + wikiutil.quoteWikiname(edname)
else:
edname = item.editor
##edattr[(None, 'link')] = link + "?action=info"
# this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
# if you know how to do this right, please send us a patch
handler.startNode(('dc', 'contributor'))
handler.startNode(('rdf', 'Description'), attr=edattr)
handler.simpleNode(('rdf', 'value'), edname)
handler.endNode(('rdf', 'Description'))
handler.endNode(('dc', 'contributor'))
# wiki extensions
handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
handler.simpleNode(('wiki', 'diff'), link + "?action=diff")
handler.simpleNode(('wiki', 'history'), link + "?action=info")
# handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
# handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
handler.endNode('item')
# end SAX stream
handler.endDocument()
# send the generated XML document
request.http_headers(["Content-Type: text/xml; charset=%s" % config.charset] + request.nocache)
request.write(out.getvalue())
request.finish()
request.no_closing_html_code = 1
示例9: do_diff
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def do_diff(pagename, request):
""" Handle "action=diff"
checking for either a "rev=formerrevision" parameter
or rev1 and rev2 parameters
"""
if not request.user.may.read(pagename):
Page(request, pagename).send_page(request)
return
try:
date = request.form['date'][0]
try:
date = long(date) # must be long for py 2.2.x
except StandardError:
date = 0
except KeyError:
date = 0
try:
rev1 = request.form['rev1'][0]
try:
rev1 = int(rev1)
except StandardError:
rev1 = 0
except KeyError:
rev1 = -1
try:
rev2 = request.form['rev2'][0]
try:
rev2 = int(rev2)
except StandardError:
rev2 = 0
except KeyError:
rev2 = 0
if rev1 == -1 and rev2 == 0:
try:
rev1 = request.form['rev'][0]
try:
rev1 = int(rev1)
except StandardError:
rev1 = -1
except KeyError:
rev1 = -1
# spacing flag?
try:
ignorews = int(request.form['ignorews'][0])
except (KeyError, ValueError, TypeError):
ignorews = 0
_ = request.getText
# get a list of old revisions, and back out if none are available
currentpage = Page(request, pagename)
revisions = currentpage.getRevList()
if len(revisions) < 2:
currentpage.send_page(request, msg=_("No older revisions available!"))
return
if date: # this is how we get called from RecentChanges
rev1 = 0
log = editlog.EditLog(request, rootpagename=pagename)
for line in log.reverse():
if date >= line.ed_time_usecs and int(line.rev) != 99999999:
rev1 = int(line.rev)
break
else:
rev1 = 1
rev2 = 0
# Start output
# This action generate content in the user language
request.setContentLanguage(request.lang)
request.http_headers()
wikiutil.send_title(request, _('Diff for "%s"') % (pagename,), pagename=pagename)
if (rev1>0 and rev2>0 and rev1>rev2) or (rev1==0 and rev2>0):
rev1,rev2 = rev2,rev1
oldrev1,oldcount1 = None,0
oldrev2,oldcount2 = None,0
# get the filename of the version to compare to
edit_count = 0
for rev in revisions:
edit_count += 1
if rev <= rev1:
oldrev1,oldcount1 = rev,edit_count
if rev2 and rev >= rev2:
oldrev2,oldcount2 = rev,edit_count
if (oldrev1 and oldrev2) or (oldrev1 and not rev2):
break
if rev1 == -1:
oldpage = Page(request, pagename, rev=revisions[1])
oldcount1 = oldcount1 - 1
elif rev1 == 0:
oldpage = currentpage
#.........这里部分代码省略.........
示例10: execute
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
#.........这里部分代码省略.........
handler.simpleNode(('rdf', 'li'), None, attr={(handler.xmlns['rdf'], 'resource'): link, })
handler.endNode(('rdf', 'Seq'))
handler.endNode('items')
handler.endNode('channel')
# emit logo data
if logo:
handler.startNode('image', attr={
(handler.xmlns['rdf'], 'about'): logo,
})
handler.simpleNode('title', cfg.sitename)
handler.simpleNode('link', baseurl)
handler.simpleNode('url', logo)
handler.endNode('image')
# emit items
for item in logdata:
page = Page(request, item.pagename)
anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
rdflink = full_url(request, page, anchor=anchor)
handler.startNode('item', attr={(handler.xmlns['rdf'], 'about'): rdflink, })
# general attributes
handler.simpleNode('title', item.pagename)
if ddiffs:
handler.simpleNode('link', full_url(request, page, querystr={'action': 'diff'}))
else:
handler.simpleNode('link', full_url(request, page))
handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))
# description
if item.editor[0] == 'interwiki':
edname = "%s:%s" % item.editor[1]
##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
else: # 'ip'
edname = item.editor[1]
##edattr[(None, 'link')] = link + "?action=info"
# this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
# if you know how to do this right, please send us a patch
user = edname.split(':')[-1]
user_link = full_url(request, Page(request, user))
desc_text = 'Cambio por <a href="%s">%s</a> -- "%s"' % (user_link, user, item.comment)
if diffs:
# TODO: rewrite / extend wikiutil.pagediff
# searching for the matching pages doesn't really belong here
revisions = page.getRevList()
rl = len(revisions)
for idx in range(rl):
rev = revisions[idx]
if rev <= item.rev:
if idx + 1 < rl:
lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
if len(lines) > 20:
lines = lines[:20] + ['... (Continua)\n']
# vamos a colorear las lineas!
fixed_lines = []
for line in lines:
line = wikiutil.escape(line)
if line.startswith('+'):
line = "<font color='green'>%s</font>" % line
elif line.startswith('-'):
line = "<font color='red'>%s</font>" % line
fixed_lines.append(line)
lines = fixed_lines
lines = '\n'.join(lines)
desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, lines)
break
if desc_text:
handler.simpleNode('description', desc_text)
# contributor
edattr = {}
if cfg.show_hosts:
edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
handler.startNode(('dc', 'contributor'))
handler.startNode(('rdf', 'Description'), attr=edattr)
handler.simpleNode(('rdf', 'value'), edname)
handler.endNode(('rdf', 'Description'))
handler.endNode(('dc', 'contributor'))
# wiki extensions
handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
handler.simpleNode(('wiki', 'diff'), full_url(request, page, querystr={'action': 'diff'}))
handler.simpleNode(('wiki', 'history'), full_url(request, page, querystr={'action': 'info'}))
# handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
# handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
handler.endNode('item')
# end SAX stream
handler.endDocument()
request.write(out.getvalue())
示例11: test_page_change_message
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def test_page_change_message(request):
page = Page(request, "FrontPage")
print "Provided with a dumb change type argument, this should raise an exception!"
py.test.raises(notification.UnknownChangeType, notification.page_change_message,
"StupidType", request, page, "en", revisions=page.getRevList())
示例12: xmlrpc_getDiff
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def xmlrpc_getDiff(self, pagename, from_rev, to_rev, n_name=None):
    """
    Gets the binary difference between two page revisions.

    @param pagename: unicode string qualifying the page name

    @param fromRev: integer specifying the source revision. May be None to
    refer to a virtual empty revision which leads to a diff
    containing the whole page.

    @param toRev: integer specifying the target revision. May be None to
    refer to the current revision. If the current revision is the same
    as fromRev, there will be a special error condition "ALREADY_CURRENT"

    @param n_name: do a tag check verifying that n_name was the normalised
    name of the last tag

    If both fromRev and toRev are None, this function acts similar to getPage, i.e. it will diff("",currentRev).

    @return: Returns a dict:
    * status (not a field, implicit, returned as Fault if not SUCCESS):
    * "SUCCESS" - if the diff could be retrieved successfully
    * "NOT_EXIST" - item does not exist
    * "FROMREV_INVALID" - the source revision is invalid
    * "TOREV_INVALID" - the target revision is invalid
    * "INTERNAL_ERROR" - there was an internal error
    * "INVALID_TAG" - the last tag does not match the supplied normalised name
    * "ALREADY_CURRENT" - this not merely an error condition. It rather means that
    there is no new revision to diff against which is a good thing while
    synchronisation.
    * current: the revision number of the current revision (not the one which was diff'ed against)
    * diff: Binary object that transports a zlib-compressed binary diff (see bdiff.py, taken from Mercurial)
    * conflict: if there is a conflict on the page currently
    """
    from MoinMoin.util.bdiff import textdiff, compress
    from MoinMoin.wikisync import TagStore
    pagename = self._instr(pagename)
    if n_name is not None:
        n_name = self._instr(n_name)
    # User may read page?
    if not self.request.user.may.read(pagename):
        return self.notAllowedFault()
    def allowed_rev_type(data):
        # A revision argument is valid if it is None or a positive int.
        if data is None:
            return True
        return isinstance(data, int) and data > 0
    if not allowed_rev_type(from_rev):
        return xmlrpclib.Fault("FROMREV_INVALID", "Incorrect type for from_rev.")
    if not allowed_rev_type(to_rev):
        return xmlrpclib.Fault("TOREV_INVALID", "Incorrect type for to_rev.")
    currentpage = Page(self.request, pagename)
    if not currentpage.exists():
        return xmlrpclib.Fault("NOT_EXIST", "Page does not exist.")
    revisions = currentpage.getRevList()
    if from_rev is not None and from_rev not in revisions:
        return xmlrpclib.Fault("FROMREV_INVALID", "Unknown from_rev.")
    if to_rev is not None and to_rev not in revisions:
        return xmlrpclib.Fault("TOREV_INVALID", "Unknown to_rev.")
    # use lambda to defer execution in the next lines
    if from_rev is None:
        oldcontents = lambda: ""
    else:
        oldpage = Page(self.request, pagename, rev=from_rev)
        oldcontents = lambda: oldpage.get_raw_body_str()
    if to_rev is None:
        newpage = currentpage
        newcontents = lambda: currentpage.get_raw_body_str()
    else:
        newpage = Page(self.request, pagename, rev=to_rev)
        newcontents = lambda: newpage.get_raw_body_str()
    # Same real revision on both sides with non-empty source: nothing to diff.
    if oldcontents() and oldpage.get_real_rev() == newpage.get_real_rev():
        return xmlrpclib.Fault("ALREADY_CURRENT", "There are no changes.")
    if n_name is not None:
        # Tag check: the caller's notion of the last synchronisation tag must
        # match what is stored for this page.
        tags = TagStore(newpage)
        last_tag = tags.get_last_tag()
        if last_tag is not None and last_tag.normalised_name != n_name:
            return xmlrpclib.Fault("INVALID_TAG", "The used tag is incorrect because the normalised name does not match.")
    newcontents = newcontents()
    conflict = wikiutil.containsConflictMarker(newcontents)
    diffblob = xmlrpclib.Binary(compress(textdiff(oldcontents(), newcontents)))
    return {"conflict": conflict, "diff": diffblob, "diffversion": 1, "current": currentpage.get_real_rev()}
示例13: testGetRevList
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def testGetRevList(self):
    """The revision list of an existing page must contain revision 1."""
    front_page = Page(self.request, u"FrontPage")
    revisions = front_page.getRevList()
    assert 1 in revisions
示例14: execute
# 需要导入模块: from MoinMoin.Page import Page [as 别名]
# 或者: from MoinMoin.Page.Page import getRevList [as 别名]
def execute(pagename, request):
""" Handle "action=diff"
checking for either a "rev=formerrevision" parameter
or rev1 and rev2 parameters
"""
if not request.user.may.read(pagename):
Page(request, pagename).send_page()
return
try:
date = request.form['date'][0]
try:
date = long(date) # must be long for py 2.2.x
except StandardError:
date = 0
except KeyError:
date = 0
try:
rev1 = int(request.form.get('rev1', [-1])[0])
except StandardError:
rev1 = 0
try:
rev2 = int(request.form.get('rev2', [0])[0])
except StandardError:
rev2 = 0
if rev1 == -1 and rev2 == 0:
rev1 = request.rev
if rev1 is None:
rev1 = -1
# spacing flag?
ignorews = int(request.form.get('ignorews', [0])[0])
_ = request.getText
# get a list of old revisions, and back out if none are available
currentpage = Page(request, pagename)
currentrev = currentpage.current_rev()
if currentrev < 2:
request.theme.add_msg(_("No older revisions available!"), "error")
currentpage.send_page()
return
if date: # this is how we get called from RecentChanges
rev1 = 0
log = editlog.EditLog(request, rootpagename=pagename)
for line in log.reverse():
if date >= line.ed_time_usecs and int(line.rev) != 99999999:
rev1 = int(line.rev)
break
else:
rev1 = 1
rev2 = 0
# Start output
# This action generates content in the user language
request.setContentLanguage(request.lang)
request.emit_http_headers()
request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1)
if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0:
rev1, rev2 = rev2, rev1
if rev1 == -1:
oldrev = currentrev - 1
oldpage = Page(request, pagename, rev=oldrev)
elif rev1 == 0:
oldrev = currentrev
oldpage = currentpage
else:
oldrev = rev1
oldpage = Page(request, pagename, rev=oldrev)
if rev2 == 0:
newrev = currentrev
newpage = currentpage
else:
newrev = rev2
newpage = Page(request, pagename, rev=newrev)
edit_count = abs(newrev - oldrev)
f = request.formatter
request.write(f.div(1, id="content"))
oldrev = oldpage.get_real_rev()
newrev = newpage.get_real_rev()
revlist = currentpage.getRevList()
# code below assumes that the page exists and has at least
# one revision in the revlist, just bail out if not. Users
# shouldn't really run into this anyway.
if not revlist:
request.write(f.div(0)) # end content div
request.theme.send_footer(pagename)
request.theme.send_closing_html()
#.........这里部分代码省略.........