This page collects typical usage examples of the Python method MoinMoin.PageEditor.PageEditor.mtime_usecs. If you are wondering what PageEditor.mtime_usecs does or how to call it, the selected examples below should help; you can also look at the containing class, MoinMoin.PageEditor.PageEditor, for more context.
Two code examples of PageEditor.mtime_usecs are shown below.
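Before the examples, a minimal sketch of the method itself may help: mtime_usecs() returns the page's last-modification time in microseconds since the epoch, and wikiutil.version2timestamp() converts that value into seconds, comparable with time.time(). This sketch is not one of the collected examples; it assumes a live MoinMoin request object and uses the page name u'BadContent' purely for illustration.

# Minimal sketch, assuming a live MoinMoin `request` object.
from MoinMoin.PageEditor import PageEditor
from MoinMoin import wikiutil
import time

p = PageEditor(request, u'BadContent')
usecs = p.mtime_usecs()                   # modification time in microseconds since epoch
secs = wikiutil.version2timestamp(usecs)  # convert to seconds (float), like time.time()
age = time.time() - secs                  # age of the page in seconds

Both examples below follow exactly this pattern: they convert the microsecond value to seconds and compare it against a cutoff to decide whether the local copy is stale.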
Example 1: getblacklist
# Required import: from MoinMoin.PageEditor import PageEditor [as alias]
# Or: from MoinMoin.PageEditor.PageEditor import mtime_usecs [as alias]
# Assumed module-level context (the original lives in MoinMoin's antispam module):
#   import time, datetime
#   from MoinMoin import caching, wikiutil, log
#   logging = log.getLogger(__name__)
#   Error and WikirpcError are exception classes defined in that module.
def getblacklist(request, pagename, do_update):
    """ Get blacklist, possibly downloading new copy

    @param request: current request (request instance)
    @param pagename: bad content page name (unicode)
    @rtype: list
    @return: list of blacklisted regular expressions
    """
    from MoinMoin.PageEditor import PageEditor
    p = PageEditor(request, pagename, uid_override="Antispam subsystem")
    mymtime = wikiutil.version2timestamp(p.mtime_usecs())
    if do_update:
        tooold = time.time() - 1800
        failure = caching.CacheEntry(request, "antispam", "failure", scope='wiki')
        fail_time = failure.mtime() # only update if no failure in the last half hour
        if (mymtime < tooold) and (fail_time < tooold):
            logging.info("%d *BadContent too old, have to check for an update..." % tooold)
            import xmlrpclib
            import socket

            timeout = 15 # timeout for reaching the master server via xmlrpc
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)

            master_url = request.cfg.antispam_master_url
            master = xmlrpclib.ServerProxy(master_url)
            try:
                # Get BadContent info
                master.putClientInfo('ANTISPAM-CHECK', request.url)
                response = master.getPageInfo(pagename)

                # It seems that response is always a dict
                if isinstance(response, dict) and 'faultCode' in response:
                    raise WikirpcError("failed to get BadContent information",
                                       response)

                # Compare date against local BadContent copy
                masterdate = response['lastModified']

                if isinstance(masterdate, datetime.datetime):
                    # for python 2.5
                    mydate = datetime.datetime(*tuple(time.gmtime(mymtime))[0:6])
                else:
                    # for python <= 2.4.x
                    mydate = xmlrpclib.DateTime(tuple(time.gmtime(mymtime)))

                logging.debug("master: %s mine: %s" % (masterdate, mydate))
                if mydate < masterdate:
                    # Get new copy and save
                    logging.info("Fetching page from %s..." % master_url)
                    master.putClientInfo('ANTISPAM-FETCH', request.url)
                    response = master.getPage(pagename)
                    if isinstance(response, dict) and 'faultCode' in response:
                        raise WikirpcError("failed to get BadContent data", response)
                    p._write_file(response)
                    mymtime = wikiutil.version2timestamp(p.mtime_usecs())
                else:
                    failure.update("") # we didn't get a modified version; this avoids
                                       # permanent polling on every save when there
                                       # is no updated master page

            except (socket.error, xmlrpclib.ProtocolError), err:
                logging.error('Timeout / socket / protocol error when accessing %s: %s' % (master_url, str(err)))
                # update cache to wait before the next try
                failure.update("")
            except (xmlrpclib.Fault, ), err:
                logging.error('Fault on %s: %s' % (master_url, str(err)))
                # update cache to wait before the next try
                failure.update("")
            except Error, err:
                # In case of Error, log the error and fall back to the local BadContent copy.
                logging.error(str(err))

            # set back socket timeout
            socket.setdefaulttimeout(old_timeout)
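A detail worth noting in this example is the empty cache entry trick: failure.update("") merely touches a wiki-scoped cache file, so its mtime() serves as a "last failure / last fruitless check" timestamp that throttles master lookups to at most one per interval. The standalone sketch below isolates that pattern; the helper name is hypothetical and only MoinMoin's caching API is assumed.

# Sketch of the throttle pattern; master_check_allowed is a hypothetical name.
from MoinMoin import caching
import time

def master_check_allowed(request, interval=1800):
    marker = caching.CacheEntry(request, "antispam", "failure", scope='wiki')
    # If the marker was touched within the interval, skip contacting the master.
    return marker.mtime() < time.time() - interval

# After a failed or fruitless master query, touch the marker:
#     marker.update("")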
Example 2: getblacklist
# Required import: from MoinMoin.PageEditor import PageEditor [as alias]
# Or: from MoinMoin.PageEditor.PageEditor import mtime_usecs [as alias]
# Assumed module-level context: time, wikiutil, and caching as in Example 1;
# dprint is a module-local debug-logging helper, and Error / WikirpcError are
# exception classes defined in the original antispam module.
def getblacklist(request, pagename, do_update):
    """ Get blacklist, possibly downloading new copy

    @param request: current request (request instance)
    @param pagename: bad content page name (unicode)
    @rtype: list
    @return: list of blacklisted regular expressions
    """
    from MoinMoin.PageEditor import PageEditor
    p = PageEditor(request, pagename, uid_override="Antispam subsystem")
    invalidate_cache = False
    if do_update:
        tooold = time.time() - 3600
        mymtime = wikiutil.version2timestamp(p.mtime_usecs())
        failure = caching.CacheEntry(request, "antispam", "failure")
        fail_time = failure.mtime() # only update if no failure in the last hour
        if (mymtime < tooold) and (fail_time < tooold):
            dprint("%d *BadContent too old, have to check for an update..." % tooold)
            import xmlrpclib
            # TODO: replace the following with `import socket` when we require py 2.3,
            # and change the call / exception names accordingly
            from MoinMoin.support import timeoutsocket

            timeout = 15 # timeout for reaching the master server via xmlrpc
            old_timeout = timeoutsocket.getDefaultSocketTimeout()
            timeoutsocket.setDefaultSocketTimeout(timeout)

            # For production code
            uri = "http://moinmaster.wikiwikiweb.de:8000/?action=xmlrpc2"
            # For testing (use your test wiki as BadContent source)
            ##uri = "http://localhost/main/?action=xmlrpc2"
            master = xmlrpclib.ServerProxy(uri)

            try:
                # Get BadContent info
                master.putClientInfo('ANTISPAM-CHECK',
                                     request.http_host + request.script_name)
                response = master.getPageInfo(pagename)

                # It seems that response is always a dict
                if isinstance(response, dict) and 'faultCode' in response:
                    raise WikirpcError("failed to get BadContent information",
                                       response)

                # Compare date against local BadContent copy
                masterdate = response['lastModified']
                mydate = xmlrpclib.DateTime(tuple(time.gmtime(mymtime)))

                dprint("master: %s mine: %s" % (masterdate, mydate))
                if mydate < masterdate:
                    # Get new copy and save
                    dprint("Fetching page from master...")
                    master.putClientInfo('ANTISPAM-FETCH',
                                         request.http_host + request.script_name)
                    response = master.getPage(pagename)
                    if isinstance(response, dict) and 'faultCode' in response:
                        raise WikirpcError("failed to get BadContent data",
                                           response)
                    p._write_file(response)
                    invalidate_cache = True

            except (timeoutsocket.Timeout, timeoutsocket.error, xmlrpclib.ProtocolError), err:
                # Log the error
                # TODO: check that this does not flood the logs!
                dprint('Timeout / socket / protocol error when accessing'
                       ' moinmaster: %s' % str(err))
                # update cache to wait before the next try
                failure.update("")

            except Error, err:
                # In case of Error, log the error and fall back to the local
                # BadContent copy.
                dprint(str(err))

            # set back socket timeout
            timeoutsocket.setDefaultSocketTimeout(old_timeout)
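Comparing the two examples: Example 2 is the older variant, with a hard-coded master URI (moinmaster.wikiwikiweb.de) and the bundled timeoutsocket module, while Example 1 reads the master URL from request.cfg.antispam_master_url and adjusts the standard library's socket default timeout. One fragility shared by both is that an unexpected exception raised before the last line would leave the process-wide socket timeout changed. A try/finally guard, sketched below and not part of the original code, makes the restore unconditional.

# Sketch: restoring the process-wide socket timeout unconditionally.
import socket

timeout = 15
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
    pass  # talk to the master server here
finally:
    # always restore the previous default, even on unexpected errors
    socket.setdefaulttimeout(old_timeout)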