本文整理汇总了Python中pynab.log.error函数的典型用法代码示例。如果您正苦于以下问题:Python error函数的具体用法?Python error怎么用?Python error使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了error函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: search_lxml
def search_lxml(show, content):
    """Search TVRage online API for show data.

    Parses the XML in `content`, fuzzy-matches candidate show names against
    show['clean_name'] and returns the best match as a dict, or None.
    """
    try:
        tree = etree.fromstring(content)
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit propagate
        log.error('Problem parsing XML with lxml')
        return None

    matches = defaultdict(list)
    # parse show names in the same order as returned by tvrage, first one is usually the good one
    for xml_show in XPATH_SHOW(tree):
        for name in extract_names(xml_show):
            ratio = int(difflib.SequenceMatcher(None, show['clean_name'], clean_name(name)).ratio() * 100)
            if ratio == 100:
                log.debug('Found 100% xml_match: {}'.format(name))
                return xmltodict.parse(etree.tostring(xml_show))['show']
            matches[ratio].append(xml_show)

    # bug fix: if TVRage returned no shows at all, matches is empty and
    # sorted(matches.items())[0] below would raise IndexError
    if not matches:
        log.warning('No TVRage match found for {}.'.format(show['clean_name']))
        return None

    # if no 100% is found, check highest ratio matches
    for ratio, xml_matches in sorted(matches.items(), reverse=True):
        for xml_match in xml_matches:
            if ratio >= 80:
                log.debug('Found {:d}% xml_match: {}'.format(ratio, XPATH_NAME(xml_match)[0]))
                return xmltodict.parse(etree.tostring(xml_match))['show']
            elif 80 > ratio > 60:
                # mid-confidence matches only accepted when the country agrees
                if 'country' in show and show['country'] and XPATH_COUNTRY(xml_match):
                    if str.lower(show['country']) == str.lower(XPATH_COUNTRY(xml_match)):
                        log.debug('Found {:d}% xml_match: {}'.format(ratio, XPATH_NAME(xml_match)[0]))
                        return xmltodict.parse(etree.tostring(xml_match))['show']

    ratio, _ = sorted(matches.items(), reverse=True)[0]
    log.warning('No TVRage match found for {}, highest match was {}%.'.format(show['clean_name'], ratio))
示例2: save
def save(binary):
    """Save a single binary to the DB, including all
    segments/parts (which takes the longest).
    --
    Note: Much quicker. Hooray!
    """
    log.debug('Saving to binary: ' + binary['name'])
    # merge parts into an existing binary of the same name if one exists,
    # otherwise insert a fresh document
    existing_binary = db.binaries.find_one({'name': binary['name']})
    try:
        if existing_binary:
            merge(existing_binary['parts'], binary['parts'])
            db.binaries.update({'_id': existing_binary['_id']}, {
                '$set': {
                    'parts': existing_binary['parts']
                }
            })
        else:
            db.binaries.insert({
                'name': binary['name'],
                'group_name': binary['group_name'],
                'posted': binary['posted'],
                'posted_by': binary['posted_by'],
                'category_id': binary['category_id'],
                'regex_id': binary['regex_id'],
                'req_id': binary['req_id'],
                'xref': binary['xref'],
                'total_parts': binary['total_parts'],
                'parts': binary['parts']
            })
    except Exception as e:
        # narrowed from a bare except (which also swallowed KeyboardInterrupt);
        # any failure lands here, not only oversized documents, so log the error
        log.error('Binary was too large to fit in DB! ({})'.format(e))
示例3: get
def get(self, group_name, messages=None):
    """Get a set of messages from the server for the specified group.

    Returns the concatenated yEnc-decoded data, or None on any failure.
    """
    if not messages:
        # bug fix: the original logged len(messages) BEFORE this check,
        # raising TypeError when called with the default messages=None
        log.error('{}: No messages were specified.'.format(group_name))
        return None

    log.info('{}: Getting {:d} messages...'.format(group_name, len(messages)))
    data = ''
    try:
        _, total, first, last, _ = self.connection.group(group_name)
        log.debug('{}: Total articles in group: {:d}'.format(group_name, total))
        for message in messages:
            article = '<{}>'.format(message)
            log.debug('{}: Getting article: {}'.format(group_name, article))
            response, (number, message_id, lines) = self.connection.body(article)
            res = pynab.yenc.yenc_decode(lines)
            if res:
                data += res
            else:
                # a single undecodable part makes the whole set useless
                return None
    except nntplib.NNTPError as nntpe:
        log.error('{}: Problem retrieving messages from server: {}.'.format(group_name, nntpe))
        return None
    return data
示例4: create
def create(self, node=None):
    """Create a pubsub node on the server; defaults to self.node."""
    if not node:
        node = self.node
    try:
        self['xep_0060'].create_node(self.pubsub_server, node)
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit propagate
        log.error('pubsub: could not create node: %s' % node)
示例5: update_blacklist
def update_blacklist():
    """Check for Blacklist update and load them into Mongo."""
    blacklist_url = config.postprocess.get('blacklist_url')
    if not blacklist_url:
        log.error('No blacklist update url in config.')
        return False

    response = requests.get(blacklist_url)
    for raw_line in response.text.splitlines():
        fields = raw_line.split('\t\t')
        # only well-formed, four-field lines are imported
        if len(fields) != 4:
            continue
        group_name, regex, _, description = fields
        log.debug('Updating blacklist {}...'.format(regex))
        db.blacklists.update(
            {
                'regex': regex
            },
            {
                '$setOnInsert': {
                    'status': 0
                },
                '$set': {
                    'group_name': group_name,
                    'regex': regex,
                    'description': description,
                }
            },
            upsert=True
        )
    return True
示例6: scan_missing
def scan_missing(group_name):
    """Scan the group for missing segments, logging (not raising) any failure."""
    try:
        return pynab.groups.scan_missing_segments(group_name)
    except Exception:
        # bug fix: traceback.format_exc() takes an optional line-limit int, not
        # an exception instance; passing `e` relied on broken/removed behaviour
        log.error('scan: nntp server is flipping out, hopefully they fix their shit: {}'.format(
            traceback.format_exc()
        ))
示例7: get
def get(self, group_name, messages=None):
    """Fetch and yEnc-decode a set of message bodies from the given group.

    Returns the concatenated decoded data, or None on failure.
    """
    self.connect()
    if not messages:
        return None

    decoded_parts = []
    try:
        _, total, first, last, _ = self.connection.group(group_name)
        for message in messages:
            response, (number, message_id, lines) = self.connection.body('<{}>'.format(message))
            decoded = pynab.yenc.yenc_decode(lines)
            if not decoded:
                # one bad part spoils the whole set
                return None
            decoded_parts.append(decoded)
    except nntplib.NNTPError as nntpe:
        log.error('server: [{}]: problem retrieving messages: {}.'.format(group_name, nntpe))
        # drop the dead connection and establish a fresh one for the next call
        self.connection = None
        self.connect()
        return None
    except socket.timeout:
        log.error('server: socket timed out, reconnecting')
        self.connection = None
        self.connect()
        return None

    return ''.join(decoded_parts)
示例8: day_to_post
def day_to_post(self, group_name, days):
    """Converts a datetime to approximate article number for the specified group."""
    # NOTE(review): indentation reconstructed from a flattened source; logic
    # tokens are unchanged.
    log.debug('{}: Finding post {:d} days old...'.format(group_name, days))
    _, count, first, last, _ = self.connection.group(group_name)
    # target moment in the past, and the dates bracketing the available range
    target_date = datetime.datetime.now(pytz.utc) - datetime.timedelta(days)
    first_date = self.post_date(group_name, first)
    last_date = self.post_date(group_name, last)
    if first_date and last_date:
        if target_date < first_date:
            # target pre-dates retention: start from the oldest article we have
            log.warning(
                '{}: First available article is newer than target date, starting from first available.'.format(
                    group_name))
            return first
        elif target_date > last_date:
            log.warning(
                '{}: Target date is more recent than newest article. Try a longer backfill.'.format(group_name))
            return False
        log.debug('{}: Searching for post where goal: {}, first: {}, last: {}'
                  .format(group_name, target_date, first_date, last_date)
                  )
        upper = last
        lower = first
        interval = math.floor((upper - lower) * 0.5)
        next_date = last_date
        log.debug('{}: Start: {:d} End: {:d} Interval: {:d}'.format(group_name, lower, upper, interval))
        # binary-search-style walk: keep lowering `upper` and halving the
        # interval until the article at `upper` is at least `days` old
        while self.days_old(next_date) < days:
            skip = 1
            temp_date = self.post_date(group_name, upper - interval)
            while temp_date > target_date:
                # step the upper bound down while it is still newer than target;
                # `skip` doubles to accelerate past long runs of new articles
                upper = upper - interval - (skip - 1)
                log.debug('{}: New upperbound: {:d} is {:d} days old.'
                          .format(group_name, upper, self.days_old(temp_date))
                          )
                skip *= 2
                temp_date = self.post_date(group_name, upper - interval)
            interval = math.ceil(interval / 2)
            if interval <= 0:
                break
            skip = 1
            log.debug('{}: Set interval to {:d} articles.'.format(group_name, interval))
            next_date = self.post_date(group_name, upper - 1)
            while not next_date:
                # article was pulled/expired on the server; skip past it,
                # doubling the step each try
                upper = upper - skip
                skip *= 2
                log.debug('{}: Article was lost, getting next: {:d}'.format(group_name, upper))
                next_date = self.post_date(group_name, upper - 1)
        log.debug('{}: Article is {:d} which is {:d} days old.'.format(group_name, upper, self.days_old(next_date)))
        return upper
    else:
        log.error('{}: Could not get group information.'.format(group_name))
        return False
示例9: publish
def publish(self, node, data):
payload = ET.fromstring("<test xmlns='test'>{}</test>".format(data))
try:
self['xep_0060'].publish(self.pubsub_server, node, payload=payload)
except Exception as e:
log.error('pubsub: could not publish to: {}'.format(node))
log.error('Exception "{}" of type {}'.format(e, type(e)))
示例10: get
def get(self):
    """Retrieve one item (self.data) from the configured pubsub node and print it."""
    try:
        result = self['xep_0060'].get_item(self.pubsub_server, self.node, self.data)
        for item in result['pubsub']['items']['substanzas']:
            print('Retrieved item %s: %s' % (item['id'], tostring(item['payload'])))
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit propagate
        log.error('pubsub: could not retrieve item %s from node %s' % (self.data, self.node))
示例11: copy_file
def copy_file(engine, data, ordering, type):
    """
    Handles a fast-copy, or a slowass one.
    If you're using postgres or a mysql derivative, this should work fine.
    Anything else? Welllllllllllllp. It's gonna be slow. Really slow.
    In fact, I'm going to point out just how slow it is.

    Returns True on success, False when the bulk load failed.
    """
    insert_start = time.time()
    engine_name = config.db.get('engine')
    if 'mysql' in engine_name:
        # ho ho ho: spool the CSV to a temp file and LOAD DATA LOCAL INFILE it
        conn = engine.raw_connection()
        cur = conn.cursor()
        (fd, filename) = tempfile.mkstemp(prefix='pynab')
        filename = filename.replace('\\', '/')
        try:
            # `with` closes the file even if a write fails (original leaked it)
            with os.fdopen(fd, 'wb') as tmp_file:
                data.seek(0)
                chunk = data.read(1048576)
                while chunk:
                    tmp_file.write(chunk.encode('utf-8'))
                    chunk = data.read(1048576)
            data.close()
            query = "LOAD DATA LOCAL INFILE '{}' INTO TABLE {} FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"' ({})" \
                .format(filename, type.__tablename__, ','.join(ordering))
            cur.execute(query)
            conn.commit()
        except Exception as e:
            log.error(e)
            return False
        finally:
            # bug fix: the original leaked the cursor and the temp file on error
            cur.close()
            os.remove(filename)
    elif 'postgre' in engine_name:
        conn = engine.raw_connection()
        cur = conn.cursor()
        try:
            cur.copy_expert(
                "COPY {} ({}) FROM STDIN WITH CSV ESCAPE E'\\\\'".format(type.__tablename__, ', '.join(ordering)), data)
            # bug fix: commit moved inside the try so a failed commit is
            # logged and reported instead of propagating after the except
            conn.commit()
        except Exception as e:
            log.error(e)
            return False
        finally:
            # bug fix: the original leaked the cursor on error
            cur.close()
    else:
        # this... this is the slow one
        # i don't even want to think about how slow this is
        # it's really slow
        # slower than the github api
        engine.execute(type.__table__.insert(), data)
    insert_end = time.time()
    log.debug('parts: {} insert: {:.2f}s'.format(engine_name, insert_end - insert_start))
    return True
示例12: process
def process(limit=None, category=0):
    """Process releases for NFO parts and download them."""
    # NOTE(review): indentation reconstructed from a flattened source; the
    # placement of the `if not found:` block and the final commit is the
    # reviewer's best reading — confirm against upstream.
    with Server() as server:
        with db_session() as db:
            # releases with no NFO attached and not already blacked out for NFOs
            # noinspection PyComparisonWithNone,PyComparisonWithNone
            query = db.query(Release).join(Group).join(NZB).filter(Release.nfo == None).filter(
                Release.nfo_metablack_id == None)
            if category:
                query = query.filter(Release.category_id == int(category))
            if limit:
                releases = query.order_by(Release.posted.desc()).limit(limit)
            else:
                releases = query.order_by(Release.posted.desc()).all()
            for release in releases:
                found = False
                nzb = pynab.nzbs.get_nzb_details(release.nzb)
                if nzb:
                    # collect candidate nfo segments, skipping oversized ones
                    nfos = []
                    for nfo in nzb['nfos']:
                        for part in nfo['segments']:
                            if int(part['size']) > NFO_MAX_FILESIZE:
                                continue
                            nfos.append(part)
                    # try each candidate until one downloads successfully
                    for nfo in nfos:
                        try:
                            article = server.get(release.group.name, [nfo['message_id'], ])
                        except Exception as e:
                            # if usenet's not accessible, don't block it forever
                            log.error('nfo: unable to get nfo: {}'.format(e))
                            continue
                        if article:
                            # store the nfo gzip-compressed and attach to release
                            data = gzip.compress(article.encode('utf-8'))
                            nfo = NFO(data=data)
                            db.add(nfo)
                            release.nfo = nfo
                            release.nfo_metablack_id = None
                            db.add(release)
                            log.debug('nfo: [{}] - nfo added'.format(
                                release.search_name
                            ))
                            found = True
                            break
                if not found:
                    # nothing usable: black out the release so it isn't retried
                    log.debug('nfo: [{}] - [{}] - no nfos in release'.format(
                        release.id,
                        release.search_name
                    ))
                    mb = MetaBlack(nfo=release, status='IMPOSSIBLE')
                    db.add(mb)
            db.commit()
示例13: start
def start(self, event):
    """Session-start handler: fetch roster, announce presence, then run self.action."""
    self.get_roster()
    self.send_presence()
    try:
        # self.action names a method on this object (e.g. 'publish', 'get')
        getattr(self, self.action)()
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit propagate
        log.error('pubsub: could not execute: %s' % self.action)
示例14: update
def update(group_name):
    """Scan a group for new articles, logging (not raising) server failures."""
    try:
        return pynab.groups.scan(group_name, limit=config.scan.get('group_scan_limit', 2000000))
    except pynab.server.AuthException as e:
        log.error('server: {}'.format(e))
    except Exception:
        # bug fix: traceback.format_exc() takes an optional line-limit int, not
        # an exception instance; passing `e` relied on broken/removed behaviour
        log.error('scan: nntp server is flipping out, hopefully they fix their shit: {}'.format(
            traceback.format_exc()
        ))
示例15: search
def search(show):
    """Search TVRage's online API for show data.

    Returns the parsed show dict via search_lxml, or None when the HTTP
    request fails.
    """
    try:
        r = requests.get(TVRAGE_FULL_SEARCH_URL, params={'show': show['clean_name']})
    except Exception:
        # narrowed from a bare except: so KeyboardInterrupt/SystemExit propagate
        log.error('Problem retrieving TVRage XML. The API is probably down.')
        return None
    content = r.content
    return search_lxml(show, content)