本文整理汇总了Python中pynab.log.debug函数的典型用法代码示例。如果您正苦于以下问题:Python debug函数的具体用法?Python debug怎么用?Python debug使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了debug函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: parseNzedbirc
def parseNzedbirc(unformattedPre):
    """Parse a nzedbirc pre line into a dict of release attributes.

    :param unformattedPre: raw pre line, e.g. "NUK: [DT: ...][TT: ...]..."
    :return: dict with name/source/category/nuked/requestid/requestgroup/
             filename/searchname keys, or None if the line could not be parsed
    """
    # NOTE: `(?<pretime>...)` is the `regex`-module spelling of a named group;
    # it would be invalid under stdlib `re`.
    PRE_REGEX = regex.compile(
        r'(?P<preType>.+): \[DT: (?<pretime>.+)\]\[TT: (?P<name>.+)\]\[SC: (?P<source>.+)\]\[CT: (?P<category>.+)\]\[RQ: (?P<request>.+)\]\[SZ: (?P<size>.+)\]\[FL: (?P<files>.+)\]\[FN: (?P<filename>.+)\]')
    try:
        formattedPre = PRE_REGEX.search(unformattedPre).groupdict()
    except Exception as e:
        # bug fix: the original fell through with an empty dict and crashed
        # below with a KeyError on 'preType'; bail out explicitly instead
        log.debug("pre: Error parsing nzedbirc - {}".format(e))
        return None
    formattedPre['nuked'] = formattedPre['preType'] == "NUK"
    # Deal with splitting out requests if they exist
    if formattedPre['request'] != "N/A":
        # partition() tolerates a missing ':' (split(':')[1] raised IndexError)
        requestid, _, requestgroup = formattedPre['request'].partition(":")
        formattedPre['requestid'] = requestid
        formattedPre['requestgroup'] = requestgroup
    else:
        formattedPre['requestid'] = None
        # bug fix: this key was only set in the other branch, so consumers
        # saw an inconsistent dict shape
        formattedPre['requestgroup'] = None
    formattedPre['searchname'] = releases.clean_release_name(formattedPre['name'])
    # remove any columns we dont need. Perhaps a way to filter these out via regex? Or a way to ignore via sqlalchemy
    for key in ("preType", "size", "files", "request"):
        formattedPre.pop(key, None)
    return formattedPre
示例2: update_blacklist
def update_blacklist():
    """Check for Blacklist update and load them into Mongo."""
    blacklist_url = config.postprocess.get('blacklist_url')
    if not blacklist_url:
        log.error('No blacklist update url in config.')
        return False
    response = requests.get(blacklist_url)
    for line in response.text.splitlines():
        fields = line.split('\t\t')
        # expected layout: group_name, regex, <unused>, description
        if len(fields) != 4:
            continue
        group_name, pattern, _, description = fields
        log.debug('Updating blacklist {}...'.format(pattern))
        db.blacklists.update(
            {'regex': pattern},
            {
                # only brand-new entries start disabled
                '$setOnInsert': {'status': 0},
                '$set': {
                    'group_name': group_name,
                    'regex': pattern,
                    'description': description,
                },
            },
            upsert=True
        )
    return True
示例3: save
def save(binary):
    """Save a single binary to the DB, including all
    segments/parts (which takes the longest).

    Merges parts into an existing binary of the same name,
    or inserts a fresh document otherwise.
    --
    Note: Much quicker. Hooray!
    """
    log.debug('Saving to binary: ' + binary['name'])
    existing_binary = db.binaries.find_one({'name': binary['name']})
    try:
        if existing_binary:
            # fold the new parts into the stored part map, then persist
            merge(existing_binary['parts'], binary['parts'])
            db.binaries.update({'_id': existing_binary['_id']}, {
                '$set': {
                    'parts': existing_binary['parts']
                }
            })
        else:
            db.binaries.insert({
                'name': binary['name'],
                'group_name': binary['group_name'],
                'posted': binary['posted'],
                'posted_by': binary['posted_by'],
                'category_id': binary['category_id'],
                'regex_id': binary['regex_id'],
                'req_id': binary['req_id'],
                'xref': binary['xref'],
                'total_parts': binary['total_parts'],
                'parts': binary['parts']
            })
    except Exception as e:
        # bug fix: was a bare `except:` that also swallowed SystemExit /
        # KeyboardInterrupt and hid the real error behind a fixed message
        log.error('Binary was too large to fit in DB! ({})'.format(e))
示例4: api
def api():
    """Dispatch an API request to the matching pynab.api handler."""
    log.debug('Handling request for {0}.'.format(request.fullpath))
    # these are really basic, don't check much
    function = request.query.t or pynab.api.api_error(200)
    for pattern, handler in pynab.api.functions.items():
        # reform s|search into ^s$|^search$
        # if we don't, 's' matches 'caps' (s)
        anchored = '|'.join(['^{0}$'.format(alt) for alt in pattern.split('|')])
        if not re.search(anchored, function):
            continue
        dataset = {'get_link': get_link}
        data = handler(dataset)
        output_format = request.query.o or 'xml'
        if output_format == 'xml':
            # return as xml
            response.set_header('Content-type', 'application/rss+xml')
            return data
        elif output_format == 'json':
            # bottle auto-converts into json
            return xmltodict.parse(data)
        else:
            return pynab.api.api_error(201)
    # didn't match any functions
    return pynab.api.api_error(202)
示例5: check_release_files
def check_release_files(server, group_name, nzb):
    """Retrieves rar metadata for release files.

    :param server: NNTP server wrapper providing .get(group, messages)
    :param group_name: group to fetch segments from
    :param nzb: parsed nzb dict containing a 'rars' list
    :return: ((passworded, file_count, total_size), rar_file_infos)
    """
    rar_files = []
    for rar in nzb['rars']:
        messages = []
        # a single segment arrives as a dict, not a one-element list
        if not isinstance(rar['segments']['segment'], list):
            rar['segments']['segment'] = [rar['segments']['segment'], ]
        for s in rar['segments']['segment']:
            messages.append(s['#text'])
        if messages:
            data = server.get(group_name, messages)
            if data:
                t = None
                try:
                    # rarfile needs a real path, so spool to a named temp file
                    with tempfile.NamedTemporaryFile('wb', delete=False) as t:
                        t.write(data.encode('ISO-8859-1'))
                        t.flush()
                        rar_files += lib.rar.RarFile(t.name).infolist()
                except Exception:
                    # best-effort: corrupt/partial rars are just skipped
                    # (was a bare `except:`)
                    continue
                finally:
                    # bug fix: `t` is None if tempfile creation itself failed,
                    # which made this cleanup raise AttributeError
                    if t is not None:
                        log.debug('Deleting temporary file {}...'.format(t.name))
                        os.remove(t.name)
                # one successfully-read rar is enough
                break
    passworded = any([r.is_encrypted for r in rar_files])
    file_count = len(rar_files)
    size = sum([r.file_size for r in rar_files])
    # note: an unreachable `return (False, 0, 0), []` after this return was
    # removed; an empty rar_files list already yields that same value
    return (passworded, file_count, size), rar_files
示例6: post_date
def post_date(self, group_name, article):
    """Retrieves the date of the specified post.

    :param group_name: newsgroup to query
    :param article: article number to look up
    :return: timezone-aware (UTC) datetime of the post, or None

    Retries up to 10 times, nudging the article number forward each time
    the server reports the article missing; implicitly returns None if all
    attempts fail.
    """
    log.debug('{}: Retrieving date of article {:d}'.format(group_name, article))
    i = 0
    while i < 10:
        articles = []
        try:
            self.connection.group(group_name)
            # OVER on a single-article range, e.g. '12345-12345'
            _, articles = self.connection.over('{0:d}-{0:d}'.format(article))
        except nntplib.NNTPError as e:
            log.debug(e)
            # leave this alone - we don't expect any data back
            pass
        try:
            art_num, overview = articles[0]
        except IndexError:
            log.warning('{}: Server was missing article {:d}.'.format(group_name, article))
            # if the server is missing an article, it's usually part of a large group
            # so skip along quickishly, the datefinder will autocorrect itself anyway
            article += int(article * 0.0001)
            #article += 1
            i += 1
            continue
        if art_num and overview:
            # normalise the server-supplied date string to UTC
            return dateutil.parser.parse(overview['date']).astimezone(pytz.utc)
        else:
            return None
示例7: get
def get(self, group_name, messages=None):
    """Get a set of messages from the server for the specified group.

    Returns the concatenated yEnc-decoded bodies, or None on any failure.
    """
    log.info('{}: Getting {:d} messages...'.format(group_name, len(messages)))
    if not messages:
        log.error('{}: No messages were specified.'.format(group_name))
        return None
    data = ''
    try:
        _, total, first, last, _ = self.connection.group(group_name)
        log.debug('{}: Total articles in group: {:d}'.format(group_name, total))
        for message in messages:
            article = '<{}>'.format(message)
            log.debug('{}: Getting article: {}'.format(group_name, article))
            response, (number, message_id, lines) = self.connection.body(article)
            decoded = pynab.yenc.yenc_decode(lines)
            # one undecodable article poisons the whole set
            if not decoded:
                return None
            data += decoded
    except nntplib.NNTPError as nntpe:
        log.error('{}: Problem retrieving messages from server: {}.'.format(group_name, nntpe))
        return None
    return data
示例8: search
def search(name, year):
    """Search OMDB for a movie and return the matching movie data dict.

    :param name: movie title to search for
    :param year: release year (possibly parenthesised), or falsy
    :return: the matching OMDB movie dict, or None if nothing matched
    """
    log.info('Searching for movie: {}'.format(name))
    # if we managed to parse the year from the name
    # include it, since it'll narrow results
    if year:
        year_query = '&y={}'.format(year.replace('(', '').replace(')', ''))
    else:
        year_query = ''
    r = requests.get(OMDB_SEARCH_URL + name + year_query)
    try:
        data = r.json()
    except ValueError:
        # bug fix: was a bare `except:`; requests raises ValueError
        # (json.JSONDecodeError) for non-JSON bodies
        log.debug('There was a problem accessing the API page.')
        return None
    if 'Search' in data:
        for movie in data['Search']:
            # doublecheck, but the api should've searched properly
            ratio = difflib.SequenceMatcher(None, clean_name(name), clean_name(movie['Title'])).ratio()
            # NOTE(review): `year` is compared un-stripped while the query
            # strips parens — confirm callers pass a bare year string
            if ratio > 0.8 and year == movie['Year'] and movie['Type'] == 'movie':
                log.info('OMDB movie match found: {}'.format(movie['Title']))
                return movie
    # explicit: no acceptable match found
    return None
示例9: rename_bad_releases
def rename_bad_releases(category):
    """Re-check names of releases in a category; rename or mark unwanted."""
    query = {
        "category._id": int(category),
        "$or": [{"nfo": {"$nin": [None, False]}}, {"files.count": {"$exists": True}}],
    }
    for release in db.releases.find(query):
        log.debug("Finding name for {}...".format(release["search_name"]))
        name, category_id = pynab.releases.discover_name(release)
        if name and not category_id:
            # don't change anything, it was fine
            continue
        if name and category_id:
            # we found a new name!
            log.info(
                "Renaming {} ({:d}) to {} ({:d})...".format(
                    release["search_name"], release["category"]["_id"], name, category_id
                )
            )
            # rebind to the freshly-looked-up category, including its parent
            category = db.categories.find_one({"_id": category_id})
            category["parent"] = db.categories.find_one({"_id": category["parent_id"]})
            db.releases.update(
                {"_id": release["_id"]},
                {"$set": {"search_name": pynab.releases.clean_release_name(name), "category": category}},
            )
        else:
            # bad release!
            log.debug("Noting unwanted release {} ({:d})...".format(release["search_name"], release["category"]["_id"]))
            db.releases.update({"_id": release["_id"]}, {"$set": {"unwanted": True}})
示例10: create_nodes
def create_nodes(self):
    """Create pubsub nodes for any categories that don't have one yet."""
    categories = set(self.categories().keys())
    existing = self.pubsub_nodes()
    log.debug("nabbot: existing: {} :: categories: {}".format(existing, categories))
    missing = categories - existing
    for catid in missing:
        log.warning("nabbot: creating node {}.".format(catid))
        self.xmpp.create(catid)
示例11: determine_category
def determine_category(name, group_name=''):
    """Categorise release based on release name and group name."""
    if is_hashed(name):
        # hashed names carry no usable information
        category = CAT_MISC_OTHER
    else:
        # try the group's own category mapping first, if we know the group
        category = check_group_category(name, group_name) if group_name else ''
        if not category:
            # fall back to scanning every parent-category regex set
            for parent_category in parent_category_regex.keys():
                category = check_parent_category(name, parent_category)
                if category:
                    break
        # nothing matched: dump it in misc/other
        category = category or CAT_MISC_OTHER
    log.debug('category: ({}) [{}]: {}'.format(
        group_name,
        name,
        category
    ))
    return category
示例12: copy_file
def copy_file(engine, data, ordering, type):
    """
    Handles a fast-copy, or a slowass one.

    If you're using postgres or a mysql derivative, this should work fine.
    Anything else? Welllllllllllllp. It's gonna be slow. Really slow.
    In fact, I'm going to point out just how slow it is.

    :param engine: sqlalchemy engine
    :param data: file-like CSV buffer (fast paths) or row data (slow path)
    :param ordering: column names in CSV column order
    :param type: mapped table class (shadows the builtin; kept for API compat)
    :return: True on success, False on failure
    """
    insert_start = time.time()
    if 'mysql' in config.db.get('engine'):
        # ho ho ho
        conn = engine.raw_connection()
        cur = conn.cursor()
        (fd, filename) = tempfile.mkstemp(prefix='pynab')
        filename = filename.replace('\\', '/')
        try:
            # spool the CSV to a real file for LOAD DATA LOCAL INFILE;
            # `with` closes the fd even on error (it leaked before)
            with os.fdopen(fd, 'wb') as file:
                data.seek(0)
                chunk = data.read(1048576)
                while chunk:
                    file.write(chunk.encode('utf-8'))
                    chunk = data.read(1048576)
            data.close()
            query = "LOAD DATA LOCAL INFILE '{}' INTO TABLE {} FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"' ({})" \
                .format(filename, type.__tablename__, ','.join(ordering))
            cur.execute(query)
            conn.commit()
        except Exception as e:
            log.error(e)
            return False
        finally:
            # bug fix: cursor stayed open and the temp file leaked whenever
            # the load failed
            cur.close()
            if os.path.exists(filename):
                os.remove(filename)
    elif 'postgre' in config.db.get('engine'):
        conn = engine.raw_connection()
        cur = conn.cursor()
        try:
            cur.copy_expert(
                "COPY {} ({}) FROM STDIN WITH CSV ESCAPE E'\\\\'".format(type.__tablename__, ', '.join(ordering)), data)
            conn.commit()
        except Exception as e:
            log.error(e)
            return False
        finally:
            # bug fix: cursor was left open on the error path
            cur.close()
    else:
        # this... this is the slow one
        # i don't even want to think about how slow this is
        # it's really slow
        # slower than the github api
        engine.execute(type.__table__.insert(), data)
    insert_end = time.time()
    log.debug('parts: {} insert: {:.2f}s'.format(config.db.get('engine'), insert_end - insert_start))
    return True
示例13: process
def process(limit=None, category=0):
    """Process releases for NFO parts and download them.

    :param limit: cap the number of releases examined (newest first)
    :param category: restrict to a single category id when truthy

    Releases with no usable NFO get a MetaBlack 'IMPOSSIBLE' marker so
    they are not re-examined.
    """
    with Server() as server:
        with db_session() as db:
            # only releases with no nfo yet and no previous "impossible" mark
            # noinspection PyComparisonWithNone,PyComparisonWithNone
            query = db.query(Release).join(Group).join(NZB).filter(Release.nfo == None).filter(
                Release.nfo_metablack_id == None)
            if category:
                query = query.filter(Release.category_id == int(category))
            if limit:
                releases = query.order_by(Release.posted.desc()).limit(limit)
            else:
                releases = query.order_by(Release.posted.desc()).all()
            for release in releases:
                found = False
                nzb = pynab.nzbs.get_nzb_details(release.nzb)
                if nzb:
                    # collect candidate nfo segments, skipping oversized ones
                    nfos = []
                    for nfo in nzb['nfos']:
                        for part in nfo['segments']:
                            if int(part['size']) > NFO_MAX_FILESIZE:
                                continue
                            nfos.append(part)
                    for nfo in nfos:
                        try:
                            article = server.get(release.group.name, [nfo['message_id'], ])
                        except Exception as e:
                            # if usenet's not accessible, don't block it forever
                            log.error('nfo: unable to get nfo: {}'.format(e))
                            continue
                        if article:
                            # store gzip-compressed to keep the row small
                            data = gzip.compress(article.encode('utf-8'))
                            nfo = NFO(data=data)
                            db.add(nfo)
                            release.nfo = nfo
                            release.nfo_metablack_id = None
                            db.add(release)
                            log.debug('nfo: [{}] - nfo added'.format(
                                release.search_name
                            ))
                            found = True
                            # first usable nfo wins
                            break
                if not found:
                    # no nzb, or no fetchable nfo: blacklist so we skip it next run
                    log.debug('nfo: [{}] - [{}] - no nfos in release'.format(
                        release.id,
                        release.search_name
                    ))
                    mb = MetaBlack(nfo=release, status='IMPOSSIBLE')
                    db.add(mb)
                # commit per release so a crash doesn't lose the whole batch
                db.commit()
示例14: names_from_nfos
def names_from_nfos(release):
    """Attempt to grab a release name from its NFO.

    Returns parse results, or an empty list when no NFO text is available.
    """
    log.debug('Parsing NFO for release details in: {}'.format(release['search_name']))
    nfo = pynab.nfos.get(release['nfo']).decode('ascii', 'ignore')
    if not nfo:
        log.debug('NFO not available for release: {}'.format(release['search_name']))
        return []
    return pynab.nfos.attempt_parse(nfo)
示例15: is_blacklisted
def is_blacklisted(subject, group_name):
    """Return True if subject matches any active blacklist for this group."""
    log.debug('{0}: Checking {1} against active blacklists...'.format(group_name, subject))
    for blacklist in db.blacklists.find({'status': 1}):
        # blacklists are scoped by a group-name pattern; skip non-matching ones
        if not re.search(blacklist['group_name'], group_name):
            continue
        # too spammy
        #log.debug('{0}: Checking blacklist {1}...'.format(group_name, blacklist['regex']))
        if re.search(blacklist['regex'], subject):
            return True
    return False