This article collects typical usage examples of the debug function from the Python module utils.log. If you have been wondering what the debug function does, how to call it, and what real-world code that uses it looks like, the hand-picked examples below should help.
Fifteen code examples of the debug function are shown, sorted by popularity by default. They are taken from several different open-source projects, so the classes, settings, and helpers that appear around each call belong to the project each snippet was extracted from.
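Before the examples, here is a minimal, self-contained sketch of the call pattern they all share. It assumes that utils.log behaves like a thin, project-local wrapper around Python's standard logging module; the real wrappers differ from project to project, so treat this only as an illustration of how log.debug is typically invoked, not as the definitive implementation used by any of the projects below.

import logging

# Hypothetical stand-in for a project-local utils/log.py module
# (assumption: a thin wrapper around the standard logging module).
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s [%(levelname)s] %(message)s')
log = logging.getLogger('utils.log')


def demo():
    # The two call styles that recur in the examples below:
    log.debug('.php : %d', 123)                   # lazy printf-style arguments
    log.debug('{0} : {1}'.format('.py', 45))      # pre-formatted string, as in Example 1


if __name__ == '__main__':
    demo()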
Example 1: collect_files
def collect_files(self, task_id=None):
    t1 = time.clock()
    self.files(self.path)
    self.result['no_extension'] = {'file_count': 0, 'file_list': []}
    for extension, values in self.type_nums.iteritems():
        extension = extension.strip()
        self.result[extension] = {'file_count': len(values), 'file_list': []}
        # .php : 123
        log.debug('{0} : {1}'.format(extension, len(values)))
        if task_id is not None:
            # Store
            ext = CobraExt(task_id, extension, len(values))
            db.session.add(ext)
        for f in self.file:
            es = f.split(os.extsep)
            if len(es) >= 2:
                # Exists Extension
                # os.extsep + es[len(es) - 1]
                if f.endswith(extension):
                    self.result[extension]['file_list'].append(f)
            else:
                # Didn't have extension
                self.result['no_extension']['file_count'] = int(self.result['no_extension']['file_count']) + 1
                self.result['no_extension']['file_list'].append(f)
    if task_id is not None:
        db.session.commit()
    t2 = time.clock()
    self.result['file_nums'] = self.file_id
    self.result['collect_time'] = t2 - t1
    return self.result
Example 2: fetch_image_from_page_data
def fetch_image_from_page_data(self):
    image = None
    image_file = None
    if self.page_data:
        content = self.page_data
    elif settings.BACKED_BY_AWS.get('pages_on_s3') and self.feed.s3_page:
        key = settings.S3_PAGES_BUCKET.get_key(self.feed.s3_pages_key)
        compressed_content = key.get_contents_as_string()
        stream = StringIO(compressed_content)
        gz = gzip.GzipFile(fileobj=stream)
        try:
            content = gz.read()
        except IOError:
            content = None
    else:
        content = MFeedPage.get_data(feed_id=self.feed.pk)
    url = self._url_from_html(content)
    if not url:
        try:
            content = requests.get(self.feed.feed_link).content
            url = self._url_from_html(content)
        except (AttributeError, SocketError, requests.ConnectionError,
                requests.models.MissingSchema, requests.sessions.InvalidSchema,
                requests.sessions.TooManyRedirects,
                requests.models.InvalidURL,
                requests.models.ChunkedEncodingError,
                requests.models.ContentDecodingError,
                LocationParseError, OpenSSLError, PyAsn1Error), e:
            logging.debug(" ---> ~SN~FRFailed~FY to fetch ~FGfeed icon~FY: %s" % e)
Example 3: count_unreads_for_subscribers
def count_unreads_for_subscribers(self, feed):
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
    user_subs = UserSubscription.objects.filter(feed=feed,
                                                active=True,
                                                user__profile__last_seen_on__gte=UNREAD_CUTOFF)\
                                        .order_by('-last_read_date')
    if not user_subs.count():
        return
    for sub in user_subs:
        if not sub.needs_unread_recalc:
            sub.needs_unread_recalc = True
            sub.save()
    if self.options['compute_scores']:
        stories = MStory.objects(story_feed_id=feed.pk,
                                 story_date__gte=UNREAD_CUTOFF)\
                        .read_preference(pymongo.ReadPreference.PRIMARY)
        stories = Feed.format_stories(stories, feed.pk)
        logging.debug(u' ---> [%-30s] ~FYComputing scores: ~SB%s stories~SN with ~SB%s subscribers ~SN(%s/%s/%s)' % (
            feed.title[:30], len(stories), user_subs.count(),
            feed.num_subscribers, feed.active_subscribers, feed.premium_subscribers))
        self.calculate_feed_scores_with_stories(user_subs, stories)
    elif self.options.get('mongodb_replication_lag'):
        logging.debug(u' ---> [%-30s] ~BR~FYSkipping computing scores: ~SB%s seconds~SN of mongodb lag' % (
            feed.title[:30], self.options.get('mongodb_replication_lag')))
Example 4: fetch
def fetch(self):
    """
    Uses feedparser to download the feed. Will be parsed later.
    """
    identity = self.get_identity()
    log_msg = u'%2s ---> [%-30s] Fetching feed (%d)' % (identity,
                                                        unicode(self.feed)[:30],
                                                        self.feed.id)
    logging.debug(log_msg)

    self.feed.set_next_scheduled_update()
    etag = self.feed.etag
    modified = self.feed.last_modified.utctimetuple()[:7] if self.feed.last_modified else None

    if self.options.get('force') or not self.feed.fetched_once:
        modified = None
        etag = None

    USER_AGENT = 'NewsBlur Feed Fetcher (%s subscriber%s) - %s' % (
        self.feed.num_subscribers,
        's' if self.feed.num_subscribers != 1 else '',
        URL
    )
    self.fpf = feedparser.parse(self.feed.feed_address,
                                agent=USER_AGENT,
                                etag=etag,
                                modified=modified)
    return FEED_OK, self.fpf
Example 5: save_page
def save_page(self, html):
    if html and len(html) > 100:
        if settings.BACKED_BY_AWS.get('pages_on_s3'):
            k = Key(settings.S3_PAGES_BUCKET)
            k.key = self.feed.s3_pages_key
            k.set_metadata('Content-Encoding', 'gzip')
            k.set_metadata('Content-Type', 'text/html')
            k.set_metadata('Access-Control-Allow-Origin', '*')
            out = StringIO.StringIO()
            f = gzip.GzipFile(fileobj=out, mode='w')
            f.write(html)
            f.close()
            compressed_html = out.getvalue()
            k.set_contents_from_string(compressed_html)
            k.set_acl('public-read')

            try:
                feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
                feed_page.delete()
                logging.debug(' --->> [%-30s] ~FYTransfering page data to S3...' % (self.feed))
            except MFeedPage.DoesNotExist:
                pass

            self.feed.s3_page = True
            self.feed.save()
        else:
            try:
                feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
                feed_page.page_data = html
                feed_page.save()
            except MFeedPage.DoesNotExist:
                feed_page = MFeedPage.objects.create(feed_id=self.feed.pk, page_data=html)
            return feed_page
Example 6: create_zip
def create_zip(archive, files):
    '''Creates a zip file containing the files being backed up.'''
    import zipfile
    from utils.misc import add_file_hash

    try:
        # zipfile always follows links
        with zipfile.ZipFile(archive, 'w') as zipf:
            zipf.comment = 'Created by s3-backup'
            for f in files:
                f = f.strip()
                if os.path.exists(f):
                    zipf.write(f)
                    add_file_hash(archive, f)
                    log.debug('Added %s.' % f)
                else:
                    log.error('%s does not exist.' % f)
            if zipf.testzip() != None:
                log.error('An error occured creating the zip archive.')
    except zipfile.BadZipfile:
        # I assume this only happens on reads? Just in case...
        log.critical('The zip file is corrupt.')
    except zipfile.LargeZipFile:
        log.critical('The zip file is greater than 2 GB.'
                     ' Enable zip64 functionality.')
Example 7: fetch
def fetch(self):
    """ Downloads and parses a feed.
    """
    socket.setdefaulttimeout(30)
    identity = self.get_identity()
    log_msg = u'%2s ---> [%-30s] Fetching feed (%d)' % (identity,
                                                        unicode(self.feed)[:30],
                                                        self.feed.id)
    logging.debug(log_msg)

    # Check if feed still needs to be updated
    # feed = Feed.objects.get(pk=self.feed.pk)
    # if feed.next_scheduled_update > datetime.datetime.now() and not self.options.get('force'):
    #     log_msg = u' ---> Already fetched %s (%d)' % (self.feed.feed_title,
    #                                                   self.feed.id)
    #     logging.debug(log_msg)
    #     feed.save_feed_history(303, "Already fetched")
    #     return FEED_SAME, None
    # else:
    self.feed.set_next_scheduled_update()
    etag = self.feed.etag
    modified = self.feed.last_modified.utctimetuple()[:7] if self.feed.last_modified else None

    if self.options.get('force'):
        modified = None
        etag = None

    self.fpf = feedparser.parse(self.feed.feed_address,
                                agent=USER_AGENT,
                                etag=etag,
                                modified=modified)
    return FEED_OK, self.fpf
Example 8: _test_error
def _test_error(self):
    outgoing_error_msg = OutgoingErrorMsg(tc.TID, GENERIC_E)
    data = outgoing_error_msg.encode()
    tid, msg_type, msg_dict = decode(data)
    incoming_error_msg = IncomingErrorMsg(msg_dict)
    log.debug(incoming_error_msg.error)
    assert incoming_error_msg.error == GENERIC_E
Example 9: query
def query(cls, text):
    try:
        cls.ES.default_indices = cls.index_name()
        cls.ES.indices.refresh()
    except pyes.exceptions.NoServerAvailable:
        logging.debug(" ***> ~FRNo search server available.")
        return []

    logging.info("~FGSearch ~FCfeeds~FG by address: ~SB%s" % text)
    q = MatchQuery('address', text, operator="and", type="phrase")
    results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
                            doc_types=[cls.type_name()])

    if not results.total:
        logging.info("~FGSearch ~FCfeeds~FG by title: ~SB%s" % text)
        q = MatchQuery('title', text, operator="and")
        results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
                                doc_types=[cls.type_name()])

    if not results.total:
        logging.info("~FGSearch ~FCfeeds~FG by link: ~SB%s" % text)
        q = MatchQuery('link', text, operator="and")
        results = cls.ES.search(query=q, sort="num_subscribers:desc", size=5,
                                doc_types=[cls.type_name()])

    return results
Example 10: collect_feedback
def collect_feedback(cls):
    seen_posts = set()
    try:
        data = urllib2.urlopen('https://forum.newsblur.com/posts.json').read()
    except (urllib2.HTTPError), e:
        logging.debug(" ***> Failed to collect feedback: %s" % e)
        return
Example 11: test_dont_query_myself
def test_dont_query_myself(self):
    log.debug('test start')
    self.lookup.start()
    # Ongoing queries to (sorted: oldest first):
    # 155-4, 157-3,
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 158-1, 159-0
    # Notice 159-2 is kicked out from the queue
    eq_(self.lookup.num_parallel_queries, 2)
    nodes = [Node(tc.CLIENT_ADDR, self.lookup._my_id)]
    self.lookup._on_response(*_gen_nodes_args(
        tc.NODES_LD_IH[157][3],
        nodes))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[157][3],
         ])
    # This response triggers a new query to 158-1 (ignoring myself)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 155-4, 158-1
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 159-0
    self.lookup._on_timeout(tc.NODES_LD_IH[155][4])
    # This timeout triggers a new query (to 159-0)
    eq_(self.lookup.num_parallel_queries, 2)
    self.lookup._on_timeout(tc.NODES_LD_IH[158][1])
    # No more nodes to send queries to
    eq_(self.lookup.num_parallel_queries, 1)
    ok_(not self.lookup.is_done)
    self.lookup._on_timeout(tc.NODES_LD_IH[159][0])
    # No more nodes to send queries to
    eq_(self.lookup.num_parallel_queries, 0)
    ok_(self.lookup.is_done)
Example 12: test_different_delay
def test_different_delay(self):
    # NOTICE: this test might fail if your configuration
    # (interpreter/processor) is too slow
    task_delays = (1, 1, 1, .5, 1, 1, 2, 1, 1, 1,
                   1, 1.5, 1, 1, 1, 1, .3)
    expected_list = ([],
                     ['a', 16, 3, 'b'],  # 9 is cancelled
                     ['a', 0, 1, 2, 4, 5, 7, 8, 10, 12, 13, 15, 'c', 'b'],
                     ['a', 11, 'c', 'b'],
                     ['a', 6, 'c', 'b'],
                     )
    tasks = [Task(delay, self.callback_f, i) \
             for i, delay in enumerate(task_delays)]
    for task in tasks:
        self.task_m.add(task)
    for i, expected in enumerate(expected_list):
        while True:
            task = self.task_m.consume_task()
            if task is None:
                break
            task.fire_callbacks()
        log.debug('#: %d, result: %s, expected: %s' % (i,
                  self.callback_order, expected))
        assert self.callback_order == expected
        self.callback_order = []
        self.task_m.add(Task(0, self.callback_f, 'a'))
        self.task_m.add(Task(.5, self.callback_f, 'b'))
        self.task_m.add(Task(1, self.callback_f, 'c'))
        time.sleep(.5)
        tasks[9].cancel()   # too late (already fired)
        tasks[14].cancel()  # should be cancelled
Example 13: test_cancel
def test_cancel(self):
    for i in xrange(5):
        self.task_m.add(Task(.1, self.callback_f, i))
    c_task = Task(.1, self.callback_f, 5)
    self.task_m.add(c_task)
    for i in xrange(6, 10):
        self.task_m.add(Task(.1, self.callback_f, i))

    while True:
        task = self.task_m.consume_task()
        if task is None:
            break
        task.fire_callback()
    log.debug('%s' % self.callback_order)
    assert self.callback_order == []

    ok_(not c_task.cancelled)
    c_task.cancel()
    ok_(c_task.cancelled)

    time.sleep(.1)
    while True:
        task = self.task_m.consume_task()
        if task is None:
            break
        task.fire_callbacks()
    log.debug('%s' % self.callback_order)
    assert self.callback_order == [0,1,2,3,4, 6,7,8,9]
Example 14: query
def query(cls, feed_ids, query, order, offset, limit, strip=False):
    cls.create_elasticsearch_mapping()
    cls.ES.indices.refresh()

    if strip:
        query = re.sub(r'([^\s\w_\-])+', ' ', query)  # Strip non-alphanumeric
    sort = "date:desc" if order == "newest" else "date:asc"
    string_q = pyes.query.QueryStringQuery(query, default_operator="AND")
    feed_q = pyes.query.TermsQuery('feed_id', feed_ids[:1000])
    q = pyes.query.BoolQuery(must=[string_q, feed_q])
    try:
        results = cls.ES.search(q, indices=cls.index_name(), doc_types=[cls.type_name()],
                                partial_fields={}, sort=sort, start=offset, size=limit)
    except pyes.exceptions.NoServerAvailable:
        logging.debug(" ***> ~FRNo search server available.")
        return []
    logging.info(" ---> ~FG~SNSearch ~FCstories~FG for: ~SB%s~SN (across %s feed%s)" %
                 (query, len(feed_ids), 's' if len(feed_ids) != 1 else ''))

    try:
        result_ids = [r.get_id() for r in results]
    except pyes.InvalidQuery, e:
        logging.info(" ---> ~FRInvalid search query \"%s\": %s" % (query, e))
        return []
Example 15: main
def main():
    lang = 'zh'
    if len(sys.argv) == 2:
        lang = sys.argv[1]
    cd = sys.path[0]
    translation_path = os.path.join(cd, '../translation')

    # load lua
    pregame_file = os.path.join(translation_path, 'en_pregame.lua')
    client_file = os.path.join(translation_path, 'en_client.lua')
    ui_mgr = UiMgr()
    log.debug('loading lua file %s' % pregame_file)
    ui_mgr.load_lua_file(pregame_file)
    log.debug('loading lua file %s' % client_file)
    ui_mgr.load_lua_file(client_file)
    log.info('read %d lines.' % len(ui_mgr.ui_lines))

    # save merged lines
    translate_file = os.path.join(translation_path, '%s_translate.txt' % lang)
    if os.path.exists(translate_file):
        choose = input('%s_translate.txt file exists, merge? [y/N]' % lang)
        choose = choose.lower().strip()
        if choose != '' and choose[0] == 'y':
            log.info('merging to translate file.')
            ui_mgr.apply_translate_from_txt_file(translate_file)
        else:
            log.info('skipped.')
            return
    with open(translate_file, 'wt', encoding='utf-8') as fp:
        fp.writelines(ui_mgr.get_txt_lines(replace=True))
    log.info('save translate file succeed.')