本文整理汇总了Python中settings.LOG.debug方法的典型用法代码示例。如果您正苦于以下问题:Python LOG.debug方法的具体用法?Python LOG.debug怎么用?Python LOG.debug使用的例子?那么这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类settings.LOG的用法示例。
在下文中一共展示了LOG.debug方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: end_user_activity
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def end_user_activity(cls, user, activity_type="login", end_datetime=None):
    """Close out an open user activity log entry for *user*.

    Looks up the un-ended log entry for the user, creating one on the fly
    (with a warning) if none exists, then stamps it with *end_datetime*
    and saves it.
    """
    # Feature is disabled entirely when the record cap is zero/None.
    if not settings.USER_LOG_MAX_RECORDS:
        return

    assert user is not None, "A valid user must always be specified."

    # Resolve the default here; a default in the signature would be
    # evaluated once at import time and become stale.
    end_datetime = end_datetime or datetime.now()
    activity_type = cls.get_activity_int(activity_type)

    entry = get_object_or_None(cls, user=user, end_datetime=None)
    if not entry:
        # No unstopped start exists -- start should have been called
        # first, so synthesize a begin that we can immediately stop.
        logging.warn(
            "%s: Had to create a user log entry, but STOPPING('%d')! @ %s"
            % (user.username, activity_type, end_datetime)
        )
        entry = cls.begin_user_activity(
            user=user, activity_type=activity_type, start_datetime=end_datetime
        )

    logging.debug("%s: Logging LOGOUT activity @ %s" % (user.username, end_datetime))
    entry.end_datetime = end_datetime
    entry.save()  # total-seconds will be computed here.
示例2: recurse_nodes_to_extract_knowledge_map
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def recurse_nodes_to_extract_knowledge_map(node, node_cache):
    """
    Internal function for recursing the topic tree and building the knowledge map.
    Requires rebranding of metadata done by recurse_nodes function.

    NOTE(review): relies on module-level `knowledge_map`, `knowledge_topics`,
    `topic_tools` and `logging` -- presumably globals of the defining module;
    not visible in this block.
    """
    assert node["kind"] == "Topic"
    if node.get("in_knowledge_map", None):
        if node["slug"] not in knowledge_map["topics"]:
            logging.debug("Not in knowledge map: %s" % node["slug"])
            node["in_knowledge_map"] = False
            # NOTE(review): this loop rebinds the parameter `node`; code after
            # it runs against the last cached node (same slug, so presumably
            # benign) -- confirm intent.
            for node in node_cache["Topic"][node["slug"]]:
                node["in_knowledge_map"] = False
        knowledge_topics[node["slug"]] = topic_tools.get_all_leaves(node, leaf_type="Exercise")
        if not knowledge_topics[node["slug"]]:
            # Topic contributes no exercises: purge it from both maps.
            sys.stderr.write("Removing topic from topic tree: no exercises. %s" % node["slug"])
            del knowledge_topics[node["slug"]]
            del knowledge_map["topics"][node["slug"]]
            node["in_knowledge_map"] = False
            for node in node_cache["Topic"][node["slug"]]:
                node["in_knowledge_map"] = False
    else:
        # Not flagged for the knowledge map; drop any stale entry.
        if node["slug"] in knowledge_map["topics"]:
            sys.stderr.write("Removing topic from topic tree; does not belong. '%s'" % node["slug"])
            logging.warn("Removing from knowledge map: %s" % node["slug"])
            del knowledge_map["topics"][node["slug"]]
    # Recurse into child topics only; other node kinds are leaves here.
    for child in [n for n in node.get("children", []) if n["kind"] == "Topic"]:
        recurse_nodes_to_extract_knowledge_map(child, node_cache)
示例3: __init__
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def __init__(self, comment=None, fixture=None, **kwargs):
    """Record run metadata (comment, class, platform, git state), then run setup.

    comment -- free-form description stored in the result dict.
    fixture -- name of the data fixture in use, stored verbatim.
    kwargs  -- forwarded to self._setup(); may include "verbosity".
    """
    self.return_dict = {}
    self.return_dict['comment'] = comment
    self.return_dict['class'] = type(self).__name__
    self.return_dict['uname'] = platform.uname()
    self.return_dict['fixture'] = fixture

    # "verbosity" may be absent (None) or non-numeric; fall back to 1.
    # (Narrowed from a bare `except:` so real errors are not swallowed.)
    try:
        self.verbosity = int(kwargs.get("verbosity"))
    except (TypeError, ValueError):
        self.verbosity = 1

    # Best-effort capture of the current git branch/commit; git may be
    # missing, in which case Popen raises OSError.
    try:
        branch = subprocess.Popen(["git", "describe", "--contains", "--all", "HEAD"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['branch'] = branch[:-1]  # strip trailing newline
        head = subprocess.Popen(["git", "log", "--pretty=oneline", "--abbrev-commit", "--max-count=1"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['head'] = head[:-1]
    except OSError:
        self.return_dict['branch'] = None
        self.return_dict['head'] = None

    # if setup fails, what could we do?
    # let the exception bubble up is the best.
    try:
        self._setup(**kwargs)
    except Exception as e:
        logging.debug("Failed setup (%s); trying to tear down" % e)
        try:
            self._teardown()
        except Exception:
            pass  # teardown is best-effort; the setup error matters more
        # Bare `raise` keeps the original traceback intact
        # (the original `raise e` is lossier).
        raise
示例4: call_outside_command_with_output
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def call_outside_command_with_output(kalite_location, command, *args, **kwargs):
    """
    Runs call_command for a KA Lite installation at the given location,
    and returns the output.

    Returns a tuple (stdout, stderr, exit_flag) where exit_flag is 1 when
    anything was written to stderr, else 0 (note: not the process's real
    return code -- kept for backward compatibility).
    """
    # build the command: interpreter, manage.py, command name, then args
    cmd = (sys.executable, kalite_location + "/kalite/manage.py", command)
    for arg in args:
        cmd += (arg,)
    for key, val in kwargs.items():
        key = key.replace("_", "-")
        # runcherrypyserver takes bare key=value pairs rather than --options
        prefix = "--" if command != "runcherrypyserver" else ""
        if isinstance(val, bool):
            cmd += ("%s%s" % (prefix, key),)
        else:
            cmd += ("%s%s=%s" % (prefix, key, str(val)),)
    logging.debug(cmd)

    # Execute the command, using subprocess/Popen.
    # BUG FIX: restore the working directory even if Popen/communicate
    # raises; previously an exception left the process chdir'ed away.
    cwd = os.getcwd()
    os.chdir(kalite_location + "/kalite")
    try:
        p = subprocess.Popen(cmd, shell=False, cwd=os.path.split(cmd[0])[0], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out = p.communicate()
    finally:
        os.chdir(cwd)
    logging.debug(out[1] if out[1] else out[0])

    # tuple output of stdout, stderr, and exit code
    return out + (1 if out[1] else 0,)
示例5: am_i_online
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def am_i_online(url, expected_val=None, search_string=None, timeout=5, allow_redirects=True):
    """Test whether we are online or not.
    returns True or False.
    Eats all exceptions!

    url             -- URL to probe.
    expected_val    -- if set, the response body must equal this exactly.
    search_string   -- if set, must appear somewhere in the response body.
    timeout         -- seconds before giving up on the request.
    allow_redirects -- whether a redirected GET still counts as online.
    """
    assert not (search_string and expected_val is not None), "Search string and expected value cannot both be set"
    try:
        if not search_string and expected_val is None:
            # Body is irrelevant, so a HEAD request suffices.
            # BUG FIX: the original omitted the timeout here, so a stalled
            # server could hang this call indefinitely.
            response = requests.head(url, timeout=timeout)
        else:
            response = requests.get(url, timeout=timeout, allow_redirects=allow_redirects)
        # Validate that response came from the requested url
        if response.status_code != 200:
            return False
        elif not allow_redirects and response.url != url:
            return False
        # Check the output, if expected values are specified
        if expected_val is not None:
            return expected_val == response.text
        elif search_string:
            return search_string in response.text
        return True
    except Exception as e:
        logging.debug("am_i_online: %s" % e)
        return False
示例6: update_json
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def update_json(youtube_id, lang_code, downloaded, api_response, time_of_attempt):
    """Update language_srt_map to reflect download status

    lang_code in IETF format

    Returns True on success, False if the map file could not be loaded.
    Raises KeyError if youtube_id is not already in the map (unchanged
    from the original contract).
    """
    # Open JSON file
    filepath = get_lang_map_filepath(lang_code)
    language_srt_map = softload_json(filepath, logger=logging.error)
    if not language_srt_map:
        return False

    # Update the entry in place; `entry` aliases the dict inside the map,
    # so no separate write-back step is needed.
    entry = language_srt_map[youtube_id]
    entry["downloaded"] = downloaded
    entry["api_response"] = api_response
    entry["last_attempt"] = time_of_attempt
    if api_response == "success":
        entry["last_success"] = time_of_attempt

    # BUG FIX: write in text mode ("w", json.dumps returns str, so "wb"
    # fails on Python 3) and use a context manager so the file handle is
    # closed even if serialization fails.
    with open(filepath, "w") as json_file:
        json_file.write(json.dumps(language_srt_map))
    logging.debug("File updated.")
    return True
示例7: save
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def save(self, *args, **kwargs):
    """When this model is saved, check if the activity is ended.
    If so, compute total_seconds and update the corresponding summary log."""
    # Do nothing if the max # of records is zero or None
    # (i.e. this functionality is disabled)
    if not settings.USER_LOG_MAX_RECORDS:
        return
    # Compute total_seconds, save to summary
    # Note: only supports setting end_datetime once!
    if self.end_datetime and not self.total_seconds:
        # Validate field values before deriving/saving data from them.
        self.full_clean()
        # The top computation is more lenient: user activity is just time logged in, literally.
        # The bottom computation is more strict: user activity is from start until the last "action"
        # recorded--in the current case, that means from login until the last moment an exercise or
        # video log was updated.
        #self.total_seconds = datediff(self.end_datetime, self.start_datetime, units="seconds")
        self.total_seconds = 0 if not self.last_active_datetime else datediff(self.last_active_datetime, self.start_datetime, units="seconds")
        # Confirm the result (output info first for easier debugging)
        logging.debug("%s: total learning time: %d seconds" % (self.user.username, self.total_seconds))
        assert self.total_seconds >= 0, "Total learning time should always be non-negative."
        # Save only completed log items to the UserLogSummary
        UserLogSummary.add_log_to_summary(self)
    super(UserLog, self).save(*args, **kwargs)
    # Cap the table size: discard the oldest rows beyond USER_LOG_MAX_RECORDS.
    if UserLog.objects.count() > settings.USER_LOG_MAX_RECORDS:
        # Unfortunately, could not do an aggregate delete when doing a
        # slice in query
        to_discard = UserLog.objects.order_by("start_datetime")[0:UserLog.objects.count()-settings.USER_LOG_MAX_RECORDS]
        UserLog.objects.filter(pk__in=to_discard).delete()
示例8: validate_times
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def validate_times(srt_content, srt_issues):
    """Scan SRT subtitle content for timing problems.

    Appends a human-readable description of each problem to *srt_issues*
    (mutated in place); returns None.

    srt_content -- full text of an .srt file (the regex below expects
        CRLF line endings after each timing line).
    srt_issues  -- list collecting issue strings, shared with the caller.
    """
    # Pairs of (start, end) timestamps, e.g. ("00:01:02,000", "00:01:05,500").
    times = re.findall("([0-9:,]+) --> ([0-9:,]+)\r\n", srt_content, re.S | re.M)
    # NOTE(review): the lambda parameter shadows the builtin `str`.
    parse_time = lambda str: datetime.datetime.strptime(str, "%H:%M:%S,%f")
    for i in range(len(times)):
        try:
            # Gap since the previous subtitle ended (zero baseline for the
            # first one), and this subtitle's own on-screen duration.
            between_subtitle_time = datediff(parse_time(times[i][0]), parse_time(times[i-1][1] if i > 0 else "00:00:00,000"))
            within_subtitle_time = datediff(parse_time(times[i][1]), parse_time(times[i][0]))
            if between_subtitle_time > 60.:
                srt_issues.append("Between-subtitle gap of %5.2f seconds" % between_subtitle_time)
            if within_subtitle_time > 60.:
                srt_issues.append("Within-subtitle duration of %5.2f seconds" % within_subtitle_time)
            elif within_subtitle_time == 0.:
                logging.debug("Subtitle flies by too fast (%s --> %s)." % times[i])
            #print "Start: %s\tB: %5.2f\tW: %5.2f" % (parse_time(times[i][0]), between_subtitle_time, within_subtitle_time)
        except Exception as e:
            # A "99:59:59"-style end time cannot parse with %H (hours must
            # be 0-23); presumably it marks an end-of-movie sentinel.
            # Anything else is a genuine parse error worth reporting.
            if not times[i][1].startswith('99:59:59'):
                srt_issues.append("Error checking times: %s" % e)
            else:
                # Only complain if more than ~10% of the subtitles would be
                # skipped by stopping here.
                if len(times) - i > 1 and len(times) - i - 1 > len(times)/10.:
                    if i == 0:
                        srt_issues.append("No subtitles have a valid starting point.")
                    else:
                        logging.debug("Hit end of movie, but %d (of %d) subtitle(s) remain in the queue." % (len(times) - i - 1, len(times)))
                break
示例9: refresh_topic_cache_wrapper_fn
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def refresh_topic_cache_wrapper_fn(request, cached_nodes={}, *args, **kwargs):
    """
    Centralized logic for how to refresh the topic cache, for each type of object.
    When the object is desired to be used, this code runs to refresh data,
    balancing between correctness and efficiency.

    NOTE(review): `force`, `handler`, `stamp_urls_on_video` and
    `recount_videos_and_invalidate_parents` are not defined in this block --
    presumably closure variables of an enclosing decorator; confirm at the
    definition site. The mutable default `cached_nodes={}` is only read when
    falsy (then rebound to a fresh dict), so it is not mutated across calls.
    """
    if not cached_nodes:
        cached_nodes = {"topics": topicdata.TOPICS}
    for node in cached_nodes.values():
        if not node:
            continue
        has_children = bool(node.get("children"))
        # Grandchildren suggest the subtree is fully loaded, not a stub.
        has_grandchildren = has_children and any(["children" in child for child in node["children"]])
        # Properties not yet marked
        if node["kind"] == "Video":
            if force or "urls" not in node:  #
                #stamp_urls_on_video(node, force=force) # will be done by force below
                recount_videos_and_invalidate_parents(node["parent"], force=True)
        elif node["kind"] == "Topic":
            if not force and (not has_grandchildren or "nvideos_local" not in node):
                # if forcing, would do this here, and again below--so skip if forcing.
                logging.debug("cache miss: stamping urls on videos")
                for video in topic_tools.get_topic_videos(path=node["path"]):
                    stamp_urls_on_video(video, force=force)
                recount_videos_and_invalidate_parents(node, force=force or not has_grandchildren)
    # Make the refreshed nodes available to the wrapped view.
    kwargs.update(cached_nodes)
    return handler(request, *args, **kwargs)
示例10: recurse_nodes_to_clean_related_videos
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def recurse_nodes_to_clean_related_videos(node):
    """
    Internal function for recursing the topic tree and marking related exercises.
    Requires rebranding of metadata done by recurse_nodes function.
    """
    def get_video_node(video_slug, node):
        # Depth-first search for the Video node carrying this slug.
        if node["kind"] == "Topic":
            for child in node.get("children", []):
                found = get_video_node(video_slug, child)
                if found:
                    return found
        elif node["kind"] == "Video" and node["slug"] == video_slug:
            return node
        return None

    if node["kind"] == "Exercise":
        related = node["related_video_readable_ids"]
        # Indices of related videos that no longer exist in the topic tree.
        stale = [idx for idx, slug in enumerate(related) if not get_video_node(slug, topictree)]
        # Delete back-to-front so earlier indices remain valid.
        for idx in reversed(stale):
            logging.debug("Deleting unknown video %s" % related[idx])
            del related[idx]

    for child in node.get("children", []):
        recurse_nodes_to_clean_related_videos(child)
示例11: begin_user_activity
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def begin_user_activity(cls, user, activity_type="login", start_datetime=None):
    """Open a new user activity log entry for *user* and return it.

    If an un-ended entry already exists, it is closed first (at its last
    active time) before the new entry is created and saved.
    """
    # Feature is disabled entirely when the record cap is zero/None.
    if not settings.USER_LOG_MAX_RECORDS:
        return

    assert user is not None, "A valid user must always be specified."

    # Resolve the default here; a default in the signature would be
    # evaluated once at import time and become stale.
    start_datetime = start_datetime or datetime.now()
    activity_type = cls.get_activity_int(activity_type)

    entry = get_object_or_None(cls, user=user, end_datetime=None)
    logging.debug("%s: BEGIN activity(%d) @ %s" % (user.username, activity_type, start_datetime))

    # Seems we're logging in without logging out of the previous.
    # Best thing to do is simulate a logout at the previous last update time.
    #
    # Note: this can be a recursive call
    if entry:
        logging.warn("%s: END activity on a begin @ %s" % (user.username, start_datetime))
        cls.end_user_activity(
            user=user, activity_type=activity_type, end_datetime=entry.last_active_datetime
        )

    # Create a new entry
    fresh_entry = cls(
        user=user, activity_type=activity_type, start_datetime=start_datetime, last_active_datetime=start_datetime
    )
    fresh_entry.save()
    return fresh_entry
示例12: process_request
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def process_request(self, request):
    """Stash a sanitized "next" redirect target on the request.

    Only site-relative targets (starting with "/") are accepted; anything
    else is replaced with the empty string.
    """
    target = request.GET.get("next", "")
    if not target.startswith("/"):
        request.next = ""
        return
    logging.debug("next='%s'" % target)
    request.next = target
示例13: invalidate_inmemory_caches
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def invalidate_inmemory_caches():
    """Reset every registered in-memory cache variable to None.

    Each module below declares its cache globals' names in a CACHE_VARS
    attribute; nulling them forces lazy recomputation on next access.
    """
    for mod in (i18n, topic_tools):
        cache_names = getattr(mod, "CACHE_VARS", [])
        for name in cache_names:
            logging.debug("Emptying cache %s.%s" % (mod.__name__, name))
            setattr(mod, name, None)

    logging.info("Great success emptying the in-memory cache.")
示例14: download_kmap_icons
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def download_kmap_icons(knowledge_map):
    """Download the icon for every topic in the knowledge map.

    Rewrites each topic's "icon_url" to the local icon path, fetches the
    icon from khanacademy.org when it is missing on disk (or force_icons
    is set), and falls back to the default icon on a non-200 response.
    Relies on module-level iconfilepath / iconextension / data_path /
    force_icons / defaulticon settings and the `requests` library.
    """
    for key, value in knowledge_map["topics"].items():
        # Note: id here is retrieved from knowledge_map, so we're OK
        # that we blew away ID in the topic tree earlier.
        if "icon_url" not in value:
            logging.debug("No icon URL for %s" % key)
        value["icon_url"] = iconfilepath + value["id"] + iconextension
        knowledge_map["topics"][key] = value

        out_path = data_path + "../" + value["icon_url"]
        if os.path.exists(out_path) and not force_icons:
            continue  # already on disk; only refetch when forced

        icon_khan_url = "http://www.khanacademy.org" + value["icon_url"]
        sys.stdout.write("Downloading icon %s from %s..." % (value["id"], icon_khan_url))
        sys.stdout.flush()
        try:
            icon = requests.get(icon_khan_url)
        except Exception as e:
            sys.stdout.write("\n")  # complete the "downloading" output
            sys.stderr.write("Failed to download %-80s: %s\n" % (icon_khan_url, e))
            continue

        if icon.status_code == 200:
            # BUG FIX: the original used the Python-2-only builtin file()
            # in text mode; write the raw bytes with open(..., "wb") and
            # let the context manager close the handle.
            with open(data_path + "../" + value["icon_url"], "wb") as iconfile:
                iconfile.write(icon.content)
        else:
            sys.stdout.write(" [NOT FOUND]")
            value["icon_url"] = iconfilepath + defaulticon + iconextension
        sys.stdout.write(" done.\n")  # complete the "downloading" output
示例15: update_json
# 需要导入模块: from settings import LOG [as 别名]
# 或者: from settings.LOG import debug [as 别名]
def update_json(youtube_id, lang_code, downloaded, api_response, time_of_attempt):
    """Update language_srt_map to reflect download status

    lang_code in IETF format

    Returns True on success, False if the map file could not be read.
    Raises KeyError if youtube_id is not already in the map (unchanged
    from the original contract).
    """
    # Open JSON file
    filepath = get_lang_map_filepath(lang_code)
    try:
        with open(filepath, "r") as fp:
            language_srt_map = json.load(fp)
    except Exception as e:
        logging.error("Something went wrong while trying to open the json file (%s): %s" % (filepath, e))
        return False

    # Update the entry in place; `entry` aliases the dict inside the map,
    # so no separate write-back step is needed.
    entry = language_srt_map[youtube_id]
    entry["downloaded"] = downloaded
    entry["api_response"] = api_response
    entry["last_attempt"] = time_of_attempt
    if api_response == "success":
        entry["last_success"] = time_of_attempt

    # BUG FIX: write in text mode ("w", json.dumps returns str, so "wb"
    # fails on Python 3) and use a context manager so the file handle is
    # closed even if serialization fails.
    with open(filepath, "w") as json_file:
        json_file.write(json.dumps(language_srt_map))
    logging.debug("File updated.")
    return True