

Python UserLog.is_enabled Method Code Examples

This article collects typical usage examples of the Python method kalite.main.models.UserLog.is_enabled. If you are wondering what UserLog.is_enabled does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the containing class, kalite.main.models.UserLog.


Eight code examples of the UserLog.is_enabled method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: query_logs

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]
def query_logs(users, items, logtype, logdict):
    """
    Get a specified subset of logs for a particular set of users for either exercises or videos.
    users: list of users to query against.
    items: list of either exercises or videos to query.
    logtype: "exercise", "video", "activity", or "summaryactivity".
    logdict: user-keyed dictionary of log lists (each user's list is presumed empty when passed in)
    """

    if logtype == "exercise":
        all_logs = ExerciseLog.objects.filter(user__in=users, exercise_id__in=items).values(
                        'user', 'complete', 'exercise_id', 'attempts', 'points', 'struggling', 'completion_timestamp', 'streak_progress').order_by('completion_timestamp')
    elif logtype == "video":
        all_logs = VideoLog.objects.filter(user__in=users, video_id__in=items).values(
            'user', 'complete', 'video_id', 'total_seconds_watched', 'completion_timestamp', 'points').order_by('completion_timestamp')
    elif logtype == "activity" and UserLog.is_enabled():
        all_logs = UserLog.objects.filter(user__in=users).values(
            'user', 'last_active_datetime', 'total_seconds').order_by('last_active_datetime')
    elif logtype == "summaryactivity" and UserLog.is_enabled():
        all_logs = UserLogSummary.objects.filter(user__in=users).values(
            'user', 'device', 'total_seconds').order_by('end_datetime')
    else:
        assert False, "Unknown log type: '%s'" % logtype  # indicates a programming error

    for log in all_logs:
        logdict[log['user']].append(log)
    return logdict
Developer: julianharty, Project: ka-lite-central, Lines: 29, Source: api_views.py
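
A hedged usage sketch of the function above (here, students and exercise_ids are made-up names for illustration): since query_logs only appends, logdict must already hold one empty list per user id before the call.

# Hypothetical call site; `students` (FacilityUser objects) and
# `exercise_ids` (exercise id strings) are placeholders for illustration.
user_ids = [s.id for s in students]

ex_logs = {uid: [] for uid in user_ids}  # pre-seed: query_logs only appends
ex_logs = query_logs(user_ids, exercise_ids, "exercise", ex_logs)

# ex_logs now maps each user id to that user's exercise log dicts,
# ordered by completion_timestamp.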

Example 2: test_query_login_teacher

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]
    def test_query_login_teacher(self):
        """Check the # of queries when logging in as a teacher."""
        teacher = FacilityUser(is_teacher=True, username="t1", facility=self.facility)
        passwd = self._gen_valid_password()
        teacher.set_password(passwd)
        teacher.save()

        with self.assertNumQueries(FuzzyInt(25, 43) + 3 * UserLog.is_enabled()):
            self.browser_login_teacher("t1", passwd, self.facility)
Developer: JGOODYEARUCSD, Project: ka-lite, Lines: 11, Source: query_tests.py
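
Two idioms carry this test: UserLog.is_enabled() returns a bool, which Python multiplies as 0 or 1, and FuzzyInt lets assertNumQueries accept a range of query counts rather than an exact number. The helper below is a minimal sketch of what such a class can look like, not necessarily KA Lite's exact implementation.

class FuzzyInt(int):
    """An int that compares equal to any value inside [lowest, highest].

    Handy with assertNumQueries, where the exact query count can shift by
    a few depending on settings such as whether UserLog.is_enabled().
    """

    def __new__(cls, lowest, highest):
        obj = super(FuzzyInt, cls).__new__(cls, highest)
        obj.lowest = lowest
        obj.highest = highest
        return obj

    def __eq__(self, other):
        return self.lowest <= other <= self.highest

    def __ne__(self, other):
        return not self.__eq__(other)

    def __add__(self, other):
        # Shifting keeps the range fuzzy, so expressions like
        # FuzzyInt(25, 43) + 3 * UserLog.is_enabled() still compare as a range.
        return FuzzyInt(self.lowest + other, self.highest + other)

    def __repr__(self):
        return "[%d..%d]" % (self.lowest, self.highest)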

Example 3: test_query_login_student

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]
    def test_query_login_student(self):
        """Check the # of queries when logging in as a student."""
        student = FacilityUser(is_teacher=False, username="s1", facility=self.facility)
        passwd = self._gen_valid_password()
        student.set_password(passwd)
        student.save()

        expected_num_queries = 30 + 3*UserLog.is_enabled()
        with self.assertNumQueries(FuzzyInt(expected_num_queries - 3, expected_num_queries + 5)):
            self.browser_login_student("s1", passwd, self.facility)
Developer: JGOODYEARUCSD, Project: ka-lite, Lines: 12, Source: query_tests.py

Example 4: generate_fake_exercise_logs

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]

# ......... part of the code is omitted here .........

    # Actually generate!
    else:
        # Get (or create) user type
        try:
            user_settings = json.loads(facility_user.notes)
        except:
            user_settings = sample_user_settings()
            facility_user.notes = json.dumps(user_settings)
            facility_user.save()
        date_diff_started = datetime.timedelta(seconds=datediff(date_diff, units="seconds") * user_settings["time_in_program"])  # when this user started in the program, relative to NOW

        for topic in topics:
            # Get all exercises related to the topic
            exercises = get_topic_exercises(topic_id=topic)

            # Problem:
            #   Not realistic for students to have lots of unfinished exercises.
            #   If they start them, they tend to get stuck, right?
            #
            # So, need to make it more probable that they will finish an exercise,
            #   and less probable that they start one.
            #
            # What we need is P(streak|started), not P(streak)

            # Probability of doing any particular exercise
            p_exercise = probability_of(qty="exercise", user_settings=user_settings)
            logging.debug("# exercises: %d; p(exercise)=%4.3f, user settings: %s\n" % (len(exercises), p_exercise, json.dumps(user_settings)))

            # of exercises is related to
            for j, exercise in enumerate(exercises):
                if random.random() > p_exercise:
                    continue

                # Probability of completing this exercise, and .. proportion of attempts
                p_completed = probability_of(qty="completed", user_settings=user_settings)
                p_attempts = probability_of(qty="attempts", user_settings=user_settings)

                attempts = int(random.random() * p_attempts * 30 + 10)  # always enough to have completed
                completed = (random.random() < p_completed)
                if completed:
                    streak_progress = 100
                else:
                    streak_progress = max(0, min(90, random.gauss(100 * user_settings["speed_of_learning"], 20)))
                    streak_progress = int(floor(streak_progress / 10.)) * 10
                points = streak_progress / 10 * 12 if completed else 0  # only get points when you master.

                # Choose a rate of exercises, based on their effort level and speed of learning.
                #   Compute the latest possible start time.
                #   Then sample a start time between their start time
                #   and the latest possible start_time
                rate_of_exercises = 0.66 * user_settings["effort_level"] + 0.33 * user_settings["speed_of_learning"]  # exercises per day
                time_for_attempts = min(datetime.timedelta(days=rate_of_exercises * attempts), date_diff_started)  # protect with min
                time_delta_completed = datetime.timedelta(seconds=random.randint(int(datediff(time_for_attempts, units="seconds")), int(datediff(date_diff_started, units="seconds"))))
                date_completed = datetime.datetime.now() - time_delta_completed

                # Always create new
                logging.info("Creating exercise log: %-12s: %-25s (%d points, %d attempts, %d%% streak on %s)" % (
                    facility_user.first_name,
                    exercise["name"],
                    points,
                    attempts,
                    streak_progress,
                    date_completed,
                ))
                try:
                    elog = ExerciseLog.objects.get(user=facility_user, exercise_id=exercise["name"])
                except ExerciseLog.DoesNotExist:
                    elog = ExerciseLog(
                        user=facility_user,
                        exercise_id=exercise["name"],
                        attempts=int(attempts),
                        streak_progress=streak_progress,
                        points=int(points),
                        complete=completed,
                        completion_timestamp=date_completed,
                    )
                    try:
                        elog.save()

                        # For now, make all attempts on an exercise into a single UserLog.
                        seconds_per_attempt = 10 * (1 + user_settings["speed_of_learning"] * random.random())
                        time_to_navigate = 15 * (0.5 + random.random())  #between 7.5s and 22.5s
                        time_to_logout = 5 * (0.5 + random.random()) # between 2.5 and 7.5s
                        if UserLog.is_enabled():
                            ulog = UserLog(
                                user=facility_user,
                                activity_type=1,
                                start_datetime = date_completed - datetime.timedelta(seconds=int(attempts * seconds_per_attempt + time_to_navigate)),
                                end_datetime = date_completed + datetime.timedelta(seconds=time_to_logout),
                                last_active_datetime = date_completed,
                            )
                            ulog.save()
                            user_logs.append(ulog)
                    except Exception as e:
                        logging.error("Error saving exercise log: %s" % e)
                        continue
                exercise_logs.append(elog)

    return (exercise_logs, user_logs)
Developer: 2flcastro, Project: ka-lite, Lines: 104, Source: generaterealdata.py
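
To make the streak and points arithmetic in the middle of this generator concrete, here is the same logic pulled out into a small standalone sketch (the function name is ours, not KA Lite's):

import random
from math import floor

def fake_streak_and_points(completed, speed_of_learning):
    """Sketch of the streak/points logic used in the example above.

    A completed exercise is always a full 100% streak worth 120 points;
    an unfinished one gets a streak sampled around 100 * speed_of_learning,
    clamped to [0, 90] and rounded down to a multiple of 10, with zero points.
    """
    if completed:
        streak_progress = 100
    else:
        streak_progress = max(0, min(90, random.gauss(100 * speed_of_learning, 20)))
        streak_progress = int(floor(streak_progress / 10.)) * 10
    points = streak_progress // 10 * 12 if completed else 0  # points only on mastery
    return streak_progress, points

# fake_streak_and_points(True, 0.7)  -> (100, 120)
# fake_streak_and_points(False, 0.7) -> e.g. (60, 0)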

Example 5: compute_data

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]
def compute_data(data_types, who, where):
    """
    Compute the data in "data_types" for each user in "who", for the topics selected by "where"

    who: list of users
    where: topic_path
    data_types can include:
        pct_mastery
        effort
        attempts
    """

    # None indicates that the data hasn't been queried yet.
    #   We'll query it on demand, for efficiency
    topics = None
    exercises = None
    videos = None

    # Initialize an empty dictionary of data, video logs, exercise logs, for each user
    data = OrderedDict(zip([w.id for w in who], [dict() for i in range(len(who))]))  # maintain the order of the users
    vid_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))
    ex_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))
    if UserLog.is_enabled():
        activity_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))

    # Set up queries (but don't run them), so we have really easy aliases.
    #   Only do them if they haven't been done yet (tell this by passing in a value to the lambda function)
    # Topics: topics.
    # Exercises: names (ids for ExerciseLog objects)
    # Videos: video_id (ids for VideoLog objects)

    # This lambda partial creates a function to return all items with a particular path from the NODE_CACHE.
    search_fun_single_path = partial(lambda t, p: t["path"].startswith(p), p=tuple(where))
    # This lambda partial creates a function to return all items with paths matching a list of paths from NODE_CACHE.
    search_fun_multi_path = partial(lambda ts, p: any([t["path"].startswith(p) for t in ts]),  p=tuple(where))
    # Functions that use the functions defined above to return topics, exercises, and videos based on paths.
    query_topics = partial(lambda t, sf: t if t is not None else [t[0]["id"] for t in filter(sf, get_node_cache('Topic').values())], sf=search_fun_single_path)
    query_exercises = partial(lambda e, sf: e if e is not None else [ex[0]["id"] for ex in filter(sf, get_node_cache('Exercise').values())], sf=search_fun_multi_path)
    query_videos = partial(lambda v, sf: v if v is not None else [vid[0]["id"] for vid in filter(sf, get_node_cache('Video').values())], sf=search_fun_multi_path)

    # No users, don't bother.
    if len(who) > 0:

        # Query out all exercises, videos, exercise logs, and video logs before looping to limit requests.
        # This means we could pull data for n-dimensional coach report displays with the same number of requests!
        # Note: User activity is polled inside the loop, to prevent possible slowdown for exercise and video reports.
        exercises = query_exercises(exercises)

        videos = query_videos(videos)

        if exercises:
            ex_logs = query_logs(data.keys(), exercises, "exercise", ex_logs)

        if videos:
            vid_logs = query_logs(data.keys(), videos, "video", vid_logs)

        for data_type in (data_types if not hasattr(data_types, "lower") else [data_types]):  # wrap a bare string into a list, if necessary
            if data_type in data[data.keys()[0]]:  # if the first user has it, then all do; no need to calc again.
                continue

            #
            # These are summary stats: you only get one per user
            #
            if data_type == "pct_mastery":

                # Efficient query out, spread out to dict
                for user in data.keys():
                    data[user][data_type] = 0 if not ex_logs[user] else 100. * sum([el['complete'] for el in ex_logs[user]]) / float(len(exercises))

            elif data_type == "effort":
                if "ex:attempts" in data[data.keys()[0]] and "vid:total_seconds_watched" in data[data.keys()[0]]:
                    # exercises and videos would be initialized already
                    for user in data.keys():
                        avg_attempts = 0 if len(exercises) == 0 else sum(data[user]["ex:attempts"].values()) / float(len(exercises))
                        avg_video_points = 0 if len(videos) == 0 else sum(data[user]["vid:total_seconds_watched"].values()) / float(len(videos))
                        data[user][data_type] = 100. * (0.5 * avg_attempts / 10. + 0.5 * avg_video_points / 750.)
                else:
                    data_types += ["ex:attempts", "vid:total_seconds_watched", "effort"]

            #
            # These are detail stats: you get many per user
            #
            # Just querying out data directly: Video
            elif data_type.startswith("vid:") and data_type[4:] in [f.name for f in VideoLog._meta.fields]:

                for user in data.keys():
                    data[user][data_type] = OrderedDict([(v['video_id'], v[data_type[4:]]) for v in vid_logs[user]])

            # Just querying out data directly: Exercise
            elif data_type.startswith("ex:") and data_type[3:] in [f.name for f in ExerciseLog._meta.fields]:

                for user in data.keys():
                    data[user][data_type] = OrderedDict([(el['exercise_id'], el[data_type[3:]]) for el in ex_logs[user]])

            # User Log Queries
            elif data_type.startswith("user:") and data_type[5:] in [f.name for f in UserLog._meta.fields] and UserLog.is_enabled():

                activity_logs = query_logs(data.keys(), "", "activity", activity_logs)

                for user in data.keys():
# ......... part of the code is omitted here .........
Developer: julianharty, Project: ka-lite-central, Lines: 103, Source: api_views.py
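
The partial(lambda ...) pairs above simply pre-bind the selected topic path(s), leaving a one-argument predicate that can be reused as a filter. A simplified, hypothetical illustration of the same pattern (the node cache below is faked for the example):

from functools import partial

# Faked, much-simplified node cache: id -> list of nodes carrying a "path".
node_cache = {
    "addition_1":    [{"id": "addition_1",    "path": "khan/math/arithmetic/addition_1/"}],
    "subtraction_1": [{"id": "subtraction_1", "path": "khan/math/arithmetic/subtraction_1/"}],
    "newtons_laws":  [{"id": "newtons_laws",  "path": "khan/science/physics/newtons_laws/"}],
}

where = ["khan/math/"]  # the selected topic path(s)

# Same shape as search_fun_single_path: bind the prefix tuple now, test
# individual nodes later (str.startswith accepts a tuple of prefixes).
search_fun = partial(lambda t, p: t["path"].startswith(p), p=tuple(where))

matching_ids = [nodes[0]["id"] for nodes in node_cache.values() if search_fun(nodes[0])]
# -> ["addition_1", "subtraction_1"] (order depends on dict iteration)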

Example 6: _

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]
    {"key": "ex:attempts",        "name": _("Attempts"),   "type": "number", "description": _("Number of times submitting an answer to an exercise.")},
    {"key": "ex:streak_progress", "name": _("Streak"),     "type": "number", "description": _("Maximum number of consecutive correct answers on an exercise.")},
    {"key": "ex:points",          "name": _("Exercise points"),    "type": "number", "description": _("[Pointless at the moment; tracks mastery linearly]")},
    { "key": "ex:completion_timestamp", "name": _("Time exercise completed"),"type": "datetime", "description": _("Day/time the exercise was completed.") },
    {"key": "vid:points",          "name": _("Video points"),      "type": "number", "description": _("Points earned while watching a video (750 max / video).")},
    { "key": "vid:total_seconds_watched","name": _("Video time"),   "type": "number", "description": _("Total seconds spent watching a video.") },
    { "key": "vid:completion_timestamp", "name": _("Time video completed"),"type": "datetime", "description": _("Day/time the video was completed.") },
]

user_log_stats_dict = [
    { "key": "usersum:total_seconds", "name": _("Time Active (s)"), "type": "number", "description": _("Total time spent actively logged in.")},
    { "key": "user:total_seconds", "name": _("Active Time Per Login"), "type": "number", "description": _("Duration of each login session."), "noscatter": True, "timeline": True},
    { "key": "user:last_active_datetime", "name": _("Time Session Completed"),"type": "datetime", "description": _("Day/time the login session finished.")},
]

if UserLog.is_enabled():
    stats_dict.extend(user_log_stats_dict)

def get_data_form(request, *args, **kwargs):
    """Get the basic data form, by combining information from
    keyword arguments and the request.REQUEST object.
    Along the way, check permissions to make sure whatever's being requested is OK.

    Request objects get priority over keyword args.
    """
    assert not args, "all non-request args should be keyword args"

    # Pull the form parameters out of the request or
    data = dict()
    # Default to empty string, as it makes template handling cleaner later.
    for field in ["facility", "group", "user", "xaxis", "yaxis"]:
Developer: julianharty, Project: ka-lite-central, Lines: 33, Source: api_views.py
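
The field loop at the end of this excerpt is cut off on the source page. Purely as a hedged guess at the shape of the missing body (not the actual ka-lite-central code), something along these lines would match the docstring's rule that request parameters win over keyword arguments:

# Hypothetical continuation, not the real ka-lite-central code.
for field in ["facility", "group", "user", "xaxis", "yaxis"]:
    # request.REQUEST (old Django: GET and POST merged) takes priority over
    # keyword arguments; fall back to "" for cleaner template handling.
    data[field] = request.REQUEST.get(field, kwargs.get(field, ""))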

Example 7: test_query_logout_student

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]
    def test_query_logout_student(self):
        """"""
        self.test_query_login_student()
        with self.assertNumQueries(FuzzyInt(2, 11) + 11*UserLog.is_enabled()):
            self.browser_logout_user()
Developer: JGOODYEARUCSD, Project: ka-lite, Lines: 7, Source: query_tests.py

Example 8: test_query_logout_admin

# Required import: from kalite.main.models import UserLog [as alias]
# Or: from kalite.main.models.UserLog import is_enabled [as alias]
    def test_query_logout_admin(self):
        """"""
        self.test_query_login_admin()
        with self.assertNumQueries(FuzzyInt(6, 7) + 0*UserLog.is_enabled()):
            self.browser_logout_user()
Developer: JGOODYEARUCSD, Project: ka-lite, Lines: 7, Source: query_tests.py


Note: The kalite.main.models.UserLog.is_enabled examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.