This article collects typical usage examples of the row_log function in C++. If you are wondering what row_log does, how to call it, or want to see it used in real code, the hand-picked examples below may help.
The following shows 8 code examples of the row_log function, sorted by popularity by default.
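Before the examples, a brief note on what row_log itself is. The code below comes from the ROW (Read Over Write) I/O scheduler found in Android/MSM Linux kernels, where row_log and row_log_rowq are typically defined as thin logging macros over the block-layer tracing helper blk_add_trace_msg(). The following is only a sketch of the assumed definitions (exact format strings may differ between kernel trees), not code taken from the examples on this page:

#include <linux/blktrace_api.h>

/* Assumed definitions of the logging helpers used throughout the examples.
 * row_log() emits a trace message against a request queue; row_log_rowq()
 * additionally prefixes the message with the ROW queue index.
 */
#define row_log(q, fmt, args...) \
    blk_add_trace_msg(q, "%s():" fmt, __func__, ##args)

#define row_log_rowq(rdata, rowq_id, fmt, args...) \
    blk_add_trace_msg((rdata)->dispatch_queue, \
        "rowq%d " fmt, rowq_id, ##args)

Because blk_add_trace_msg() only records anything when blktrace is active on the queue, these calls are cheap in normal operation, which is presumably why the scheduler logs liberally on its dispatch path.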
Example 1: row_urgent_pending
static bool row_urgent_pending(struct request_queue *q)
{
    struct row_data *rd = q->elevator->elevator_data;

    if (rd->urgent_in_flight) {
        row_log(rd->dispatch_queue, "%d urgent requests in flight",
            rd->urgent_in_flight);
        return false;
    }

    if (rd->pending_urgent_rq) {
        row_log(rd->dispatch_queue, "Urgent request pending");
        return true;
    }

    row_log(rd->dispatch_queue, "no urgent request pending/in flight");
    return false;
}
Example 2: row_restart_cycle
static void row_restart_cycle(struct row_data *rd,
                int start_idx, int end_idx)
{
    int i;

    row_dump_queues_stat(rd);

    for (i = start_idx; i < end_idx; i++) {
        if (rd->row_queues[i].nr_dispatched <
            rd->row_queues[i].disp_quantum)
            row_mark_rowq_unserved(rd, i);
        rd->row_queues[i].nr_dispatched = 0;
    }

    row_log(rd->dispatch_queue, "Restarting cycle for class @ %d-%d",
        start_idx, end_idx);
}
Example 3: row_dispatch_requests
/*
* row_dispatch_requests() - selects the next request to dispatch
* @q: requests queue
* @force: flag indicating if forced dispatch
*
* Return 0 if no requests were moved to the dispatch queue.
* 1 otherwise
*
*/
static int row_dispatch_requests(struct request_queue *q, int force)
{
    struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
    int ret = 0, currq, ioprio_class_to_serve, start_idx, end_idx;

    if (force && hrtimer_active(&rd->rd_idle_data.hr_timer)) {
        (void)hrtimer_cancel(&rd->rd_idle_data.hr_timer);
        row_log_rowq(rd, rd->rd_idle_data.idling_queue_idx,
            "Canceled delayed work on %d - forced dispatch",
            rd->rd_idle_data.idling_queue_idx);
        rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
    }

    ioprio_class_to_serve = row_get_ioprio_class_to_serve(rd, force);
    row_log(rd->dispatch_queue, "Dispatching from %d priority class",
        ioprio_class_to_serve);

    switch (ioprio_class_to_serve) {
    case IOPRIO_CLASS_NONE:
        goto done;
    case IOPRIO_CLASS_RT:
        start_idx = ROWQ_HIGH_PRIO_IDX;
        end_idx = ROWQ_REG_PRIO_IDX;
        break;
    case IOPRIO_CLASS_BE:
        start_idx = ROWQ_REG_PRIO_IDX;
        end_idx = ROWQ_LOW_PRIO_IDX;
        break;
    case IOPRIO_CLASS_IDLE:
        start_idx = ROWQ_LOW_PRIO_IDX;
        end_idx = ROWQ_MAX_PRIO;
        break;
    default:
        pr_err("%s(): Invalid I/O priority class", __func__);
        goto done;
    }

    currq = row_get_next_queue(q, rd, start_idx, end_idx);

    /* Dispatch */
    if (currq >= 0) {
        row_dispatch_insert(rd, currq);
        ret = 1;
    }

done:
    return ret;
}
Example 4: row_completed_req
static void row_completed_req(struct request_queue *q, struct request *rq)
{
    struct row_data *rd = q->elevator->elevator_data;

    if (rq->cmd_flags & REQ_URGENT) {
        if (!rd->urgent_in_flight) {
            WARN_ON(1);
            pr_err("%s(): URGENT req but urgent_in_flight = F",
                __func__);
        }
        rd->urgent_in_flight = false;
        rq->cmd_flags &= ~REQ_URGENT;
    }

    row_log(q, "completed %s %s req.",
        (rq->cmd_flags & REQ_URGENT ? "URGENT" : "regular"),
        (rq_data_dir(rq) == READ ? "READ" : "WRITE"));
}
Example 5: row_idle_hrtimer_fn
static enum hrtimer_restart row_idle_hrtimer_fn(struct hrtimer *hr_timer)
{
    struct idling_data *read_data =
        container_of(hr_timer, struct idling_data, hr_timer);
    struct row_data *rd =
        container_of(read_data, struct row_data, rd_idle_data);

    row_log_rowq(rd, rd->rd_idle_data.idling_queue_idx,
        "Performing delayed work");
    /* Mark idling process as done */
    rd->row_queues[rd->rd_idle_data.idling_queue_idx].
        idle_data.begin_idling = false;
    rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;

    if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE])
        row_log(rd->dispatch_queue, "No requests in scheduler");
    else
        kblockd_schedule_work(&read_data->idle_work);

    return HRTIMER_NORESTART;
}
Example 6: row_get_ioprio_class_to_serve
/*
* row_get_ioprio_class_to_serve() - Return the next I/O priority
* class to dispatch requests from
* @rd: pointer to struct row_data
* @force: flag indicating if forced dispatch
*
* This function returns the next I/O priority class to serve
* {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
* If there are no more requests in scheduler or if we're idling on some queue
* IOPRIO_CLASS_NONE will be returned.
* If idling is scheduled on a lower priority queue than the one that needs
* to be served, it will be canceled.
*
*/
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
    int i;
    int ret = IOPRIO_CLASS_NONE;

    if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
        row_log(rd->dispatch_queue, "No more requests in scheduler");
        goto check_idling;
    }

    /* First, go over the high priority queues */
    for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
        if (!list_empty(&rd->row_queues[i].fifo)) {
            if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
                if (hrtimer_try_to_cancel(
                    &rd->rd_idle_data.hr_timer) >= 0) {
                    row_log(rd->dispatch_queue,
                        "Canceling delayed work on %d. RT pending",
                        rd->rd_idle_data.idling_queue_idx);
                    rd->rd_idle_data.idling_queue_idx =
                        ROWQ_MAX_PRIO;
                }
            }

            if (row_regular_req_pending(rd) &&
                (rd->reg_prio_starvation.starvation_counter >=
                 rd->reg_prio_starvation.starvation_limit))
                ret = IOPRIO_CLASS_BE;
            else if (row_low_req_pending(rd) &&
                (rd->low_prio_starvation.starvation_counter >=
                 rd->low_prio_starvation.starvation_limit))
                ret = IOPRIO_CLASS_IDLE;
            else
                ret = IOPRIO_CLASS_RT;

            goto done;
        }
    }

    /*
     * At the moment idling is implemented only for READ queues.
     * If enabled on WRITE, this needs updating
     */
    if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
        row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
        goto done;
    }

check_idling:
    /* Check for (high priority) idling and enable if needed */
    for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
        if (rd->row_queues[i].idle_data.begin_idling &&
            row_queues_def[i].idling_enabled)
            goto initiate_idling;
    }

    /* Regular priority queues */
    for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
        if (list_empty(&rd->row_queues[i].fifo)) {
            /* We can idle only if this is not a forced dispatch */
            if (rd->row_queues[i].idle_data.begin_idling &&
                !force && row_queues_def[i].idling_enabled)
                goto initiate_idling;
        } else {
            if (row_low_req_pending(rd) &&
                (rd->low_prio_starvation.starvation_counter >=
                 rd->low_prio_starvation.starvation_limit))
                ret = IOPRIO_CLASS_IDLE;
            else
                ret = IOPRIO_CLASS_BE;
            goto done;
        }
    }

    if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
        ret = IOPRIO_CLASS_IDLE;
    goto done;

initiate_idling:
    hrtimer_start(&rd->rd_idle_data.hr_timer,
        ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
        HRTIMER_MODE_REL);

    rd->rd_idle_data.idling_queue_idx = i;
    row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
//......... (remainder of the code omitted here) .........
Example 7: row_dispatch_requests
/*
* row_dispatch_requests() - selects the next request to dispatch
* @q: requests queue
* @force: flag indicating if forced dispatch
*
* Return 0 if no requests were moved to the dispatch queue.
* 1 otherwise
*
*/
static int row_dispatch_requests(struct request_queue *q, int force)
{
    struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
    int ret = 0, currq, ioprio_class_to_serve, start_idx, end_idx;
    int expire_index = -1;

    if (force && hrtimer_active(&rd->rd_idle_data.hr_timer)) {
        if (hrtimer_try_to_cancel(&rd->rd_idle_data.hr_timer) >= 0) {
            row_log(rd->dispatch_queue,
                "Canceled delayed work on %d - forced dispatch",
                rd->rd_idle_data.idling_queue_idx);
            rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
        }
    }

    if (rd->pending_urgent_rq) {
        row_log(rd->dispatch_queue, "dispatching urgent request");
        row_dispatch_insert(rd, rd->pending_urgent_rq);
        ret = 1;
        goto done;
    }

    ioprio_class_to_serve = row_get_ioprio_class_to_serve(rd, force);
    row_log(rd->dispatch_queue, "Dispatching from %d priority class",
        ioprio_class_to_serve);

    if (ioprio_class_to_serve == IOPRIO_CLASS_RT) {
        expire_index = row_be_expire_adjust(rd);
        if (expire_index >= ROWQ_REG_PRIO_IDX)
            ioprio_class_to_serve = IOPRIO_CLASS_BE;
    }

    switch (ioprio_class_to_serve) {
    case IOPRIO_CLASS_NONE:
        rd->last_served_ioprio_class = IOPRIO_CLASS_NONE;
        goto done;
    case IOPRIO_CLASS_RT:
        if (expire_index >= 0) {
            start_idx = expire_index;
            end_idx = expire_index + 1;
            expire_index = -1;
        } else {
            start_idx = ROWQ_HIGH_PRIO_IDX;
            end_idx = ROWQ_REG_PRIO_IDX;
        }
        break;
    case IOPRIO_CLASS_BE:
        if (expire_index > 0) {
            start_idx = expire_index;
            end_idx = expire_index + 1;
            expire_index = -1;
        } else {
            start_idx = ROWQ_REG_PRIO_IDX;
            end_idx = ROWQ_LOW_PRIO_IDX;
        }
        break;
    case IOPRIO_CLASS_IDLE:
        start_idx = ROWQ_LOW_PRIO_IDX;
        end_idx = ROWQ_MAX_PRIO;
        break;
    default:
        pr_err("%s(): Invalid I/O priority class", __func__);
        goto done;
    }

    currq = row_get_next_queue(q, rd, start_idx, end_idx);

    /* Dispatch */
    if (currq >= 0) {
        row_dispatch_insert(rd,
            rq_entry_fifo(rd->row_queues[currq].fifo.next));
        ret = 1;
    }

done:
    return ret;
}
Example 8: row_get_ioprio_class_to_serve
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
    int i;
    int ret = IOPRIO_CLASS_NONE;

    if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
        row_log(rd->dispatch_queue, "No more requests in scheduler");
        goto check_idling;
    }

    for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
        if (!list_empty(&rd->row_queues[i].fifo)) {
            if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
                if (hrtimer_try_to_cancel(
                    &rd->rd_idle_data.hr_timer) >= 0) {
                    row_log(rd->dispatch_queue,
                        "Canceling delayed work on %d. RT pending",
                        rd->rd_idle_data.idling_queue_idx);
                    rd->rd_idle_data.idling_queue_idx =
                        ROWQ_MAX_PRIO;
                }
            }

            if (row_regular_req_pending(rd) &&
                (rd->reg_prio_starvation.starvation_counter >=
                 rd->reg_prio_starvation.starvation_limit))
                ret = IOPRIO_CLASS_BE;
            else if (row_low_req_pending(rd) &&
                (rd->low_prio_starvation.starvation_counter >=
                 rd->low_prio_starvation.starvation_limit))
                ret = IOPRIO_CLASS_IDLE;
            else
                ret = IOPRIO_CLASS_RT;

            goto done;
        }
    }

    if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
        row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
        goto done;
    }

check_idling:
    for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
        if (rd->row_queues[i].idle_data.begin_idling &&
            row_queues_def[i].idling_enabled)
            goto initiate_idling;
    }

    for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
        if (list_empty(&rd->row_queues[i].fifo)) {
            if (rd->row_queues[i].idle_data.begin_idling &&
                !force && row_queues_def[i].idling_enabled)
                goto initiate_idling;
        } else {
            if (row_low_req_pending(rd) &&
                (rd->low_prio_starvation.starvation_counter >=
                 rd->low_prio_starvation.starvation_limit))
                ret = IOPRIO_CLASS_IDLE;
            else
                ret = IOPRIO_CLASS_BE;
            goto done;
        }
    }

    if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
        ret = IOPRIO_CLASS_IDLE;
    goto done;

initiate_idling:
    hrtimer_start(&rd->rd_idle_data.hr_timer,
        ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
        HRTIMER_MODE_REL);

    rd->rd_idle_data.idling_queue_idx = i;
    row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
    return ret;
}