cfq: pass around cfq_io_cq instead of io_context

Now that io_cq is managed by block core and guaranteed to exist for
any in-flight request, it is easier and carries more information to
pass around cfq_io_cq than io_context.

This patch updates cfq_init_prio_data(), cfq_find_alloc_queue() and
cfq_get_queue() to take @cic instead of @ioc.  This change removes a
duplicate cfq_cic_lookup() from cfq_find_alloc_queue().

This change enables the use of cic-cached ioprio in the next patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Tejun Heo <tj@kernel.org> (2012-03-19 15:10:57 -07:00)
Committer: Jens Axboe <axboe@kernel.dk>
Parent:    9a9e8a26da
Commit:    abede6da27

@@ -468,7 +468,7 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
static void cfq_dispatch_insert(struct request_queue *, struct request *); static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync, static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
struct io_context *ioc, struct bio *bio, struct cfq_io_cq *cic, struct bio *bio,
gfp_t gfp_mask); gfp_t gfp_mask);
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq) static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
@@ -2560,7 +2560,7 @@ static void cfq_exit_icq(struct io_cq *icq)
} }
} }
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;
int ioprio_class; int ioprio_class;
@@ -2568,7 +2568,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
if (!cfq_cfqq_prio_changed(cfqq)) if (!cfq_cfqq_prio_changed(cfqq))
return; return;
ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); ioprio_class = IOPRIO_PRIO_CLASS(cic->icq.ioc->ioprio);
switch (ioprio_class) { switch (ioprio_class) {
default: default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2580,11 +2580,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfqq->ioprio_class = task_nice_ioclass(tsk); cfqq->ioprio_class = task_nice_ioclass(tsk);
break; break;
case IOPRIO_CLASS_RT: case IOPRIO_CLASS_RT:
cfqq->ioprio = task_ioprio(ioc); cfqq->ioprio = task_ioprio(cic->icq.ioc);
cfqq->ioprio_class = IOPRIO_CLASS_RT; cfqq->ioprio_class = IOPRIO_CLASS_RT;
break; break;
case IOPRIO_CLASS_BE: case IOPRIO_CLASS_BE:
cfqq->ioprio = task_ioprio(ioc); cfqq->ioprio = task_ioprio(cic->icq.ioc);
cfqq->ioprio_class = IOPRIO_CLASS_BE; cfqq->ioprio_class = IOPRIO_CLASS_BE;
break; break;
case IOPRIO_CLASS_IDLE: case IOPRIO_CLASS_IDLE:
@@ -2613,8 +2613,8 @@ static void changed_ioprio(struct cfq_io_cq *cic, struct bio *bio)
cfqq = cic->cfqq[BLK_RW_ASYNC]; cfqq = cic->cfqq[BLK_RW_ASYNC];
if (cfqq) { if (cfqq) {
struct cfq_queue *new_cfqq; struct cfq_queue *new_cfqq;
new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc, new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
bio, GFP_ATOMIC); GFP_ATOMIC);
if (new_cfqq) { if (new_cfqq) {
cic->cfqq[BLK_RW_ASYNC] = new_cfqq; cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
cfq_put_queue(cfqq); cfq_put_queue(cfqq);
@@ -2671,23 +2671,18 @@ static void changed_cgroup(struct cfq_io_cq *cic)
#endif /* CONFIG_CFQ_GROUP_IOSCHED */ #endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue * static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct io_context *ioc, struct bio *bio, gfp_t gfp_mask) struct bio *bio, gfp_t gfp_mask)
{ {
struct blkio_cgroup *blkcg; struct blkio_cgroup *blkcg;
struct cfq_queue *cfqq, *new_cfqq = NULL; struct cfq_queue *cfqq, *new_cfqq = NULL;
struct cfq_io_cq *cic;
struct cfq_group *cfqg; struct cfq_group *cfqg;
retry: retry:
rcu_read_lock(); rcu_read_lock();
blkcg = bio_blkio_cgroup(bio); blkcg = bio_blkio_cgroup(bio);
cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
cic = cfq_cic_lookup(cfqd, ioc);
/* cic always exists here */
cfqq = cic_to_cfqq(cic, is_sync); cfqq = cic_to_cfqq(cic, is_sync);
/* /*
@@ -2716,7 +2711,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
if (cfqq) { if (cfqq) {
cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
cfq_init_prio_data(cfqq, ioc); cfq_init_prio_data(cfqq, cic);
cfq_link_cfqq_cfqg(cfqq, cfqg); cfq_link_cfqq_cfqg(cfqq, cfqg);
cfq_log_cfqq(cfqd, cfqq, "alloced"); cfq_log_cfqq(cfqd, cfqq, "alloced");
} else } else
@@ -2746,11 +2741,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
} }
static struct cfq_queue * static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct bio *bio, gfp_t gfp_mask) struct bio *bio, gfp_t gfp_mask)
{ {
const int ioprio = task_ioprio(ioc); const int ioprio = task_ioprio(cic->icq.ioc);
const int ioprio_class = task_ioprio_class(ioc); const int ioprio_class = task_ioprio_class(cic->icq.ioc);
struct cfq_queue **async_cfqq = NULL; struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL; struct cfq_queue *cfqq = NULL;
@@ -2760,7 +2755,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
} }
if (!cfqq) if (!cfqq)
cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, bio, gfp_mask); cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
/* /*
* pin the queue now that it's allocated, scheduler exit will prune it * pin the queue now that it's allocated, scheduler exit will prune it
@@ -3030,7 +3025,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
struct cfq_queue *cfqq = RQ_CFQQ(rq); struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "insert_request"); cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc); cfq_init_prio_data(cfqq, RQ_CIC(rq));
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo); list_add_tail(&rq->queuelist, &cfqq->fifo);
@@ -3234,7 +3229,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
if (cfqq) { if (cfqq) {
cfq_init_prio_data(cfqq, cic->icq.ioc); cfq_init_prio_data(cfqq, cic);
return __cfq_may_queue(cfqq); return __cfq_may_queue(cfqq);
} }
@@ -3326,7 +3321,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
new_queue: new_queue:
cfqq = cic_to_cfqq(cic, is_sync); cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) { if (!cfqq || cfqq == &cfqd->oom_cfqq) {
cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, bio, gfp_mask); cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
cic_set_cfqq(cic, cfqq, is_sync); cic_set_cfqq(cic, cfqq, is_sync);
} else { } else {
/* /*