block: interface update for ioc/icq creation functions

Make the following interface updates to prepare for future ioc-related
changes.

* create_io_context() returning ioc only works for %current because it
  doesn't increment ref on the ioc.  Drop @task parameter from it and
  always assume %current.

* Make create_io_context_slowpath() return 0 or -errno and rename it
  to create_task_io_context().

* Make ioc_create_icq() take @ioc as a parameter instead of assuming
  that of %current.  The caller, get_request(), is updated to create
  the ioc explicitly and then pass it into ioc_create_icq(), as
  sketched below.
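
In short, the calling convention changes like this (a hypothetical
caller sketch for illustration only; example_get_icq() is not part of
the patch, and error handling beyond NULL checks is elided):

	static struct io_cq *example_get_icq(struct request_queue *q,
					     gfp_t gfp_mask)
	{
		struct io_context *ioc;
		struct io_cq *icq = NULL;

		/* create %current's io_context if missing; returns
		 * %current->io_context, which may be NULL on failure */
		ioc = create_io_context(gfp_mask, q->node);
		if (ioc)
			icq = ioc_create_icq(ioc, q, gfp_mask);
		return icq;	/* NULL means allocation failed */
	}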

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 24acfc34fb
parent b679281a64
Author:    Tejun Heo <tj@kernel.org>
Date:      2012-03-05 13:15:24 -08:00
Committer: Jens Axboe <axboe@kernel.dk>

3 changed files with 26 additions and 28 deletions

diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c

@@ -855,7 +855,7 @@ retry:
 	 */
 	if (!ioc && !retried) {
 		spin_unlock_irq(q->queue_lock);
-		create_io_context(current, gfp_mask, q->node);
+		create_io_context(gfp_mask, q->node);
 		spin_lock_irq(q->queue_lock);
 		retried = true;
 		goto retry;
@@ -919,7 +919,9 @@ retry:
 
 	/* create icq if missing */
 	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
-		icq = ioc_create_icq(q, gfp_mask);
+		ioc = create_io_context(gfp_mask, q->node);
+		if (ioc)
+			icq = ioc_create_icq(ioc, q, gfp_mask);
 		if (!icq)
 			goto fail_alloc;
 	}
@@ -1005,7 +1007,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		 * up to a big batch of them for a small period time.
		 * See ioc_batching, ioc_set_batching
 		 */
-		create_io_context(current, GFP_NOIO, q->node);
+		create_io_context(GFP_NOIO, q->node);
 		ioc_set_batching(q, current->io_context);
 
 		spin_lock_irq(q->queue_lock);

diff --git a/block/blk-ioc.c b/block/blk-ioc.c
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c

@@ -212,15 +212,14 @@ void ioc_clear_queue(struct request_queue *q)
 	}
 }
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-				int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
 
 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return;
+		return -ENOMEM;
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
@@ -244,6 +243,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
+
+	return 0;
 }
 
 /**
@@ -275,7 +276,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 			return ioc;
 		}
 		task_unlock(task);
-	} while (create_io_context(task, gfp_flags, node));
+	} while (!create_task_io_context(task, gfp_flags, node));
 
 	return NULL;
 }
@@ -319,26 +320,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
 
 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists.  If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
+ * will be created using @gfp_mask.
  *
- * The caller is responsible for ensuring @q is alive and will stay alive
- * until this function returns.
+ * The caller is responsible for ensuring @ioc won't go away and @q is
+ * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
 {
 	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
 	struct io_cq *icq;
 
 	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
 				    q->node);
 	if (!icq)
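
One detail worth spelling out from the get_task_io_context() hunk above
(an editorial gloss, not part of the patch): the loop condition gains a
`!` because the helper's success value flips, from returning a non-NULL
pointer to returning 0:

	/* old: pointer convention, loop while creation succeeded */
	} while (create_io_context(task, gfp_flags, node));

	/* new: 0/-errno convention, so success is !ret */
	} while (!create_task_io_context(task, gfp_flags, node));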

diff --git a/block/blk.h b/block/blk.h
--- a/block/blk.h
+++ b/block/blk.h

@@ -200,32 +200,30 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
-				int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
 /**
  * create_io_context - try to create task->io_context
- * @task: target task
  * @gfp_mask: allocation mask
  * @node: allocation node
  *
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it.  Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
  *
  * Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
  */
-static inline struct io_context *create_io_context(struct task_struct *task,
-						   gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 {
 	WARN_ON_ONCE(irqs_disabled());
-	if (unlikely(!task->io_context))
-		create_io_context_slowpath(task, gfp_mask, node);
-	return task->io_context;
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, gfp_mask, node);
+	return current->io_context;
 }
 
 /*
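
As context for why the pointer-returning helper is restricted to
%current (a hedged sketch contrasting the two interfaces; the
get_task_io_context()/put_io_context() pairing is the pre-existing API,
shown here only for illustration):

	/* %current only: the returned pointer carries no extra reference.
	 * That is safe only because a task cannot race with itself over
	 * current->io_context. */
	ioc = create_io_context(gfp_mask, q->node);

	/* any task: returns a referenced ioc; the caller must drop it */
	ioc = get_task_io_context(task, gfp_flags, node);
	if (ioc)
		put_io_context(ioc);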