block, cfq: kill ioc_gone
Now that cic's are immediately unlinked under both locks, there's no need to count and drain cic's before module unload. RCU callback completion is awaited with rcu_barrier(). While at it, remove the residual RCU operations on cic_list.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:

parent b9a1920837
commit b50b636bce
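In outline, the change drops the hand-rolled drain (a per-CPU count of live cic's plus an ioc_gone completion fired by the last RCU free) and instead has module exit call rcu_barrier(), which blocks until every callback already queued with call_rcu() has run. A minimal sketch of the resulting pattern, with illustrative names (example_pool, struct example, and the functions below are stand-ins, not the actual cfq identifiers):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *example_pool;	/* stand-in for cfq_ioc_pool */

struct example {
	struct rcu_head rcu_head;
};

/* RCU callback: free-only now, no counting or completion signalling */
static void example_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(example_pool,
			container_of(head, struct example, rcu_head));
}

static void __exit example_exit(void)
{
	/*
	 * rcu_barrier() waits for all callbacks queued with call_rcu()
	 * to finish, so no example_free_rcu() can still be in flight
	 * when the backing cache is destroyed below.
	 */
	rcu_barrier();
	kmem_cache_destroy(example_pool);
}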
block/cfq-iosched.c

@@ -62,10 +62,6 @@ static const int cfq_hist_divisor = 4;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
-static struct completion *ioc_gone;
-static DEFINE_SPINLOCK(ioc_gone_lock);
-
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -2671,26 +2667,8 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
 static void cfq_cic_free_rcu(struct rcu_head *head)
 {
-	struct cfq_io_context *cic;
-
-	cic = container_of(head, struct cfq_io_context, rcu_head);
-
-	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(cfq_ioc_count);
-
-	if (ioc_gone) {
-		/*
-		 * CFQ scheduler is exiting, grab exit lock and check
-		 * the pending io context count. If it hits zero,
-		 * complete ioc_gone and set it back to NULL
-		 */
-		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
-			complete(ioc_gone);
-			ioc_gone = NULL;
-		}
-		spin_unlock(&ioc_gone_lock);
-	}
+	kmem_cache_free(cfq_ioc_pool,
+			container_of(head, struct cfq_io_context, rcu_head));
 }
 
 static void cfq_cic_free(struct cfq_io_context *cic)
@@ -2705,7 +2683,7 @@ static void cfq_release_cic(struct cfq_io_context *cic)
 
 	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
-	hlist_del_rcu(&cic->cic_list);
+	hlist_del(&cic->cic_list);
 	cfq_cic_free(cic);
 }
 
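This hunk, and the cfq_create_cic hunk further down, swap the _rcu list primitives for their plain counterparts: with cic's now unlinked under both locks, cic_list has no lockless RCU traversers left. A rough sketch of the distinction, using an illustrative node type (struct item is not a cfq identifier):

#include <linux/list.h>
#include <linux/rculist.h>

struct item {
	struct hlist_node link;
};

/*
 * With concurrent lockless readers, the RCU variant is required: it
 * unlinks the node but leaves link.next intact, so a reader already
 * standing on the node can keep walking the list.
 */
static void remove_with_rcu_readers(struct item *it)
{
	hlist_del_rcu(&it->link);
}

/*
 * Once every traversal holds the same locks as the update, the plain
 * primitive suffices; it also poisons both of the node's pointers,
 * which helps catch use-after-removal bugs.
 */
static void remove_fully_locked(struct item *it)
{
	hlist_del(&it->link);
}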
@@ -2782,7 +2760,6 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->exit = cfq_exit_cic;
 		cic->release = cfq_release_cic;
-		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -3072,7 +3049,7 @@ static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	ret = radix_tree_insert(&ioc->radix_root, q->id, cic);
 	if (likely(!ret)) {
-		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+		hlist_add_head(&cic->cic_list, &ioc->cic_list);
 		list_add(&cic->queue_list, &cfqd->cic_list);
 		cic = NULL;
 	} else if (ret == -EEXIST) {
@@ -4156,19 +4133,9 @@ static int __init cfq_init(void)
 
 static void __exit cfq_exit(void)
 {
-	DECLARE_COMPLETION_ONSTACK(all_gone);
 	blkio_policy_unregister(&blkio_policy_cfq);
 	elv_unregister(&iosched_cfq);
-	ioc_gone = &all_gone;
-	/* ioc_gone's update must be visible before reading ioc_count */
-	smp_wmb();
-
-	/*
-	 * this also protects us from entering cfq_slab_kill() with
-	 * pending RCU callbacks
-	 */
-	if (elv_ioc_count_read(cfq_ioc_count))
-		wait_for_completion(&all_gone);
+	rcu_barrier();	/* make sure all cic RCU frees are complete */
 	cfq_slab_kill();
 }
 
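The remaining hunk, in include/linux/elevator.h, deletes the elv_ioc_count_* helpers now that their last user is gone. They wrapped the common per-CPU counter idiom, sketched below under hypothetical names (live_objs and friends are illustrative). Note the caveat that motivated the old ioc_gone handshake: the summed read is only exact once all updaters have quiesced, a guarantee rcu_barrier() now provides without any counting.

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* illustrative per-CPU counter of live objects */
static DEFINE_PER_CPU(unsigned long, live_objs);

static inline void live_objs_inc(void)
{
	this_cpu_inc(live_objs);	/* no shared-cacheline bouncing */
}

static inline void live_objs_dec(void)
{
	this_cpu_dec(live_objs);
}

/* exact only after all updaters have stopped, e.g. at module exit */
static unsigned long live_objs_read(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(live_objs, cpu);
	return sum;
}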
include/linux/elevator.h

@@ -196,22 +196,5 @@ enum {
 	INIT_LIST_HEAD(&(rq)->csd.list);	\
 	} while (0)
 
-/*
- * io context count accounting
- */
-#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
-#define elv_ioc_count_inc(name) this_cpu_inc(name)
-#define elv_ioc_count_dec(name) this_cpu_dec(name)
-
-#define elv_ioc_count_read(name)				\
-({								\
-	unsigned long __val = 0;				\
-	int __cpu;						\
-	smp_wmb();						\
-	for_each_possible_cpu(__cpu)				\
-		__val += per_cpu(name, __cpu);			\
-	__val;							\
-})
-
 #endif /* CONFIG_BLOCK */
 #endif