blk-throttle: fix race between blkcg_bio_issue_check() and cgroup_rmdir()

We've triggered a WARNING in blk_throtl_bio() when throttling writeback
io, which complains that blkg->refcnt is already 0 when calling
blkg_get(), and then the kernel crashes with an invalid paging request.
After investigating this issue, we've found it is caused by a race
between blkcg_bio_issue_check() and cgroup_rmdir(), as described
below:

writeback kworker               cgroup_rmdir
                                  cgroup_destroy_locked
                                    kill_css
                                      css_killed_ref_fn
                                        css_killed_work_fn
                                          offline_css
                                            blkcg_css_offline
  blkcg_bio_issue_check
    rcu_read_lock
    blkg_lookup
                                              spin_trylock(q->queue_lock)
                                              blkg_destroy
                                              spin_unlock(q->queue_lock)
    blk_throtl_bio
    spin_lock_irq(q->queue_lock)
    ...
    spin_unlock_irq(q->queue_lock)
  rcu_read_unlock

Since RCU only prevents the blkg from being freed while it is actively
in use, blkg->refcnt can drop to 0 during blkg_destroy(), which
schedules the blkg release. The subsequent blkg_get() in
blk_throtl_bio() then triggers the WARNING, and the matching blkg_put()
schedules the blkg release a second time, resulting in a double free.
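
For reference, the refcounting helpers of that era worked roughly as in
the sketch below (an approximation of the v4.15-era inlines in
include/linux/blk-cgroup.h, not a verbatim quote): blkg_get() warns when
the refcount is already 0, and blkg_put() schedules the RCU release when
the count drops to 0, so hitting 0 a second time queues the release
callback twice.

	static inline void blkg_get(struct blkcg_gq *blkg)
	{
		/* this is the WARNING that fires in blk_throtl_bio() */
		WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
		atomic_inc(&blkg->refcnt);
	}

	static inline void blkg_put(struct blkcg_gq *blkg)
	{
		WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
		/* reaching 0 twice schedules __blkg_release_rcu()
		 * twice, hence the double free */
		if (atomic_dec_and_test(&blkg->refcnt))
			call_rcu(&blkg->rcu_head, __blkg_release_rcu);
	}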
This race was introduced by commit ae11889636 ("blkcg: consolidate blkg
creation in blkcg_bio_issue_check()"). Before that commit, the caller
would look up the blkg first and then look up/create it again with
queue_lock held. Since reviving that logic would be a bit drastic, fix
it instead by only offlining the pd during blkcg_css_offline(), and move
the rest of the destruction (especially blkg_put()) into
blkcg_css_free(), which should be the right way as discussed.
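
For context, the two-step pattern the previous paragraph refers to
looked roughly like this (a simplified sketch of the pre-ae11889636
blk-throttle path, reconstructed from memory; throtl_lookup_tg() and
throtl_lookup_create_tg() were the old helpers):

	rcu_read_lock();
	tg = throtl_lookup_tg(td, blkcg);	/* lockless first lookup */
	if (!tg) {
		spin_lock_irq(q->queue_lock);
		/* re-lookup or create with queue_lock held, so
		 * teardown cannot race with taking a reference */
		tg = throtl_lookup_create_tg(td, blkcg);
		spin_unlock_irq(q->queue_lock);
	}
	rcu_read_unlock();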

Fixes: ae11889636 ("blkcg: consolidate blkg creation in blkcg_bio_issue_check()")
Reported-by: Jiufei Xue <jiufei.xue@linux.alibaba.com>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c

@@ -307,11 +307,28 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	}
 }
 
+static void blkg_pd_offline(struct blkcg_gq *blkg)
+{
+	int i;
+
+	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->blkcg->lock);
+
+	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+		struct blkcg_policy *pol = blkcg_policy[i];
+
+		if (blkg->pd[i] && !blkg->pd[i]->offline &&
+		    pol->pd_offline_fn) {
+			pol->pd_offline_fn(blkg->pd[i]);
+			blkg->pd[i]->offline = true;
+		}
+	}
+}
+
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
 	struct blkcg *blkcg = blkg->blkcg;
 	struct blkcg_gq *parent = blkg->parent;
-	int i;
 
 	lockdep_assert_held(blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
@@ -320,13 +337,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	WARN_ON_ONCE(list_empty(&blkg->q_node));
 	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
-	for (i = 0; i < BLKCG_MAX_POLS; i++) {
-		struct blkcg_policy *pol = blkcg_policy[i];
-
-		if (blkg->pd[i] && pol->pd_offline_fn)
-			pol->pd_offline_fn(blkg->pd[i]);
-	}
-
 	if (parent) {
 		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
 		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -369,6 +379,7 @@ static void blkg_destroy_all(struct request_queue *q)
 		struct blkcg *blkcg = blkg->blkcg;
 
 		spin_lock(&blkcg->lock);
+		blkg_pd_offline(blkg);
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
@@ -995,25 +1006,25 @@ static struct cftype blkcg_legacy_files[] = {
  * @css: css of interest
  *
  * This function is called when @css is about to go away and responsible
- * for shooting down all blkgs associated with @css.  blkgs should be
- * removed while holding both q and blkcg locks.  As blkcg lock is nested
- * inside q lock, this function performs reverse double lock dancing.
+ * for offlining all blkgs pd and killing all wbs associated with @css.
+ * blkgs pd offline should be done while holding both q and blkcg locks.
+ * As blkcg lock is nested inside q lock, this function performs reverse
+ * double lock dancing.
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
 static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
 	struct blkcg *blkcg = css_to_blkcg(css);
+	struct blkcg_gq *blkg;
 
 	spin_lock_irq(&blkcg->lock);
 
-	while (!hlist_empty(&blkcg->blkg_list)) {
-		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
-						struct blkcg_gq, blkcg_node);
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		struct request_queue *q = blkg->q;
 
 		if (spin_trylock(q->queue_lock)) {
-			blkg_destroy(blkg);
+			blkg_pd_offline(blkg);
 			spin_unlock(q->queue_lock);
 		} else {
 			spin_unlock_irq(&blkcg->lock);
@@ -1027,11 +1038,43 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
 	wb_blkcg_offline(blkcg);
 }
 
+/**
+ * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
+ * @blkcg: blkcg of interest
+ *
+ * This function is called when blkcg css is about to free and responsible for
+ * destroying all blkgs associated with @blkcg.
+ * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
+ * is nested inside q lock, this function performs reverse double lock dancing.
+ */
+static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
+{
+	spin_lock_irq(&blkcg->lock);
+	while (!hlist_empty(&blkcg->blkg_list)) {
+		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+						    struct blkcg_gq,
+						    blkcg_node);
+		struct request_queue *q = blkg->q;
+
+		if (spin_trylock(q->queue_lock)) {
+			blkg_destroy(blkg);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irq(&blkcg->lock);
+			cpu_relax();
+			spin_lock_irq(&blkcg->lock);
+		}
+	}
+	spin_unlock_irq(&blkcg->lock);
+}
+
 static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
 	struct blkcg *blkcg = css_to_blkcg(css);
 	int i;
 
+	blkcg_destroy_all_blkgs(blkcg);
+
 	mutex_lock(&blkcg_pol_mutex);
 
 	list_del(&blkcg->all_blkcgs_node);
@@ -1371,8 +1414,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		spin_lock(&blkg->blkcg->lock);
 
 		if (blkg->pd[pol->plid]) {
-			if (pol->pd_offline_fn)
+			if (!blkg->pd[pol->plid]->offline &&
+			    pol->pd_offline_fn) {
 				pol->pd_offline_fn(blkg->pd[pol->plid]);
+				blkg->pd[pol->plid]->offline = true;
+			}
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}

--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h

@@ -88,6 +88,7 @@ struct blkg_policy_data {
 	/* the blkg and policy id this per-policy data belongs to */
 	struct blkcg_gq			*blkg;
 	int				plid;
+	bool				offline;
 };
 
 /*
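
The effect of the new flag: whichever teardown path reaches a policy
data first (blkcg_css_offline(), blkg_destroy_all() or
blkcg_deactivate_policy()), pd_offline_fn() now runs at most once per
pd. Condensed, the guard the patch open-codes in blkg_pd_offline() and
blkcg_deactivate_policy() amounts to the following (pd_offline_once() is
a hypothetical name, not in the patch):

	static void pd_offline_once(struct blkg_policy_data *pd,
				    const struct blkcg_policy *pol)
	{
		/* pd->offline makes offlining idempotent across the
		 * css_offline, queue-cleanup and policy-removal paths */
		if (pd && !pd->offline && pol->pd_offline_fn) {
			pol->pd_offline_fn(pd);
			pd->offline = true;
		}
	}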