blk-cgroup: remove blkcg_bio_issue_check

blkcg_bio_issue_check is a giant inline function that does three entirely
different things.  Factor out the blk-cgroup related bio initialization
into a new helper, and open code the sequence in the only caller,
relying on the fact that all the actual functionality is stubbed out for
non-cgroup builds.
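
In effect, the lone caller (generic_make_request_checks) ends up with the
sequence below; on builds without CONFIG_BLK_CGROUP and
CONFIG_BLK_DEV_THROTTLING every call here is an empty or false-returning
stub, so the whole block compiles away (sketch of the resulting flow, taken
from the blk-core.c hunk):

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);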

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit db18a53e5b (parent 93b8063804)
Author: Christoph Hellwig, 2020-06-27 09:31:58 +02:00
Committed by: Jens Axboe
5 files changed, 47 insertions(+), 57 deletions(-)

block/blk-cgroup.c

@@ -1813,6 +1813,40 @@ void bio_clone_blkg_association(struct bio *dst, struct bio *src)
 }
 EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
 
+static int blk_cgroup_io_type(struct bio *bio)
+{
+	if (op_is_discard(bio->bi_opf))
+		return BLKG_IOSTAT_DISCARD;
+	if (op_is_write(bio->bi_opf))
+		return BLKG_IOSTAT_WRITE;
+	return BLKG_IOSTAT_READ;
+}
+
+void blk_cgroup_bio_start(struct bio *bio)
+{
+	int rwd = blk_cgroup_io_type(bio), cpu;
+	struct blkg_iostat_set *bis;
+
+	cpu = get_cpu();
+	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
+	u64_stats_update_begin(&bis->sync);
+
+	/*
+	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
+	 * bio and we would have already accounted for the size of the bio.
+	 */
+	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
+		bio_set_flag(bio, BIO_CGROUP_ACCT);
+		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
+	}
+	bis->cur.ios[rwd]++;
+	u64_stats_update_end(&bis->sync);
+
+	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
+		cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
+	put_cpu();
+}
+
 static int __init blkcg_init(void)
 {
 	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
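
Aside (not part of the patch): the discard check in blk_cgroup_io_type()
must come before the write check, because op_is_write() also returns true
for discards (REQ_OP_DISCARD has the low bit set, which is exactly what
op_is_write() tests). A standalone toy model of that precedence, with
simplified flag handling:

#include <stdbool.h>
#include <stdio.h>

/* Simplified opcodes: the low bit marks data-altering ops, as with the
 * kernel's op_is_write().  Values mirror REQ_OP_READ (0), REQ_OP_WRITE (1)
 * and REQ_OP_DISCARD (3). */
enum { OP_READ = 0, OP_WRITE = 1, OP_DISCARD = 3 };
enum { IOSTAT_READ, IOSTAT_WRITE, IOSTAT_DISCARD };

static bool op_is_write(int op)   { return op & 1; }
static bool op_is_discard(int op) { return op == OP_DISCARD; }

/* Same precedence as blk_cgroup_io_type(): discard first, otherwise a
 * discard would be miscounted as a plain write. */
static int io_type(int op)
{
	if (op_is_discard(op))
		return IOSTAT_DISCARD;
	if (op_is_write(op))
		return IOSTAT_WRITE;
	return IOSTAT_READ;
}

int main(void)
{
	printf("%d %d %d\n", io_type(OP_READ), io_type(OP_WRITE),
	       io_type(OP_DISCARD));	/* prints: 0 1 2 */
	return 0;
}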

block/blk-core.c

@@ -1073,8 +1073,13 @@ generic_make_request_checks(struct bio *bio)
 	if (unlikely(!current->io_context))
 		create_task_io_context(current, GFP_ATOMIC, q->node);
 
-	if (!blkcg_bio_issue_check(q, bio))
+	if (blk_throtl_bio(bio)) {
+		blkcg_bio_issue_init(bio);
 		return false;
+	}
+
+	blk_cgroup_bio_start(bio);
+	blkcg_bio_issue_init(bio);
 
 	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
 		trace_block_bio_queue(q, bio);
block/blk-throttle.c

@@ -2158,9 +2158,10 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 }
 #endif
 
-bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
-		    struct bio *bio)
+bool blk_throtl_bio(struct bio *bio)
 {
+	struct request_queue *q = bio->bi_disk->queue;
+	struct blkcg_gq *blkg = bio->bi_blkg;
 	struct throtl_qnode *qn = NULL;
 	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
 	struct throtl_service_queue *sq;
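
Design note on the new signature (illustrative, with hypothetical stand-in
types): a bio already carries its disk, queue and blkg, so passing them as
separate parameters only let callers pass mismatched values. Deriving them
inside the function removes that risk:

struct request_queue { int id; };
struct gendisk { struct request_queue *queue; };
struct blkcg_gq { int weight; };
struct bio {
	struct gendisk *bi_disk;
	struct blkcg_gq *bi_blkg;
};

/* Sketch of the pattern: the single bio argument is the source of truth. */
static bool throtl_bio_sketch(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;	/* was a parameter */
	struct blkcg_gq *blkg = bio->bi_blkg;		/* was a parameter */

	(void)q;
	(void)blkg;
	return false;	/* placeholder: never throttle in this sketch */
}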

block/blk.h

@@ -288,10 +288,12 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern void blk_throtl_register_queue(struct request_queue *q);
+bool blk_throtl_bio(struct bio *bio);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
 static inline void blk_throtl_register_queue(struct request_queue *q) { }
+static inline bool blk_throtl_bio(struct bio *bio) { return false; }
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
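
The #else branch above follows the usual config-stub pattern: with
CONFIG_BLK_DEV_THROTTLING off, blk_throtl_bio() becomes a static inline
returning false, so the caller needs no #ifdef and the compiler deletes the
throttled path entirely. A standalone sketch of the same pattern
(hypothetical ENABLE_THROTTLING macro; build without it defined):

#include <stdbool.h>
#include <stdio.h>

struct bio { int size; };

#ifdef ENABLE_THROTTLING
bool throtl_bio(struct bio *bio);	/* real version, defined elsewhere */
#else
/* Configured out: constant-false stub, callers compile unchanged. */
static inline bool throtl_bio(struct bio *bio) { (void)bio; return false; }
#endif

int main(void)
{
	struct bio b = { .size = 4096 };

	if (throtl_bio(&b)) {	/* dead code when the stub is in use */
		puts("throttled");
		return 1;
	}
	puts("submitted");
	return 0;
}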

include/linux/blk-cgroup.h

@@ -517,14 +517,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
 		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
 					      (p_blkg)->q, false)))
 
-#ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
-			   struct bio *bio);
-#else
-static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
-				  struct bio *bio) { return false; }
-#endif
-
 bool __blkcg_punt_bio_submit(struct bio *bio);
 
 static inline bool blkcg_punt_bio_submit(struct bio *bio)
@@ -540,50 +532,6 @@ static inline void blkcg_bio_issue_init(struct bio *bio)
 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
 }
 
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
-					 struct bio *bio)
-{
-	struct blkcg_gq *blkg = bio->bi_blkg;
-	bool throtl = false;
-
-	throtl = blk_throtl_bio(q, blkg, bio);
-	if (!throtl) {
-		struct blkg_iostat_set *bis;
-		int rwd, cpu;
-
-		if (op_is_discard(bio->bi_opf))
-			rwd = BLKG_IOSTAT_DISCARD;
-		else if (op_is_write(bio->bi_opf))
-			rwd = BLKG_IOSTAT_WRITE;
-		else
-			rwd = BLKG_IOSTAT_READ;
-
-		cpu = get_cpu();
-		bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
-		u64_stats_update_begin(&bis->sync);
-
-		/*
-		 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a
-		 * split bio and we would have already accounted for the size of
-		 * the bio.
-		 */
-		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
-			bio_set_flag(bio, BIO_CGROUP_ACCT);
-			bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
-		}
-		bis->cur.ios[rwd]++;
-		u64_stats_update_end(&bis->sync);
-
-		if (cgroup_subsys_on_dfl(io_cgrp_subsys))
-			cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
-		put_cpu();
-	}
-
-	blkcg_bio_issue_init(bio);
-	return !throtl;
-}
-
 static inline void blkcg_use_delay(struct blkcg_gq *blkg)
 {
 	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
@@ -657,6 +605,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
 		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
 }
 
+void blk_cgroup_bio_start(struct bio *bio);
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
 void blkcg_maybe_throttle_current(void);
@@ -710,8 +659,7 @@ static inline void blkg_put(struct blkcg_gq *blkg) { }
 
 static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
 static inline void blkcg_bio_issue_init(struct bio *bio) { }
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
-					 struct bio *bio) { return true; }
+static inline void blk_cgroup_bio_start(struct bio *bio) { }
 
 #define blk_queue_for_each_rl(rl, q)	\
 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)