erofs: add REQ_RAHEAD flag to readahead requests

Let's add the REQ_RAHEAD flag so it'd be easier to identify
readahead I/O requests in blktrace.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Link: https://lore.kernel.org/r/20200919072730.24989-3-hsiangkao@redhat.com
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
Gao Xiang 2020-09-19 15:27:30 +08:00
parent bf9a123b9c
commit 6ea5aad32d
2 changed files with 12 additions and 7 deletions
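
For reference: blktrace/blkparse expose per-request flags through the
RWBS field, and the kernel's blk_fill_rwbs() appends 'A' when
REQ_RAHEAD is set, so reads tagged by this patch show up as "RA"
instead of plain "R". Below is a minimal userspace sketch of that
decoding; the flag values are illustrative stand-ins, not the real
kernel bit positions, and fill_rwbs() is a simplified model of
blk_fill_rwbs(), not the kernel source.

#include <stdio.h>

/* illustrative stand-ins for the kernel's req_opf / req_flag_bits */
#define REQ_OP_READ	0U
#define REQ_OP_WRITE	1U
#define REQ_OP_MASK	0xffU
#define REQ_RAHEAD	(1U << 11)
#define REQ_SYNC	(1U << 12)

/* modeled on blk_fill_rwbs(); reduced to the flags used here */
static void fill_rwbs(char rwbs[8], unsigned int opf)
{
	int i = 0;

	switch (opf & REQ_OP_MASK) {
	case REQ_OP_WRITE:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	}
	if (opf & REQ_RAHEAD)
		rwbs[i++] = 'A';	/* readahead decodes as 'A' */
	if (opf & REQ_SYNC)
		rwbs[i++] = 'S';
	rwbs[i] = '\0';
}

int main(void)
{
	char rwbs[8];

	fill_rwbs(rwbs, REQ_OP_READ | REQ_RAHEAD);
	printf("%s\n", rwbs);	/* prints "RA" */
	return 0;
}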

diff --git a/fs/erofs/data.c b/fs/erofs/data.c
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -224,7 +224,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
 		bio_set_dev(bio, sb->s_bdev);
 		bio->bi_iter.bi_sector = (sector_t)blknr <<
 			LOG_SECTORS_PER_BLOCK;
-		bio->bi_opf = REQ_OP_READ;
+		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
 	}
 
 	err = bio_add_page(bio, page, PAGE_SIZE, 0);

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -135,6 +135,7 @@ struct z_erofs_decompress_frontend {
 	struct z_erofs_collector clt;
 	struct erofs_map_blocks map;
 
+	bool readahead;
 	/* used for applying cache strategy on the fly */
 	bool backmost;
 	erofs_off_t headoffset;
@@ -1148,7 +1149,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 }
 
 static void z_erofs_submit_queue(struct super_block *sb,
-				 z_erofs_next_pcluster_t owned_head,
+				 struct z_erofs_decompress_frontend *f,
 				 struct list_head *pagepool,
 				 struct z_erofs_decompressqueue *fgq,
 				 bool *force_fg)
@@ -1157,6 +1158,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
 	void *bi_private;
+	z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
 	/* since bio will be NULL, no need to initialize last_index */
 	pgoff_t last_index;
 	unsigned int nr_bios = 0;
@@ -1212,6 +1214,8 @@ static void z_erofs_submit_queue(struct super_block *sb,
 				LOG_SECTORS_PER_BLOCK;
 			bio->bi_private = bi_private;
 			bio->bi_opf = REQ_OP_READ;
+			if (f->readahead)
+				bio->bi_opf |= REQ_RAHEAD;
 			++nr_bios;
 		}
 
@@ -1243,14 +1247,14 @@
 }
 
 static void z_erofs_runqueue(struct super_block *sb,
-			     struct z_erofs_collector *clt,
+			     struct z_erofs_decompress_frontend *f,
 			     struct list_head *pagepool, bool force_fg)
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+	if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
-	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
+	z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
@@ -1281,7 +1285,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
 	(void)z_erofs_collector_end(&f.clt);
 
 	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);
 
 	if (err)
 		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1307,6 +1311,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 
 	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
 
+	f.readahead = true;
 	f.headoffset = readahead_pos(rac);
 
 	while ((page = readahead_page(rac))) {
@@ -1340,7 +1345,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
 
 	(void)z_erofs_collector_end(&f.clt);
 
-	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool, sync);
 
 	if (f.map.mpage)
 		put_page(f.map.mpage);