Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

mm: refactor readahead defines in mm.h

All users of VM_MAX_READAHEAD actually convert it to kbytes and then to
pages. Define the macro explicitly as (SZ_128K / PAGE_SIZE). This
simplifies the expression in every filesystem. Also rename the macro to
VM_READAHEAD_PAGES to properly convey its meaning. Finally remove unused
VM_MIN_READAHEAD.

[akpm@linux-foundation.org: fix fs/io_uring.c, per Stephen]
Link: http://lkml.kernel.org/r/20181221144053.24318-1-nborisov@suse.com
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Eric Van Hensbergen <ericvh@gmail.com>
Cc: Latchesar Ionkov <lucho@ionkov.net>
Cc: Dominique Martinet <asmadeus@codewreck.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

This commit is contained in:
parent b57e622e6d
commit b5420237ec

7 changed files with 8 additions and 9 deletions
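
Before the per-file hunks, a quick check of the arithmetic the commit message relies on. This is an illustrative userspace sketch, not part of the patch: PAGE_SIZE is assumed to be 4 KiB, and the macros are redefined locally purely to compare the old kilobyte-based expression with the new page-based one.

/* Illustrative sketch only, not from the patch: the old kilobyte-based
 * expression and the new page-based macro evaluate to the same count.
 */
#include <stdio.h>

#define PAGE_SIZE          4096UL                     /* assumed 4 KiB pages   */
#define SZ_128K            (128UL * 1024)             /* as in <linux/sizes.h> */
#define VM_MAX_READAHEAD   128UL                      /* old macro, in kbytes  */
#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)      /* new macro, in pages   */

int main(void)
{
	unsigned long old_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;

	/* Both print 32 with 4 KiB pages. */
	printf("old: %lu pages, new: %lu pages\n", old_pages, VM_READAHEAD_PAGES);
	return 0;
}

With 4 KiB pages both expressions come out to 32 pages, which is why the conversion changes no caller's behaviour.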

block/blk-core.c
@@ -500,8 +500,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q->stats)
 		goto fail_stats;
 
-	q->backing_dev_info->ra_pages =
-			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
 	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->backing_dev_info->name = "block";
 	q->node = node_id;

fs/9p/vfs_super.c
@@ -92,7 +92,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
 		return ret;
 
 	if (v9ses->cache)
-		sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
+		sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 
 	sb->s_flags |= SB_ACTIVE | SB_DIRSYNC;
 	if (!v9ses->cache)

fs/afs/super.c
@@ -399,7 +399,7 @@ static int afs_fill_super(struct super_block *sb,
 	ret = super_setup_bdi(sb);
 	if (ret)
 		return ret;
-	sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
+	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 
 	/* allocate the root inode and dentry */
 	if (as->dyn_root) {

fs/btrfs/disk-io.c
@@ -2958,7 +2958,7 @@ int open_ctree(struct super_block *sb,
 	sb->s_bdi->congested_fn = btrfs_congested_fn;
 	sb->s_bdi->congested_data = fs_info;
 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
-	sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
+	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
 
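
The btrfs hunk above keeps its extra scaling: the base readahead is multiplied by the device count and then clamped to at least 4 MiB. A hedged userspace sketch of that arithmetic, again assuming 4 KiB pages; the helper name is hypothetical, not a kernel function.

/* Illustrative sketch, not from the patch: mirrors the btrfs ra_pages math. */
#include <stdio.h>

#define PAGE_SIZE          4096UL                          /* assumed 4 KiB pages */
#define SZ_4M              (4UL * 1024 * 1024)
#define VM_READAHEAD_PAGES ((128UL * 1024) / PAGE_SIZE)    /* 32 pages */

/* Hypothetical helper for illustration only. */
static unsigned long btrfs_style_ra_pages(unsigned long num_devices)
{
	unsigned long ra = VM_READAHEAD_PAGES * num_devices;
	unsigned long floor = SZ_4M / PAGE_SIZE;               /* 1024 pages */

	return ra > floor ? ra : floor;
}

int main(void)
{
	/* With 4 devices: 32 * 4 = 128 pages, below the 1024-page floor -> 1024. */
	printf("%lu pages\n", btrfs_style_ra_pages(4));
	return 0;
}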

fs/fuse/inode.c
@@ -1010,7 +1010,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
 	if (err)
 		return err;
 
-	sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 	/* fuse does it's own writeback accounting */
 	sb->s_bdi->capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
 

fs/io_uring.c
@@ -923,7 +923,7 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 	/* Use 8x RA size as a decent limiter for both reads/writes */
 	max_pages = filp->f_ra.ra_pages;
 	if (!max_pages)
-		max_pages = VM_MAX_READAHEAD >> (PAGE_SHIFT - 10);
+		max_pages = VM_READAHEAD_PAGES;
 	max_pages *= 8;
 
 	/* If max pages are exceeded, reset the state */
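
The line removed from io_uring used a shift rather than a division: right-shifting a kilobyte count by (PAGE_SHIFT - 10) converts it to pages, since 2^10 bytes is 1 KiB. A small sketch, assuming PAGE_SHIFT is 12 (4 KiB pages), showing that the old and new expressions agree.

/* Illustrative sketch, not from the patch: the shift-based conversion that
 * io_uring used is equivalent to the new page-based macro.
 */
#include <stdio.h>

#define PAGE_SHIFT         12UL                            /* assumed: 4 KiB pages */
#define PAGE_SIZE          (1UL << PAGE_SHIFT)
#define VM_MAX_READAHEAD   128UL                           /* old macro, in kbytes */
#define VM_READAHEAD_PAGES ((128UL * 1024) / PAGE_SIZE)    /* new macro, in pages  */

int main(void)
{
	/* 128 KiB >> (12 - 10) == 128 >> 2 == 32 pages. */
	unsigned long old_pages = VM_MAX_READAHEAD >> (PAGE_SHIFT - 10);

	printf("old: %lu, new: %lu\n", old_pages, VM_READAHEAD_PAGES);
	return 0;
}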

include/linux/mm.h
@@ -26,6 +26,7 @@
 #include <linux/page_ref.h>
 #include <linux/memremap.h>
 #include <linux/overflow.h>
+#include <linux/sizes.h>
 
 struct mempolicy;
 struct anon_vma;

@@ -2402,8 +2403,7 @@ int __must_check write_one_page(struct page *page);
 void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
-#define VM_MAX_READAHEAD	128	/* kbytes */
-#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
+#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
 
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);