buffer: add folio_alloc_buffers() helper

Add a folio version of the alloc_page_buffers() helper.  This is required to
convert create_page_buffers() to folio_create_buffers() later in the series.

alloc_page_buffers() has been modified to simply call folio_alloc_buffers().
The wrapper adds one call to compound_head() (via page_folio()), but
folio_alloc_buffers() itself drops one call to compound_head() compared to the
existing alloc_page_buffers() implementation, so page-based callers see no net
change (see the illustrative sketch below).

Link: https://lkml.kernel.org/r/20230417123618.22094-3-p.raghav@samsung.com
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
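
To illustrate the compound_head() trade-off described in the commit message,
here is a minimal, hypothetical caller sketch (not part of this patch): a
caller that already holds a locked folio can call folio_alloc_buffers()
directly and skip the page_folio()/compound_head() conversion that the
alloc_page_buffers() wrapper below performs.  The function name
example_attach_buffers and the 512-byte buffer size are invented for
illustration; the list walk relies only on the NULL-terminated
bh->b_this_page chain that the helper builds.

#include <linux/buffer_head.h>

/* Hypothetical caller, for illustration only; not part of this patch. */
static void example_attach_buffers(struct folio *folio)
{
	struct buffer_head *head, *bh;

	/*
	 * retry == false: the allocation may fail and NULL must be handled.
	 * Passing retry == true would add __GFP_NOFAIL instead.
	 */
	head = folio_alloc_buffers(folio, 512, false);
	if (!head)
		return;

	/*
	 * folio_alloc_buffers() links the buffers through b_this_page;
	 * the chain is NULL-terminated at this point.
	 */
	for (bh = head; bh != NULL; bh = bh->b_this_page) {
		/* per-buffer initialisation would go here */
	}
}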

fs/buffer.c

@@ -843,7 +843,7 @@ int remove_inode_buffers(struct inode *inode)
 }
 
 /*
- * Create the appropriate buffers when given a page for data area and
+ * Create the appropriate buffers when given a folio for data area and
  * the size of each buffer.. Use the bh->b_this_page linked list to
  * follow the buffers created.  Return NULL if unable to create more
  * buffers.
@@ -851,8 +851,8 @@ int remove_inode_buffers(struct inode *inode)
  * The retry flag is used to differentiate async IO (paging, swapping)
  * which may not fail from ordinary buffer allocations.
  */
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
-		bool retry)
+struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
+					bool retry)
 {
 	struct buffer_head *bh, *head;
 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
@@ -862,12 +862,12 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 	if (retry)
 		gfp |= __GFP_NOFAIL;
 
-	/* The page lock pins the memcg */
-	memcg = page_memcg(page);
+	/* The folio lock pins the memcg */
+	memcg = folio_memcg(folio);
 	old_memcg = set_active_memcg(memcg);
 
 	head = NULL;
-	offset = PAGE_SIZE;
+	offset = folio_size(folio);
 	while ((offset -= size) >= 0) {
 		bh = alloc_buffer_head(gfp);
 		if (!bh)
@@ -879,8 +879,8 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 
 		bh->b_size = size;
 
-		/* Link the buffer to its page */
-		set_bh_page(bh, page, offset);
+		/* Link the buffer to its folio */
+		folio_set_bh(bh, folio, offset);
 	}
 out:
 	set_active_memcg(old_memcg);
@@ -899,6 +899,13 @@ no_grow:
 
 	goto out;
 }
+EXPORT_SYMBOL_GPL(folio_alloc_buffers);
+
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
+				       bool retry)
+{
+	return folio_alloc_buffers(page_folio(page), size, retry);
+}
 EXPORT_SYMBOL_GPL(alloc_page_buffers);
 
 static inline void

include/linux/buffer_head.h

@@ -199,6 +199,8 @@ void set_bh_page(struct buffer_head *bh,
 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
 		  unsigned long offset);
 bool try_to_free_buffers(struct folio *);
+struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
+					bool retry);
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 		bool retry);
 void create_empty_buffers(struct page *, unsigned long,