Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
shmem: move memcg charge out of shmem_add_to_page_cache()
Extract shmem's memcg charging out of shmem_add_to_page_cache(): it's
misleadingly done there, because many calls are dealing with a swapcache
page, whose memcg is nowadays always remembered while swapped out, then
the charge re-levied when it's brought back into swapcache.

Temporarily move it back up to the shmem_get_folio_gfp() level, where
the memcg was charged before v5.8; but the next commit goes on to move
it back down to a new home.

In making this change, it becomes clear that shmem_swapin_folio() does
not need to know the vma, just the fault mm (if any): call it fault_mm
rather than charge_mm - let mem_cgroup_charge() decide whom to charge.

Link: https://lkml.kernel.org/r/4b2143c5-bf32-64f0-841-81a81158dac@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Carlos Maiolino <cem@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Tim Chen <tim.c.chen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
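In short, the patch inverts the old ordering: instead of shmem_add_to_page_cache() charging the memcg internally (and skipping swapcache pages), callers now call mem_cgroup_charge() first and only then insert the folio into the page cache. Below is a minimal, self-contained C sketch of that calling convention; the stub types and functions are stand-ins invented for illustration, not kernel APIs, and only the charge-then-insert ordering mirrors the patch.

#include <stdio.h>

/* Stubs standing in for kernel types and helpers; illustrative only. */
struct folio { int charged, in_cache; };
struct mm_struct { const char *name; };

static int mem_cgroup_charge_stub(struct folio *f, struct mm_struct *mm)
{
	/* The real mem_cgroup_charge() decides whom to charge from the mm
	 * (or from a swapped-out page's remembered memcg); here we just
	 * mark the folio as charged. */
	(void)mm;
	f->charged = 1;
	return 0;
}

static int add_to_page_cache_stub(struct folio *f)
{
	/* After the patch, this step no longer charges: it only inserts. */
	f->in_cache = 1;
	return 0;
}

int main(void)
{
	struct mm_struct fault_mm = { "faulting task" };
	struct folio folio = { 0, 0 };

	/* New order in shmem_get_folio_gfp() / shmem_mfill_atomic_pte():
	 * charge first, then insert; a charge failure bails out before
	 * the page cache is ever touched. */
	if (mem_cgroup_charge_stub(&folio, &fault_mm))
		return 1;
	if (add_to_page_cache_stub(&folio))
		return 1;

	printf("charged=%d in_cache=%d\n", folio.charged, folio.in_cache);
	return 0;
}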
This commit is contained in:
parent 4199f51a7e
commit 054a9f7ccd
1 changed file with 29 additions and 39 deletions
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -146,9 +146,8 @@ static unsigned long shmem_default_max_inodes(void)
 #endif
 
 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
-			struct folio **foliop, enum sgp_type sgp,
-			gfp_t gfp, struct vm_area_struct *vma,
-			vm_fault_t *fault_type);
+			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
+			struct mm_struct *fault_mm, vm_fault_t *fault_type);
 
 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
 {
@@ -760,12 +759,10 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
  */
 static int shmem_add_to_page_cache(struct folio *folio,
 				   struct address_space *mapping,
-				   pgoff_t index, void *expected, gfp_t gfp,
-				   struct mm_struct *charge_mm)
+				   pgoff_t index, void *expected, gfp_t gfp)
 {
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
 	long nr = folio_nr_pages(folio);
-	int error;
 
 	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -776,16 +773,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
 	folio->mapping = mapping;
 	folio->index = index;
 
-	if (!folio_test_swapcache(folio)) {
-		error = mem_cgroup_charge(folio, charge_mm, gfp);
-		if (error) {
-			if (folio_test_pmd_mappable(folio)) {
-				count_vm_event(THP_FILE_FALLBACK);
-				count_vm_event(THP_FILE_FALLBACK_CHARGE);
-			}
-			goto error;
-		}
-	}
+	gfp &= GFP_RECLAIM_MASK;
 	folio_throttle_swaprate(folio, gfp);
 
 	do {
@@ -813,15 +801,12 @@ static int shmem_add_to_page_cache(struct folio *folio,
 	} while (xas_nomem(&xas, gfp));
 
 	if (xas_error(&xas)) {
-		error = xas_error(&xas);
-		goto error;
+		folio->mapping = NULL;
+		folio_ref_sub(folio, nr);
+		return xas_error(&xas);
 	}
 
 	return 0;
-error:
-	folio->mapping = NULL;
-	folio_ref_sub(folio, nr);
-	return error;
 }
 
 /*
@@ -1324,10 +1309,8 @@ static int shmem_unuse_swap_entries(struct inode *inode,
 
 		if (!xa_is_value(folio))
 			continue;
-		error = shmem_swapin_folio(inode, indices[i],
-					   &folio, SGP_CACHE,
-					   mapping_gfp_mask(mapping),
-					   NULL, NULL);
+		error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
+					   mapping_gfp_mask(mapping), NULL, NULL);
 		if (error == 0) {
 			folio_unlock(folio);
 			folio_put(folio);
@@ -1810,12 +1793,11 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
  */
 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 			     struct folio **foliop, enum sgp_type sgp,
-			     gfp_t gfp, struct vm_area_struct *vma,
+			     gfp_t gfp, struct mm_struct *fault_mm,
 			     vm_fault_t *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
 	struct swap_info_struct *si;
 	struct folio *folio = NULL;
 	swp_entry_t swap;
@@ -1843,7 +1825,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 		if (fault_type) {
 			*fault_type |= VM_FAULT_MAJOR;
 			count_vm_event(PGMAJFAULT);
-			count_memcg_event_mm(charge_mm, PGMAJFAULT);
+			count_memcg_event_mm(fault_mm, PGMAJFAULT);
 		}
 		/* Here we actually start the io */
 		folio = shmem_swapin(swap, gfp, info, index);
@@ -1880,8 +1862,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	}
 
 	error = shmem_add_to_page_cache(folio, mapping, index,
-					swp_to_radix_entry(swap), gfp,
-					charge_mm);
+					swp_to_radix_entry(swap), gfp);
 	if (error)
 		goto failed;
 
@@ -1929,7 +1910,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo;
-	struct mm_struct *charge_mm;
+	struct mm_struct *fault_mm;
 	struct folio *folio;
 	pgoff_t hindex;
 	gfp_t huge_gfp;
@@ -1946,7 +1927,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	}
 
 	sbinfo = SHMEM_SB(inode->i_sb);
-	charge_mm = vma ? vma->vm_mm : NULL;
+	fault_mm = vma ? vma->vm_mm : NULL;
 
 	folio = filemap_get_entry(mapping, index);
 	if (folio && vma && userfaultfd_minor(vma)) {
@@ -1958,7 +1939,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 
 	if (xa_is_value(folio)) {
 		error = shmem_swapin_folio(inode, index, &folio,
-					   sgp, gfp, vma, fault_type);
+					   sgp, gfp, fault_mm, fault_type);
 		if (error == -EEXIST)
 			goto repeat;
 
@@ -2044,9 +2025,16 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	if (sgp == SGP_WRITE)
 		__folio_set_referenced(folio);
 
-	error = shmem_add_to_page_cache(folio, mapping, hindex,
-					NULL, gfp & GFP_RECLAIM_MASK,
-					charge_mm);
+	error = mem_cgroup_charge(folio, fault_mm, gfp);
+	if (error) {
+		if (folio_test_pmd_mappable(folio)) {
+			count_vm_event(THP_FILE_FALLBACK);
+			count_vm_event(THP_FILE_FALLBACK_CHARGE);
+		}
+		goto unacct;
+	}
+
+	error = shmem_add_to_page_cache(folio, mapping, hindex, NULL, gfp);
 	if (error)
 		goto unacct;
 
@@ -2644,8 +2632,10 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 	if (unlikely(pgoff >= max_off))
 		goto out_release;
 
-	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
-				      gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
+	ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
+	if (ret)
+		goto out_release;
+	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
 	if (ret)
 		goto out_release;
 