mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-14 06:35:12 +00:00
d21077fbc2
This adds the general_profit KSM sysfs knob and the process profit metric knobs to ksm_stat. 1) expose general_profit metric The documentation mentions a general profit metric, however this metric is not calculated. In addition the formula depends on the size of internal structures, which makes it more difficult for an administrator to make the calculation. Adding the metric for a better user experience. 2) document general_profit sysfs knob 3) calculate ksm process profit metric The ksm documentation mentions the process profit metric and how to calculate it. This adds the calculation of the metric. 4) mm: expose ksm process profit metric in ksm_stat This exposes the ksm process profit metric in /proc/<pid>/ksm_stat. The documentation mentions the formula for the ksm process profit metric, however it does not calculate it. In addition the formula depends on the size of internal structures. So it makes sense to expose it. 5) document new procfs ksm knobs Link: https://lkml.kernel.org/r/20230418051342.1919757-3-shr@devkernel.io Signed-off-by: Stefan Roesch <shr@devkernel.io> Reviewed-by: Bagas Sanjaya <bagasdotme@gmail.com> Acked-by: David Hildenbrand <david@redhat.com> Cc: David Hildenbrand <david@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Rik van Riel <riel@surriel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
122 lines · 3.1 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __LINUX_KSM_H
|
|
#define __LINUX_KSM_H
|
|
/*
|
|
* Memory merging support.
|
|
*
|
|
* This code enables dynamic sharing of identical pages found in different
|
|
* memory areas, even if they are not shared by fork().
|
|
*/
|
|
|
|
#include <linux/bitops.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/rmap.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/sched/coredump.h>
|
|
|
|
#ifdef CONFIG_KSM
|
|
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
|
|
unsigned long end, int advice, unsigned long *vm_flags);
|
|
|
|
void ksm_add_vma(struct vm_area_struct *vma);
|
|
int ksm_enable_merge_any(struct mm_struct *mm);
|
|
|
|
int __ksm_enter(struct mm_struct *mm);
|
|
void __ksm_exit(struct mm_struct *mm);
|
|
|
|
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
{
|
|
int ret;
|
|
|
|
if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
|
|
ret = __ksm_enter(mm);
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
|
|
if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
|
|
set_bit(MMF_VM_MERGE_ANY, &mm->flags);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static inline void ksm_exit(struct mm_struct *mm)
|
|
{
|
|
if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
|
|
__ksm_exit(mm);
|
|
}
|
|
|
|
/*
|
|
* When do_swap_page() first faults in from swap what used to be a KSM page,
|
|
* no problem, it will be assigned to this vma's anon_vma; but thereafter,
|
|
* it might be faulted into a different anon_vma (or perhaps to a different
|
|
* offset in the same anon_vma). do_swap_page() cannot do all the locking
|
|
* needed to reconstitute a cross-anon_vma KSM page: for now it has to make
|
|
* a copy, and leave remerging the pages to a later pass of ksmd.
|
|
*
|
|
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
|
|
* but what if the vma was unmerged while the page was swapped out?
|
|
*/
|
|
struct page *ksm_might_need_to_copy(struct page *page,
|
|
struct vm_area_struct *vma, unsigned long address);
|
|
|
|
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
|
|
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
|
|
|
|
#ifdef CONFIG_MEMORY_FAILURE
|
|
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
|
|
int force_early);
|
|
#endif
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
long ksm_process_profit(struct mm_struct *);
|
|
#endif /* CONFIG_PROC_FS */
|
|
|
|
#else /* !CONFIG_KSM */
|
|
|
|
static inline void ksm_add_vma(struct vm_area_struct *vma)
|
|
{
|
|
}
|
|
|
|
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline void ksm_exit(struct mm_struct *mm)
|
|
{
|
|
}
|
|
|
|
#ifdef CONFIG_MEMORY_FAILURE
|
|
static inline void collect_procs_ksm(struct page *page,
|
|
struct list_head *to_kill, int force_early)
|
|
{
|
|
}
|
|
#endif
|
|
|
|
#ifdef CONFIG_MMU
|
|
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
|
|
unsigned long end, int advice, unsigned long *vm_flags)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline struct page *ksm_might_need_to_copy(struct page *page,
|
|
struct vm_area_struct *vma, unsigned long address)
|
|
{
|
|
return page;
|
|
}
|
|
|
|
static inline void rmap_walk_ksm(struct folio *folio,
|
|
struct rmap_walk_control *rwc)
|
|
{
|
|
}
|
|
|
|
static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
|
|
{
|
|
}
|
|
#endif /* CONFIG_MMU */
|
|
#endif /* !CONFIG_KSM */
|
|
|
|
#endif /* __LINUX_KSM_H */
|