commit 98931dd95f
Merge tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM updates from Andrew Morton:
 "Almost all of MM here. A few things are still getting finished off, reviewed, etc.

  - Yang Shi has improved the behaviour of khugepaged collapsing of readonly file-backed transparent hugepages.

  - Johannes Weiner has arranged for zswap memory use to be tracked and managed on a per-cgroup basis.

  - Muchun Song adds a /proc knob ("hugetlb_optimize_vmemmap") for runtime enablement of the recent huge page vmemmap optimization feature.

  - Baolin Wang contributes a series to fix some issues around hugetlb pagetable invalidation.

  - Zhenwei Pi has fixed some interactions between hwpoisoned pages and virtualization.

  - Tong Tiangen has enabled the use of the presently x86-only page_table_check debugging feature on arm64 and riscv.

  - David Vernet has done some fixup work on the memcg selftests.

  - Peter Xu has taught userfaultfd to handle write protection faults against shmem- and hugetlbfs-backed files.

  - More DAMON development from SeongJae Park - adding online tuning of the feature and support for monitoring of fixed virtual address ranges. Also easier discovery of which monitoring operations are available.

  - Nadav Amit has done some optimization of TLB flushing during mprotect().

  - Neil Brown continues to labor away at improving our swap-over-NFS support.

  - David Hildenbrand has some fixes to anon page COWing versus get_user_pages().

  - Peng Liu fixed some errors in the core hugetlb code.

  - Joao Martins has reduced the amount of memory consumed by device-dax's compound devmaps.

  - Some cleanups of the arch-specific pagemap code from Anshuman Khandual.

  - Muchun Song has found and fixed some errors in the TLB flushing of transparent hugepages.

  - Roman Gushchin has done more work on the memcg selftests.

  ... and, of course, many smaller fixes and cleanups. Notably, the customary million cleanup serieses from Miaohe Lin"

* tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (381 commits)
  mm: kfence: use PAGE_ALIGNED helper
  selftests: vm: add the "settings" file with timeout variable
  selftests: vm: add "test_hmm.sh" to TEST_FILES
  selftests: vm: check numa_available() before operating "merge_across_nodes" in ksm_tests
  selftests: vm: add migration to the .gitignore
  selftests/vm/pkeys: fix typo in comment
  ksm: fix typo in comment
  selftests: vm: add process_mrelease tests
  Revert "mm/vmscan: never demote for memcg reclaim"
  mm/kfence: print disabling or re-enabling message
  include/trace/events/percpu.h: cleanup for "percpu: improve percpu_alloc_percpu event trace"
  include/trace/events/mmflags.h: cleanup for "tracing: incorrect gfp_t conversion"
  mm: fix a potential infinite loop in start_isolate_page_range()
  MAINTAINERS: add Muchun as co-maintainer for HugeTLB
  zram: fix Kconfig dependency warning
  mm/shmem: fix shmem folio swapoff hang
  cgroup: fix an error handling path in alloc_pagecache_max_30M()
  mm: damon: use HPAGE_PMD_SIZE
  tracing: incorrect isolate_mote_t cast in mm_vmscan_lru_isolate
  nodemask.h: fix compilation error with GCC12
  ...
287 lines · 6.6 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);
	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;
	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr:	Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on PREEMPT_RT configuration, re-enables also
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)

/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)

#endif
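
For reference, a minimal usage sketch of the kmap_local_page()/kunmap_local() pair declared above. This is illustrative only and not part of the header: the helper name example_copy_between_pages() is hypothetical, and callers would include <linux/highmem.h> rather than this internal header directly.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: copy len bytes (len <= PAGE_SIZE) from one page to
 * another using short-lived, CPU-local mappings. The mappings nest like a
 * stack, so they are released in the reverse order of mapping, as required
 * by the kunmap_local() kerneldoc above.
 */
static void example_copy_between_pages(struct page *dst_page,
				       struct page *src_page, size_t len)
{
	void *src = kmap_local_page(src_page);
	void *dst = kmap_local_page(dst_page);

	memcpy(dst, src, len);

	/* Unmap in reverse order of mapping. */
	kunmap_local(dst);
	kunmap_local(src);
}

Unlike the deprecated kmap_atomic() path, kmap_local_page() does not disable pagefaults or preemption; the per-task mapping state is preserved across context switches via the kmap_local_fork()/__kmap_local_sched_out()/__kmap_local_sched_in() hooks declared in this header.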