mm/memory_hotplug: export mhp_supports_memmap_on_memory()

In preparation for adding sysfs ABI to toggle memmap_on_memory semantics
for drivers adding memory, export the mhp_supports_memmap_on_memory()
helper. This allows drivers to check whether memmap_on_memory support is
available before trying to request it, and to display an appropriate
message if it isn't. As part of this, remove the size argument from the
helper: with the recent updates allowing memmap_on_memory for larger
ranges, and the internal splitting of altmaps into the respective memory
blocks, the size argument is meaningless.
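
As an aside (not part of this patch), a driver hot-adding memory might use
the exported helper roughly as sketched below; the device, memory group id,
resource, and resource name here are illustrative only:

    mhp_t mhp_flags = MHP_NID_IS_MGID;
    int rc;

    /* Only ask for a self-hosted memmap if the kernel can honor it. */
    if (mhp_supports_memmap_on_memory())
            mhp_flags |= MHP_MEMMAP_ON_MEMORY;
    else
            dev_info(dev, "memmap_on_memory not supported, using regular memmap\n");

    rc = add_memory_driver_managed(mgid, res->start, resource_size(res),
                                   "System RAM (example)", mhp_flags);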

[akpm@linux-foundation.org: fix build]
Link: https://lkml.kernel.org/r/20240124-vv-dax_abi-v7-4-20d16cb8d23d@intel.com
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Li Zhijian <lizhijian@fujitsu.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h

@@ -137,6 +137,7 @@ struct mhp_params {
 
 bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
 struct range mhp_get_pluggable_range(bool need_mapping);
+bool mhp_supports_memmap_on_memory(void);
 
 /*
  * Zone resizing functions
@@ -278,6 +279,11 @@ static inline bool movable_node_is_enabled(void)
 	return false;
 }
 
+static inline bool mhp_supports_memmap_on_memory(void)
+{
+	return false;
+}
+
 static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
 static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
 static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c

@@ -1337,7 +1337,7 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
 }
 #endif
 
-static bool mhp_supports_memmap_on_memory(unsigned long size)
+bool mhp_supports_memmap_on_memory(void)
 {
 	unsigned long vmemmap_size = memory_block_memmap_size();
 	unsigned long memmap_pages = memory_block_memmap_on_memory_pages();
@@ -1346,17 +1346,11 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
 	 * Besides having arch support and the feature enabled at runtime, we
 	 * need a few more assumptions to hold true:
 	 *
-	 * a) We span a single memory block: memory onlining/offlinin;g happens
-	 *    in memory block granularity. We don't want the vmemmap of online
-	 *    memory blocks to reside on offline memory blocks. In the future,
-	 *    we might want to support variable-sized memory blocks to make the
-	 *    feature more versatile.
-	 *
-	 * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
+	 * a) The vmemmap pages span complete PMDs: We don't want vmemmap code
 	 *    to populate memory from the altmap for unrelated parts (i.e.,
 	 *    other memory blocks)
 	 *
-	 * c) The vmemmap pages (and thereby the pages that will be exposed to
+	 * b) The vmemmap pages (and thereby the pages that will be exposed to
 	 *    the buddy) have to cover full pageblocks: memory onlining/offlining
 	 *    code requires applicable ranges to be page-aligned, for example, to
 	 *    set the migratetypes properly.
@@ -1368,7 +1362,7 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
 	 *       altmap as an alternative source of memory, and we do not exactly
 	 *       populate a single PMD.
 	 */
-	if (!mhp_memmap_on_memory() || size != memory_block_size_bytes())
+	if (!mhp_memmap_on_memory())
 		return false;
 
 	/*
@@ -1391,6 +1385,7 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
 
 	return arch_supports_memmap_on_memory(vmemmap_size);
 }
+EXPORT_SYMBOL_GPL(mhp_supports_memmap_on_memory);
 
 static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size)
 {
@@ -1526,7 +1521,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 	 * Self hosted memmap array
 	 */
 	if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
-	    mhp_supports_memmap_on_memory(memory_block_size_bytes())) {
+	    mhp_supports_memmap_on_memory()) {
 		ret = create_altmaps_and_memory_blocks(nid, group, start, size, mhp_flags);
 		if (ret)
 			goto error;