memblock: test suite and a small cleanup

* A small cleanup of unused variable in __next_mem_pfn_range_in_zone
 * Initial test suite to simulate memblock behaviour in userspace
 -----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCAAxFiEEeOVYVaWZL5900a/pOQOGJssO/ZEFAmI9bD4THHJwcHRAbGlu
 dXguaWJtLmNvbQAKCRA5A4Ymyw79kXwhB/wNXR1wUb/eD3eKD+aNa2KMY5+8csjD
 ghJph8wQmM9U9hsLViv3/M/H5+bY/s0riZNulKYrcmzW2BgIzF2ebcoqgfQ89YGV
 bLx7lMJGxG/lCglur9m6KnOF89//Owq6Vfk7Jd6jR/F+43JO/3+5siCbTo6NrbVw
 3DjT/WzvaICA646foyFTh8WotnIRbB2iYX1k/vIA3gwJ2C6n7WwoKzxU3ulKMUzg
 hVlhcuTVnaV4mjFBbl23wC7i4l9dgPO9M4ZrTtlEsNHeV6uoFYRObwy6/q/CsBqI
 avwgV0bQDch+QuCteUXcqIcnBpcUAfGxgiqp2PYX4lXA4gYTbo7plTna
 =IemP
 -----END PGP SIGNATURE-----

Merge tag 'memblock-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:
 "Test suite and a small cleanup:

   - A small cleanup of unused variable in __next_mem_pfn_range_in_zone

   - Initial test suite to simulate memblock behaviour in userspace"

* tag 'memblock-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock: (27 commits)
  memblock tests: Add TODO and README files
  memblock tests: Add memblock_alloc_try_nid tests for bottom up
  memblock tests: Add memblock_alloc_try_nid tests for top down
  memblock tests: Add memblock_alloc_from tests for bottom up
  memblock tests: Add memblock_alloc_from tests for top down
  memblock tests: Add memblock_alloc tests for bottom up
  memblock tests: Add memblock_alloc tests for top down
  memblock tests: Add simulation of physical memory
  memblock tests: Split up reset_memblock function
  memblock tests: Fix testing with 32-bit physical addresses
  memblock: __next_mem_pfn_range_in_zone: remove unneeded local variable nid
  memblock tests: Add memblock_free tests
  memblock tests: Add memblock_add_node test
  memblock tests: Add memblock_remove tests
  memblock tests: Add memblock_reserve tests
  memblock tests: Add memblock_add tests
  memblock tests: Add memblock reset function
  memblock tests: Add skeleton of the memblock simulator
  tools/include: Add debugfs.h stub
  tools/include: Add pfn.h stub
  ...
This commit is contained in:
Linus Torvalds 2022-03-27 13:36:06 -07:00
commit 02f9a04d76
42 changed files with 3934 additions and 71 deletions

View File

@ -12550,6 +12550,7 @@ S: Maintained
F: Documentation/core-api/boot-time-mm.rst
F: include/linux/memblock.h
F: mm/memblock.c
F: tools/testing/memblock/
MEMORY CONTROLLER DRIVERS
M: Krzysztof Kozlowski <krzk@kernel.org>

View File

@ -1284,11 +1284,10 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
{
int zone_nid = zone_to_nid(zone);
phys_addr_t spa, epa;
int nid;
__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
&memblock.memory, &memblock.reserved,
&spa, &epa, &nid);
&spa, &epa, NULL);
while (*idx != U64_MAX) {
unsigned long epfn = PFN_DOWN(epa);
@ -1315,7 +1314,7 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
&memblock.memory, &memblock.reserved,
&spa, &epa, &nid);
&spa, &epa, NULL);
}
/* signal end of iteration */

View File

@ -4,6 +4,8 @@
#include <asm/atomic.h>
void atomic_long_set(atomic_long_t *v, long i);
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_CACHE_H
#define _TOOLS_LINUX_CACHE_H

/*
 * Minimal cache-geometry constants for userspace builds. A fixed 32-byte
 * L1 line is assumed; the real kernel value is per-architecture.
 */
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

#define SMP_CACHE_BYTES L1_CACHE_BYTES

#endif

View File

@ -0,0 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_DEBUGFS_H
#define _TOOLS_DEBUGFS_H

/* Intentionally empty: lets kernel code that includes debugfs.h compile
 * in userspace, where debugfs does not exist.
 */

#endif

View File

@ -1,4 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_INCLUDE_LINUX_GFP_H
#define _TOOLS_INCLUDE_LINUX_GFP_H

#include <linux/types.h>

/*
 * Userspace copies of the kernel's GFP (get free pages) allocation flags.
 * The bit values mirror the kernel definitions so code that tests or
 * combines individual flags behaves the same when built in userspace.
 */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

#define __GFP_HIGH 0x20u
#define __GFP_IO 0x40u
#define __GFP_FS 0x80u
#define __GFP_NOWARN 0x200u
#define __GFP_ZERO 0x8000u
#define __GFP_ATOMIC 0x80000u
#define __GFP_ACCOUNT 0x100000u
#define __GFP_DIRECT_RECLAIM 0x400000u
#define __GFP_KSWAPD_RECLAIM 0x2000000u

#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

/* Common composite masks, matching the kernel's definitions. */
#define GFP_ZONEMASK 0x0fu
#define GFP_ATOMIC (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)

/* True when the allocation is allowed to block (direct reclaim set). */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

#endif /* _TOOLS_INCLUDE_LINUX_GFP_H */

5
tools/include/linux/io.h Normal file
View File

@ -0,0 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_IO_H
#define _TOOLS_IO_H

/* Intentionally empty: satisfies io.h includes in userspace builds. */

#endif

View File

@ -15,6 +15,8 @@
#define UINT_MAX (~0U)
#endif
#define _RET_IP_ ((unsigned long)__builtin_return_address(0))
#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
@ -51,6 +53,10 @@
_min1 < _min2 ? _min1 : _min2; })
#endif
#define max_t(type, x, y) max((type)x, (type)y)
#define min_t(type, x, y) min((type)x, (type)y)
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
#ifndef BUG_ON
#ifdef NDEBUG
#define BUG_ON(cond) do { if (cond) {} } while (0)

42
tools/include/linux/mm.h Normal file
View File

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_MM_H
#define _TOOLS_LINUX_MM_H

#include <linux/mmzone.h>
#include <uapi/linux/const.h>

/* Fixed 4 KiB pages for the userspace simulation. */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))

#define PHYS_ADDR_MAX (~(phys_addr_t)0)

/* Round x up (ALIGN) or down (ALIGN_DOWN) to a power-of-two boundary a. */
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* Identity virt<->phys mapping: userspace has no real physical addresses. */
#define __va(x) ((void *)((unsigned long)(x)))
#define __pa(x) ((unsigned long)(x))

/*
 * Fake struct page pointer derived directly from the pfn.
 * NOTE(review): pfn 0 yields a NULL "page" — confirm callers never
 * dereference the result.
 */
#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}

void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Page accounting is not tracked in the simulator; these are no-ops. */
static inline void totalram_pages_inc(void)
{
}

static inline void totalram_pages_add(long count)
{
}

#endif

10
tools/include/linux/pfn.h Normal file
View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_PFN_H_
#define _TOOLS_LINUX_PFN_H_

#include <linux/mm.h>

/* Conversions between byte addresses/sizes and page frame numbers. */
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* round up */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                 /* round down */
#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)    /* pfn -> phys */

#endif

View File

@ -1,20 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SLAB_H
#define SLAB_H
#ifndef _TOOLS_SLAB_H
#define _TOOLS_SLAB_H
#include <linux/types.h>
#include <linux/gfp.h>
#define SLAB_HWCACHE_ALIGN 1
#define SLAB_PANIC 2
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
void *kmalloc(size_t size, gfp_t);
void kfree(void *);
#define kzalloc_node(size, flags, node) kmalloc(size, flags)
void *kmalloc(size_t size, gfp_t gfp);
void kfree(void *p);
bool slab_is_available(void);
enum slab_state {
DOWN,
PARTIAL,
PARTIAL_NODE,
UP,
FULL
};
static inline void *kzalloc(size_t size, gfp_t gfp)
{
return kmalloc(size, gfp | __GFP_ZERO);
return kmalloc(size, gfp | __GFP_ZERO);
}
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
@ -24,4 +35,4 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, unsigned int flags,
void (*ctor)(void *));
#endif /* SLAB_H */
#endif /* _TOOLS_SLAB_H */

View File

@ -63,10 +63,20 @@ typedef __u64 __bitwise __be64;
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
#else
typedef u32 phys_addr_t;
#endif
typedef struct {
int counter;
} atomic_t;
typedef struct {
long counter;
} atomic_long_t;
#ifndef __aligned_u64
# define __aligned_u64 __u64 __attribute__((aligned(8)))
#endif

38
tools/lib/slab.c Normal file
View File

@ -0,0 +1,38 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <string.h>
#include <urcu/uatomic.h>
#include <linux/slab.h>
#include <malloc.h>
#include <linux/gfp.h>
int kmalloc_nr_allocated;
int kmalloc_verbose;
/*
 * Userspace kmalloc() emulation backed by malloc().
 *
 * Behaviour kept from the original:
 *  - allocations that may not block (no __GFP_DIRECT_RECLAIM) always fail,
 *    letting tests exercise the failure path;
 *  - __GFP_ZERO returns zeroed memory (kzalloc() semantics);
 *  - successful allocations are counted in kmalloc_nr_allocated and
 *    optionally traced when kmalloc_verbose is set.
 *
 * Fix: bail out when malloc() itself fails. The original fell through,
 * incrementing the allocation counter for a failed allocation and calling
 * memset(NULL, 0, size) under __GFP_ZERO, which is undefined behaviour.
 *
 * Returns the new buffer, or NULL on failure.
 */
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	if (!ret)
		return NULL;

	uatomic_inc(&kmalloc_nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}
/*
 * Userspace kfree() emulation: undo the bookkeeping done by kmalloc()
 * and hand the buffer back to malloc. Freeing NULL is a no-op.
 */
void kfree(void *p)
{
	if (p) {
		uatomic_dec(&kmalloc_nr_allocated);
		if (kmalloc_verbose)
			printf("Freeing %p to malloc\n", p);
		free(p);
	}
}

4
tools/testing/memblock/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
main
memblock.c
linux/memblock.h
asm/cmpxchg.h

View File

@ -0,0 +1,55 @@
# SPDX-License-Identifier: GPL-2.0

# Memblock simulator requires AddressSanitizer (libasan) and liburcu development
# packages installed
CFLAGS += -I. -I../../include -Wall -O2 -fsanitize=address \
	  -fsanitize=undefined -D CONFIG_PHYS_ADDR_T_64BIT
LDFLAGS += -fsanitize=address -fsanitize=undefined
TARGETS = main
TEST_OFILES = tests/alloc_nid_api.o tests/alloc_helpers_api.o tests/alloc_api.o \
		  tests/basic_api.o tests/common.o
DEP_OFILES = memblock.o lib/slab.o mmzone.o slab.o
OFILES = main.o $(DEP_OFILES) $(TEST_OFILES)
# The implementation under test, taken straight from the kernel tree
EXTR_SRC = ../../../mm/memblock.c

# "make BUILD=32" produces a 32-bit binary
ifeq ($(BUILD), 32)
	CFLAGS += -m32
	LDFLAGS += -m32
endif

# Process user parameters
include scripts/Makefile.include

main: $(OFILES)

$(OFILES): include

# Symlink kernel headers into place so kernel-style #include paths resolve
include: ../../../include/linux/memblock.h ../../include/linux/*.h \
	../../include/asm/*.h
	@mkdir -p linux
	test -L linux/memblock.h || ln -s ../../../../include/linux/memblock.h linux/memblock.h
	test -L asm/cmpxchg.h || ln -s ../../../arch/x86/include/asm/cmpxchg.h asm/cmpxchg.h

# Symlink the kernel memblock.c so it is compiled as part of this project
memblock.c: $(EXTR_SRC)
	test -L memblock.c || ln -s $(EXTR_SRC) memblock.c

clean:
	$(RM) $(TARGETS) $(OFILES) linux/memblock.h memblock.c asm/cmpxchg.h

help:
	@echo 'Memblock simulator'
	@echo ''
	@echo 'Available targets:'
	@echo '  main		  - Build the memblock simulator'
	@echo '  clean		  - Remove generated files and symlinks in the directory'
	@echo ''
	@echo 'Configuration:'
	@echo '  make NUMA=1		   - simulate enabled NUMA'
	@echo '  make MOVABLE_NODE=1	   - override `movable_node_is_enabled`'
	@echo '				     definition to simulate movable NUMA nodes'
	@echo '  make 32BIT_PHYS_ADDR_T=1 - Use 32 bit physical addresses'

vpath %.c ../../lib

.PHONY: clean include help

View File

@ -0,0 +1,107 @@
==================
Memblock simulator
==================
Introduction
============
Memblock is a boot time memory allocator[1] that manages memory regions before
the actual memory management is initialized. Its APIs allow to register physical
memory regions, mark them as available or reserved, allocate a block of memory
within the requested range and/or in specific NUMA node, and many more.
Because it is used so early in the booting process, testing and debugging it is
difficult. This test suite, usually referred to as the memblock simulator, is
an attempt at testing the memblock mechanism. It runs one monolithic test that
consists of a series of checks that exercise both the basic operations and
allocation functionalities of memblock. The main data structure of the boot time
memory allocator is initialized at the build time, so the checks here reuse its
instance throughout the duration of the test. To ensure that tests don't affect
each other, region arrays are reset in between.
As this project uses the actual memblock code and has to run in user space,
some of the kernel definitions were stubbed by the initial commit that
introduced memblock simulator (commit 16802e55dea9 ("memblock tests: Add
skeleton of the memblock simulator")) and a few preparation commits just
before it. Most of them don't match the kernel implementation, so one should
consult them first before making any significant changes to the project.
Usage
=====
To run the tests, build the main target and run it:
$ make && ./main
A successful run produces no output. It is also possible to override different
configuration parameters. For example, to simulate enabled NUMA, use:
$ make NUMA=1
For the full list of options, see `make help`.
Project structure
=================
The project has one target, main, which calls a group of checks for basic and
allocation functions. Tests for each group are defined in dedicated files, as it
can be seen here:
memblock
|-- asm ------------------,
|-- lib |-- implement function and struct stubs
|-- linux ------------------'
|-- scripts
| |-- Makefile.include -- handles `make` parameters
|-- tests
| |-- alloc_api.(c|h) -- memblock_alloc tests
| |-- alloc_helpers_api.(c|h) -- memblock_alloc_from tests
| |-- alloc_nid_api.(c|h) -- memblock_alloc_try_nid tests
| |-- basic_api.(c|h) -- memblock_add/memblock_reserve/... tests
| |-- common.(c|h) -- helper functions for resetting memblock;
|-- main.c --------------. dummy physical memory definition
|-- Makefile `- test runner
|-- README
|-- TODO
|-- .gitignore
Simulating physical memory
==========================
Some allocation functions clear the memory in the process, so it is required for
memblock to track valid memory ranges. To achieve this, the test suite registers
with memblock memory stored by test_memory struct. It is a small wrapper that
points to a block of memory allocated via malloc. For each group of allocation
tests, dummy physical memory is allocated, added to memblock, and then released
at the end of the test run. The structure of a test runner checking allocation
functions is as follows:
int memblock_alloc_foo_checks(void)
{
reset_memblock_attributes(); /* data structure reset */
dummy_physical_memory_init(); /* allocate and register memory */
(...allocation checks...)
dummy_physical_memory_cleanup(); /* free the memory */
}
There's no need to explicitly free the dummy memory from memblock via
memblock_free() call. The entry will be erased by reset_memblock_regions(),
called at the beginning of each test.
Known issues
============
1. Requesting a specific NUMA node via memblock_alloc_node() does not work as
intended. Once the fix is in place, tests for this function can be added.
2. Tests for memblock_alloc_low() can't be easily implemented. The function uses
   ARCH_LOW_ADDRESS_LIMIT macro, which can't be changed to point at the low
memory of the memory_block.
References
==========
1. Boot time memory management documentation page:
https://www.kernel.org/doc/html/latest/core-api/boot-time-mm.html

View File

@ -0,0 +1,28 @@
TODO
=====
1. Add verbose output (e.g., what is being tested and how many tests cases are
passing)
2. Add flags to Makefile:
+ verbosity level
+ enable memblock_dbg() messages (i.e. pass "-D CONFIG_DEBUG_MEMORY_INIT"
flag)
3. Add tests trying to memblock_add() or memblock_reserve() 129th region.
This will trigger memblock_double_array(), make sure it succeeds.
*Important:* These tests require valid memory ranges, use dummy physical
memory block from common.c to implement them. It is also very
likely that the current MEM_SIZE won't be enough for these
test cases. Use realloc to adjust the size accordingly.
4. Add test cases using these functions (implement them for both directions):
+ memblock_alloc_raw()
+ memblock_alloc_exact_nid_raw()
+ memblock_alloc_try_nid_raw()
5. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
for the new region
6. Update comments in tests/basic_api.c to match the style used in
tests/alloc_*.c

View File

@ -0,0 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_DMA_H
#define _TOOLS_DMA_H

/* Intentionally empty: DMA has no meaning in the userspace simulator. */

#endif

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _MM_INTERNAL_H
#define _MM_INTERNAL_H
struct page {};
void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
{
}
#endif

View File

@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>

/* Simulated slab allocator state; starts at DOWN (zero-initialized). */
enum slab_state slab_state;

/* True once slab_state has reached at least UP. */
bool slab_is_available(void)
{
	return slab_state >= UP;
}

View File

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H

#include <linux/compiler.h>
#include <asm/export.h>
#include <linux/memory_hotplug.h>

#define __section(section) __attribute__((__section__(section)))

/* Init/exit section annotations carry no meaning in userspace: define away. */
#define __initconst
#define __meminit
#define __meminitdata
#define __refdata
#define __initdata

/* Registration record for an early kernel command-line parameter. */
struct obs_kernel_param {
	const char *str;
	int (*setup_func)(char *st);
	int early;
};

/*
 * Same shape as the kernel's __setup_param(): emits the option string and a
 * record in the ".init.setup" section.
 * NOTE(review): nothing in the simulator appears to scan that section, so
 * these entries look inert here — confirm before relying on early_param().
 */
#define __setup_param(str, unique_id, fn, early) \
	static const char __setup_str_##unique_id[] __initconst \
	__aligned(1) = str; \
	static struct obs_kernel_param __setup_##unique_id \
	__used __section(".init.setup") \
	__aligned(__alignof__(struct obs_kernel_param)) = \
	{ __setup_str_##unique_id, fn, early }

#define early_param(str, fn) \
	__setup_param(str, fn, fn, 1)

#endif

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _MEMBLOCK_LINUX_KERNEL_H
#define _MEMBLOCK_LINUX_KERNEL_H
#include <../../include/linux/kernel.h>
#include <linux/errno.h>
#include <string.h>
#include <linux/printk.h>
#include <linux/linkage.h>
#include <linux/kconfig.h>
#endif

View File

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _KMEMLEAK_H
#define _KMEMLEAK_H

/* kmemleak does not exist in userspace; its hooks become no-ops. */
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}

static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
				       int min_count, gfp_t gfp)
{
}

/* No kernel-style stack dumping facility here: no-op. */
static inline void dump_stack(void)
{
}

#endif

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMORY_HOTPLUG_H
#define _LINUX_MEMORY_HOTPLUG_H

#include <linux/numa.h>
#include <linux/pfn.h>
#include <linux/cache.h>
#include <linux/types.h>

/*
 * Compile-time switch instead of the kernel's "movable_node" boot option:
 * build with -D MOVABLE_NODE (make MOVABLE_NODE=1) to simulate movable
 * NUMA nodes.
 */
static inline bool movable_node_is_enabled(void)
{
#ifdef MOVABLE_NODE
	return true;
#else
	return false;
#endif
}

#endif

View File

@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_MMZONE_H
#define _TOOLS_MMZONE_H

#include <linux/atomic.h>

struct pglist_data *first_online_pgdat(void);
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);

/*
 * Walk all online NUMA nodes. With the userspace stubs (which return NULL)
 * the loop body never executes.
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

/* No real zones in the simulation — only the terminator remains. */
enum zone_type {
	__MAX_NR_ZONES
};

#define MAX_NR_ZONES __MAX_NR_ZONES
#define MAX_ORDER 11
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

#define pageblock_order (MAX_ORDER - 1)
#define pageblock_nr_pages BIT(pageblock_order)

/* Minimal zone/node structures: just enough for memblock's accounting. */
struct zone {
	atomic_long_t managed_pages;
};

typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
} pg_data_t;

#endif

View File

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PRINTK_H
#define _PRINTK_H

#include <stdio.h>
#include <asm/bug.h>

/*
 * memblock_dbg is called with u64 arguments that don't match the "%llu"
 * specifier in printf. This results in warnings that cannot be fixed without
 * modifying memblock.c, which we wish to avoid. As these messages are not used
 * in testing anyway, the mismatch can be ignored.
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat"
#define printk printf
/*
 * NOTE(review): a second "push" with no matching "pop" — presumably meant to
 * keep -Wformat suppressed at every printk expansion site, but it leaves the
 * diagnostic stack unbalanced. Confirm whether "pop" was intended here.
 */
#pragma GCC diagnostic push

#define pr_info printk
#define pr_debug printk
#define pr_cont printk
#define pr_err printk
#define pr_warn printk

#endif

View File

@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "tests/basic_api.h"
#include "tests/alloc_api.h"
#include "tests/alloc_helpers_api.h"
#include "tests/alloc_nid_api.h"

/*
 * Memblock simulator entry point: run every check group in sequence.
 * The checks assert() internally, so a successful run produces no output.
 */
int main(int argc, char **argv)
{
	memblock_basic_checks();
	memblock_alloc_checks();
	memblock_alloc_helpers_checks();
	memblock_alloc_nid_checks();

	return 0;
}

View File

@ -0,0 +1,20 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/mmzone.h>

/* No NUMA nodes exist in userspace: report an empty online-node list. */
struct pglist_data *first_online_pgdat(void)
{
	return NULL;
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	return NULL;
}

/* Struct-page initialization is outside the simulator's scope: no-op. */
void reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
}

/* Zone page accounting is not tracked; the update is discarded. */
void atomic_long_set(atomic_long_t *v, long i)
{
}

View File

@ -0,0 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
# Definitions for user-provided arguments

# Simulate CONFIG_NUMA=y
ifeq ($(NUMA), 1)
	CFLAGS += -D CONFIG_NUMA
endif

# Simulate movable NUMA memory regions
ifeq ($(MOVABLE_NODE), 1)
	CFLAGS += -D MOVABLE_NODE
endif

# Use 32 bit physical addresses.
# Undefining CONFIG_PHYS_ADDR_T_64BIT makes phys_addr_t 32 bits wide.
# Remember to install 32-bit version of dependencies.
ifeq ($(32BIT_PHYS_ADDR_T), 1)
	CFLAGS += -m32 -U CONFIG_PHYS_ADDR_T_64BIT
	LDFLAGS += -m32
endif

View File

@ -0,0 +1,750 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_api.h"
/*
* A simple test that tries to allocate a small memory region.
* Expect to allocate an aligned region near the end of the available memory.
*/
static int alloc_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_2;
	phys_addr_t expected_start;

	setup_memblock();

	/* Top-down: the last SMP_CACHE_BYTES-aligned slot in DRAM */
	expected_start = memblock_end_of_DRAM() - SMP_CACHE_BYTES;

	allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES);

	assert(allocated_ptr);
	assert(rgn->size == size);
	assert(rgn->base == expected_start);

	/* Exactly one reserved entry, covering just the allocation */
	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}
/*
* A test that tries to allocate memory next to a reserved region that starts at
* the misaligned address. Expect to create two separate entries, with the new
* entry aligned to the provided alignment:
*
* +
* | +--------+ +--------|
* | | rgn2 | | rgn1 |
* +------------+--------+---------+--------+
* ^
* |
* Aligned address boundary
*
* The allocation direction is top-down and region arrays are sorted from lower
* to higher addresses, so the new region will be the first entry in
* memory.reserved array. The previously reserved region does not get modified.
* Region counter and total size get updated.
*/
static int alloc_top_down_disjoint_check(void)
{
	/* After allocation, this will point to the "old" region */
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;
	phys_addr_t r2_size = SZ_16;
	/* Use custom alignment */
	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
	phys_addr_t total_size;
	phys_addr_t expected_start;

	setup_memblock();

	/* Misaligned reservation at the very end of DRAM */
	r1.base = memblock_end_of_DRAM() - SZ_2;
	r1.size = SZ_2;

	total_size = r1.size + r2_size;
	expected_start = memblock_end_of_DRAM() - alignment;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, alignment);

	/* The pre-existing region must be untouched */
	assert(allocated_ptr);
	assert(rgn1->size == r1.size);
	assert(rgn1->base == r1.base);

	/* The new region lands at the aligned address below r1 */
	assert(rgn2->size == r2_size);
	assert(rgn2->base == expected_start);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}
/*
* A test that tries to allocate memory when there is enough space at the end
* of the previously reserved block (i.e. first fit):
*
* | +--------+--------------|
* | | r1 | r2 |
* +--------------+--------+--------------+
*
* Expect a merge of both regions. Only the region size gets updated.
*/
static int alloc_top_down_before_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	/*
	 * The first region ends at the aligned address to test region merging
	 */
	phys_addr_t r1_size = SMP_CACHE_BYTES;
	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size = r1_size + r2_size;

	setup_memblock();

	/* r1 leaves exactly r2_size of free space below the end of DRAM */
	memblock_reserve(memblock_end_of_DRAM() - total_size, r1_size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	/* A single merged region covering r1 + the new allocation */
	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == memblock_end_of_DRAM() - total_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}
/*
* A test that tries to allocate memory when there is not enough space at the
* end of the previously reserved block (i.e. second fit):
*
* | +-----------+------+ |
* | | r2 | r1 | |
* +------------+-----------+------+-----+
*
* Expect a merge of both regions. Both the base address and size of the region
* get updated.
*/
static int alloc_top_down_after_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1;
	void *allocated_ptr = NULL;
	phys_addr_t r2_size = SZ_512;
	phys_addr_t total_size;

	setup_memblock();

	/*
	 * The first region starts at the aligned address to test region merging
	 */
	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SZ_8;

	total_size = r1.size + r2_size;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	/* r2 does not fit above r1, so it is placed directly below it */
	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == r1.base - r2_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}
/*
* A test that tries to allocate memory when there are two reserved regions with
* a gap too small to fit the new region:
*
* | +--------+----------+ +------|
* | | r3 | r2 | | r1 |
* +-------+--------+----------+---+------+
*
* Expect to allocate a region before the one that starts at the lower address,
* and merge them into one. The region counter and total size fields get
* updated.
*/
static int alloc_top_down_second_fit_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1, r2;
	void *allocated_ptr = NULL;
	phys_addr_t r3_size = SZ_1K;
	phys_addr_t total_size;

	setup_memblock();

	/* r1 occupies the top of DRAM */
	r1.base = memblock_end_of_DRAM() - SZ_512;
	r1.size = SZ_512;

	/* r2 sits below r1 leaving a gap smaller than r3_size */
	r2.base = r1.base - SZ_512;
	r2.size = SZ_256;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	/* The new region merges with r2 (lower region), not r1 */
	assert(allocated_ptr);
	assert(rgn->size == r2.size + r3_size);
	assert(rgn->base == r2.base - r3_size);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}
/*
* A test that tries to allocate memory when there are two reserved regions with
* a gap big enough to accommodate the new region:
*
* | +--------+--------+--------+ |
* | | r2 | r3 | r1 | |
* +-----+--------+--------+--------+-----+
*
* Expect to merge all of them, creating one big entry in memblock.reserved
* array. The region counter and total size fields get updated.
*/
static int alloc_in_between_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1, r2;
	void *allocated_ptr = NULL;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t r3_size = SZ_64;
	/*
	 * Calculate regions size so there's just enough space for the new entry
	 */
	phys_addr_t rgn_size = (MEM_SIZE - (2 * gap_size + r3_size)) / 2;
	phys_addr_t total_size;

	setup_memblock();

	/* r1 near the top, r2 near the bottom, r3-sized gap in between */
	r1.size = rgn_size;
	r1.base = memblock_end_of_DRAM() - (gap_size + rgn_size);

	r2.size = rgn_size;
	r2.base = memblock_start_of_DRAM() + gap_size;

	total_size = r1.size + r2.size + r3_size;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	/* r3 fills the gap, merging r2 + r3 + r1 into one region */
	assert(allocated_ptr);
	assert(rgn->size == total_size);
	assert(rgn->base == r1.base - r2.size - r3_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}
/*
* A test that tries to allocate memory when the memory is filled with reserved
* regions with memory gaps too small to fit the new region:
*
* +-------+
* | new |
* +--+----+
* | +-----+ +-----+ +-----+ |
* | | res | | res | | res | |
* +----+-----+----+-----+----+-----+----+
*
* Expect no allocation to happen.
*/
static int alloc_small_gaps_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t region_size = SZ_1K;
	phys_addr_t gap_size = SZ_256;
	phys_addr_t region_end;

	setup_memblock();

	region_end = memblock_start_of_DRAM();

	/* Tile all of DRAM with SZ_1K reservations separated by SZ_256 gaps */
	while (region_end < memblock_end_of_DRAM()) {
		memblock_reserve(region_end + gap_size, region_size);
		region_end += gap_size + region_size;
	}

	/* A region_size request cannot fit in any of the SZ_256 gaps */
	allocated_ptr = memblock_alloc(region_size, SMP_CACHE_BYTES);

	assert(!allocated_ptr);

	return 0;
}
/*
* A test that tries to allocate memory when all memory is reserved.
* Expect no allocation to happen.
*/
/*
 * Allocation attempt when every byte of the simulated memory is already
 * reserved: memblock_alloc() must return NULL.
 */
static int alloc_all_reserved_generic_check(void)
{
	void *ptr = NULL;

	setup_memblock();

	/* Simulate full memory */
	memblock_reserve(memblock_start_of_DRAM(), MEM_SIZE);

	ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES);

	assert(ptr == NULL);

	return 0;
}
/*
* A test that tries to allocate memory when the memory is almost full,
* with not enough space left for the new region:
*
* +-------+
* | new |
* +-------+
* |-----------------------------+ |
* | reserved | |
* +-----------------------------+---+
*
* Expect no allocation to happen.
*/
/*
 * Allocation attempt when memory is almost full and the remaining free
 * space (SZ_256) is smaller than the request (SZ_1K): expect NULL.
 *
 * Fix: the local declarations were placed after the setup_memblock() call,
 * unlike every sibling check; moved before the first statement for
 * consistency and kernel C style (-Wdeclaration-after-statement).
 */
static int alloc_no_space_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t available_size = SZ_256;
	phys_addr_t reserved_size = MEM_SIZE - available_size;

	setup_memblock();

	/* Simulate almost-full memory */
	memblock_reserve(memblock_start_of_DRAM(), reserved_size);

	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);

	assert(!allocated_ptr);

	return 0;
}
/*
* A test that tries to allocate memory when the memory is almost full,
* but there is just enough space left:
*
* |---------------------------+---------|
* | reserved | new |
* +---------------------------+---------+
*
* Expect to allocate memory and merge all the regions. The total size field
* gets updated.
*/
static int alloc_limited_space_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t available_size = SZ_256;
	phys_addr_t reserved_size = MEM_SIZE - available_size;

	setup_memblock();

	/* Simulate almost-full memory */
	memblock_reserve(memblock_start_of_DRAM(), reserved_size);

	/* Request exactly the remaining free space */
	allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES);

	/* The allocation merges with the reservation, covering all of DRAM */
	assert(allocated_ptr);
	assert(rgn->size == MEM_SIZE);
	assert(rgn->base == memblock_start_of_DRAM());

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == MEM_SIZE);

	return 0;
}
/*
* A test that tries to allocate memory when there is no available memory
* registered (i.e. memblock.memory has only a dummy entry).
* Expect no allocation to happen.
*/
static int alloc_no_memory_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;

	/* Reset without registering any memory: memblock.memory stays empty */
	reset_memblock_regions();

	allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);

	/* Nothing allocated and nothing reserved */
	assert(!allocated_ptr);
	assert(rgn->size == 0);
	assert(rgn->base == 0);
	assert(memblock.reserved.total_size == 0);

	return 0;
}
/*
* A simple test that tries to allocate a small memory region.
* Expect to allocate an aligned region at the beginning of the available
* memory.
*/
/*
 * Simple bottom-up allocation of a tiny region: expect it to land at the
 * very start of the available memory as the only reserved entry.
 */
static int alloc_bottom_up_simple_check(void)
{
	struct memblock_region *first = &memblock.reserved.regions[0];
	void *ptr = NULL;

	setup_memblock();

	ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES);

	assert(ptr != NULL);
	assert(first->size == SZ_2);
	assert(first->base == memblock_start_of_DRAM());

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == SZ_2);

	return 0;
}
/*
* A test that tries to allocate memory next to a reserved region that starts at
* the misaligned address. Expect to create two separate entries, with the new
* entry aligned to the provided alignment:
*
* +
* | +----------+ +----------+ |
* | | rgn1 | | rgn2 | |
* +----+----------+---+----------+-----+
* ^
* |
* Aligned address boundary
*
* The allocation direction is bottom-up, so the new region will be the second
* entry in memory.reserved array. The previously reserved region does not get
* modified. Region counter and total size get updated.
*/
static int alloc_bottom_up_disjoint_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[0];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct region r1;
	void *allocated_ptr = NULL;
	phys_addr_t r2_size = SZ_16;
	/* Use custom alignment */
	phys_addr_t alignment = SMP_CACHE_BYTES * 2;
	phys_addr_t total_size;
	phys_addr_t expected_start;

	setup_memblock();

	/* Misaligned reservation at the very start of DRAM */
	r1.base = memblock_start_of_DRAM() + SZ_2;
	r1.size = SZ_2;

	total_size = r1.size + r2_size;
	expected_start = memblock_start_of_DRAM() + alignment;

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = memblock_alloc(r2_size, alignment);

	/* The pre-existing region must be untouched */
	assert(allocated_ptr);
	assert(rgn1->size == r1.size);
	assert(rgn1->base == r1.base);

	/* The new region lands at the aligned address above r1 */
	assert(rgn2->size == r2_size);
	assert(rgn2->base == expected_start);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == total_size);

	return 0;
}
/*
 * A test that tries to allocate memory when there is enough space at
 * the beginning of the previously reserved block (i.e. first fit):
 *
 *  |------------------+--------+         |
 *  |        r1        |   r2   |         |
 *  +------------------+--------+---------+
 *
 * Expect the new region to merge with r2; only the region size changes.
 */
static int alloc_bottom_up_before_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t r1_size = SZ_512;
	phys_addr_t r2_size = SZ_128;
	void *ptr = NULL;

	setup_memblock();

	/* Reserve r2, leaving exactly r1_size free at the start of DRAM. */
	memblock_reserve(memblock_start_of_DRAM() + r1_size, r2_size);

	ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES);

	assert(ptr);

	/* The two regions merged into one, starting at the DRAM base. */
	assert(res->base == memblock_start_of_DRAM());
	assert(res->size == r1_size + r2_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r1_size + r2_size);

	return 0;
}
/*
 * A test that tries to allocate memory when there is not enough space at
 * the beginning of the previously reserved block (i.e. second fit):
 *
 *  |    +--------+--------------+         |
 *  |    |   r1   |      r2      |         |
 *  +----+--------+--------------+---------+
 *
 * Expect the new region to be placed right after r1 and merged with it;
 * only the region size changes.
 */
static int alloc_bottom_up_after_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t r2_size = SZ_512;
	struct region r1;
	void *ptr = NULL;

	setup_memblock();

	/*
	 * The first region starts at the aligned address to test region merging
	 */
	r1.base = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	r1.size = SZ_64;

	memblock_reserve(r1.base, r1.size);

	ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);

	assert(ptr);

	/* One merged region beginning at r1. */
	assert(res->base == r1.base);
	assert(res->size == r1.size + r2_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r1.size + r2_size);

	return 0;
}
/*
 * A test that tries to allocate memory when there are two reserved regions,
 * the first one starting at the beginning of the available memory, with a
 * gap between them too small to fit the new region:
 *
 *  |------------+       +--------+--------+  |
 *  |     r1     |       |   r2   |   r3   |  |
 *  +------------+-----+--------+--------+--+
 *
 * Expect the allocation to land after the second (higher) region and merge
 * with it. The region counter and total size are updated.
 */
static int alloc_bottom_up_second_fit_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[1];
	phys_addr_t r3_size = SZ_1K;
	struct region r1, r2;
	void *ptr = NULL;

	setup_memblock();

	r1.base = memblock_start_of_DRAM();
	r1.size = SZ_512;

	/* Leave a SZ_512 gap after r1 - too small for r3. */
	r2.base = r1.base + r1.size + SZ_512;
	r2.size = SZ_256;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);

	assert(ptr);

	/* r3 merged into the second entry, right after r2. */
	assert(res->base == r2.base);
	assert(res->size == r2.size + r3_size);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == r1.size + r2.size + r3_size);

	return 0;
}
/* Test case wrappers */
/*
 * Each wrapper below runs one logical test in both allocation directions:
 * first top-down (the memblock default), then bottom-up. Tests whose
 * expected layout differs by direction have dedicated *_top_down_* and
 * *_bottom_up_* variants; direction-independent tests reuse a single
 * *_generic_check() for both passes.
 */
static int alloc_simple_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_simple_check();
	return 0;
}
static int alloc_disjoint_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_disjoint_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_disjoint_check();
	return 0;
}
static int alloc_before_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_before_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_before_check();
	return 0;
}
static int alloc_after_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_after_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_after_check();
	return 0;
}
/* Direction-independent: same generic check runs twice. */
static int alloc_in_between_check(void)
{
	memblock_set_bottom_up(false);
	alloc_in_between_generic_check();
	memblock_set_bottom_up(true);
	alloc_in_between_generic_check();
	return 0;
}
static int alloc_second_fit_check(void)
{
	memblock_set_bottom_up(false);
	alloc_top_down_second_fit_check();
	memblock_set_bottom_up(true);
	alloc_bottom_up_second_fit_check();
	return 0;
}
static int alloc_small_gaps_check(void)
{
	memblock_set_bottom_up(false);
	alloc_small_gaps_generic_check();
	memblock_set_bottom_up(true);
	alloc_small_gaps_generic_check();
	return 0;
}
static int alloc_all_reserved_check(void)
{
	memblock_set_bottom_up(false);
	alloc_all_reserved_generic_check();
	memblock_set_bottom_up(true);
	alloc_all_reserved_generic_check();
	return 0;
}
static int alloc_no_space_check(void)
{
	memblock_set_bottom_up(false);
	alloc_no_space_generic_check();
	memblock_set_bottom_up(true);
	alloc_no_space_generic_check();
	return 0;
}
static int alloc_limited_space_check(void)
{
	memblock_set_bottom_up(false);
	alloc_limited_space_generic_check();
	memblock_set_bottom_up(true);
	alloc_limited_space_generic_check();
	return 0;
}
static int alloc_no_memory_check(void)
{
	memblock_set_bottom_up(false);
	alloc_no_memory_generic_check();
	memblock_set_bottom_up(true);
	alloc_no_memory_generic_check();
	return 0;
}
/*
 * Entry point of the memblock_alloc() test suite. Sets up the simulated
 * physical memory, runs every check (each one assert()s on failure) and
 * tears the simulation down again. Always returns 0.
 */
int memblock_alloc_checks(void)
{
	reset_memblock_attributes();
	dummy_physical_memory_init();
	alloc_simple_check();
	alloc_disjoint_check();
	alloc_before_check();
	alloc_after_check();
	alloc_second_fit_check();
	alloc_small_gaps_check();
	alloc_in_between_check();
	alloc_all_reserved_check();
	alloc_no_space_check();
	alloc_limited_space_check();
	alloc_no_memory_check();
	dummy_physical_memory_cleanup();
	return 0;
}

View File

@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _MEMBLOCK_ALLOCS_H
#define _MEMBLOCK_ALLOCS_H
#include "common.h"
/* Run the full memblock_alloc() test suite; returns 0 on success. */
int memblock_alloc_checks(void);
#endif

View File

@ -0,0 +1,393 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_helpers_api.h"
/*
 * A simple test that tries to allocate a memory region above a specified,
 * aligned address:
 *
 *                 +
 *  |              +-----------+         |
 *  |              |    rgn    |         |
 *  +--------------+-----------+---------+
 *                 ^
 *                 |
 *                 Aligned min_addr
 *
 * Expect a cleared region allocated exactly at the minimal address.
 */
static int alloc_from_simple_generic_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t size = SZ_16;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	/* Ask for the last aligned chunk of DRAM. */
	min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES;

	ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);
	/* The allocated memory is zero-initialized. */
	assert(*(char *)ptr == 0);

	assert(res->base == min_addr);
	assert(res->size == size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}
/*
 * A test that tries to allocate a memory region above a certain address
 * when that minimal address is not aligned:
 *
 *         +       +
 *  |      +       +---------+            |
 *  |      |       |   rgn   |            |
 *  +------+-------+---------+------------+
 *         ^       ^
 *         |       |
 *    min_addr     Aligned address boundary
 *
 * Expect a cleared region at the closest aligned address at or above
 * min_addr.
 */
static int alloc_from_misaligned_generic_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t size = SZ_32;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	/* A misaligned address */
	min_addr = memblock_end_of_DRAM() - (SMP_CACHE_BYTES * 2 - 1);

	ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);
	/* The allocated memory is zero-initialized. */
	assert(*(char *)ptr == 0);

	/* Rounded up to the next SMP_CACHE_BYTES boundary. */
	assert(res->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);
	assert(res->size == size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}
/*
 * A test that tries to allocate a memory region above an address that is
 * too close to the end of the memory for the requested size:
 *
 *             +        +
 *  |          +--------+---+      |
 *  |          |   rgn  +   |      |
 *  +-----------+--------+---+------+
 *             ^            ^
 *             |            |
 *             |            min_addr
 *             |
 *             Aligned address boundary
 *
 * Expect memblock to prioritize granting memory over honouring min_addr:
 * the region ends up at the last aligned slot below the end of DRAM.
 */
static int alloc_from_top_down_high_addr_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t size = SZ_32;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	/* The address is too close to the end of the memory */
	min_addr = memblock_end_of_DRAM() - SZ_16;

	ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);
	assert(res->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);
	assert(res->size == size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}
/*
 * A test that tries to allocate a memory region when there is no free
 * space left above the minimal address:
 *
 *         +
 *  |      +---------+-------------|
 *  |      |   rgn   |             |
 *  +------+---------+-------------+
 *         ^
 *         |
 *         min_addr
 *
 * Expect memblock to prioritize granting memory over honouring min_addr
 * and to allocate right below the previously reserved region, merging
 * both into a single entry.
 */
static int alloc_from_top_down_no_space_above_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size = SZ_2;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;

	/* No space above this address */
	memblock_reserve(min_addr, r2_size);

	ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);

	/* The new region sits right below min_addr, merged with r2. */
	assert(res->base == min_addr - r1_size);
	assert(res->size == r1_size + r2_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r1_size + r2_size);

	return 0;
}
/*
 * A test that tries to allocate a memory region with a minimal address
 * below the start of the available memory. As the allocation is top-down,
 * a reservation covering everything but the first r1_size bytes forces
 * the allocation toward the start. Expect a successful allocation that
 * merges with the reservation into one region spanning all of DRAM.
 */
static int alloc_from_top_down_min_addr_cap_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t r1_size = SZ_64;
	phys_addr_t start_addr;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	start_addr = (phys_addr_t)memblock_start_of_DRAM();
	/* Deliberately below the start of the available memory. */
	min_addr = start_addr - SMP_CACHE_BYTES * 3;

	/* Reserve everything except the first r1_size bytes. */
	memblock_reserve(start_addr + r1_size, MEM_SIZE - r1_size);

	ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);
	assert(res->base == start_addr);
	assert(res->size == MEM_SIZE);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == MEM_SIZE);

	return 0;
}
/*
 * A test that tries to allocate a memory region above an address that is
 * too close to the end of the memory for the requested size:
 *
 *         +
 *  |-----------+              +     |
 *  |    rgn    |              |     |
 *  +-----------+--------------+-----+
 *  ^                          ^
 *  |                          |
 *  Aligned address            min_addr
 *  boundary
 *
 * Expect memblock to prioritize granting memory over honouring min_addr;
 * with bottom-up allocation the region lands at the start of DRAM.
 */
static int alloc_from_bottom_up_high_addr_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t size = SZ_32;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	/* The address is too close to the end of the memory */
	min_addr = memblock_end_of_DRAM() - SZ_8;

	ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);
	assert(res->base == memblock_start_of_DRAM());
	assert(res->size == size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == size);

	return 0;
}
/*
 * A test that tries to allocate a memory region when there is no free
 * space left above the minimal address:
 *
 *         +
 *  |-----------+    +-------------------|
 *  |    rgn    |    |                   |
 *  +-----------+----+-------------------+
 *                   ^
 *                   |
 *                   min_addr
 *
 * Expect memblock to prioritize granting memory over honouring min_addr
 * and to allocate at the beginning of the available memory.
 */
static int alloc_from_bottom_up_no_space_above_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_128;
	r2_size = memblock_end_of_DRAM() - min_addr;

	/* No space above this address */
	memblock_reserve(min_addr - SMP_CACHE_BYTES, r2_size);

	ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);
	assert(res->base == memblock_start_of_DRAM());
	assert(res->size == r1_size);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == r1_size + r2_size);

	return 0;
}
/*
 * A test that tries to allocate a memory region with a minimal address
 * below the start of the available memory. Expect the allocation to be
 * capped to the DRAM start, i.e. a region at the very beginning of the
 * available memory.
 */
static int alloc_from_bottom_up_min_addr_cap_check(void)
{
	struct memblock_region *res = &memblock.reserved.regions[0];
	phys_addr_t r1_size = SZ_64;
	phys_addr_t start_addr;
	phys_addr_t min_addr;
	void *ptr = NULL;

	setup_memblock();

	start_addr = (phys_addr_t)memblock_start_of_DRAM();
	/* Deliberately below the start of the available memory. */
	min_addr = start_addr - SMP_CACHE_BYTES * 3;

	ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);

	assert(ptr);
	assert(res->base == start_addr);
	assert(res->size == r1_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r1_size);

	return 0;
}
/* Test case wrappers */
/*
 * Each wrapper runs its test with top-down allocation first, then again
 * with bottom-up allocation. Direction-independent tests reuse a single
 * *_generic_check(); the others have per-direction variants.
 */
static int alloc_from_simple_check(void)
{
	memblock_set_bottom_up(false);
	alloc_from_simple_generic_check();
	memblock_set_bottom_up(true);
	alloc_from_simple_generic_check();
	return 0;
}
static int alloc_from_misaligned_check(void)
{
	memblock_set_bottom_up(false);
	alloc_from_misaligned_generic_check();
	memblock_set_bottom_up(true);
	alloc_from_misaligned_generic_check();
	return 0;
}
static int alloc_from_high_addr_check(void)
{
	memblock_set_bottom_up(false);
	alloc_from_top_down_high_addr_check();
	memblock_set_bottom_up(true);
	alloc_from_bottom_up_high_addr_check();
	return 0;
}
static int alloc_from_no_space_above_check(void)
{
	memblock_set_bottom_up(false);
	alloc_from_top_down_no_space_above_check();
	memblock_set_bottom_up(true);
	alloc_from_bottom_up_no_space_above_check();
	return 0;
}
static int alloc_from_min_addr_cap_check(void)
{
	memblock_set_bottom_up(false);
	alloc_from_top_down_min_addr_cap_check();
	memblock_set_bottom_up(true);
	alloc_from_bottom_up_min_addr_cap_check();
	return 0;
}
/*
 * Entry point of the memblock_alloc_from() test suite. Sets up the
 * simulated physical memory, runs every check (each one assert()s on
 * failure) and tears the simulation down. Always returns 0.
 */
int memblock_alloc_helpers_checks(void)
{
	reset_memblock_attributes();
	dummy_physical_memory_init();
	alloc_from_simple_check();
	alloc_from_misaligned_check();
	alloc_from_high_addr_check();
	alloc_from_no_space_above_check();
	alloc_from_min_addr_cap_check();
	dummy_physical_memory_cleanup();
	return 0;
}

View File

@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _MEMBLOCK_ALLOC_HELPERS_H
#define _MEMBLOCK_ALLOC_HELPERS_H
#include "common.h"
/* Run the full memblock_alloc_from() test suite; returns 0 on success. */
int memblock_alloc_helpers_checks(void);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _MEMBLOCK_ALLOC_NID_H
#define _MEMBLOCK_ALLOC_NID_H
#include "common.h"
/* Run the full memblock_alloc_try_nid() test suite; returns 0 on success. */
int memblock_alloc_nid_checks(void);
#endif

View File

@ -0,0 +1,903 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string.h>
#include <linux/memblock.h>
#include "basic_api.h"
#define EXPECTED_MEMBLOCK_REGIONS 128

/*
 * Verify the state of a freshly initialized memblock: both the memory and
 * reserved arrays exist, each holds one (dummy) entry and has the expected
 * capacity and name, allocation direction defaults to top-down, and no
 * address limit is in force.
 */
static int memblock_initialization_check(void)
{
	assert(memblock.memory.regions);
	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.max == EXPECTED_MEMBLOCK_REGIONS);
	assert(strcmp(memblock.memory.name, "memory") == 0);

	assert(memblock.reserved.regions);
	assert(memblock.reserved.cnt == 1);
	/*
	 * Was asserting memblock.memory.max a second time, leaving the
	 * reserved array's capacity untested. Check reserved.max instead.
	 */
	assert(memblock.reserved.max == EXPECTED_MEMBLOCK_REGIONS);
	assert(strcmp(memblock.reserved.name, "reserved") == 0);

	assert(!memblock.bottom_up);
	assert(memblock.current_limit == MEMBLOCK_ALLOC_ANYWHERE);

	return 0;
}
/*
 * A simple test that adds a memory block of a specified base address and
 * size to the collection of available memory regions (memblock.memory),
 * then verifies the new entry, the region counter and the total size.
 */
static int memblock_add_simple_check(void)
{
	struct region r = {
		.base = SZ_1G,
		.size = SZ_4M
	};
	struct memblock_region *mem = &memblock.memory.regions[0];

	reset_memblock_regions();

	memblock_add(r.base, r.size);

	assert(mem->base == r.base);
	assert(mem->size == r.size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == r.size);

	return 0;
}
/*
 * A simple test that adds a memory block with a specified base address,
 * size, NUMA node and memory flags to the collection of available memory
 * regions, then verifies the new entry, the region counter and the total
 * size.
 */
static int memblock_add_node_simple_check(void)
{
	struct region r = {
		.base = SZ_1M,
		.size = SZ_16M
	};
	struct memblock_region *mem = &memblock.memory.regions[0];

	reset_memblock_regions();

	memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);

	assert(mem->base == r.base);
	assert(mem->size == r.size);
#ifdef CONFIG_NUMA
	/* The nid field only exists in NUMA builds. */
	assert(mem->nid == 1);
#endif
	assert(mem->flags == MEMBLOCK_HOTPLUG);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == r.size);

	return 0;
}
/*
 * A test that adds two non-overlapping memory blocks. Expect two
 * correctly initialized entries in memblock.memory and matching region
 * counter and total size.
 */
static int memblock_add_disjoint_check(void)
{
	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct memblock_region *mem1 = &memblock.memory.regions[0];
	struct memblock_region *mem2 = &memblock.memory.regions[1];

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	assert(mem1->base == r1.base);
	assert(mem1->size == r1.size);

	assert(mem2->base == r2.base);
	assert(mem2->size == r2.size);

	assert(memblock.memory.cnt == 2);
	assert(memblock.memory.total_size == r1.size + r2.size);

	return 0;
}
/*
 * A test that adds two memory blocks where the second overlaps with the
 * beginning of the first (r1.base < r2.base + r2.size). Expect a single
 * merged entry starting at r2.base whose size is the union of both
 * regions, with region counter and total size updated accordingly.
 */
static int memblock_add_overlap_top_check(void)
{
	struct region r1 = {
		.base = SZ_512M,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_512M
	};
	struct memblock_region *mem = &memblock.memory.regions[0];
	/* Union spans from r2.base to the end of r1. */
	phys_addr_t merged_size = r1.base + r1.size - r2.base;

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	assert(mem->base == r2.base);
	assert(mem->size == merged_size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == merged_size);

	return 0;
}
/*
 * A test that adds two memory blocks where the second overlaps with the
 * end of the first (r2.base < r1.base + r1.size). Expect a single merged
 * entry starting at r1.base whose size is the union of both regions, and
 * a matching total size.
 */
static int memblock_add_overlap_bottom_check(void)
{
	struct region r1 = {
		.base = SZ_128M,
		.size = SZ_512M
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_1G
	};
	struct memblock_region *mem = &memblock.memory.regions[0];
	/* Union spans from r1.base to the end of r2. */
	phys_addr_t merged_size = r2.base + r2.size - r1.base;

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	assert(mem->base == r1.base);
	assert(mem->size == merged_size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == merged_size);

	return 0;
}
/*
* A test that tries to add two memory blocks, where the second one is
* within the range of the first entry (that is r1.base < r2.base &&
* r2.base + r2.size < r1.base + r1.size). It checks if two entries are merged
* into one region that stays the same. The counter and total size of available
* memory are expected to not be updated.
*/
static int memblock_add_within_check(void)
{
struct memblock_region *rgn;
rgn = &memblock.memory.regions[0];
struct region r1 = {
.base = SZ_8M,
.size = SZ_32M
};
struct region r2 = {
.base = SZ_16M,
.size = SZ_1M
};
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
assert(rgn->base == r1.base);
assert(rgn->size == r1.size);
assert(memblock.memory.cnt == 1);
assert(memblock.memory.total_size == r1.size);
return 0;
}
/*
 * A simple test that adds the same memory block twice. Expect the region
 * counter and total size to be unaffected by the duplicate.
 */
static int memblock_add_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	reset_memblock_regions();

	memblock_add(r.base, r.size);
	memblock_add(r.base, r.size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == r.size);

	return 0;
}
/* Run all memblock_add() checks; each one assert()s on failure. */
static int memblock_add_checks(void)
{
	memblock_add_simple_check();
	memblock_add_node_simple_check();
	memblock_add_disjoint_check();
	memblock_add_overlap_top_check();
	memblock_add_overlap_bottom_check();
	memblock_add_within_check();
	memblock_add_twice_check();
	return 0;
}
/*
 * A simple test that marks a memory block of a specified base address
 * and size as reserved, adding it to the collection of reserved memory
 * regions (memblock.reserved). It checks that a new entry was created and
 * that the region counter and total reserved size were correctly updated.
 */
static int memblock_reserve_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r = {
		.base = SZ_2G,
		.size = SZ_128M
	};

	reset_memblock_regions();

	memblock_reserve(r.base, r.size);

	assert(rgn->base == r.base);
	assert(rgn->size == r.size);
	/*
	 * Every sibling test also verifies the counter and total size;
	 * these were missing here, leaving the bookkeeping untested.
	 */
	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r.size);

	return 0;
}
/*
 * A test that marks two non-overlapping memory blocks as reserved.
 * Expect two correctly initialized entries in memblock.reserved and
 * matching region counter and total size.
 */
static int memblock_reserve_disjoint_check(void)
{
	struct region r1 = {
		.base = SZ_256M,
		.size = SZ_16M
	};
	struct region r2 = {
		.base = SZ_512M,
		.size = SZ_512M
	};
	struct memblock_region *res1 = &memblock.reserved.regions[0];
	struct memblock_region *res2 = &memblock.reserved.regions[1];

	reset_memblock_regions();

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	assert(res1->base == r1.base);
	assert(res1->size == r1.size);

	assert(res2->base == r2.base);
	assert(res2->size == r2.size);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == r1.size + r2.size);

	return 0;
}
/*
 * A test that marks two memory blocks as reserved where the second
 * overlaps with the beginning of the first (r1.base < r2.base + r2.size).
 * Expect a single merged entry starting at r2.base whose size is the
 * union of both regions, with counter and total size updated.
 */
static int memblock_reserve_overlap_top_check(void)
{
	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_128M,
		.size = SZ_1G
	};
	struct memblock_region *res = &memblock.reserved.regions[0];
	/* Union spans from r2.base to the end of r1. */
	phys_addr_t merged_size = r1.base + r1.size - r2.base;

	reset_memblock_regions();

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	assert(res->base == r2.base);
	assert(res->size == merged_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == merged_size);

	return 0;
}
/*
 * A test that marks two memory blocks as reserved where the second
 * overlaps with the end of the first (r2.base < r1.base + r1.size).
 * Expect a single merged entry starting at r1.base whose size is the
 * union of both regions, with counter and total size updated.
 */
static int memblock_reserve_overlap_bottom_check(void)
{
	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_128K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_128K
	};
	struct memblock_region *res = &memblock.reserved.regions[0];
	/* Union spans from r1.base to the end of r2. */
	phys_addr_t merged_size = r2.base + r2.size - r1.base;

	reset_memblock_regions();

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	assert(res->base == r1.base);
	assert(res->size == merged_size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == merged_size);

	return 0;
}
/*
* A test that tries to mark two memory blocks as reserved, where the second
* one is within the range of the first entry (that is
* (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
* It checks if two entries are merged into one region that stays the
* same. The counter and total size of available memory are expected to not be
* updated.
*/
static int memblock_reserve_within_check(void)
{
struct memblock_region *rgn;
rgn = &memblock.reserved.regions[0];
struct region r1 = {
.base = SZ_1M,
.size = SZ_8M
};
struct region r2 = {
.base = SZ_2M,
.size = SZ_64K
};
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
assert(rgn->base == r1.base);
assert(rgn->size == r1.size);
assert(memblock.reserved.cnt == 1);
assert(memblock.reserved.total_size == r1.size);
return 0;
}
/*
 * A simple test that reserves the same memory block twice. Expect the
 * region counter and total reserved size to be unaffected by the
 * duplicate.
 */
static int memblock_reserve_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	reset_memblock_regions();

	memblock_reserve(r.base, r.size);
	memblock_reserve(r.base, r.size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r.size);

	return 0;
}
/* Run all memblock_reserve() checks; each one assert()s on failure. */
static int memblock_reserve_checks(void)
{
	memblock_reserve_simple_check();
	memblock_reserve_disjoint_check();
	memblock_reserve_overlap_top_check();
	memblock_reserve_overlap_bottom_check();
	memblock_reserve_within_check();
	memblock_reserve_twice_check();
	return 0;
}
/*
 * A simple test that removes the first entry of the available memory
 * array. "Removing" compacts the array, so the next region overwrites the
 * removed one. Two blocks are added and the first removed; expect the
 * second block's values in slot 0 and updated counter and total size.
 */
static int memblock_remove_simple_check(void)
{
	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_4M
	};
	struct memblock_region *mem = &memblock.memory.regions[0];

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_remove(r1.base, r1.size);

	/* r2 has been compacted down into slot 0. */
	assert(mem->base == r2.base);
	assert(mem->size == r2.size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == r2.size);

	return 0;
}
/*
 * A test that removes a region that was never registered as available
 * memory (no corresponding entry in memblock.memory). Expect the array,
 * region counter and total size to be untouched.
 */
static int memblock_remove_absent_check(void)
{
	struct region r1 = {
		.base = SZ_512K,
		.size = SZ_4M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_1G
	};
	struct memblock_region *mem = &memblock.memory.regions[0];

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	/* Removing an absent range is a no-op. */
	assert(mem->base == r1.base);
	assert(mem->size == r1.size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == r1.size);

	return 0;
}
/*
 * A test that removes a region which overlaps with the beginning of the
 * already existing entry r1 (that is r1.base < r2.base + r2.size). It
 * checks that only the intersection of both regions is removed from the
 * available memory pool, and that the region counter and total size are
 * updated to expected values.
 */
static int memblock_remove_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t r1_end, r2_end, total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_32M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_32M
	};

	r1_end = r1.base + r1.size;
	r2_end = r2.base + r2.size;
	total_size = r1_end - r2_end;

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	/*
	 * The surviving tail of r1 starts where the removed range ends.
	 * (The previous check compared against r1.base + r2.base, which
	 * only matched r2_end by coincidence for these values.)
	 */
	assert(rgn->base == r2_end);
	assert(rgn->size == total_size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == total_size);

	return 0;
}
/*
 * A test that removes a region which overlaps with the end of the first
 * entry (r2.base < r1.base + r1.size). Expect only the intersection to be
 * removed: the surviving head of r1 runs from r1.base up to r2.base.
 * The region counter and total size are checked as well.
 */
static int memblock_remove_overlap_bottom_check(void)
{
	struct region r1 = {
		.base = SZ_2M,
		.size = SZ_64M
	};
	struct region r2 = {
		.base = SZ_32M,
		.size = SZ_256M
	};
	struct memblock_region *mem = &memblock.memory.regions[0];
	/* What survives is [r1.base, r2.base). */
	phys_addr_t left_size = r2.base - r1.base;

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	assert(mem->base == r1.base);
	assert(mem->size == left_size);

	assert(memblock.memory.cnt == 1);
	assert(memblock.memory.total_size == left_size);

	return 0;
}
/*
 * A test that removes a region lying entirely within an existing entry
 * (r1.base < r2.base && r2.base + r2.size < r1.base + r1.size). Expect
 * the entry to be split in two: a low fragment ending at r2.base and a
 * high fragment starting at r2.base + r2.size. The region counter and
 * total size are checked as well.
 */
static int memblock_remove_within_check(void)
{
	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};
	struct memblock_region *mem1 = &memblock.memory.regions[0];
	struct memblock_region *mem2 = &memblock.memory.regions[1];
	phys_addr_t low_size = r2.base - r1.base;
	phys_addr_t high_size = (r1.base + r1.size) - (r2.base + r2.size);

	reset_memblock_regions();

	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	/* Low fragment: [r1.base, r2.base). */
	assert(mem1->base == r1.base);
	assert(mem1->size == low_size);

	/* High fragment: [r2 end, r1 end). */
	assert(mem2->base == r2.base + r2.size);
	assert(mem2->size == high_size);

	assert(memblock.memory.cnt == 2);
	assert(memblock.memory.total_size == low_size + high_size);

	return 0;
}
/* Run every memblock_remove() test in sequence. */
static int memblock_remove_checks(void)
{
	memblock_remove_simple_check();
	memblock_remove_absent_check();
	memblock_remove_overlap_top_check();
	memblock_remove_overlap_bottom_check();
	memblock_remove_within_check();

	return 0;
}
/*
 * A simple test that frees a memory block that was marked earlier as
 * reserved. "Freeing" a region means it gets overwritten by the next
 * entry in memblock.reserved, so two regions are reserved and the test
 * verifies that the second one has taken the first one's slot after the
 * free. The region counter and total size are checked as well.
 */
static int memblock_free_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1 = { .base = SZ_4M, .size = SZ_1M };
	struct region r2 = { .base = SZ_8M, .size = SZ_1M };

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r1.base, r1.size);

	/* r2 must now occupy slot 0, and only r2 is accounted. */
	assert(rgn->base == r2.base);
	assert(rgn->size == r2.size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r2.size);

	return 0;
}
/*
 * A test that tries to free a range that was never reserved (i.e. has no
 * corresponding entry in memblock.reserved). The reserved array, its
 * region counter and its total size must all be left untouched.
 */
static int memblock_free_absent_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1 = { .base = SZ_2M, .size = SZ_8K };
	struct region r2 = { .base = SZ_16M, .size = SZ_128M };

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	/* Nothing overlapped, so r1 must survive intact. */
	assert(rgn->base == r1.base);
	assert(rgn->size == r1.size);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == r1.size);

	return 0;
}
/*
 * A test that frees a range overlapping the beginning of an existing
 * reserved entry r1 (that is r1.base < r2.base + r2.size). Only the
 * intersection of the two ranges may be freed; the remainder of r1 must
 * stay reserved. The region counter and total size are checked against
 * the expected leftover.
 */
static int memblock_free_overlap_top_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1 = { .base = SZ_8M, .size = SZ_32M };
	struct region r2 = { .base = SZ_1M, .size = SZ_8M };
	phys_addr_t leftover;

	/* Part of r1 above the freed range. */
	leftover = (r1.size + r1.base) - (r2.base + r2.size);

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	assert(rgn->base == r2.base + r2.size);
	assert(rgn->size == leftover);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == leftover);

	return 0;
}
/*
 * A test that frees a range overlapping the end of an existing reserved
 * entry (that is r2.base < r1.base + r1.size). Only the intersection of
 * the two ranges may be freed; the part of r1 below r2.base must stay
 * reserved. The region counter and total size are checked against the
 * expected leftover.
 */
static int memblock_free_overlap_bottom_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	struct region r1 = { .base = SZ_8M, .size = SZ_32M };
	struct region r2 = { .base = SZ_32M, .size = SZ_32M };
	phys_addr_t leftover;

	/* Part of r1 below the freed range. */
	leftover = r2.base - r1.base;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	/* r1 keeps its base but is truncated at r2.base. */
	assert(rgn->base == r1.base);
	assert(rgn->size == leftover);

	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.total_size == leftover);

	return 0;
}
/*
 * A test that frees a range lying strictly inside an existing reserved
 * entry, i.e.
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size).
 * The entry must be split in two: one part ending at r2.base and another
 * starting at r2.base + r2.size, each with the appropriate size. The
 * region counter and total size fields must be updated to reflect the
 * change.
 */
static int memblock_free_within_check(void)
{
	struct memblock_region *lower = &memblock.reserved.regions[0];
	struct memblock_region *upper = &memblock.reserved.regions[1];
	struct region r1 = { .base = SZ_1M, .size = SZ_8M };
	struct region r2 = { .base = SZ_4M, .size = SZ_1M };
	phys_addr_t lower_size, upper_size;

	/* What remains below and above the freed hole. */
	lower_size = r2.base - r1.base;
	upper_size = (r1.base + r1.size) - (r2.base + r2.size);

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	assert(lower->base == r1.base);
	assert(lower->size == lower_size);

	assert(upper->base == r2.base + r2.size);
	assert(upper->size == upper_size);

	assert(memblock.reserved.cnt == 2);
	assert(memblock.reserved.total_size == lower_size + upper_size);

	return 0;
}
/* Run every memblock_free() test in sequence. */
static int memblock_free_checks(void)
{
	memblock_free_simple_check();
	memblock_free_absent_check();
	memblock_free_overlap_top_check();
	memblock_free_overlap_bottom_check();
	memblock_free_within_check();

	return 0;
}
/*
 * Entry point for the basic memblock test suite: verifies initialization
 * and the add, reserve, remove and free APIs.
 */
int memblock_basic_checks(void)
{
	memblock_initialization_check();
	memblock_add_checks();
	memblock_reserve_checks();
	memblock_remove_checks();
	memblock_free_checks();

	return 0;
}

View File

@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _MEMBLOCK_BASIC_H
#define _MEMBLOCK_BASIC_H

#include "common.h"

/* Run the full suite of basic memblock API tests (add/reserve/remove/free). */
int memblock_basic_checks(void);

#endif /* _MEMBLOCK_BASIC_H */

View File

@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "tests/common.h"
#include <string.h>
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
static struct test_memory memory_block;
void reset_memblock_regions(void)
{
memset(memblock.memory.regions, 0,
memblock.memory.cnt * sizeof(struct memblock_region));
memblock.memory.cnt = 1;
memblock.memory.max = INIT_MEMBLOCK_REGIONS;
memblock.memory.total_size = 0;
memset(memblock.reserved.regions, 0,
memblock.reserved.cnt * sizeof(struct memblock_region));
memblock.reserved.cnt = 1;
memblock.reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS;
memblock.reserved.total_size = 0;
}
/*
 * Restore memblock's bookkeeping fields (type names, allocation direction
 * and the current address limit) to their default values.
 */
void reset_memblock_attributes(void)
{
	memblock.memory.name = "memory";
	memblock.reserved.name = "reserved";
	memblock.bottom_up = false;
	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
/*
 * Reset the region arrays and register the dummy physical memory buffer
 * (allocated in dummy_physical_memory_init()) as available memory.
 */
void setup_memblock(void)
{
	reset_memblock_regions();
	memblock_add((phys_addr_t)memory_block.base, MEM_SIZE);
}
/*
 * Back the simulated physical memory with a real MEM_SIZE heap buffer.
 * Aborts (via assert) if the host allocation fails.
 * NOTE(review): assert() is compiled out under NDEBUG; the test harness
 * presumably always builds with asserts enabled — confirm in the Makefile.
 */
void dummy_physical_memory_init(void)
{
	memory_block.base = malloc(MEM_SIZE);
	assert(memory_block.base);
}
/* Release the buffer obtained in dummy_physical_memory_init(). */
void dummy_physical_memory_cleanup(void)
{
	free(memory_block.base);
}

View File

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _MEMBLOCK_TEST_H
#define _MEMBLOCK_TEST_H

#include <stdlib.h>
#include <assert.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* Size of the simulated physical memory registered with memblock. */
#define MEM_SIZE SZ_16K

/*
 * Available memory registered with memblock needs to be valid for allocs
 * test to run. This is a convenience wrapper for memory allocated in
 * dummy_physical_memory_init() that is later registered with memblock
 * in setup_memblock().
 */
struct test_memory {
	void *base;	/* host pointer used as the simulated physical base */
};

/* A (base, size) pair describing an address range under test. */
struct region {
	phys_addr_t base;
	phys_addr_t size;
};

/* Reset the memory and reserved region arrays to their initial state. */
void reset_memblock_regions(void);
/* Restore names, allocation direction and current limit to defaults. */
void reset_memblock_attributes(void);
/* Reset regions and register the dummy physical memory with memblock. */
void setup_memblock(void);
/* Allocate/free the host buffer backing the simulated physical memory. */
void dummy_physical_memory_init(void);
void dummy_physical_memory_cleanup(void);

#endif /* _MEMBLOCK_TEST_H */

View File

@ -5,7 +5,8 @@ CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder xarray
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o \
slab.o
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
iteration_check_2.o benchmark.o

View File

@ -14,7 +14,6 @@
int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;
struct kmem_cache {
@ -78,32 +77,6 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
pthread_mutex_unlock(&cachep->lock);
}
/*
 * Userspace stand-in for the kernel's kmalloc(), backed by plain malloc().
 * Returns NULL for any request that is not allowed to block (no
 * __GFP_DIRECT_RECLAIM), honours __GFP_ZERO, and tracks live allocations
 * in nr_allocated.
 *
 * Fix: check the malloc() result before using it. The original
 * incremented nr_allocated and could call memset(NULL, 0, size) —
 * undefined behavior — when the host allocation failed.
 */
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	if (!ret)	/* don't account a failed allocation */
		return NULL;
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}
/* Counterpart to kmalloc(): release @p and update the allocation count. */
void kfree(void *p)
{
	if (p) {
		uatomic_dec(&nr_allocated);
		if (kmalloc_verbose)
			printf("Freeing %p to malloc\n", p);
		free(p);
	}
}
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
unsigned int flags, void (*ctor)(void *))

View File

@ -1,33 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GFP_H
#define _GFP_H

#include <linux/types.h>

/* Number of low bits carrying valid GFP flag information. */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* Individual GFP flag bits (userspace subset of the kernel definitions). */
#define __GFP_HIGH 0x20u
#define __GFP_IO 0x40u
#define __GFP_FS 0x80u
#define __GFP_NOWARN 0x200u
#define __GFP_ZERO 0x8000u
#define __GFP_ATOMIC 0x80000u
#define __GFP_ACCOUNT 0x100000u
#define __GFP_DIRECT_RECLAIM 0x400000u
#define __GFP_KSWAPD_RECLAIM 0x2000000u

#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)

#define GFP_ZONEMASK 0x0fu
/* Common composite allocation contexts built from the bits above. */
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)

/* True when an allocation with @gfp_flags may enter direct reclaim. */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

#endif /* _GFP_H */