From 43f0f97dd6f033709079d4346d384160dc3fc68f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:32 +0100
Subject: [PATCH 01/10] m68k: mm: Remove stray nocache in ColdFire pgalloc

Since ColdFire V4e is a software TLB-miss architecture, there is no need
for page-tables to be mapped uncached. Remove this stray nocache_page()
dance, which isn't paired with a cache_page() and looks like a
copy/paste/edit fail.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.481739981@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/mcf_pgalloc.h | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 82ec54c2eaa4..cb0e36fb2a60 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -55,12 +55,8 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
 	}
 
 	pte = kmap(page);
-	if (pte) {
+	if (pte)
 		clear_page(pte);
-		__flush_page_to_ram(pte);
-		flush_tlb_kernel_page(pte);
-		nocache_page(pte);
-	}
 	kunmap(page);
 
 	return page;

From fd1aa6303c4d6f3c1489eadc5cc37e581c3b41ea Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 31 Jan 2020 13:45:33 +0100
Subject: [PATCH 02/10] m68k: mm: Fix ColdFire pgd_alloc()

Building for m5475evb_defconfig with vanilla v5.5 triggers this scary
looking warning, due to a mismatch between the pgd size and the (8k!)
page size:

| In function 'pgd_alloc.isra.111',
|     inlined from 'mm_alloc_pgd' at kernel/fork.c:634:12,
|     inlined from 'mm_init.isra.112' at kernel/fork.c:1043:6:
| ./arch/m68k/include/asm/string.h:72:25: warning: '__builtin_memcpy' forming offset [4097, 8192] is out of the bounds [0, 4096] of object 'kernel_pg_dir' with type 'pgd_t[1024]' {aka 'struct [1024]'} [-Warray-bounds]
|  #define memcpy(d, s, n) __builtin_memcpy(d, s, n)
|                          ^~~~~~~~~~~~~~~~~~~~~~~~~
| ./arch/m68k/include/asm/mcf_pgalloc.h:93:2: note: in expansion of macro 'memcpy'
|   memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
|   ^~~~~~

Fix pgd_alloc() to copy only the PTRS_PER_PGD pgd entries from
swapper_pg_dir, rather than a whole page.

Signed-off-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.540057688@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/mcf_pgalloc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index cb0e36fb2a60..595e0b8c4759 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -86,7 +86,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
 	if (!new_pgd)
 		return NULL;
-	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+	memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
 	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
 	return new_pgd;
 }

From 13076a29d52e91d29ab6b13e7279c9eacd0b6dbb Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:34 +0100
Subject: [PATCH 03/10] m68k: mm: Unify Motorola MMU page setup

Seeing how there are 5 copies of this magic code, one of which is
inexplicably different, unify and document things.

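As a sketch of the contract the new helpers are meant to enforce at
every page-table page allocation site (illustrative only; the helper
names are the ones this patch introduces below):

	void *pt = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (pt)
		mmu_page_ctor(pt);	/* flush, drop the TLB entry, map nocache */
	...
	/* and, symmetrically, on free: */
	mmu_page_dtor(pt);		/* make the page cacheable again */
	free_page((unsigned long)pt);
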
Suggested-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.597688427@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/motorola_pgalloc.h | 23 ++++++++----------
 arch/m68k/mm/memory.c                    |  5 ++--
 arch/m68k/mm/motorola.c                  | 30 +++++++++++++++++++-----
 3 files changed, 36 insertions(+), 22 deletions(-)

diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index ff9cc401ffd1..85af0c602e9f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -5,6 +5,9 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+extern void mmu_page_ctor(void *page);
+extern void mmu_page_dtor(void *page);
+
 extern pmd_t *get_pointer_table(void);
 extern int free_pointer_table(pmd_t *);
 
@@ -13,25 +16,21 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 	pte_t *pte;
 
 	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (pte) {
-		__flush_page_to_ram(pte);
-		flush_tlb_kernel_page(pte);
-		nocache_page(pte);
-	}
+	if (pte)
+		mmu_page_ctor(pte);
 
 	return pte;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	cache_page(pte);
+	mmu_page_dtor(pte);
 	free_page((unsigned long) pte);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
 	struct page *page;
-	pte_t *pte;
 
 	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
 	if(!page)
@@ -41,18 +40,16 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 		return NULL;
 	}
 
-	pte = kmap(page);
-	__flush_page_to_ram(pte);
-	flush_tlb_kernel_page(pte);
-	nocache_page(pte);
+	mmu_page_ctor(kmap(page));
 	kunmap(page);
+
 	return page;
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t page)
 {
 	pgtable_pte_page_dtor(page);
-	cache_page(kmap(page));
+	mmu_page_dtor(kmap(page));
 	kunmap(page);
 	__free_page(page);
 }
@@ -61,7 +58,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 				  unsigned long address)
 {
 	pgtable_pte_page_dtor(page);
-	cache_page(kmap(page));
+	mmu_page_dtor(kmap(page));
 	kunmap(page);
 	__free_page(page);
 }
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 227c04fe60d2..36683f2a28d0 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -77,8 +77,7 @@ pmd_t *get_pointer_table (void)
 		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
 			return NULL;
 
-		flush_tlb_kernel_page(page);
-		nocache_page(page);
+		mmu_page_ctor(page);
 
 		new = PD_PTABLE(page);
 		PD_MARKBITS(new) = 0xfe;
@@ -112,7 +111,7 @@ int free_pointer_table (pmd_t *ptable)
 	if (PD_MARKBITS(dp) == 0xff) {
 		/* all tables in page are free, free page */
 		list_del(dp);
-		cache_page((void *)page);
+		mmu_page_dtor((void *)page);
 		free_page (page);
 		return 1;
 	} else if (ptable_list.next != dp) {
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 4857985b8080..be61f35a7432 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -45,6 +45,28 @@ unsigned long mm_cachebits;
 EXPORT_SYMBOL(mm_cachebits);
 #endif
 
+
+/*
+ * Motorola 680x0 user's manual recommends using uncached memory for address
+ * translation tables.
+ *
+ * Seeing how the MMU can be external on (some of) these chips, that seems like
+ * a very important recommendation to follow. Provide some helpers to combat
+ * 'variation' amongst the users of this.
+ */
+
+void mmu_page_ctor(void *page)
+{
+	__flush_page_to_ram(page);
+	flush_tlb_kernel_page(page);
+	nocache_page(page);
+}
+
+void mmu_page_dtor(void *page)
+{
+	cache_page(page);
+}
+
 /* size of memory already mapped in head.S */
 extern __initdata unsigned long m68k_init_mapped_size;
 
@@ -60,9 +82,7 @@ static pte_t * __init kernel_page_table(void)
 		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 	clear_page(ptablep);
-	__flush_page_to_ram(ptablep);
-	flush_tlb_kernel_page(ptablep);
-	nocache_page(ptablep);
+	mmu_page_ctor(ptablep);
 
 	return ptablep;
 }
@@ -106,9 +126,7 @@ static pmd_t * __init kernel_ptr_table(void)
 		      __func__, PAGE_SIZE, PAGE_SIZE);
 
 		clear_page(last_pgtable);
-		__flush_page_to_ram(last_pgtable);
-		flush_tlb_kernel_page(last_pgtable);
-		nocache_page(last_pgtable);
+		mmu_page_ctor(last_pgtable);
 	}
 
 	return last_pgtable;

From 5ad272abee9fe0a781d49b03f334c0a3b3e418c1 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:35 +0100
Subject: [PATCH 04/10] m68k: mm: Move the pointer table allocator to motorola.c

Only the Motorola MMU makes use of this allocator; it is a waste of
.text to include it for Sun3/ColdFire. Moving it also avoids build
issues when we later make the code more Motorola-specific.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.654652162@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/mm/memory.c   | 102 ----------------------------------------
 arch/m68k/mm/motorola.c | 102 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 102 insertions(+), 102 deletions(-)

diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 36683f2a28d0..65e0c4071912 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -22,108 +22,6 @@
 
 #include <asm/machdep.h>
 
-/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
-   struct page instead of separately kmalloced struct.  Stolen from
-   arch/sparc/mm/srmmu.c ... */
-
-typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
-
-#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
-#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
-
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
-
-void __init init_pointer_table(unsigned long ptable)
-{
-	ptable_desc *dp;
-	unsigned long page = ptable & PAGE_MASK;
-	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
-
-	dp = PD_PTABLE(page);
-	if (!(PD_MARKBITS(dp) & mask)) {
-		PD_MARKBITS(dp) = 0xff;
-		list_add(dp, &ptable_list);
-	}
-
-	PD_MARKBITS(dp) &= ~mask;
-	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
-
-	/* unreserve the page so it's possible to free that page */
-	__ClearPageReserved(PD_PAGE(dp));
-	init_page_count(PD_PAGE(dp));
-
-	return;
-}
-
-pmd_t *get_pointer_table (void)
-{
-	ptable_desc *dp = ptable_list.next;
-	unsigned char mask = PD_MARKBITS (dp);
-	unsigned char tmp;
-	unsigned int off;
-
-	/*
-	 * For a pointer table for a user process address space, a
-	 * table is taken from a page allocated for the purpose.  Each
-	 * page can hold 8 pointer tables.  The page is remapped in
-	 * virtual address space to be noncacheable.
- */ - if (mask == 0) { - void *page; - ptable_desc *new; - - if (!(page = (void *)get_zeroed_page(GFP_KERNEL))) - return NULL; - - mmu_page_ctor(page); - - new = PD_PTABLE(page); - PD_MARKBITS(new) = 0xfe; - list_add_tail(new, dp); - - return (pmd_t *)page; - } - - for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE) - ; - PD_MARKBITS(dp) = mask & ~tmp; - if (!PD_MARKBITS(dp)) { - /* move to end of list */ - list_move_tail(dp, &ptable_list); - } - return (pmd_t *) (page_address(PD_PAGE(dp)) + off); -} - -int free_pointer_table (pmd_t *ptable) -{ - ptable_desc *dp; - unsigned long page = (unsigned long)ptable & PAGE_MASK; - unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE); - - dp = PD_PTABLE(page); - if (PD_MARKBITS (dp) & mask) - panic ("table already free!"); - - PD_MARKBITS (dp) |= mask; - - if (PD_MARKBITS(dp) == 0xff) { - /* all tables in page are free, free page */ - list_del(dp); - mmu_page_dtor((void *)page); - free_page (page); - return 1; - } else if (ptable_list.next != dp) { - /* - * move this descriptor to the front of the list, since - * it has one or more free tables. - */ - list_move(dp, &ptable_list); - } - return 0; -} - /* invalidate page in both caches */ static inline void clear040(unsigned long paddr) { diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index be61f35a7432..2102f9397c94 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -67,6 +67,108 @@ void mmu_page_dtor(void *page) cache_page(page); } +/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from + struct page instead of separately kmalloced struct. Stolen from + arch/sparc/mm/srmmu.c ... */ + +typedef struct list_head ptable_desc; +static LIST_HEAD(ptable_list); + +#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) +#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) +#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index) + +#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t)) + +void __init init_pointer_table(unsigned long ptable) +{ + ptable_desc *dp; + unsigned long page = ptable & PAGE_MASK; + unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE); + + dp = PD_PTABLE(page); + if (!(PD_MARKBITS(dp) & mask)) { + PD_MARKBITS(dp) = 0xff; + list_add(dp, &ptable_list); + } + + PD_MARKBITS(dp) &= ~mask; + pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp)); + + /* unreserve the page so it's possible to free that page */ + __ClearPageReserved(PD_PAGE(dp)); + init_page_count(PD_PAGE(dp)); + + return; +} + +pmd_t *get_pointer_table (void) +{ + ptable_desc *dp = ptable_list.next; + unsigned char mask = PD_MARKBITS (dp); + unsigned char tmp; + unsigned int off; + + /* + * For a pointer table for a user process address space, a + * table is taken from a page allocated for the purpose. Each + * page can hold 8 pointer tables. The page is remapped in + * virtual address space to be noncacheable. 
+	 */
+	if (mask == 0) {
+		void *page;
+		ptable_desc *new;
+
+		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
+			return NULL;
+
+		mmu_page_ctor(page);
+
+		new = PD_PTABLE(page);
+		PD_MARKBITS(new) = 0xfe;
+		list_add_tail(new, dp);
+
+		return (pmd_t *)page;
+	}
+
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
+		;
+	PD_MARKBITS(dp) = mask & ~tmp;
+	if (!PD_MARKBITS(dp)) {
+		/* move to end of list */
+		list_move_tail(dp, &ptable_list);
+	}
+	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
+}
+
+int free_pointer_table (pmd_t *ptable)
+{
+	ptable_desc *dp;
+	unsigned long page = (unsigned long)ptable & PAGE_MASK;
+	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
+
+	dp = PD_PTABLE(page);
+	if (PD_MARKBITS (dp) & mask)
+		panic ("table already free!");
+
+	PD_MARKBITS (dp) |= mask;
+
+	if (PD_MARKBITS(dp) == 0xff) {
+		/* all tables in page are free, free page */
+		list_del(dp);
+		mmu_page_dtor((void *)page);
+		free_page (page);
+		return 1;
+	} else if (ptable_list.next != dp) {
+		/*
+		 * move this descriptor to the front of the list, since
+		 * it has one or more free tables.
+		 */
+		list_move(dp, &ptable_list);
+	}
+	return 0;
+}
+
 /* size of memory already mapped in head.S */
 extern __initdata unsigned long m68k_init_mapped_size;
 

From ef22d8abd876e805b604e8f655127de2beee2869 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:36 +0100
Subject: [PATCH 05/10] m68k: mm: Restructure Motorola MMU page-table layout

The Motorola 68xxx MMUs, 040 (and later) have a fixed 7,7,{5,6}
page-table setup, where the last level depends on the page-size
selected (8k vs 4k resp.), and head.S selects 4K pages. For 030 (and
earlier) we explicitly program 7,7,6 and 4K pages in %tc.

However, the current code implements this in a mightily weird fashion:
it groups 16 of those (6-bit) pte tables into one 4k page so as not to
waste space. The downside is that this forces pmd_t to be a 16-tuple
pointing to consecutive pte tables, which breaks the generic code that
assumes READ_ONCE(*pmd) will be word sized.

Therefore implement a straightforward 3-level 7,7,6 page-table setup,
with the addition (for 020/030) of (partial) large-page support. For
now this increases the memory footprint for pte-tables 15-fold.

Tested with ARAnyM/68040 emulation.

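To make the new layout concrete, a 32-bit virtual address now splits as
7 + 7 + 6 + 12 bits. The macro names below are made up for illustration
only; the kernel's real accessors are pgd_index(), pmd_index() and
pte_index():

	/* Illustrative only -- not part of this patch. */
	#define EX_PGD_INDEX(va)	(((va) >> 25) & 0x7f)	/* 7 bits -> 128 pgd entries */
	#define EX_PMD_INDEX(va)	(((va) >> 18) & 0x7f)	/* 7 bits -> 128 pmd entries */
	#define EX_PTE_INDEX(va)	(((va) >> 12) & 0x3f)	/* 6 bits ->  64 pte entries */
	/* the low 12 bits are the offset within a 4K page */

This corresponds to the new constants below: PMD_SHIFT = 18,
PTRS_PER_PTE = 64, and PTRS_PER_PMD = PTRS_PER_PGD = 128.
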
Suggested-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.711478295@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/motorola_pgtable.h | 15 ++--------
 arch/m68k/include/asm/page.h             |  6 ++--
 arch/m68k/include/asm/pgtable_mm.h       | 10 +++----
 arch/m68k/mm/kmap.c                      | 36 +++++++++++-------------
 arch/m68k/mm/motorola.c                  | 28 +++++++++---------
 5 files changed, 39 insertions(+), 56 deletions(-)

diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 62bedc61f110..4d94e462bb2b 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -108,13 +108,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
-	unsigned long *ptr = pmdp->pmd;
-	short i = 16;
-	while (--i >= 0) {
-		*ptr++ = ptbl;
-		ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
-	}
+	pmd_val(*pmdp) = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
 }
 
 static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
@@ -138,12 +132,7 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
 #define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
-#define pmd_clear(pmdp) ({			\
-	unsigned long *__ptr = pmdp->pmd;	\
-	short __i = 16;				\
-	while (--__i >= 0)			\
-		*__ptr++ = 0;			\
-})
+#define pmd_clear(pmdp)		({ pmd_val(*pmdp) = 0; })
 #define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
 
 
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 05e1e1e77a9a..c02326b56ae2 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -22,9 +22,9 @@
  * These are used to make use of C type-checking..
  */
 #if !defined(CONFIG_MMU) || CONFIG_PGTABLE_LEVELS == 3
-typedef struct { unsigned long pmd[16]; } pmd_t;
-#define pmd_val(x)	((&x)->pmd[0])
-#define __pmd(x)	((pmd_t) { { (x) }, })
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x)	((&x)->pmd)
+#define __pmd(x)	((pmd_t) { (x) } )
 #endif
 
 typedef struct { unsigned long pte; } pte_t;
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 2bf5c3501e78..f0e5167de834 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -36,7 +36,7 @@
 
 /* PMD_SHIFT determines the size of the area a second-level page table can map */
 #if CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT	22
+#define PMD_SHIFT	18
 #endif
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
@@ -67,8 +67,8 @@
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #else
-#define PTRS_PER_PTE	1024
-#define PTRS_PER_PMD	8
+#define PTRS_PER_PTE	64
+#define PTRS_PER_PMD	128
 #define PTRS_PER_PGD	128
 #endif
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
@@ -76,8 +76,8 @@
 
 /* Virtual address region for use by kernel_map() */
 #ifdef CONFIG_SUN3
-#define KMAP_START	0x0DC00000
-#define KMAP_END	0x0E000000
+#define KMAP_START	0x0dc00000
+#define KMAP_END	0x0e000000
 #elif defined(CONFIG_COLDFIRE)
 #define KMAP_START	0xe0000000
 #define KMAP_END	0xf0000000
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 120030ad8dc4..14d31d216cef 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -24,8 +24,6 @@
 
 #undef DEBUG
 
-#define PTRTREESIZE	(256*1024)
-
 /*
  * For 040/060 we can use the virtual memory area like other architectures,
  * but for 020/030 we want to use early termination page descriptors and we
@@ -50,7 +48,7 @@ static inline void free_io_area(void *addr)
 
 #else
 
-#define IO_SIZE		(256*1024)
+#define IO_SIZE		PMD_SIZE
 
 static struct vm_struct *iolist;
 
@@ -81,14 +79,13 @@ static void __free_io_area(void *addr, unsigned long size)
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;
 
 			if (pmd_type == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = 0;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
-				continue;
+				pmd_clear(pmd_dir);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
+
 			} else if (pmd_type == 0)
 				continue;
 		}
@@ -249,7 +246,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 
 	while ((long)size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
@@ -263,10 +260,10 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-			physaddr += PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
-			size -= PTRTREESIZE;
+			pmd_val(*pmd_dir) = physaddr;
+			physaddr += PMD_SIZE;
+			virtaddr += PMD_SIZE;
+			size -= PMD_SIZE;
 		} else
 #endif
 		{
@@ -367,13 +364,12 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+			unsigned long pmd = pmd_val(*pmd_dir);
 
-			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
-							 _CACHEMASK040) | cmode;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
+			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
 				continue;
 			}
 		}
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 2102f9397c94..c888ef46da3e 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -236,8 +236,6 @@ static pmd_t * __init kernel_ptr_table(void)
 
 static void __init map_node(int node)
 {
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
 	unsigned long physaddr, virtaddr, size;
 	pgd_t *pgd_dir;
 	p4d_t *p4d_dir;
@@ -255,21 +253,21 @@ static void __init map_node(int node)
 
 	while (size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
 				virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
 		if (virtaddr && CPU_IS_020_OR_030) {
-			if (!(virtaddr & (ROOTTREESIZE-1)) &&
-			    size >= ROOTTREESIZE) {
+			if (!(virtaddr & (PGDIR_SIZE-1)) &&
+			    size >= PGDIR_SIZE) {
 #ifdef DEBUG
 				printk ("[very early term]");
 #endif
 				pgd_val(*pgd_dir) = physaddr;
-				size -= ROOTTREESIZE;
-				virtaddr += ROOTTREESIZE;
-				physaddr += ROOTTREESIZE;
+				size -= PGDIR_SIZE;
+				virtaddr += PGDIR_SIZE;
+				physaddr += PGDIR_SIZE;
 				continue;
 			}
 		}
@@ -289,8 +287,8 @@ static void __init map_node(int node)
 #ifdef DEBUG
 				printk ("[early term]");
 #endif
-				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-				physaddr += PTRTREESIZE;
+				pmd_val(*pmd_dir) = physaddr;
+				physaddr += PMD_SIZE;
 			} else {
 				int i;
 #ifdef DEBUG
@@ -298,15 +296,15 @@ static void __init map_node(int node)
 #endif
 				zero_pgtable = kernel_ptr_table();
 				pte_dir = (pte_t *)zero_pgtable;
-				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-					_PAGE_TABLE | _PAGE_ACCESSED;
+				pmd_set(pmd_dir, pte_dir);
+
 				pte_val(*pte_dir++) = 0;
 				physaddr += PAGE_SIZE;
-				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
 					pte_val(*pte_dir++) = physaddr;
 			}
-			size -= PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
+			size -= PMD_SIZE;
+			virtaddr += PMD_SIZE;
 		} else {
 			if (!pmd_present(*pmd_dir)) {
 #ifdef DEBUG

From ef9285f69f0efbc75d01cbb09fe65882effd0a25 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:37 +0100
Subject: [PATCH 06/10] m68k: mm: Improve kernel_page_table()

With the PTE-tables now only being 256 bytes, allocating a full page
for them is a giant waste. Start by improving the boot-time allocator
such that init_mm initialization will at least have optimal memory
density.

Many thanks to Will Deacon for help with debugging and for ferreting
out lost information on these dusty MMUs.

Notes:

 - _TABLE_MASK is reduced to account for the shorter (256 byte)
   alignment of pte-tables; per the manual, table entries should only
   ever have state in the low 4 bits (Used, WrProt, Desc1, Desc0), so
   the new mask is still wider than strictly required. (Thanks Will!!!)
   A worked example follows these notes.

 - Also use kernel_page_table() for the 020/030 zero_pgtable case and
   consequently remove the zero_pgtable init hack (will fix up later).

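As a worked example (the address below is made up; only the mask value
and the bit names come from this patch):

	/*
	 * A pmd entry pointing at a (256 byte aligned) pte table at,
	 * say, physical 0x00345600 is built as:
	 *
	 *	0x00345600 | _PAGE_TABLE | _PAGE_ACCESSED
	 *
	 * 'entry & _TABLE_MASK' with the new 0xffffff00 mask recovers
	 * 0x00345600 again, while the hardware state lives entirely in
	 * the low 4 bits:
	 *
	 *	bit  3   - Used
	 *	bit  2   - Write Protected
	 *	bits 1,0 - Descriptor Type
	 */
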
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.768263973@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/motorola_pgtable.h | 13 +++++-
 arch/m68k/mm/init.c                      |  5 ---
 arch/m68k/mm/motorola.c                  | 51 ++++++++++++++----------
 3 files changed, 41 insertions(+), 28 deletions(-)

diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 4d94e462bb2b..2ad0f4166841 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -23,7 +23,18 @@
 #define _DESCTYPE_MASK	0x003
 
 #define _CACHEMASK040	(~0x060)
-#define _TABLE_MASK	(0xfffffe00)
+
+/*
+ * Currently set to the minimum alignment of table pointers (256 bytes).
+ * The hardware only uses the low 4 bits for state:
+ *
+ *	3 - Used
+ *	2 - Write Protected
+ *	0,1 - Descriptor Type
+ *
+ * and has the rest of the bits reserved.
+ */
+#define _TABLE_MASK	(0xffffff00)
 
 #define _PAGE_TABLE	(_PAGE_SHORT)
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 27c453f4fffe..a9b6d26cff8a 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -42,7 +42,6 @@ EXPORT_SYMBOL(empty_zero_page);
 
 #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
 #endif
 
 #ifdef CONFIG_MMU
@@ -135,10 +134,6 @@ static inline void init_pointer_tables(void)
 		if (pud_present(*pud))
 			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
 	}
-
-	/* insert also pointer table that we used to unmap the zero page */
-	if (zero_pgtable)
-		init_pointer_table((unsigned long)zero_pgtable);
 #endif
 }
 
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index c888ef46da3e..600f9c1d96f8 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -174,27 +174,35 @@ extern __initdata unsigned long m68k_init_mapped_size;
 
 extern unsigned long availmem;
 
+static pte_t *last_pte_table __initdata = NULL;
+
 static pte_t * __init kernel_page_table(void)
 {
-	pte_t *ptablep;
+	pte_t *pte_table = last_pte_table;
 
-	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!ptablep)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
+	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
+		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!pte_table) {
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+		}
 
-	clear_page(ptablep);
-	mmu_page_ctor(ptablep);
+		clear_page(pte_table);
+		mmu_page_ctor(pte_table);
 
-	return ptablep;
+		last_pte_table = pte_table;
+	}
+
+	last_pte_table += PTRS_PER_PTE;
+
+	return pte_table;
 }
 
-static pmd_t *last_pgtable __initdata = NULL;
-pmd_t *zero_pgtable __initdata = NULL;
+static pmd_t *last_pmd_table __initdata = NULL;
 
 static pmd_t * __init kernel_ptr_table(void)
 {
-	if (!last_pgtable) {
+	if (!last_pmd_table) {
 		unsigned long pmd, last;
 		int i;
 
@@ -213,25 +221,25 @@ static pmd_t * __init kernel_ptr_table(void)
 			last = pmd;
 		}
 
-		last_pgtable = (pmd_t *)last;
+		last_pmd_table = (pmd_t *)last;
 #ifdef DEBUG
-		printk("kernel_ptr_init: %p\n", last_pgtable);
+		printk("kernel_ptr_init: %p\n", last_pmd_table);
 #endif
 	}
 
-	last_pgtable += PTRS_PER_PMD;
-	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
-		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
+	last_pmd_table += PTRS_PER_PMD;
+	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
+		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
 							   PAGE_SIZE);
-		if (!last_pgtable)
+		if (!last_pmd_table)
 			panic("%s: Failed to allocate %lu bytes align=%lx\n",
 			      __func__, PAGE_SIZE, PAGE_SIZE);
 
-		clear_page(last_pgtable);
-		mmu_page_ctor(last_pgtable);
+		clear_page(last_pmd_table);
+		mmu_page_ctor(last_pmd_table);
 	}
 
-	return last_pgtable;
+	return last_pmd_table;
 }
 
 static void __init map_node(int node)
@@ -294,8 +302,7 @@ static void __init map_node(int node)
 #ifdef DEBUG
 				printk ("[zero map]");
 #endif
-				zero_pgtable = kernel_ptr_table();
-				pte_dir = (pte_t *)zero_pgtable;
+				pte_dir = kernel_page_table();
 				pmd_set(pmd_dir, pte_dir);
 
 				pte_val(*pte_dir++) = 0;

From 61c64a25ae8df45c2cd2f76343e20c3d266382ea Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:38 +0100
Subject: [PATCH 07/10] m68k: mm: Use table allocator for pgtables

With the new page-table layout, using full (4k) pages for (256 byte)
pte-tables is immensely wasteful. Move the pte-tables over to the same
allocator already used for the (512 byte) higher level tables
(pgd/pmd).

This reduces the pte-table waste from 15x to 2x. Due to no longer being
bound to 16 consecutive tables, this might actually already be more
efficient than the old code for sparse tables.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.825295149@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/motorola_pgalloc.h | 44 +++++-------------------
 arch/m68k/include/asm/motorola_pgtable.h |  8 ++++-
 arch/m68k/include/asm/page.h             |  5 +++
 3 files changed, 21 insertions(+), 36 deletions(-)

diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 85af0c602e9f..c15e04277be7 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -13,54 +13,28 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (pte)
-		mmu_page_ctor(pte);
-
-	return pte;
+	return (pte_t *)get_pointer_table();
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	mmu_page_dtor(pte);
-	free_page((unsigned long) pte);
+	free_pointer_table((void *)pte);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	struct page *page;
-
-	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
-	if(!page)
-		return NULL;
-	if (!pgtable_pte_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-
-	mmu_page_ctor(kmap(page));
-	kunmap(page);
-
-	return page;
+	return (pte_t *)get_pointer_table();
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-	pgtable_pte_page_dtor(page);
-	mmu_page_dtor(kmap(page));
-	kunmap(page);
-	__free_page(page);
+	free_pointer_table((void *)pgtable);
 }
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
-	pgtable_pte_page_dtor(page);
-	mmu_page_dtor(kmap(page));
-	kunmap(page);
-	__free_page(page);
+	free_pointer_table((void *)pgtable);
 }
 
 
@@ -99,9 +73,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
 {
-	pmd_set(pmd, page_address(page));
+	pmd_set(pmd, page);
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 2ad0f4166841..4b91a470ad58 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -144,7 +144,13 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
 #define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
 #define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
 #define pmd_clear(pmdp)		({ pmd_val(*pmdp) = 0; })
-#define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
+
+/*
+ * m68k does not have huge pages (020/030 actually could), but generic code
+ * expects pmd_page() to exist, only to then DCE it all. Provide a dummy to
+ * make the compiler happy.
+ */
+#define pmd_page(pmd)		NULL
 
 
 #define pud_none(pud)		(!pud_val(pud))
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index c02326b56ae2..23a5c143df9d 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -30,7 +30,12 @@ typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
+
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
 typedef struct page *pgtable_t;
+#else
+typedef pte_t *pgtable_t;
+#endif
 
 #define pte_val(x)	((x).pte)
 #define pgd_val(x)	((x).pgd)

From 0e071ee6815692a3b241bbe9a9a29f7cdec023ed Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:39 +0100
Subject: [PATCH 08/10] m68k: mm: Extend table allocator for multiple sizes

In addition to the PGD/PMD table size (128*4 bytes) add a PTE table
size (64*4 bytes) to the table allocator. This completely removes the
pte-table overhead compared to the old code, even for dense tables.

Notes:

 - the allocator gained a list_empty() check to deal with there not
   being any pages at all.

 - the free mask is extended to cover more than the 8 bits required
   for the (512 byte) PGD/PMD tables; a worked example of the new mask
   arithmetic follows these notes.

 - NR_PAGETABLE accounting is restored.

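As a sketch of the new mask arithmetic (derived from ptable_shift[] and
ptable_mask() in the diff below, assuming the 4K PAGE_SIZE selected
earlier in this series):

	/*
	 * TABLE_PGD/TABLE_PMD: ptable_size() = 1 << (7+2) = 512 bytes,
	 *	so one page holds 4096/512 = 8 tables and the full mask
	 *	is (1 << 8) - 1 = 0xff.
	 *
	 * TABLE_PTE: ptable_size() = 1 << (6+2) = 256 bytes,
	 *	so one page holds 4096/256 = 16 tables and the full mask
	 *	is (1 << 16) - 1 = 0xffff.
	 *
	 * 16 bits no longer fit the old unsigned char mark bits, hence
	 * the switch to unsigned int in PD_MARKBITS().
	 */
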
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.882175409@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/motorola_pgalloc.h | 31 ++++++-----
 arch/m68k/mm/init.c                      | 16 +++---
 arch/m68k/mm/motorola.c                  | 65 ++++++++++++++++--------
 3 files changed, 70 insertions(+), 42 deletions(-)

diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index c15e04277be7..c66e42917912 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -8,61 +8,68 @@
 extern void mmu_page_ctor(void *page);
 extern void mmu_page_dtor(void *page);
 
-extern pmd_t *get_pointer_table(void);
-extern int free_pointer_table(pmd_t *);
+enum m68k_table_types {
+	TABLE_PGD = 0,
+	TABLE_PMD = 0,	/* same size as PGD */
+	TABLE_PTE = 1,
+};
+
+extern void init_pointer_table(void *table, int type);
+extern void *get_pointer_table(int type);
+extern int free_pointer_table(void *table, int type);
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	return (pte_t *)get_pointer_table();
+	return get_pointer_table(TABLE_PTE);
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_pointer_table((void *)pte);
+	free_pointer_table(pte, TABLE_PTE);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	return (pte_t *)get_pointer_table();
+	return get_pointer_table(TABLE_PTE);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-	free_pointer_table((void *)pgtable);
+	free_pointer_table(pgtable, TABLE_PTE);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
-	free_pointer_table((void *)pgtable);
+	free_pointer_table(pgtable, TABLE_PTE);
 }
 
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return get_pointer_table();
+	return get_pointer_table(TABLE_PMD);
 }
 
 static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, TABLE_PMD);
 }
 
 static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				 unsigned long address)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, TABLE_PMD);
 }
 
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	pmd_free(mm, (pmd_t *)pgd);
+	free_pointer_table(pgd, TABLE_PGD);
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return (pgd_t *)get_pointer_table();
+	return get_pointer_table(TABLE_PGD);
 }
 
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index a9b6d26cff8a..3e3a74a5b215 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -40,10 +40,6 @@
 void *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
-extern void init_pointer_table(unsigned long ptable);
-#endif
-
 #ifdef CONFIG_MMU
 
 pg_data_t pg_data_map[MAX_NUMNODES];
@@ -127,12 +123,16 @@ static inline void init_pointer_tables(void)
 	int i;
 
 	/* insert pointer tables allocated so far into the tablelist */
-	init_pointer_table((unsigned long)kernel_pg_dir);
+	init_pointer_table(kernel_pg_dir, TABLE_PGD);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
-		pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);
+		pud_t *pud = (pud_t *)&kernel_pg_dir[i];
+		pmd_t *pmd_dir;
 
-		if (pud_present(*pud))
-			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
+		if (!pud_present(*pud))
+			continue;
+
+		pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
+		init_pointer_table(pmd_dir, TABLE_PMD);
 	}
 #endif
 }
 
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 600f9c1d96f8..fc16190ec2d6 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -72,24 +72,35 @@ void mmu_page_dtor(void *page)
    arch/sparc/mm/srmmu.c ... */
 
 typedef struct list_head ptable_desc;
-static LIST_HEAD(ptable_list);
+
+static struct list_head ptable_list[2] = {
+	LIST_HEAD_INIT(ptable_list[0]),
+	LIST_HEAD_INIT(ptable_list[1]),
+};
 
 #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
-#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)
+#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)
 
-#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
+static const int ptable_shift[2] = {
+	7+2, /* PGD, PMD */
+	6+2, /* PTE */
+};
 
-void __init init_pointer_table(unsigned long ptable)
+#define ptable_size(type) (1U << ptable_shift[type])
+#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
+
+void __init init_pointer_table(void *table, int type)
 {
 	ptable_desc *dp;
+	unsigned long ptable = (unsigned long)table;
 	unsigned long page = ptable & PAGE_MASK;
-	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
 
 	dp = PD_PTABLE(page);
 	if (!(PD_MARKBITS(dp) & mask)) {
-		PD_MARKBITS(dp) = 0xff;
-		list_add(dp, &ptable_list);
+		PD_MARKBITS(dp) = ptable_mask(type);
+		list_add(dp, &ptable_list[type]);
 	}
 
 	PD_MARKBITS(dp) &= ~mask;
@@ -102,12 +113,11 @@ void __init init_pointer_table(unsigned long ptable)
 	return;
 }
 
-pmd_t *get_pointer_table (void)
+void *get_pointer_table(int type)
 {
-	ptable_desc *dp = ptable_list.next;
-	unsigned char mask = PD_MARKBITS (dp);
-	unsigned char tmp;
-	unsigned int off;
+	ptable_desc *dp = ptable_list[type].next;
+	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
+	unsigned int tmp, off;
 
 	/*
 	 * For a pointer table for a user process address space, a
@@ -122,30 +132,39 @@ void *get_pointer_table(int type)
 		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
 			return NULL;
 
+		if (type == TABLE_PTE) {
+			/*
+			 * m68k doesn't have SPLIT_PTE_PTLOCKS, since it
+			 * doesn't have SMP.
+			 */
+			pgtable_pte_page_ctor(virt_to_page(page));
+		}
+
 		mmu_page_ctor(page);
 
 		new = PD_PTABLE(page);
-		PD_MARKBITS(new) = 0xfe;
+		PD_MARKBITS(new) = ptable_mask(type) - 1;
 		list_add_tail(new, dp);
 
 		return (pmd_t *)page;
 	}
 
-	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
+	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
 		;
 	PD_MARKBITS(dp) = mask & ~tmp;
 	if (!PD_MARKBITS(dp)) {
 		/* move to end of list */
-		list_move_tail(dp, &ptable_list);
+		list_move_tail(dp, &ptable_list[type]);
 	}
-	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
+	return page_address(PD_PAGE(dp)) + off;
 }
 
-int free_pointer_table (pmd_t *ptable)
+int free_pointer_table(void *table, int type)
 {
 	ptable_desc *dp;
-	unsigned long page = (unsigned long)ptable & PAGE_MASK;
-	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);
+	unsigned long ptable = (unsigned long)table;
+	unsigned long page = ptable & PAGE_MASK;
+	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));
 
 	dp = PD_PTABLE(page);
 	if (PD_MARKBITS (dp) & mask)
@@ -153,18 +172,20 @@ int free_pointer_table(void *table, int type)
 
 	PD_MARKBITS (dp) |= mask;
 
-	if (PD_MARKBITS(dp) == 0xff) {
+	if (PD_MARKBITS(dp) == ptable_mask(type)) {
 		/* all tables in page are free, free page */
 		list_del(dp);
 		mmu_page_dtor((void *)page);
+		if (type == TABLE_PTE)
+			pgtable_pte_page_dtor(virt_to_page(page));
 		free_page (page);
 		return 1;
-	} else if (ptable_list.next != dp) {
+	} else if (ptable_list[type].next != dp) {
 		/*
 		 * move this descriptor to the front of the list, since
 		 * it has one or more free tables.
 		 */
-		list_move(dp, &ptable_list);
+		list_move(dp, &ptable_list[type]);
 	}
 	return 0;
 }

From 518a6b58243aff43361c1f027697dfe5a89cbc4f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Jan 2020 13:45:40 +0100
Subject: [PATCH 09/10] m68k: mm: Fully initialize the page-table allocator

Also iterate the PMD tables to populate the PTE table allocator. This
fully replaces the previous zero_pgtable hack.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.938797587@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/mm/init.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 3e3a74a5b215..b88d510d4fe3 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -120,7 +120,7 @@ void free_initmem(void)
 static inline void init_pointer_tables(void)
 {
 #if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
-	int i;
+	int i, j;
 
 	/* insert pointer tables allocated so far into the tablelist */
 	init_pointer_table(kernel_pg_dir, TABLE_PGD);
@@ -133,6 +133,17 @@ static inline void init_pointer_tables(void)
 
 		pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
 		init_pointer_table(pmd_dir, TABLE_PMD);
+
+		for (j = 0; j < PTRS_PER_PMD; j++) {
+			pmd_t *pmd = &pmd_dir[j];
+			pte_t *pte_dir;
+
+			if (!pmd_present(*pmd))
+				continue;
+
+			pte_dir = (pte_t *)__pmd_page(*pmd);
+			init_pointer_table(pte_dir, TABLE_PTE);
+		}
 	}
 #endif
 }

From de9e354e1f8f975092e6635a0596cf0b9a6b82ac Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 31 Jan 2020 13:45:41 +0100
Subject: [PATCH 10/10] m68k: mm: Change ColdFire pgtable_t

To match what we did to the Motorola MMU routines, change the ColdFire
pgalloc so that pgtable_t is a pte_t * there as well. The result is
that the ColdFire and Sun3 pgalloc implementations are actually very
similar and could conceivably be unified.

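A rough usage sketch of what the type change means on ColdFire (the
surrounding mm/pmd context and error handling are assumed, not part of
the patch):

	pgtable_t pte = pte_alloc_one(mm);	/* now returns a pte_t *, not a struct page * */

	if (pte)
		pmd_populate(mm, pmd, pte);	/* stores the table address directly */
	...
	pte_free(mm, pte);			/* looks the struct page back up via virt_to_page() */
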
Signed-off-by: Will Deacon
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Greg Ungerer
Tested-by: Michael Schmitz
Tested-by: Greg Ungerer
Link: https://lore.kernel.org/r/20200131125403.995781825@infradead.org
Signed-off-by: Geert Uytterhoeven
---
 arch/m68k/include/asm/mcf_pgalloc.h | 25 +++++++++++++------------
 arch/m68k/include/asm/page.h        |  7 ++++++-
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 595e0b8c4759..bc1228e00518 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -28,21 +28,22 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 	return (pmd_t *) pgd;
 }
 
-#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
-	(unsigned long)(page_address(page)))
+#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
 
-#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+#define pmd_populate_kernel pmd_populate
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
+	struct page *page = virt_to_page(pgtable);
+
 	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
 	struct page *page = alloc_pages(GFP_DMA, 0);
 	pte_t *pte;
@@ -54,16 +55,16 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 		return NULL;
 	}
 
-	pte = kmap(page);
-	if (pte)
-		clear_page(pte);
-	kunmap(page);
+	pte = page_address(page);
+	clear_page(pte);
 
-	return page;
+	return pte;
 }
 
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
+	struct page *page = virt_to_page(pgtable);
+
 	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 23a5c143df9d..da546487e177 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -31,7 +31,12 @@ typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
-#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
+#if defined(CONFIG_SUN3)
+/*
+ * Sun3 still uses the asm-generic/pgalloc.h code and thus needs this
+ * definition. It would be possible to unify Sun3 and ColdFire pgalloc and
+ * have all of m68k use the same type.
+ */
 typedef struct page *pgtable_t;
 #else
 typedef pte_t *pgtable_t;