parisc: Map kernel text and data on huge pages

Adjust the linker script and map_pages() to map kernel text and data on
physical 1MB huge/large pages.

Signed-off-by: Helge Deller <deller@gmx.de>
Author: Helge Deller <deller@gmx.de>
Date:   2015-11-22 00:07:44 +01:00
Parent: 736d216933
Commit: 41b85a1163

3 changed files, 31 insertions(+), 26 deletions(-)
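
Background for the diffs below: with 4 kB base pages, parisc defines
REAL_HPAGE_SHIFT as 20, so one huge page covers 1 MB. A standalone
illustration of the constant and of what ". = ALIGN(HUGEPAGE_SIZE);"
achieves in the linker script (hypothetical user-space C, not part of
the patch):

    #include <stdio.h>

    #define REAL_HPAGE_SHIFT 20                    /* parisc, 4 kB base pages */
    #define HUGEPAGE_SIZE    (1UL << REAL_HPAGE_SHIFT)

    int main(void)
    {
            unsigned long end_of_text = 0x005a3c10UL;  /* made-up address */

            /* what ". = ALIGN(HUGEPAGE_SIZE);" does to the location counter */
            unsigned long aligned = (end_of_text + HUGEPAGE_SIZE - 1)
                                    & ~(HUGEPAGE_SIZE - 1);

            printf("%#lx rounds up to %#lx (%lu MB pages)\n",
                   end_of_text, aligned, HUGEPAGE_SIZE >> 20);
            return 0;
    }

Every boundary between differently-mapped regions (end of init, start
of data, end of the kernel image) is rounded up this way, so each
region spans a whole number of 1 MB pages.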

arch/parisc/kernel/asm-offsets.c

@@ -289,6 +289,14 @@ int main(void)
 	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
 	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
 	BLANK();
+	/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+	 * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+	DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
+	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
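
The linker script cannot evaluate REAL_HPAGE_SHIFT itself, so the patch
exports HUGEPAGE_SIZE through asm-offsets.c: kbuild compiles this file
to assembler and turns the DEFINE() markers into
include/generated/asm-offsets.h, which the (preprocessed) linker script
can then pick up. A sketch of the mechanism, paraphrased from
include/linux/kbuild.h; the generated line shown assumes
CONFIG_HUGETLB_PAGE with 4 kB base pages:

    /* include/linux/kbuild.h (paraphrased): emit a marker line that the
     * build scripts convert into a #define. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* resulting entry in include/generated/asm-offsets.h: */
    #define HUGEPAGE_SIZE 1048576 /* 1UL << REAL_HPAGE_SHIFT */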

arch/parisc/kernel/vmlinux.lds.S

@@ -60,7 +60,7 @@ SECTIONS
 		EXIT_DATA
 	}
 	PERCPU_SECTION(8)
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
@@ -116,7 +116,7 @@ SECTIONS
 	 * that we can properly leave these
 	 * as writable
 	 */
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	data_start = .;
 	EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
 	_edata = .;
 	/* BSS */
-	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
+	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+
+	/* bootmap is allocated in setup_bootmem() directly behind bss. */
+	. = ALIGN(HUGEPAGE_SIZE);
 	_end = . ;
 	STABS_DEBUG
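
The three new ALIGN(HUGEPAGE_SIZE) bumps put __init_end, data_start and
_end on 1 MB boundaries, so kernel text and kernel data each span a
whole number of huge pages and map_pages() below never has to switch
permissions in the middle of one. A hypothetical boot-time sanity check
along these lines (illustrative only, not part of the patch; the
section symbols are the kernel's own):

    #include <linux/init.h>
    #include <linux/kernel.h>

    extern char __init_end[], data_start[], _end[];

    static int __init check_hugepage_alignment(void)
    {
            /* 1 MB - 1, assuming 4 kB base pages (REAL_HPAGE_SHIFT == 20) */
            unsigned long mask = (1UL << 20) - 1;

            WARN_ON((unsigned long)__init_end & mask);
            WARN_ON((unsigned long)data_start & mask);
            WARN_ON((unsigned long)_end & mask);
            return 0;
    }
    late_initcall(check_hugepage_alignment);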

arch/parisc/mm/init.c

@@ -407,15 +407,11 @@ static void __init map_pages(unsigned long start_vaddr,
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long fv_addr;
-	unsigned long gw_addr;
-	extern const unsigned long fault_vector_20;
-	extern void * const linux_gateway_page;
+	unsigned long kernel_end;

 	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
-	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+	kernel_end = __pa((unsigned long)&_end);

 	end_paddr = start_paddr + size;
@@ -473,24 +469,25 @@
 		for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 			pte_t pte;

-			/*
-			 * Map the fault vector writable so we can
-			 * write the HPMC checksum.
-			 */
 			if (force)
 				pte = __mk_pte(address, pgprot);
-			else if (parisc_text_address(vaddr) &&
-				 address != fv_addr)
+			else if (parisc_text_address(vaddr)) {
 				pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				if (address >= ro_start && address < kernel_end)
+					pte = pte_mkhuge(pte);
+			}
 			else
 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-			if (address >= ro_start && address < ro_end
-					&& address != fv_addr
-					&& address != gw_addr)
-				pte = __mk_pte(address, PAGE_KERNEL_RO);
-			else
+			if (address >= ro_start && address < ro_end) {
+				pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				pte = pte_mkhuge(pte);
+			} else
 #endif
+			{
 				pte = __mk_pte(address, pgprot);
+				if (address >= ro_start && address < kernel_end)
+					pte = pte_mkhuge(pte);
+			}

 			if (address >= end_paddr) {
 				if (force)
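
With the fault-vector and gateway-page carve-outs gone, the PTE choice
above reduces to a small decision table. A standalone distillation of
the new logic for a CONFIG_PARISC_PAGE_SIZE_4KB build (illustrative
only: choose_pte is a made-up name and the kernel helpers are stubbed
as externs):

    typedef unsigned long pte_t;
    typedef unsigned long pgprot_t;

    extern pte_t __mk_pte(unsigned long address, pgprot_t prot);
    extern pte_t pte_mkhuge(pte_t pte);
    extern int parisc_text_address(unsigned long vaddr);
    extern pgprot_t PAGE_KERNEL_EXEC;

    static pte_t choose_pte(unsigned long address, unsigned long vaddr,
                            int force, pgprot_t pgprot,
                            unsigned long ro_start, unsigned long ro_end,
                            unsigned long kernel_end)
    {
            pte_t pte;

            if (force)                        /* caller dictates, never huge */
                    return __mk_pte(address, pgprot);

            if (parisc_text_address(vaddr)) { /* kernel text */
                    pte = __mk_pte(address, PAGE_KERNEL_EXEC);
                    if (address >= ro_start && address < kernel_end)
                            pte = pte_mkhuge(pte);
            } else if (address >= ro_start && address < ro_end) {
                    /* read-only region, 4 kB base-page kernels only */
                    pte = pte_mkhuge(__mk_pte(address, PAGE_KERNEL_EXEC));
            } else {                          /* data, bss and the rest */
                    pte = __mk_pte(address, pgprot);
                    if (address >= ro_start && address < kernel_end)
                            pte = pte_mkhuge(pte);
            }
            return pte;
    }

Huge mappings are only applied between ro_start and kernel_end, i.e.
from the start of kernel text to _end, exactly the range the linker
script now pads to 1 MB multiples.
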
@@ -534,15 +531,12 @@ void free_initmem(void)
 	/* force the kernel to see the new TLB entries */
 	__flush_tlb_range(0, init_begin, init_end);
-	/* Attempt to catch anyone trying to execute code here
-	 * by filling the page with BRK insns.
-	 */
-	memset((void *)init_begin, 0x00, init_end - init_begin);
+
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);

-	free_initmem_default(-1);
+	free_initmem_default(POISON_FREE_INITMEM);

 	/* set up a new led state on systems shipped LED State panel */
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
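
free_initmem_default(-1) freed the init section without poisoning it,
because free_reserved_area() only memsets a page when the poison value
fits in a byte. Passing POISON_FREE_INITMEM (0xcc) fills the freed
pages with recognizable junk, which also makes the hand-rolled memset
above redundant. A simplified model of that behaviour (a paraphrase of
mm's free_reserved_area() contract, not the kernel source):

    #include <string.h>

    #define PAGE_SIZE           4096
    #define POISON_FREE_INITMEM 0xcc    /* from include/linux/poison.h */

    /* Poison each page only if the value fits in a byte; a poison of -1
     * therefore means "free without poisoning". */
    static void model_free_reserved_area(void *start, void *end, int poison)
    {
            char *pos;

            for (pos = start; pos < (char *)end; pos += PAGE_SIZE) {
                    if ((unsigned int)poison <= 0xFF)
                            memset(pos, poison, PAGE_SIZE);
                    /* ...then hand the page back to the page allocator */
            }
    }
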
@@ -712,8 +706,8 @@ static void __init pagetable_init(void)
 		unsigned long size;

 		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
-		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
 		size = pmem_ranges[range].pages << PAGE_SHIFT;
+		end_paddr = start_paddr + size;

 		map_pages((unsigned long)__va(start_paddr), start_paddr,
 			  size, PAGE_KERNEL, 0);