x86/mm: Use a struct to reduce parameters for SME PGD mapping

In preparation for follow-on patches, combine the PGD mapping parameters
into a struct to reduce the number of function arguments and allow for
direct updating of the next pagetable mapping area pointer.
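
In sketch form, a mapping call that used to thread the next pagetable
area through an argument and the return value:

	pgtable_area = sme_populate_pgd(pgd, pgtable_area,
					paddr, paddr + PMD_FLAGS);

becomes, with ppd.pgd and ppd.pgtable_area set up once beforehand:

	ppd.pmd_val = paddr + PMD_FLAGS;
	ppd.vaddr = paddr;
	sme_populate_pgd_large(&ppd);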

Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180110192605.6026.96206.stgit@tlendack-t1.amdoffice.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -464,6 +464,14 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
 	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
 }
 
+struct sme_populate_pgd_data {
+	void		*pgtable_area;
+	pgd_t		*pgd;
+
+	pmdval_t	pmd_val;
+	unsigned long	vaddr;
+};
+
 static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
 				 unsigned long end)
 {
@@ -486,15 +494,14 @@ static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
 #define PUD_FLAGS	_KERNPG_TABLE_NOENC
 #define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
 
-static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
-				     unsigned long vaddr, pmdval_t pmd_val)
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd_p;
 	p4d_t *p4d_p;
 	pud_t *pud_p;
 	pmd_t *pmd_p;
 
-	pgd_p = pgd_base + pgd_index(vaddr);
+	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
 	if (native_pgd_val(*pgd_p)) {
 		if (IS_ENABLED(CONFIG_X86_5LEVEL))
 			p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
@@ -504,15 +511,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
 		pgd_t pgd;
 
 		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-			p4d_p = pgtable_area;
+			p4d_p = ppd->pgtable_area;
 			memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
-			pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+			ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
 
 			pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
 		} else {
-			pud_p = pgtable_area;
+			pud_p = ppd->pgtable_area;
 			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
 
 			pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
 		}
@@ -520,44 +527,41 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
 	}
 
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_p += p4d_index(vaddr);
+		p4d_p += p4d_index(ppd->vaddr);
 		if (native_p4d_val(*p4d_p)) {
 			pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
 		} else {
 			p4d_t p4d;
 
-			pud_p = pgtable_area;
+			pud_p = ppd->pgtable_area;
 			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
 
 			p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
 			native_set_p4d(p4d_p, p4d);
 		}
 	}
 
-	pud_p += pud_index(vaddr);
+	pud_p += pud_index(ppd->vaddr);
 	if (native_pud_val(*pud_p)) {
 		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			goto out;
+			return;
 
 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
 	} else {
 		pud_t pud;
 
-		pmd_p = pgtable_area;
+		pmd_p = ppd->pgtable_area;
 		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
-		pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
+		ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
 
 		pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
 		native_set_pud(pud_p, pud);
 	}
 
-	pmd_p += pmd_index(vaddr);
+	pmd_p += pmd_index(ppd->vaddr);
 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-		native_set_pmd(pmd_p, native_make_pmd(pmd_val));
-
-out:
-	return pgtable_area;
+		native_set_pmd(pmd_p, native_make_pmd(ppd->pmd_val));
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
@@ -615,11 +619,10 @@ void __init sme_encrypt_kernel(void)
 	unsigned long workarea_start, workarea_end, workarea_len;
 	unsigned long execute_start, execute_end, execute_len;
 	unsigned long kernel_start, kernel_end, kernel_len;
+	struct sme_populate_pgd_data ppd;
 	unsigned long pgtable_area_len;
 	unsigned long paddr, pmd_flags;
 	unsigned long decrypted_base;
-	void *pgtable_area;
-	pgd_t *pgd;
 
 	if (!sme_active())
 		return;
@@ -683,18 +686,18 @@ void __init sme_encrypt_kernel(void)
 	 * pagetables and when the new encrypted and decrypted kernel
 	 * mappings are populated.
 	 */
-	pgtable_area = (void *)execute_end;
+	ppd.pgtable_area = (void *)execute_end;
 
 	/*
 	 * Make sure the current pagetable structure has entries for
 	 * addressing the workarea.
 	 */
-	pgd = (pgd_t *)native_read_cr3_pa();
+	ppd.pgd = (pgd_t *)native_read_cr3_pa();
 	paddr = workarea_start;
 	while (paddr < workarea_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + PMD_FLAGS);
+		ppd.pmd_val = paddr + PMD_FLAGS;
+		ppd.vaddr = paddr;
+		sme_populate_pgd_large(&ppd);
 
 		paddr += PMD_PAGE_SIZE;
 	}
@@ -708,17 +711,17 @@ void __init sme_encrypt_kernel(void)
 	 * populated with new PUDs and PMDs as the encrypted and decrypted
 	 * kernel mappings are created.
 	 */
-	pgd = pgtable_area;
-	memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
-	pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
+	ppd.pgd = ppd.pgtable_area;
+	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
+	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
 
 	/* Add encrypted kernel (identity) mappings */
 	pmd_flags = PMD_FLAGS | _PAGE_ENC;
 	paddr = kernel_start;
 	while (paddr < kernel_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + pmd_flags);
+		ppd.pmd_val = paddr + pmd_flags;
+		ppd.vaddr = paddr;
+		sme_populate_pgd_large(&ppd);
 
 		paddr += PMD_PAGE_SIZE;
 	}
@@ -736,9 +739,9 @@ void __init sme_encrypt_kernel(void)
 	pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
 	paddr = kernel_start;
 	while (paddr < kernel_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr + decrypted_base,
-						paddr + pmd_flags);
+		ppd.pmd_val = paddr + pmd_flags;
+		ppd.vaddr = paddr + decrypted_base;
+		sme_populate_pgd_large(&ppd);
 
 		paddr += PMD_PAGE_SIZE;
 	}
@@ -746,30 +749,29 @@ void __init sme_encrypt_kernel(void)
 	/* Add decrypted workarea mappings to both kernel mappings */
 	paddr = workarea_start;
 	while (paddr < workarea_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + PMD_FLAGS);
+		ppd.pmd_val = paddr + PMD_FLAGS;
+		ppd.vaddr = paddr;
+		sme_populate_pgd_large(&ppd);
 
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr + decrypted_base,
-						paddr + PMD_FLAGS);
+		ppd.vaddr = paddr + decrypted_base;
+		sme_populate_pgd_large(&ppd);
 
 		paddr += PMD_PAGE_SIZE;
 	}
 
 	/* Perform the encryption */
 	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
-			    kernel_len, workarea_start, (unsigned long)pgd);
+			    kernel_len, workarea_start, (unsigned long)ppd.pgd);
 
 	/*
 	 * At this point we are running encrypted.  Remove the mappings for
 	 * the decrypted areas - all that is needed for this is to remove
 	 * the PGD entry/entries.
 	 */
-	sme_clear_pgd(pgd, kernel_start + decrypted_base,
-		      kernel_end + decrypted_base);
-	sme_clear_pgd(pgd, workarea_start + decrypted_base,
-		      workarea_end + decrypted_base);
+	sme_clear_pgd(ppd.pgd, kernel_start + decrypted_base,
+		      kernel_end + decrypted_base);
+	sme_clear_pgd(ppd.pgd, workarea_start + decrypted_base,
+		      workarea_end + decrypted_base);
 
 	/* Flush the TLB - no globals so cr3 is enough */