[S390] lockless get_user_pages_fast()
Implement get_user_pages_fast without locking in the fastpath on s390.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 87799ebab7
commit 80217147a3

9 changed files with 395 additions and 28 deletions
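The fast path walks the page tables with interrupts disabled and pins pages without taking mm->mmap_sem; whenever anything is unclear (missing entry, special page, racing update) it falls back to the regular get_user_pages() path. As a usage illustration only, a minimal caller-side sketch: pin_user_buffer() and its fixed 16-page count are invented for the example, while get_user_pages_fast() and put_page() are the real interfaces involved.

        #include <linux/errno.h>
        #include <linux/mm.h>
        #include <linux/pagemap.h>

        /* Hypothetical helper: pin up to 16 pages of a user buffer for I/O. */
        static int pin_user_buffer(unsigned long uaddr, struct page **pages)
        {
                int got;

                /* third argument: 1 = the pages will be written to */
                got = get_user_pages_fast(uaddr, 16, 1, pages);
                if (got <= 0)
                        return got ? got : -EFAULT;

                /* ... do the I/O on pages[0..got-1] ... */

                while (got--)
                        put_page(pages[got]);   /* drop the references gup took */
                return 0;
        }

Note that the return value may be smaller than the number of pages requested; a real caller has to cope with a partial pin.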
arch/s390/Kconfig
@@ -101,6 +101,7 @@ config S390
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
+       select HAVE_GET_USER_PAGES_FAST
        select ARCH_INLINE_SPIN_TRYLOCK
        select ARCH_INLINE_SPIN_TRYLOCK_BH
        select ARCH_INLINE_SPIN_LOCK
arch/s390/include/asm/pgalloc.h
@@ -21,9 +21,11 @@
 
 unsigned long *crst_table_alloc(struct mm_struct *, int);
 void crst_table_free(struct mm_struct *, unsigned long *);
+void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
+void page_table_free_rcu(struct mm_struct *, unsigned long *);
 void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
@@ -176,4 +178,6 @@ static inline void pmd_populate(struct mm_struct *mm,
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
 
+extern void rcu_table_freelist_finish(void);
+
 #endif /* _S390_PGALLOC_H */
arch/s390/include/asm/pgtable.h
@@ -316,6 +316,7 @@ extern unsigned long VMALLOC_START;
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN   0x7fffffc0UL    /* page table origin */
+#define _SEGMENT_ENTRY_RO       0x200   /* page protection bit */
 #define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry */
 #define _SEGMENT_ENTRY_COMMON   0x10    /* common segment bit */
 #define _SEGMENT_ENTRY_PTL      0x0f    /* page table length */
arch/s390/include/asm/tlb.h
@@ -64,10 +64,9 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb,
        if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
                __tlb_flush_mm(tlb->mm);
        while (tlb->nr_ptes > 0)
-               pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
+               page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
        while (tlb->nr_pxds < TLB_NR_PTRS)
-               /* pgd_free frees the pointer as region or segment table */
-               pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
+               crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
@@ -75,6 +74,8 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 {
        tlb_flush_mmu(tlb, start, end);
 
+       rcu_table_freelist_finish();
+
        /* keep the page table cache within bounds */
        check_pgt_cache();
 
@@ -103,7 +104,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                if (tlb->nr_ptes >= tlb->nr_pxds)
                        tlb_flush_mmu(tlb, 0, 0);
        } else
-               pte_free(tlb->mm, pte);
+               page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
 /*
@@ -124,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                if (tlb->nr_ptes >= tlb->nr_pxds)
                        tlb_flush_mmu(tlb, 0, 0);
        } else
-               pmd_free(tlb->mm, pmd);
+               crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
 
@@ -146,7 +147,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                if (tlb->nr_ptes >= tlb->nr_pxds)
                        tlb_flush_mmu(tlb, 0, 0);
        } else
-               pud_free(tlb->mm, pud);
+               crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
arch/s390/mm/Makefile
@@ -3,6 +3,6 @@
 #
 
 obj-y    := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
-           page-states.o
+           page-states.o gup.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
arch/s390/mm/gup.c (new file, 225 lines)
@@ -0,0 +1,225 @@
+/*
+ *  Lockless get_user_pages_fast for s390
+ *
+ *  Copyright IBM Corp. 2010
+ *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/vmstat.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <asm/pgtable.h>
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+{
+       unsigned long mask, result;
+       pte_t *ptep, pte;
+       struct page *page;
+
+       result = write ? 0 : _PAGE_RO;
+       mask = result | _PAGE_INVALID | _PAGE_SPECIAL;
+
+       ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
+       do {
+               pte = *ptep;
+               barrier();
+               if ((pte_val(pte) & mask) != result)
+                       return 0;
+               VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+               page = pte_page(pte);
+               if (!page_cache_get_speculative(page))
+                       return 0;
+               if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+                       put_page(page);
+                       return 0;
+               }
+               pages[*nr] = page;
+               (*nr)++;
+
+       } while (ptep++, addr += PAGE_SIZE, addr != end);
+
+       return 1;
+}
+
+static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+{
+       unsigned long mask, result;
+       struct page *head, *page;
+       int refs;
+
+       result = write ? 0 : _SEGMENT_ENTRY_RO;
+       mask = result | _SEGMENT_ENTRY_INV;
+       if ((pmd_val(pmd) & mask) != result)
+               return 0;
+       VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
+
+       refs = 0;
+       head = pmd_page(pmd);
+       page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+       do {
+               VM_BUG_ON(compound_head(page) != head);
+               pages[*nr] = page;
+               (*nr)++;
+               page++;
+               refs++;
+       } while (addr += PAGE_SIZE, addr != end);
+
+       if (!page_cache_add_speculative(head, refs)) {
+               *nr -= refs;
+               return 0;
+       }
+
+       if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+               *nr -= refs;
+               while (refs--)
+                       put_page(head);
+       }
+
+       return 1;
+}
+
+
+static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+{
+       unsigned long next;
+       pmd_t *pmdp, pmd;
+
+       pmdp = (pmd_t *) pudp;
+#ifdef CONFIG_64BIT
+       if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+               pmdp = (pmd_t *) pud_deref(pud);
+       pmdp += pmd_index(addr);
+#endif
+       do {
+               pmd = *pmdp;
+               barrier();
+               next = pmd_addr_end(addr, end);
+               if (pmd_none(pmd))
+                       return 0;
+               if (unlikely(pmd_huge(pmd))) {
+                       if (!gup_huge_pmd(pmdp, pmd, addr, next,
+                                         write, pages, nr))
+                               return 0;
+               } else if (!gup_pte_range(pmdp, pmd, addr, next,
+                                         write, pages, nr))
+                       return 0;
+       } while (pmdp++, addr = next, addr != end);
+
+       return 1;
+}
+
+static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
+               unsigned long end, int write, struct page **pages, int *nr)
+{
+       unsigned long next;
+       pud_t *pudp, pud;
+
+       pudp = (pud_t *) pgdp;
+#ifdef CONFIG_64BIT
+       if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+               pudp = (pud_t *) pgd_deref(pgd);
+       pudp += pud_index(addr);
+#endif
+       do {
+               pud = *pudp;
+               barrier();
+               next = pud_addr_end(addr, end);
+               if (pud_none(pud))
+                       return 0;
+               if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
+                       return 0;
+       } while (pudp++, addr = next, addr != end);
+
+       return 1;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start:     starting user address
+ * @nr_pages:  number of pages from start to pin
+ * @write:     whether pages will be written to
+ * @pages:     array that receives pointers to the pages pinned.
+ *             Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                       struct page **pages)
+{
+       struct mm_struct *mm = current->mm;
+       unsigned long addr, len, end;
+       unsigned long next;
+       pgd_t *pgdp, pgd;
+       int nr = 0;
+
+       start &= PAGE_MASK;
+       addr = start;
+       len = (unsigned long) nr_pages << PAGE_SHIFT;
+       end = start + len;
+       if (end < start)
+               goto slow_irqon;
+
+       /*
+        * local_irq_disable() doesn't prevent pagetable teardown, but does
+        * prevent the pagetables from being freed on s390.
+        *
+        * So long as we atomically load page table pointers versus teardown,
+        * we can follow the address down to the page and take a ref on it.
+        */
+       local_irq_disable();
+       pgdp = pgd_offset(mm, addr);
+       do {
+               pgd = *pgdp;
+               barrier();
+               next = pgd_addr_end(addr, end);
+               if (pgd_none(pgd))
+                       goto slow;
+               if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
+                       goto slow;
+       } while (pgdp++, addr = next, addr != end);
+       local_irq_enable();
+
+       VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+       return nr;
+
+       {
+               int ret;
+slow:
+               local_irq_enable();
+slow_irqon:
+               /* Try to get the remaining pages with get_user_pages */
+               start += nr << PAGE_SHIFT;
+               pages += nr;
+
+               down_read(&mm->mmap_sem);
+               ret = get_user_pages(current, mm, start,
+                       (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+               up_read(&mm->mmap_sem);
+
+               /* Have to be a bit careful with return values */
+               if (nr > 0) {
+                       if (ret < 0)
+                               ret = nr;
+                       else
+                               ret += nr;
+               }
+
+               return ret;
+       }
+}
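The pin sequence in gup_pte_range() deserves a closer look: load the pte once, take a reference only if the page's refcount can be raised from a provably non-zero value, then re-read the pte and undo the pin if it changed in between. A rough userspace model of that load/pin/recheck pattern, using C11 atomics in place of page_cache_get_speculative(); all names here are invented for the sketch:

        #include <stdatomic.h>
        #include <stdio.h>

        struct fake_page { atomic_int refcount; };

        static struct fake_page *_Atomic pte;   /* stands in for a page table entry */

        /* model of the gup_pte_range() pin sequence */
        static int try_pin(struct fake_page **out)
        {
                struct fake_page *page = atomic_load(&pte);     /* pte = *ptep */
                if (!page)
                        return 0;               /* "invalid" entry: fall back */

                /* page_cache_get_speculative(): pin only while refcount > 0 */
                int old = atomic_load(&page->refcount);
                do {
                        if (old == 0)
                                return 0;       /* page is being freed */
                } while (!atomic_compare_exchange_weak(&page->refcount,
                                                       &old, old + 1));

                /* re-check: if the entry changed under us, undo the pin */
                if (atomic_load(&pte) != page) {
                        atomic_fetch_sub(&page->refcount, 1);   /* put_page() */
                        return 0;
                }
                *out = page;
                return 1;
        }

        int main(void)
        {
                static struct fake_page page = { 1 };
                atomic_store(&pte, &page);

                struct fake_page *pinned;
                printf("pinned: %d, refcount: %d\n", try_pin(&pinned),
                       atomic_load(&page.refcount));    /* pinned: 1, refcount: 2 */
                return 0;
        }

The disabled interrupts are what make the re-check sufficient: a concurrently unhooked table is only released through the RCU batch or after an IPI barrier (see pgtable.c below), so the memory the walker is reading stays valid until it leaves the loop.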
arch/s390/mm/hugetlbpage.c
@@ -68,7 +68,7 @@ void arch_release_hugepage(struct page *page)
        ptep = (pte_t *) page[1].index;
        if (!ptep)
                return;
-       pte_free(&init_mm, ptep);
+       page_table_free(&init_mm, (unsigned long *) ptep);
        page[1].index = 0;
 }
arch/s390/mm/init.c
@@ -38,8 +38,6 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
 unsigned long empty_zero_page, zero_page_mask;
arch/s390/mm/pgtable.c
@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/quicklist.h>
+#include <linux/rcupdate.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -23,6 +24,67 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
+struct rcu_table_freelist {
+       struct rcu_head rcu;
+       struct mm_struct *mm;
+       unsigned int pgt_index;
+       unsigned int crst_index;
+       unsigned long *table[0];
+};
+
+#define RCU_FREELIST_SIZE \
+       ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
+         / sizeof(unsigned long))
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
+
+static void __page_table_free(struct mm_struct *mm, unsigned long *table);
+static void __crst_table_free(struct mm_struct *mm, unsigned long *table);
+
+static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
+{
+       struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
+       struct rcu_table_freelist *batch = *batchp;
+
+       if (batch)
+               return batch;
+       batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
+       if (batch) {
+               batch->mm = mm;
+               batch->pgt_index = 0;
+               batch->crst_index = RCU_FREELIST_SIZE;
+               *batchp = batch;
+       }
+       return batch;
+}
+
+static void rcu_table_freelist_callback(struct rcu_head *head)
+{
+       struct rcu_table_freelist *batch =
+               container_of(head, struct rcu_table_freelist, rcu);
+
+       while (batch->pgt_index > 0)
+               __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
+       while (batch->crst_index < RCU_FREELIST_SIZE)
+               __crst_table_free(batch->mm, batch->table[batch->crst_index++]);
+       free_page((unsigned long) batch);
+}
+
+void rcu_table_freelist_finish(void)
+{
+       struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);
+
+       if (!batch)
+               return;
+       call_rcu(&batch->rcu, rcu_table_freelist_callback);
+       __get_cpu_var(rcu_table_freelist) = NULL;
+}
+
+static void smp_sync(void *arg)
+{
+}
+
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER    1
 #define TABLES_PER_PAGE        4
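A single page doubles as the batch: page-table pointers fill table[] from the front (pgt_index counts up) while region/segment-table pointers fill it from the back (crst_index counts down), and the batch is full exactly when the two indices meet, which the free routines test with pgt_index >= crst_index. A self-contained sketch of this two-ended layout; the size and names are made up, and the RCU hand-off is left out:

        #include <stdio.h>

        #define FREELIST_SIZE 8 /* RCU_FREELIST_SIZE is roughly 500 on a 4K page */

        struct batch {
                unsigned int pgt_index;         /* grows upward */
                unsigned int crst_index;        /* grows downward */
                void *table[FREELIST_SIZE];
        };

        /* each returns 1 when the batch is full and must be submitted */
        static int batch_add_pgt(struct batch *b, void *t)
        {
                b->table[b->pgt_index++] = t;
                return b->pgt_index >= b->crst_index;
        }

        static int batch_add_crst(struct batch *b, void *t)
        {
                b->table[--b->crst_index] = t;
                return b->pgt_index >= b->crst_index;
        }

        int main(void)
        {
                struct batch b = { 0, FREELIST_SIZE, { 0 } };
                int x[FREELIST_SIZE];

                /* interleave both kinds until the indices meet in the middle */
                for (int i = 0; ; i++) {
                        if (batch_add_pgt(&b, &x[i]))
                                break;
                        if (batch_add_crst(&b, &x[i]))
                                break;
                }
                printf("full at pgt_index=%u crst_index=%u\n",
                       b.pgt_index, b.crst_index);      /* full at 4 4 */
                return 0;
        }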
@@ -78,23 +140,53 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
                }
                page->index = page_to_phys(shadow);
        }
-       spin_lock(&mm->context.list_lock);
+       spin_lock_bh(&mm->context.list_lock);
        list_add(&page->lru, &mm->context.crst_list);
-       spin_unlock(&mm->context.list_lock);
+       spin_unlock_bh(&mm->context.list_lock);
        return (unsigned long *) page_to_phys(page);
 }
 
-void crst_table_free(struct mm_struct *mm, unsigned long *table)
+static void __crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
        unsigned long *shadow = get_shadow_table(table);
-       struct page *page = virt_to_page(table);
 
-       spin_lock(&mm->context.list_lock);
-       list_del(&page->lru);
-       spin_unlock(&mm->context.list_lock);
        if (shadow)
                free_pages((unsigned long) shadow, ALLOC_ORDER);
        free_pages((unsigned long) table, ALLOC_ORDER);
 }
+
+void crst_table_free(struct mm_struct *mm, unsigned long *table)
+{
+       struct page *page = virt_to_page(table);
+
+       spin_lock_bh(&mm->context.list_lock);
+       list_del(&page->lru);
+       spin_unlock_bh(&mm->context.list_lock);
+       __crst_table_free(mm, table);
+}
+
+void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+{
+       struct rcu_table_freelist *batch;
+       struct page *page = virt_to_page(table);
+
+       spin_lock_bh(&mm->context.list_lock);
+       list_del(&page->lru);
+       spin_unlock_bh(&mm->context.list_lock);
+       if (atomic_read(&mm->mm_users) < 2 &&
+           cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+               __crst_table_free(mm, table);
+               return;
+       }
+       batch = rcu_table_freelist_get(mm);
+       if (!batch) {
+               smp_call_function(smp_sync, NULL, 1);
+               __crst_table_free(mm, table);
+               return;
+       }
+       batch->table[--batch->crst_index] = table;
+       if (batch->pgt_index >= batch->crst_index)
+               rcu_table_freelist_finish();
+}
 
 #ifdef CONFIG_64BIT
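Two shortcuts are visible above, and page_table_free_rcu() below repeats them: when no second user of the mm can exist the table is freed directly, and when no batch page can be allocated, a completed smp_call_function() with the empty smp_sync() handler serves as a barrier. A gup walker keeps interrupts disabled for the whole walk, so once every CPU has taken the IPI, no walker can still be inside a table that was unhooked beforehand. A rough userspace model of that barrier idea, with an atomic counter standing in for "interrupts disabled"; all names are invented, and the spin-wait replaces the IPI completion (compile with -pthread):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>
        #include <stdlib.h>

        static atomic_int walkers_active;       /* CPUs with "interrupts off" */
        static int *_Atomic table;              /* stands in for a page table */

        static void *walker(void *arg)
        {
                atomic_fetch_add(&walkers_active, 1);   /* local_irq_disable() */
                int *t = atomic_load(&table);           /* snapshot the pointer */
                if (t)
                        (void) t[0];                    /* walk the table */
                atomic_fetch_sub(&walkers_active, 1);   /* local_irq_enable() */
                return NULL;
        }

        int main(void)
        {
                pthread_t tid;
                int *old = calloc(256, sizeof(int));

                atomic_store(&table, old);
                pthread_create(&tid, NULL, walker, NULL);

                atomic_store(&table, NULL);             /* unhook the table */
                while (atomic_load(&walkers_active))    /* smp_call_function()
                                                           returns only after every
                                                           CPU ran the empty handler,
                                                           i.e. left the walk */
                        ;
                free(old);              /* no walker can still see the table */

                pthread_join(tid, NULL);
                puts("freed safely");
                return 0;
        }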
@@ -108,7 +200,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
        table = crst_table_alloc(mm, mm->context.noexec);
        if (!table)
                return -ENOMEM;
-       spin_lock(&mm->page_table_lock);
+       spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
@@ -130,7 +222,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
-       spin_unlock(&mm->page_table_lock);
+       spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
@@ -182,7 +274,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
        unsigned long bits;
 
        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
-       spin_lock(&mm->context.list_lock);
+       spin_lock_bh(&mm->context.list_lock);
        page = NULL;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
@@ -191,7 +283,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                        page = NULL;
        }
        if (!page) {
-               spin_unlock(&mm->context.list_lock);
+               spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
@@ -202,7 +294,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                        clear_table_pgstes(table);
                else
                        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
-               spin_lock(&mm->context.list_lock);
+               spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        }
        table = (unsigned long *) page_to_phys(page);
@@ -213,10 +305,25 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
        page->flags |= bits;
        if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
                list_move_tail(&page->lru, &mm->context.pgtable_list);
-       spin_unlock(&mm->context.list_lock);
+       spin_unlock_bh(&mm->context.list_lock);
        return table;
 }
 
+static void __page_table_free(struct mm_struct *mm, unsigned long *table)
+{
+       struct page *page;
+       unsigned long bits;
+
+       bits = ((unsigned long) table) & 15;
+       table = (unsigned long *)(((unsigned long) table) ^ bits);
+       page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+       page->flags ^= bits;
+       if (!(page->flags & FRAG_MASK)) {
+               pgtable_page_dtor(page);
+               __free_page(page);
+       }
+}
+
 void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
        struct page *page;
@@ -225,7 +332,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
        bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-       spin_lock(&mm->context.list_lock);
+       spin_lock_bh(&mm->context.list_lock);
        page->flags ^= bits;
        if (page->flags & FRAG_MASK) {
                /* Page now has some free pgtable fragments. */
@@ -234,18 +341,48 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
        } else
                /* All fragments of the 4K page have been freed. */
                list_del(&page->lru);
-       spin_unlock(&mm->context.list_lock);
+       spin_unlock_bh(&mm->context.list_lock);
        if (page) {
                pgtable_page_dtor(page);
                __free_page(page);
        }
 }
 
+void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
+{
+       struct rcu_table_freelist *batch;
+       struct page *page;
+       unsigned long bits;
+
+       if (atomic_read(&mm->mm_users) < 2 &&
+           cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+               page_table_free(mm, table);
+               return;
+       }
+       batch = rcu_table_freelist_get(mm);
+       if (!batch) {
+               smp_call_function(smp_sync, NULL, 1);
+               page_table_free(mm, table);
+               return;
+       }
+       bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
+       bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+       page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+       spin_lock_bh(&mm->context.list_lock);
+       /* Delayed freeing with rcu prevents reuse of pgtable fragments */
+       list_del_init(&page->lru);
+       spin_unlock_bh(&mm->context.list_lock);
+       table = (unsigned long *)(((unsigned long) table) | bits);
+       batch->table[batch->pgt_index++] = table;
+       if (batch->pgt_index >= batch->crst_index)
+               rcu_table_freelist_finish();
+}
+
 void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
 {
        struct page *page;
 
-       spin_lock(&mm->context.list_lock);
+       spin_lock_bh(&mm->context.list_lock);
        /* Free shadow region and segment tables. */
        list_for_each_entry(page, &mm->context.crst_list, lru)
                if (page->index) {
@@ -255,7 +392,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
        /* "Free" second halves of page tables. */
        list_for_each_entry(page, &mm->context.pgtable_list, lru)
                page->flags &= ~SECOND_HALVES;
-       spin_unlock(&mm->context.list_lock);
+       spin_unlock_bh(&mm->context.list_lock);
        mm->context.noexec = 0;
        update_mm(mm, tsk);
 }
|
|
Loading…
Reference in a new issue