mm: rename USE_SPLIT_PTLOCKS to USE_SPLIT_PTE_PTLOCKS
We're going to introduce split page table lock for PMD level. Let's
rename existing split ptlock for PTE level to avoid confusion.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 57c1ffcefb
parent e9bb18c7b9
4 changed files with 13 additions and 13 deletions
arch/arm/mm/fault-armv.c

@@ -65,7 +65,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 /*
  * If we are using split PTE locks, then we need to take the page
  * lock here. Otherwise we are using shared mm->page_table_lock
@@ -84,10 +84,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
 {
 	spin_unlock(ptl);
 }
-#else /* !USE_SPLIT_PTLOCKS */
+#else /* !USE_SPLIT_PTE_PTLOCKS */
 static inline void do_pte_lock(spinlock_t *ptl) {}
 static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
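The hunks above rely on do_pte_lock()/do_pte_unlock() collapsing to empty inline stubs when split PTE ptlocks are disabled, so adjust_pte() can call them unconditionally, with no #ifdef at the call site. A minimal standalone sketch of that idiom (the spinlock type and the knob's value here are stand-ins, not the kernel's):

#include <stdio.h>

#define USE_SPLIT_PTE_PTLOCKS 0   /* flip to 1 to get the real locking */

struct spinlock { int locked; };  /* stand-in for spinlock_t */

#if USE_SPLIT_PTE_PTLOCKS
static inline void do_pte_lock(struct spinlock *ptl)   { ptl->locked = 1; }
static inline void do_pte_unlock(struct spinlock *ptl) { ptl->locked = 0; }
#else  /* !USE_SPLIT_PTE_PTLOCKS */
/* No split locks: per the comment in the diff, callers already run under
 * the shared mm->page_table_lock, so the helpers vanish at compile time. */
static inline void do_pte_lock(struct spinlock *ptl)   { (void)ptl; }
static inline void do_pte_unlock(struct spinlock *ptl) { (void)ptl; }
#endif

int main(void)
{
	struct spinlock ptl = {0};
	do_pte_lock(&ptl);        /* call site stays #ifdef-free either way */
	printf("locked=%d\n", ptl.locked);
	do_pte_unlock(&ptl);
	return 0;
}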
arch/x86/xen/mmu.c

@@ -796,7 +796,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
 {
 	spinlock_t *ptl = NULL;
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 	ptl = __pte_lockptr(page);
 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
 #endif
@@ -1637,7 +1637,7 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 
 	__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-	if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+	if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
 		__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
@@ -1671,7 +1671,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 	if (!PageHighMem(page)) {
 		xen_mc_batch();
 
-		if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
 			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 
 		__set_pfn_prot(pfn, PAGE_KERNEL);
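xen_pte_lock() above uses spin_lock_nest_lock(), which takes the per-page ptl while telling lockdep that it nests under the already-held mm->page_table_lock, so pinning may hold many same-class ptl locks at once without tripping the deadlock detector. A rough userspace sketch of that outer-then-inner ordering, with pthread mutexes standing in for kernel spinlocks (the lockdep annotation itself has no userspace equivalent):

#include <pthread.h>
#include <stdio.h>

#define NR_PT_PAGES 4

/* Outer lock: stands in for mm->page_table_lock. */
static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
/* Inner locks: stand in for the per-page split PTE ptlocks. */
static pthread_mutex_t ptl[NR_PT_PAGES] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Pin all PTE pages: always take the outer lock first, then each inner
 * lock. Keeping this order mm-wide is what makes holding many same-class
 * inner locks at once deadlock-free. */
static void pin_all_pt_pages(void)
{
	pthread_mutex_lock(&page_table_lock);
	for (int i = 0; i < NR_PT_PAGES; i++)
		pthread_mutex_lock(&ptl[i]);   /* "nests under" the outer lock */
	/* ... pin/mark each page table page here ... */
	for (int i = NR_PT_PAGES - 1; i >= 0; i--)
		pthread_mutex_unlock(&ptl[i]);
	pthread_mutex_unlock(&page_table_lock);
}

int main(void)
{
	pin_all_pt_pages();
	puts("pinned with outer-then-inner lock order");
	return 0;
}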
include/linux/mm.h

@@ -1316,7 +1316,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -1329,14 +1329,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else	/* !USE_SPLIT_PTLOCKS */
+#else	/* !USE_SPLIT_PTE_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)	do {} while (0)
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
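The mm.h hunk is the heart of the scheme: pte_lockptr() returns either the per-page lock stashed in struct page or the single mm->page_table_lock. A self-contained sketch of that compile-time selection (simplified types, and the lock lives directly in the struct here; the real kernel tucks it into page->private, as the comment in the diff notes):

#include <stdio.h>

#define NR_CPUS 8
#define CONFIG_SPLIT_PTLOCK_CPUS 4
/* Mirrors the renamed knob from mm_types.h below. */
#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

struct spinlock { int locked; };                 /* stand-in for spinlock_t */
struct mm_struct { struct spinlock page_table_lock; };
struct page { struct spinlock ptl; };            /* per-page lock when split */

/* Which lock guards this PTE page? Its own, or the mm-wide one. */
static struct spinlock *pte_lockptr(struct mm_struct *mm, struct page *page)
{
#if USE_SPLIT_PTE_PTLOCKS
	(void)mm;
	return &page->ptl;               /* fine-grained: scales with CPUs */
#else
	(void)page;
	return &mm->page_table_lock;     /* coarse: one lock for the whole mm */
#endif
}

int main(void)
{
	struct mm_struct mm = { {0} };
	struct page page = { {0} };
	printf("lock is %s\n",
	       pte_lockptr(&mm, &page) == &page.ptl ? "per-page" : "mm-wide");
	return 0;
}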
include/linux/mm_types.h

@@ -23,7 +23,7 @@
 
 struct address_space;
 
-#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -141,7 +141,7 @@ struct page {
 					 * indicates order in the buddy
 					 * system if PG_buddy is set.
 					 */
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
 		spinlock_t ptl;
 #endif
 		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
@@ -309,14 +309,14 @@ enum {
 	NR_MM_COUNTERS
 };
 
-#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
 #define SPLIT_RSS_COUNTING
 /* per-thread cached information, */
 struct task_rss_stat {
 	int events;	/* for synchronization threshold */
 	int count[NR_MM_COUNTERS];
 };
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
 
 struct mm_rss_stat {
 	atomic_long_t count[NR_MM_COUNTERS];
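The last hunk also renames the guard around SPLIT_RSS_COUNTING: with split PTE ptlocks there is no single lock serializing RSS accounting, so each thread batches updates in task_rss_stat and folds them into mm_rss_stat's atomic counters once a threshold of events is reached. A hedged sketch of that batching (the threshold value and helper name here are assumptions, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

#define NR_MM_COUNTERS 3
#define TASK_RSS_EVENTS_THRESH 64   /* assumed flush threshold */

struct mm_rss_stat { atomic_long count[NR_MM_COUNTERS]; };
struct task_rss_stat { int events; long count[NR_MM_COUNTERS]; };

/* Cheap, uncontended update: bump the thread-local cache, and only
 * touch the shared atomics every TASK_RSS_EVENTS_THRESH events. */
static void add_mm_counter_fast(struct mm_rss_stat *mm,
				struct task_rss_stat *ts, int member, long val)
{
	ts->count[member] += val;
	if (++ts->events >= TASK_RSS_EVENTS_THRESH) {
		for (int i = 0; i < NR_MM_COUNTERS; i++) {
			if (ts->count[i]) {
				atomic_fetch_add(&mm->count[i], ts->count[i]);
				ts->count[i] = 0;
			}
		}
		ts->events = 0;
	}
}

int main(void)
{
	struct mm_rss_stat mm = {0};
	struct task_rss_stat ts = {0};
	for (int i = 0; i < 200; i++)
		add_mm_counter_fast(&mm, &ts, 0, 1); /* e.g. one page mapped */
	printf("flushed so far: %ld (still cached: %ld)\n",
	       atomic_load(&mm.count[0]), ts.count[0]);
	return 0;
}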