linux-stable/arch/arm/lib/uaccess_with_memcpy.c
Russell King 8478132a87 Revert "arm: move exports to definitions"
This reverts commit 4dd1837d75.

Moving the exports for assembly code into the assembly files breaks
KSYM trimming, but also breaks modversions.

While fixing the KSYM trimming is trivial, fixing modversions brings
us to a technically worse position than we had prior to the above
change:

- We end up with the prototype definitions divorced from everything
  else, which means that adding or removing an assembly-level ksym
  export becomes more fragile:
  * when adding a new assembly ksym export, a missed prototype in
    asm-prototypes.h still results in a successful build if no module
    in the selected configuration makes use of the symbol.
  * when removing a ksym export, asm-prototypes.h is easily forgotten;
    with armksyms.c, you get a build error if you forget to touch
    the file.

- We end up with the same number of include files and prototypes;
  they're just in a header file instead of a .c file alongside their
  exports.  (A minimal sketch of the two styles follows this list.)
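
For illustration only (a hedged sketch using a hypothetical assembly
routine my_asm_func, not a symbol from this tree): with armksyms.c the
prototype and the export sit next to each other in one C file,

  /* arch/arm/kernel/armksyms.c */
  extern void my_asm_func(void);
  EXPORT_SYMBOL(my_asm_func);

whereas after the reverted change the EXPORT_SYMBOL() annotation lives
in the .S file itself, and a C prototype still has to be maintained
separately, purely so that genksyms can compute a modversion CRC:

  /* arch/arm/include/asm/asm-prototypes.h */
  extern void my_asm_func(void);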

As for lines of code, we don't get much of a size reduction:
 (original commit)
 47 files changed, 131 insertions(+), 208 deletions(-)
 (fix for ksyms trimming)
 7 files changed, 18 insertions(+), 5 deletions(-)
 (two fixes for modversions)
 1 file changed, 34 insertions(+)
 3 files changed, 7 insertions(+), 2 deletions(-)
which results in a net total of only 25 lines deleted.

As there does not seem to be much benefit from this change of approach,
revert the change.

Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
2016-11-23 10:00:03 +00:00

/*
 * linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 * Written by: Lennert Buytenhek and Nicolas Pitre
 * Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>
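
/*
 * Try to pin down the page containing the user address @_addr so that
 * the following memcpy()/memset() cannot fault.  On success, return 1
 * with the relevant page table lock held: *ptep is the mapped and
 * locked pte for a normal page (released with pte_unmap_unlock()), or
 * NULL with *ptlp set to mm->page_table_lock for a THP/HugeTLB page.
 * Return 0 if a write to the page would still fault (page missing,
 * read-only, or needing its young/dirty bits set); the caller must
 * then fault the page in and retry.
 */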
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;
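
	/*
	 * Normal page: map and lock the pte, and only accept it if the
	 * page is present and already marked young, writable and dirty,
	 * i.e. a direct write through the kernel mapping cannot fault
	 * and will not miss an accessed/dirty update.
	 */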
	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;
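
	/*
	 * With KERNEL_DS the "user" pointer is really a kernel address,
	 * so a plain memcpy() is sufficient and nothing needs pinning.
	 */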
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();
	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;
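
		/*
		 * If the page cannot be pinned (not present, read-only,
		 * or clean), fault it in by writing a zero byte with
		 * __put_user(), dropping mmap_sem around the fault when
		 * we are allowed to sleep, then try again.
		 */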
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}
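
		/* copy no further than the end of the current page */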
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}
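
/*
 * C entry point behind copy_to_user(): small copies go straight to the
 * assembly __copy_to_user_std(), larger ones take the page-pinning
 * memcpy path above.
 */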
unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		n = __copy_to_user_memcpy(to, from, n);
	}
	return n;
}
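
/*
 * Same page-pinning loop as __copy_to_user_memcpy() above, but zeroing
 * the destination with memset(); there is no atomic-context path here,
 * so mmap_sem is taken unconditionally.
 */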
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in __copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small but still)
 * would be implied by a runtime determined variable threshold, and
 * so far the measurement on concerned targets didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_treshold);

#endif