Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git, synced 2024-11-01 17:08:10 +00:00
commit 509eb76ebf
__my_cpu_offset is non-volatile, since we want its value to be cached when we access several per-cpu variables in a row with preemption disabled. This means that we rely on preempt_{en,dis}able to hazard with the operation via the barrier() macro, so that we can't end up migrating CPUs without reloading the per-cpu offset.

Unfortunately, GCC doesn't treat a "memory" clobber on a non-volatile asm block as a side-effect, and will happily re-order it before other memory clobbers (including those in preempt_disable()) and cache the value. This has been observed to break the cmpxchg logic in the slub allocator, leading to livelock in kmem_cache_alloc in mainline kernels.

This patch adds a dummy memory input operand to __my_cpu_offset, forcing it to be ordered with respect to the barrier() macro.

Cc: <stable@vger.kernel.org>
Cc: Rob Herring <rob.herring@calxeda.com>
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
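For illustration, a minimal sketch of the two forms of the TPIDRPRW read described above; the helper names are hypothetical and the snippets assume an ARMv7 target with GCC-style inline asm, so treat them as a sketch rather than the kernel's own code:

/*
 * Problematic form: the asm block is non-volatile, so GCC does not treat
 * its "memory" clobber as a side-effect and may hoist the read across
 * barrier() and cache the result over a preemption point.
 */
static inline unsigned long read_tpidrprw_unordered(void)
{
	unsigned long off;

	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
	return off;
}

/*
 * Fixed form: a dummy "Q" memory input operand referencing the stack makes
 * the asm depend on memory, so the "memory" clobber inside barrier() (and
 * hence inside preempt_{en,dis}able()) orders it and forces a reload.
 */
static inline unsigned long read_tpidrprw_ordered(void)
{
	unsigned long off;
	register unsigned long *sp asm ("sp");

	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
	return off;
}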
52 lines · 1.5 KiB · C
/*
 * Copyright 2012 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
static inline void set_my_cpu_offset(unsigned long off)
{
	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;
	register unsigned long *sp asm ("sp");

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
#else
#define set_my_cpu_offset(x) do {} while(0)

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>

#endif /* _ASM_ARM_PERCPU_H_ */
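For context, a hedged usage sketch of the kind of caller this ordering protects; read_this_cpu_counter and its __percpu argument are illustrative only and are not part of this header:

/*
 * Illustrative caller only. preempt_disable() and preempt_enable() both
 * contain barrier(), so with the "Q" operand in place the offset read
 * below can neither be hoisted above preempt_disable() nor satisfied from
 * a value cached before a possible CPU migration.
 */
static int read_this_cpu_counter(int __percpu *counter)
{
	int val;

	preempt_disable();
	val = *(int *)((void *)counter + __my_cpu_offset);
	preempt_enable();

	return val;
}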