Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2024-10-30 08:02:30 +00:00
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Benjamin Herrenschmidt:
 "Here are a few fixes for 3.6 that were piling up while I was away or
  busy (I was mostly MIA a week or two before San Diego).

  Some fixes from Anton fixing up issues with our relatively new DSCR
  control feature, and a few other fixes that are either regressions or
  bugs nasty enough to warrant not waiting."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Don't use __put_user() in patch_instruction
  powerpc: Make sure IPI handlers see data written by IPI senders
  powerpc: Restore correct DSCR in context switch
  powerpc: Fix DSCR inheritance in copy_thread()
  powerpc: Keep thread.dscr and thread.dscr_inherit in sync
  powerpc: Update DSCR on all CPUs when writing sysfs dscr_default
  powerpc/powernv: Always go into nap mode when CPU is offline
  powerpc: Give hypervisor decrementer interrupts their own handler
  powerpc/vphn: Fix arch_update_cpu_topology() return value
commit cb4f9a2964

15 changed files with 68 additions and 34 deletions
arch/powerpc/include/asm/processor.h
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
 
 #ifdef CONFIG_PSERIES_IDLE
 extern void update_smt_snooze_delay(int snooze);
arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
 	DEFINE(SIGSEGV, SIGSEGV);
 	DEFINE(NMI_MASK, NMI_MASK);
 	DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+	DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
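A note on the mechanism, since the assembly changes below depend on it: asm-offsets.c is never linked into the kernel. Its DEFINE() macro emits each value through an assembler marker, and the build scrapes the compiler's generated assembly into the generated asm-offsets.h header, which is how entry_64.S can reference THREAD_DSCR_INHERIT as a plain constant. A minimal sketch of the macro (the shape used by include/linux/kbuild.h; comment wording is mine):

    /* The "->" marker lines survive into the compiler's .s output, where
     * the kbuild scripts rewrite each one as
     *     #define THREAD_DSCR_INHERIT <offset>
     * in the generated asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))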
arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
 
 void doorbell_cause_ipi(int cpu, unsigned long data)
 {
+	/* Order previous accesses vs. msgsnd, which is treated as a store */
+	mb();
 	ppc_msgsnd(PPC_DBELL, 0, data);
 }
 
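This hunk, together with the smp.c and icp-hv.c hunks below, closes the same class of hole: the stores that fill in the IPI payload could still sit in the sender's store buffer when the target CPU takes the interrupt, so the handler reads stale data. A runnable userspace analogue of the required pairing, with C11 atomics standing in for mb() and the doorbell (all names here are mine, not kernel code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    static unsigned long payload;       /* the "message data" */
    static atomic_int doorbell;         /* stands in for msgsnd/H_IPI */

    static int receiver(void *arg)
    {
            /* spin until the "interrupt" arrives */
            while (!atomic_load_explicit(&doorbell, memory_order_acquire))
                    ;
            /* acquire pairs with the sender's release: payload is visible */
            printf("got %lu\n", payload);
            return 0;
    }

    int main(void)
    {
            thrd_t t;

            thrd_create(&t, receiver, NULL);
            payload = 42;               /* write the data first... */
            /* ...then ring the doorbell; the release fence plays the
             * role of the mb() added in the hunk above */
            atomic_store_explicit(&doorbell, 1, memory_order_release);
            thrd_join(t, NULL);
            return 0;
    }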
arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
 	li	r3,0
 	b	syscall_exit
 
+	.section	".toc","aw"
+DSCR_DEFAULT:
+	.tc dscr_default[TC],dscr_default
+
+	.section	".text"
+
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
arch/powerpc/kernel/entry_64.S
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	mr	r1,r8		/* start using new stack pointer */
 	std	r7,PACAKSAVE(r13)
 
-	ld	r6,_CCR(r1)
-	mtcrf	0xFF,r6
-
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	ld	r0,THREAD_VRSAVE(r4)
arch/powerpc/kernel/entry_64.S
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+	lwz	r6,THREAD_DSCR_INHERIT(r4)
+	ld	r7,DSCR_DEFAULT@toc(2)
 	ld	r0,THREAD_DSCR(r4)
-	cmpd	r0,r25
-	beq	1f
+	cmpwi	r6,0
+	bne	1f
+	ld	r0,0(r7)
+1:	cmpd	r0,r25
+	beq	2f
 	mtspr	SPRN_DSCR,r0
-1:
+2:
 END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 #endif
 
+	ld	r6,_CCR(r1)
+	mtcrf	0xFF,r6
+
 	/* r3-r13 are destroyed -- Cort */
 	REST_8GPRS(14, r1)
 	REST_10GPRS(22, r1)
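The assembly is dense, so here is the same decision in C (my translation of the hunk above, not code from the patch; prev_dscr stands for r25, the outgoing thread's DSCR value, and dscr_default is loaded through the new DSCR_DEFAULT TOC entry):

    /* sketch: pick the incoming thread's DSCR at context switch */
    unsigned long next_dscr = next->thread.dscr;

    if (!next->thread.dscr_inherit)     /* thread never set its own DSCR, */
            next_dscr = dscr_default;   /* so it follows the system default */

    if (next_dscr != prev_dscr)         /* skip the SPR write if unchanged */
            mtspr(SPRN_DSCR, next_dscr);

Previously the code compared only thread.dscr against r25, so an inheriting thread kept a stale copy of dscr_default captured at fork rather than picking up the current default.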
arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 
 	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
-	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
 	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
arch/powerpc/kernel/exceptions-64s.S
@@ -486,6 +486,7 @@ machine_check_common:
 
 	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
 	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
 	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
 	lwz	r4,ADDROFF(powersave_nap)(r3)
 	cmpwi	0,r4,0
 	beqlr
+	/* fall through */
 
+_GLOBAL(power7_nap)
 	/* NAP is a state loss, we create a regs frame on the
 	 * stack, fill it up with the state we care about and
 	 * stick a pointer to it in PACAR1. We really only
arch/powerpc/kernel/process.c
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif /* CONFIG_PPC_STD_MMU_64 */
 #ifdef CONFIG_PPC64
 	if (cpu_has_feature(CPU_FTR_DSCR)) {
-		if (current->thread.dscr_inherit) {
-			p->thread.dscr_inherit = 1;
-			p->thread.dscr = current->thread.dscr;
-		} else if (0 != dscr_default) {
-			p->thread.dscr_inherit = 1;
-			p->thread.dscr = dscr_default;
-		} else {
-			p->thread.dscr_inherit = 0;
-			p->thread.dscr = 0;
-		}
+		p->thread.dscr_inherit = current->thread.dscr_inherit;
+		p->thread.dscr = current->thread.dscr;
 	}
 #endif
 
arch/powerpc/kernel/smp.c
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 	char *message = (char *)&info->messages;
 
+	/*
+	 * Order previous accesses before accesses in the IPI handler.
+	 */
+	smp_mb();
 	message[msg] = 1;
-	mb();
+	/*
+	 * cause_ipi functions are required to include a full barrier
+	 * before doing whatever causes the IPI.
+	 */
 	smp_ops->cause_ipi(cpu, info->data);
 }
 
arch/powerpc/kernel/smp.c
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
 	mb();	/* order any irq clear */
 
 	do {
-		all = xchg_local(&info->messages, 0);
+		all = xchg(&info->messages, 0);
 
 #ifdef __BIG_ENDIAN
 		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
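The xchg_local() to xchg() change is the receiver-side half of the same ordering fix: on powerpc, xchg() implies full barrier semantics, while xchg_local() gives no ordering against other CPUs, so the demuxed handlers could still read stale data after seeing the message bit. A compilable analogue (names are mine):

    #include <stdatomic.h>

    static _Atomic unsigned long ipi_messages; /* stands in for info->messages */

    unsigned long demux_messages(void)
    {
            /* kernel xchg(): a full-barrier exchange, so loads done by the
             * IPI handlers cannot be reordered before the message word is
             * consumed */
            return atomic_exchange(&ipi_messages, 0);

            /* the old xchg_local() behaved more like
             *     atomic_exchange_explicit(&ipi_messages, 0,
             *                              memory_order_relaxed);
             * which is fine for data only ever touched by one CPU, but
             * wrong once another CPU is the writer */
    }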
arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
 	return sprintf(buf, "%lx\n", dscr_default);
 }
 
+static void update_dscr(void *dummy)
+{
+	if (!current->thread.dscr_inherit) {
+		current->thread.dscr = dscr_default;
+		mtspr(SPRN_DSCR, dscr_default);
+	}
+}
+
 static ssize_t __used store_dscr_default(struct device *dev,
 		struct device_attribute *attr, const char *buf,
 		size_t count)
arch/powerpc/kernel/sysfs.c
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
 		return -EINVAL;
 	dscr_default = val;
 
+	on_each_cpu(update_dscr, NULL, 1);
+
 	return count;
 }
 
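For context on the helper used above: on_each_cpu() is a long-standing generic kernel primitive, not something this series adds. The contract the hunk relies on, spelled out (comment wording is mine):

    /* on_each_cpu(func, info, wait) runs func(info) on every online CPU:
     * directly on the calling CPU and via IPI on the others. With
     * wait == 1 the call blocks until every CPU has finished, so each
     * CPU that still inherits the default has reloaded SPRN_DSCR before
     * the sysfs write returns. */
    on_each_cpu(update_dscr, NULL, 1);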
arch/powerpc/kernel/time.c
@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
 	trace_timer_interrupt_exit(regs);
 }
 
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest.  We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
 #ifdef CONFIG_SUSPEND
 static void generic_suspend_disable_irqs(void)
 {
arch/powerpc/kernel/traps.c
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
 			cpu_has_feature(CPU_FTR_DSCR)) {
 		PPC_WARN_EMULATED(mtdscr, regs);
 		rd = (instword >> 21) & 0x1f;
-		mtspr(SPRN_DSCR, regs->gpr[rd]);
+		current->thread.dscr = regs->gpr[rd];
 		current->thread.dscr_inherit = 1;
+		mtspr(SPRN_DSCR, current->thread.dscr);
 		return 0;
 	}
 #endif
arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 {
 	int err;
 
-	err = __put_user(instr, addr);
+	__put_user_size(instr, addr, 4, err);
 	if (err)
 		return err;
 	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
arch/powerpc/mm/numa.c
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,
 
 /*
  * Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
  */
 int arch_update_cpu_topology(void)
 {
-	int cpu, nid, old_nid;
+	int cpu, nid, old_nid, changed = 0;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
 	struct device *dev;
 
arch/powerpc/mm/numa.c
@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
 		dev = get_cpu_device(cpu);
 		if (dev)
 			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+		changed = 1;
 	}
 
-	return 1;
+	return changed;
 }
 
 static void topology_work_fn(struct work_struct *work)
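Why the return value matters: the generic scheduler treats a nonzero return from arch_update_cpu_topology() as "the NUMA topology changed" and rebuilds the scheduling domains, so hard-coding 1 forced that expensive rebuild even when VPHN reported no movement. A sketch of the caller's shape (illustrative only; the real call site lives in the scheduler code and differs in detail):

    /* hypothetical caller shape, not copied from the kernel */
    if (arch_update_cpu_topology())
            rebuild_sched_domains();  /* now skipped when nothing moved */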
arch/powerpc/platforms/powernv/smp.c
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
 {
 	unsigned int cpu;
 
-	/* If powersave_nap is enabled, use NAP mode, else just
-	 * spin aimlessly
-	 */
-	if (!powersave_nap) {
-		generic_mach_cpu_die();
-		return;
-	}
-
 	/* Standard hot unplug procedure */
 	local_irq_disable();
 	idle_task_exit();
arch/powerpc/platforms/powernv/smp.c
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 	while (!generic_check_cpu_restart(cpu)) {
-		power7_idle();
+		power7_nap();
 		if (!generic_check_cpu_restart(cpu)) {
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 			/* We may be getting an IPI, so we re-enable
arch/powerpc/sysdev/xics/icp-hv.c
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
 static inline void icp_hv_set_qirr(int n_cpu , u8 value)
 {
 	int hw_cpu = get_hard_smp_processor_id(n_cpu);
-	long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+	long rc;
+
+	/* Make sure all previous accesses are ordered before IPI sending */
+	mb();
+	rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
 	if (rc != H_SUCCESS) {
 		pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
 		       "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);