2019-05-27 06:55:05 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2009-06-01 18:13:57 +00:00
|
|
|
/*
|
|
|
|
*
|
|
|
|
* Copyright (C) 2007 Alan Stern
|
|
|
|
* Copyright (C) 2009 IBM Corporation
|
2009-09-09 17:22:48 +00:00
|
|
|
* Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
|
2009-11-23 15:47:13 +00:00
|
|
|
*
|
|
|
|
* Authors: Alan Stern <stern@rowland.harvard.edu>
|
|
|
|
* K.Prasad <prasad@linux.vnet.ibm.com>
|
|
|
|
* Frederic Weisbecker <fweisbec@gmail.com>
|
2009-06-01 18:13:57 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
|
|
|
|
* using the CPU's debug registers.
|
|
|
|
*/
|
|
|
|
|
2009-09-09 17:22:48 +00:00
|
|
|
#include <linux/perf_event.h>
|
|
|
|
#include <linux/hw_breakpoint.h>
|
2009-06-01 18:13:57 +00:00
|
|
|
#include <linux/irqflags.h>
|
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <linux/kallsyms.h>
|
2015-07-31 03:32:40 +00:00
|
|
|
#include <linux/kprobes.h>
|
2009-06-01 18:13:57 +00:00
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/kdebug.h>
|
|
|
|
#include <linux/kernel.h>
|
2016-07-14 00:18:56 +00:00
|
|
|
#include <linux/export.h>
|
2009-06-01 18:13:57 +00:00
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
|
|
|
|
#include <asm/hw_breakpoint.h>
|
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/debugreg.h>
|
2016-07-14 00:18:56 +00:00
|
|
|
#include <asm/user.h>
|
2020-05-29 21:27:30 +00:00
|
|
|
#include <asm/desc.h>
|
2020-05-29 21:27:32 +00:00
|
|
|
#include <asm/tlbflush.h>
|
2009-06-01 18:13:57 +00:00
|
|
|
|
2009-09-09 17:22:48 +00:00
|
|
|
/*
 * Per cpu debug control register value.
 * Software shadow of the hardware DR7 register for this CPU; exported
 * (presumably for out-of-tree/modular users such as KVM — confirm callers).
 */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
EXPORT_PER_CPU_SYMBOL(cpu_dr7);

/* Per cpu debug address registers values (shadows of DR0..DR3) */
static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each cpus
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
|
2009-06-01 18:13:57 +00:00
|
|
|
|
|
|
|
|
2009-11-26 05:04:38 +00:00
|
|
|
static inline unsigned long
|
|
|
|
__encode_dr7(int drnum, unsigned int len, unsigned int type)
|
2009-06-01 18:13:57 +00:00
|
|
|
{
|
|
|
|
unsigned long bp_info;
|
|
|
|
|
|
|
|
bp_info = (len | type) & 0xf;
|
|
|
|
bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
|
2009-11-26 05:04:38 +00:00
|
|
|
bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));
|
|
|
|
|
2009-06-01 18:13:57 +00:00
|
|
|
return bp_info;
|
|
|
|
}
|
|
|
|
|
2009-11-26 05:04:38 +00:00
|
|
|
/*
|
|
|
|
* Encode the length, type, Exact, and Enable bits for a particular breakpoint
|
|
|
|
* as stored in debug register 7.
|
|
|
|
*/
|
|
|
|
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
|
|
|
|
{
|
|
|
|
return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
|
|
|
|
}
|
|
|
|
|
2009-09-09 17:22:48 +00:00
|
|
|
/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7.  Return the "enabled" status.
 */
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
{
	/* Shift this breakpoint's 4-bit len/type control field down to bit 0. */
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	/*
	 * OR the raw 2-bit hardware encodings with 0x40/0x80 — presumably the
	 * base values of the X86_BREAKPOINT_LEN_* and X86_BREAKPOINT_* type
	 * constants (see asm/hw_breakpoint.h) — TODO confirm against header.
	 */
	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;

	/* Low two bits per breakpoint in DR7 are the local/global enables. */
	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
|
|
|
|
|
|
|
|
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint. Eventually we enable it in the debug control register.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	/* Per-cpu state below must not race with an interrupt/NMI path. */
	lockdep_assert_irqs_disabled();

	/* Claim the first free debug address register slot on this CPU. */
	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return -EBUSY;

	/* Program DRi and keep the per-cpu shadow in sync. */
	set_debugreg(info->address, i);
	__this_cpu_write(cpu_debugreg[i], info->address);

	dr7 = this_cpu_ptr(&cpu_dr7);
	*dr7 |= encode_dr7(i, info->len, info->type);

	/*
	 * Ensure we first write cpu_dr7 before we set the DR7 register.
	 * This ensures an NMI never see cpu_dr7 0 when DR7 is not.
	 */
	barrier();

	set_debugreg(*dr7, 7);
	/* AMD range breakpoint: program the address mask for this slot. */
	if (info->mask)
		set_dr_addr_mask(info->mask, i);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long dr7;
	int i;

	/* Per-cpu state below must not race with an interrupt/NMI path. */
	lockdep_assert_irqs_disabled();

	/* Find the slot this breakpoint occupies on this CPU and free it. */
	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return;

	/* Clear this slot's control/enable bits in the shadow value. */
	dr7 = this_cpu_read(cpu_dr7);
	dr7 &= ~__encode_dr7(i, info->len, info->type);

	set_debugreg(dr7, 7);
	/* AMD range breakpoint: clear the address mask for this slot. */
	if (info->mask)
		set_dr_addr_mask(0, i);

	/*
	 * Ensure the write to cpu_dr7 is after we've set the DR7 register.
	 * This ensures an NMI never see cpu_dr7 0 when DR7 is not.
	 */
	barrier();

	this_cpu_write(cpu_dr7, dr7);
}
|
|
|
|
|
2018-06-26 02:58:49 +00:00
|
|
|
static int arch_bp_generic_len(int x86_len)
|
2009-06-01 18:13:57 +00:00
|
|
|
{
|
2018-06-26 02:58:49 +00:00
|
|
|
switch (x86_len) {
|
|
|
|
case X86_BREAKPOINT_LEN_1:
|
|
|
|
return HW_BREAKPOINT_LEN_1;
|
|
|
|
case X86_BREAKPOINT_LEN_2:
|
|
|
|
return HW_BREAKPOINT_LEN_2;
|
|
|
|
case X86_BREAKPOINT_LEN_4:
|
|
|
|
return HW_BREAKPOINT_LEN_4;
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
case X86_BREAKPOINT_LEN_8:
|
|
|
|
return HW_BREAKPOINT_LEN_8;
|
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2009-06-01 18:13:57 +00:00
|
|
|
}
|
|
|
|
|
2009-09-09 17:22:48 +00:00
|
|
|
/*
 * Translate the arch-specific (x86) breakpoint length and type encodings
 * into their generic perf counterparts through *gen_len / *gen_type.
 * Returns 0 on success, -EINVAL for an invalid combination.
 */
int arch_bp_generic_fields(int x86_len, int x86_type,
			   int *gen_len, int *gen_type)
{
	int len;

	/* Type */
	switch (x86_type) {
	case X86_BREAKPOINT_EXECUTE:
		/* Execute breakpoints only come with the undefined LEN_X. */
		if (x86_len != X86_BREAKPOINT_LEN_X)
			return -EINVAL;

		*gen_type = HW_BREAKPOINT_X;
		*gen_len = sizeof(long);
		return 0;
	case X86_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case X86_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	len = arch_bp_generic_len(x86_len);
	if (len < 0)
		return -EINVAL;
	*gen_len = len;

	return 0;
}
|
|
|
|
|
2018-06-26 02:58:49 +00:00
|
|
|
/*
 * Check for virtual address in kernel space.
 * Returns nonzero if any byte of the breakpoint range [va, va + len - 1]
 * lies at or above TASK_SIZE_MAX.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned long va;
	int len;

	va = hw->address;
	/* hw->len was validated earlier, so a negative result is a bug. */
	len = arch_bp_generic_len(hw->len);
	WARN_ON_ONCE(len < 0);

	/*
	 * We don't need to worry about va + len - 1 overflowing:
	 * we already require that va is aligned to a multiple of len.
	 */
	return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
}
|
2009-09-09 17:22:48 +00:00
|
|
|
|
2020-05-29 21:27:29 +00:00
|
|
|
/*
 * Checks whether the range [addr, end], overlaps the area [base, base + size).
 */
static inline bool within_area(unsigned long addr, unsigned long end,
			       unsigned long base, unsigned long size)
{
	/* Disjoint iff the whole range ends below base ... */
	if (end < base)
		return false;

	/* ... or starts at/above the area's (exclusive) upper bound. */
	return addr < base + size;
}
|
|
|
|
|
2020-02-24 12:24:58 +00:00
|
|
|
/*
 * Checks whether the range from addr to end, inclusive, overlaps the fixed
 * mapped CPU entry area range or other ranges used for CPU entry.
 */
static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
{
	int cpu;

	/* CPU entry area is always used for CPU entry */
	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
			CPU_ENTRY_AREA_TOTAL_SIZE))
		return true;

	/*
	 * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
	 * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
	 */
#ifdef CONFIG_SMP
	if (within_area(addr, end, (unsigned long)__per_cpu_offset,
			sizeof(unsigned long) * nr_cpu_ids))
		return true;
#else
	if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
			sizeof(pcpu_unit_offsets)))
		return true;
#endif

	/* Per-CPU structures touched on the entry path, for every CPU. */
	for_each_possible_cpu(cpu) {
		/* The original rw GDT is being used after load_direct_gdt() */
		if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
				GDT_SIZE))
			return true;

		/*
		 * cpu_tss_rw is not directly referenced by hardware, but
		 * cpu_tss_rw is also used in CPU entry code,
		 */
		if (within_area(addr, end,
				(unsigned long)&per_cpu(cpu_tss_rw, cpu),
				sizeof(struct tss_struct)))
			return true;

		/*
		 * cpu_tlbstate.user_pcid_flush_mask is used for CPU entry.
		 * If a data breakpoint on it, it will cause an unwanted #DB.
		 * Protect the full cpu_tlbstate structure to be sure.
		 */
		if (within_area(addr, end,
				(unsigned long)&per_cpu(cpu_tlbstate, cpu),
				sizeof(struct tlb_state)))
			return true;

		/*
		 * When in guest (X86_FEATURE_HYPERVISOR), local_db_save()
		 * will read per-cpu cpu_dr7 before clear dr7 register.
		 */
		if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
				sizeof(cpu_dr7)))
			return true;
	}

	return false;
}
|
|
|
|
|
2018-06-26 02:58:50 +00:00
|
|
|
/*
 * Fill in @hw (address, mask, type, len) from the user-supplied @attr,
 * rejecting ranges that wrap, overlap CPU-entry data, or use an
 * unsupported type/length combination.
 */
static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	unsigned long bp_end;

	/* Reject a range whose last byte wraps around the address space. */
	bp_end = attr->bp_addr + attr->bp_len - 1;
	if (bp_end < attr->bp_addr)
		return -EINVAL;

	/*
	 * Prevent any breakpoint of any type that overlaps the CPU
	 * entry area and data. This protects the IST stacks and also
	 * reduces the chance that we ever find out what happens if
	 * there's a data breakpoint on the GDT, IDT, or TSS.
	 */
	if (within_cpu_entry(attr->bp_addr, bp_end))
		return -EINVAL;

	hw->address = attr->bp_addr;
	hw->mask = 0;

	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_W:
		hw->type = X86_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		hw->type = X86_BREAKPOINT_RW;
		break;
	case HW_BREAKPOINT_X:
		/*
		 * We don't allow kernel breakpoints in places that are not
		 * acceptable for kprobes. On non-kprobes kernels, we don't
		 * allow kernel breakpoints at all.
		 */
		if (attr->bp_addr >= TASK_SIZE_MAX) {
			if (within_kprobe_blacklist(attr->bp_addr))
				return -EINVAL;
		}

		hw->type = X86_BREAKPOINT_EXECUTE;
		/*
		 * x86 inst breakpoints need to have a specific undefined len.
		 * But we still need to check userspace is not trying to setup
		 * an unsupported length, to get a range breakpoint for example.
		 */
		if (attr->bp_len == sizeof(long)) {
			hw->len = X86_BREAKPOINT_LEN_X;
			return 0;
		}
		/* Any other length with HW_BREAKPOINT_X is invalid. */
		fallthrough;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->len = X86_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->len = X86_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case HW_BREAKPOINT_LEN_8:
		hw->len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		/* AMD range breakpoint */
		if (!is_power_of_2(attr->bp_len))
			return -EINVAL;
		if (attr->bp_addr & (attr->bp_len - 1))
			return -EINVAL;

		if (!boot_cpu_has(X86_FEATURE_BPEXT))
			return -EOPNOTSUPP;

		/*
		 * It's impossible to use a range breakpoint to fake out
		 * user vs kernel detection because bp_len - 1 can't
		 * have the high bit set. If we ever allow range instruction
		 * breakpoints, then we'll have to check for kprobe-blacklisted
		 * addresses anywhere in the range.
		 */
		hw->mask = attr->bp_len - 1;
		hw->len = X86_BREAKPOINT_LEN_1;
	}

	return 0;
}
|
2014-05-29 15:26:50 +00:00
|
|
|
|
2009-09-09 17:22:48 +00:00
|
|
|
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	unsigned int align;
	int ret;


	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		return ret;

	/* Derive the required alignment mask from the encoded length. */
	switch (hw->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;
		/* AMD range breakpoint: the mask itself defines alignment. */
		if (hw->mask)
			align = hw->mask;
		break;
	case X86_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case X86_BREAKPOINT_LEN_4:
		align = 3;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		align = 7;
		break;
#endif
	default:
		/* arch_build_bp_info() only produces the lengths above. */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (hw->address & align)
		return -EINVAL;

	return 0;
}
|
|
|
|
|
2009-09-09 17:22:48 +00:00
|
|
|
/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < HBP_NUM; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}

	/* Clear the task's software debug state along with the events. */
	t->virtual_dr6 = 0;
	t->ptrace_dr7 = 0;
}
|
|
|
|
|
2009-09-09 17:22:48 +00:00
|
|
|
/*
 * Reload this CPU's hardware debug registers from the per-cpu shadow
 * copies (cpu_debugreg[0..3] and cpu_dr7), resetting DR6 to its
 * reserved/quiescent value along the way.
 */
void hw_breakpoint_restore(void)
{
	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
	set_debugreg(DR6_RESERVED, 6);
	set_debugreg(__this_cpu_read(cpu_dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
|
2009-06-01 18:13:57 +00:00
|
|
|
|
|
|
|
/*
 * Handle debug exception notifications.
 *
 * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
 *
 * NOTIFY_DONE returned if one of the following conditions is true.
 * i) When the causative address is from user-space and the exception
 * is a valid one, i.e. not triggered as a result of lazy debug register
 * switching
 * ii) When there are more bits than trap<n> set in DR6 register (such
 * as BD, BS or BT) indicating that more than one debug condition is
 * met and requires some more action in do_debug().
 *
 * NOTIFY_STOP returned for all other cases
 *
 */
static int hw_breakpoint_handler(struct die_args *args)
{
	int i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long *dr6_p;
	unsigned long dr6;
	bool bpx;

	/* The DR6 value is pointed by args->err */
	dr6_p = (unsigned long *)ERR_PTR(args->err);
	dr6 = *dr6_p;

	/* Do an early return if no trap bits are set in DR6 */
	if ((dr6 & DR_TRAP_BITS) == 0)
		return NOTIFY_DONE;

	/* Handle all the breakpoints that were triggered */
	for (i = 0; i < HBP_NUM; ++i) {
		if (likely(!(dr6 & (DR_TRAP0 << i))))
			continue;

		/* No event installed in this slot on this CPU: not ours. */
		bp = this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;

		/*
		 * TF and data breakpoints are traps and can be merged, however
		 * instruction breakpoints are faults and will be raised
		 * separately.
		 *
		 * However DR6 can indicate both TF and instruction
		 * breakpoints. In that case take TF as that has precedence and
		 * delay the instruction breakpoint for the next exception.
		 */
		if (bpx && (dr6 & DR_STEP))
			continue;

		/*
		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
		 * exception handling
		 */
		(*dr6_p) &= ~(DR_TRAP0 << i);

		perf_bp_event(bp, args->regs);

		/*
		 * Set up resume flag to avoid breakpoint recursion when
		 * returning back to origin.
		 */
		if (bpx)
			args->regs->flags |= X86_EFLAGS_RF;
	}

	/*
	 * Further processing in do_debug() is needed for a) user-space
	 * breakpoints (to generate signals) and b) when the system has
	 * taken exception due to multiple causes
	 */
	if ((current->thread.virtual_dr6 & DR_TRAP_BITS) ||
	    (dr6 & (~DR_TRAP_BITS)))
		rc = NOTIFY_DONE;

	return rc;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle debug exception notifications.
|
|
|
|
*/
|
2014-04-17 08:18:07 +00:00
|
|
|
int hw_breakpoint_exceptions_notify(
|
2009-06-01 18:13:57 +00:00
|
|
|
struct notifier_block *unused, unsigned long val, void *data)
|
|
|
|
{
|
|
|
|
if (val != DIE_DEBUG)
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
|
|
|
|
return hw_breakpoint_handler(data);
|
|
|
|
}
|
2009-09-09 17:22:48 +00:00
|
|
|
|
|
|
|
/*
 * PMU read callback for breakpoint events.
 * Intentionally a no-op so far; nothing to sample from the debug
 * registers on read.
 */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
|