Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Commit 42dec9a936
Merge tag 'perf-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf event updates from Ingo Molnar:

 - Improve Intel uncore PMU support:

     - Parse uncore 'discovery tables' - a new hardware capability
       enumeration method introduced on the latest Intel platforms. This
       table is in a well-defined PCI namespace location and is read via
       MMIO. It is organized in an rbtree.

       These uncore tables will allow the discovery of standard counter
       blocks, but fancier counters still need to be enumerated
       explicitly.

     - Add Alder Lake support

     - Improve IIO stacks to PMON mapping support on Skylake servers

 - Add Intel Alder Lake PMU support - which requires the introduction of
   'hybrid' CPUs and PMUs. Alder Lake is a mix of Golden Cove ('big') and
   Gracemont ('small' - Atom derived) cores.

   The CPU-side feature set is entirely symmetrical - but on the PMU side
   there's core type dependent PMU functionality.

 - Reduce data loss with CPU level hardware tracing on Intel PT / AUX
   profiling, by fixing the AUX allocation watermark logic.

 - Improve ring buffer allocation on NUMA systems

 - Put 'struct perf_event' into their separate kmem_cache pool

 - Add support for synchronous signals for select perf events. The
   immediate motivation is to support low-overhead sampling-based race
   detection for user-space code. The feature consists of the following
   main changes:

     - Add thread-only event inheritance via
       perf_event_attr::inherit_thread, which limits inheritance of
       events to CLONE_THREAD.

     - Add the ability for events to not leak through exec(), via
       perf_event_attr::remove_on_exec.

     - Allow the generation of SIGTRAP via perf_event_attr::sigtrap,
       extend siginfo with an u64 ::si_perf, and add the breakpoint
       information to ::si_addr and ::si_perf if the event is
       PERF_TYPE_BREAKPOINT.

       The siginfo support is adequate for breakpoints right now - but
       the new field can be used to introduce support for other types of
       metadata passed over siginfo as well.

 - Misc fixes, cleanups and smaller updates.

* tag 'perf-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (53 commits)
  signal, perf: Add missing TRAP_PERF case in siginfo_layout()
  signal, perf: Fix siginfo_t by avoiding u64 on 32-bit architectures
  perf/x86: Allow for 8<num_fixed_counters<16
  perf/x86/rapl: Add support for Intel Alder Lake
  perf/x86/cstate: Add Alder Lake CPU support
  perf/x86/msr: Add Alder Lake CPU support
  perf/x86/intel/uncore: Add Alder Lake support
  perf: Extend PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
  perf/x86/intel: Add Alder Lake Hybrid support
  perf/x86: Support filter_match callback
  perf/x86/intel: Add attr_update for Hybrid PMUs
  perf/x86: Add structures for the attributes of Hybrid PMUs
  perf/x86: Register hybrid PMUs
  perf/x86: Factor out x86_pmu_show_pmu_cap
  perf/x86: Remove temporary pmu assignment in event_init
  perf/x86/intel: Factor out intel_pmu_check_extra_regs
  perf/x86/intel: Factor out intel_pmu_check_event_constraints
  perf/x86/intel: Factor out intel_pmu_check_num_counters
  perf/x86: Hybrid PMU support for extra_regs
  perf/x86: Hybrid PMU support for event constraints
  ...
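The synchronous-signal items above are user-visible perf_event_attr bits. Below is a minimal, illustrative userspace sketch (not code from the merged series) that arms a hardware write-breakpoint with attr.sigtrap set, so each write is reported as a synchronous SIGTRAP to the faulting thread. It assumes Linux 5.13+ UAPI headers that define the sigtrap, remove_on_exec and inherit_thread bits.

/*
 * Illustrative sketch only: arm a hardware write-breakpoint on 'watched'
 * and ask the kernel to report each hit as a synchronous SIGTRAP via the
 * new perf_event_attr bits described in the pull request above.
 */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile long watched;		/* variable guarded by the breakpoint */
static void *volatile trap_addr;	/* filled in by the SIGTRAP handler */

static void on_sigtrap(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	/* For PERF_TYPE_BREAKPOINT, si_addr holds the breakpoint address;
	 * the new u64 si_perf field (where the libc exposes it) carries
	 * perf_event_attr::sig_data. */
	trap_addr = info->si_addr;
}

int main(void)
{
	struct sigaction sa;
	struct perf_event_attr attr;
	int fd;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sigtrap;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGTRAP, &sa, NULL);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len = sizeof(watched);
	attr.sample_period = 1;		/* signal on every hit */
	attr.sigtrap = 1;		/* deliver a synchronous SIGTRAP */
	attr.remove_on_exec = 1;	/* do not leak across exec() */
	attr.inherit = 1;
	attr.inherit_thread = 1;	/* inherit only across CLONE_THREAD */

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;			/* triggers the breakpoint */
	printf("SIGTRAP reported write at %p\n", trap_addr);

	close(fd);
	return 0;
}

The handler only records si_addr; whether the u64 si_perf field is visible as a siginfo_t member depends on the libc headers in use, so it is left to a comment here.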
75 lines | 1.9 KiB | C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPU_H
#define _ASM_X86_CPU_H

#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>

#ifdef CONFIG_SMP

extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

static inline void prefill_possible_map(void) {}

#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
#define cpu_acpi_id(cpu)		0
#define safe_smp_processor_id()		0

#endif /* CONFIG_SMP */

struct x86_cpu {
	struct cpu cpu;
};

#ifdef CONFIG_HOTPLUG_CPU
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
extern void start_cpu0(void);
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
extern int _debug_hotplug_cpu(int cpu, int action);
#endif
#endif

int mwait_usable(const struct cpuinfo_x86 *);

unsigned int x86_family(unsigned int sig);
unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig);
#ifdef CONFIG_CPU_SUP_INTEL
extern void __init sld_setup(struct cpuinfo_x86 *c);
extern void switch_to_sld(unsigned long tifn);
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
extern bool handle_guest_split_lock(unsigned long ip);
extern void handle_bus_lock(struct pt_regs *regs);
u8 get_this_hybrid_cpu_type(void);
#else
static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
static inline void switch_to_sld(unsigned long tifn) {}
static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	return false;
}

static inline bool handle_guest_split_lock(unsigned long ip)
{
	return false;
}

static inline void handle_bus_lock(struct pt_regs *regs) {}

static inline u8 get_this_hybrid_cpu_type(void)
{
	return 0;
}
#endif
#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
#endif
#endif /* _ASM_X86_CPU_H */
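The x86_family(), x86_model() and x86_stepping() helpers declared in this header take a raw CPUID signature (leaf 1, EAX). As a rough illustration of what such a signature contains, here is a small userspace sketch (not the kernel's implementation) that decodes it using the standard extended-family/extended-model layout; __get_cpuid() comes from the compiler's <cpuid.h>.

/*
 * Illustrative sketch: decode an x86 CPUID signature (CPUID leaf 1, EAX)
 * into the family/model/stepping values that helpers such as
 * x86_family()/x86_model()/x86_stepping() operate on.
 */
#include <cpuid.h>
#include <stdio.h>

static unsigned int sig_family(unsigned int sig)
{
	unsigned int family = (sig >> 8) & 0xf;

	if (family == 0xf)			/* add extended family */
		family += (sig >> 20) & 0xff;
	return family;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int model = (sig >> 4) & 0xf;

	if (sig_family(sig) >= 0x6)		/* add extended model */
		model += ((sig >> 16) & 0xf) << 4;
	return model;
}

static unsigned int sig_stepping(unsigned int sig)
{
	return sig & 0xf;
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("family 0x%x, model 0x%x, stepping 0x%x\n",
	       sig_family(eax), sig_model(eax), sig_stepping(eax));
	return 0;
}

Note that the hybrid core type returned by get_this_hybrid_cpu_type() on Alder Lake comes from a separate CPUID leaf (0x1A), not from this signature.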