Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Paolo Abeni <pabeni@redhat.com>
2023-09-21 21:49:45 +02:00
commit e9cbc89067
396 changed files with 3550 additions and 2015 deletions

View File

@@ -7,9 +7,9 @@ AX.25
 To use the amateur radio protocols within Linux you will need to get a
 suitable copy of the AX.25 Utilities. More detailed information about
 AX.25, NET/ROM and ROSE, associated programs and utilities can be
-found on http://www.linux-ax25.org.
+found on https://linux-ax25.in-berlin.de.
 
-There is an active mailing list for discussing Linux amateur radio matters
+There is a mailing list for discussing Linux amateur radio matters
 called linux-hams@vger.kernel.org. To subscribe to it, send a message to
 majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
 of the message, the subject field is ignored. You don't need to be

View File

@@ -251,6 +251,7 @@ an involved disclosed party. The current ambassadors list:
   IBM Z		Christian Borntraeger <borntraeger@de.ibm.com>
   Intel		Tony Luck <tony.luck@intel.com>
   Qualcomm	Trilok Soni <tsoni@codeaurora.org>
+  RISC-V	Palmer Dabbelt <palmer@dabbelt.com>
   Samsung	Javier González <javier.gonz@samsung.com>
   Microsoft	James Morris <jamorris@linux.microsoft.com>

View File

@@ -74,8 +74,8 @@ topology based on those information. When the device is older and
 doesn't respond to the new UMP inquiries, the driver falls back and
 builds the topology based on Group Terminal Block (GTB) information
 from the USB descriptor. Some device might be screwed up by the
-unexpected UMP command; in such a case, pass `midi2_probe=0` option to
-snd-usb-audio driver for skipping the UMP v1.1 inquiries.
+unexpected UMP command; in such a case, pass `midi2_ump_probe=0`
+option to snd-usb-audio driver for skipping the UMP v1.1 inquiries.
 
 When the MIDI 2.0 device is probed, the kernel creates a rawmidi
 device for each UMP Endpoint of the device. Its device name is
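For reference, the renamed option is passed like any other module parameter: `modprobe snd-usb-audio midi2_ump_probe=0` for a modular driver, or `snd_usb_audio.midi2_ump_probe=0` on the kernel command line for a built-in one (the dash-to-underscore spelling on the command line is the general module-parameter convention, not something this hunk specifies).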

View File

@@ -3344,7 +3344,7 @@ AX.25 NETWORK LAYER
 M:	Ralf Baechle <ralf@linux-mips.org>
 L:	linux-hams@vger.kernel.org
 S:	Maintained
-W:	http://www.linux-ax25.org/
+W:	https://linux-ax25.in-berlin.de
 F:	include/net/ax25.h
 F:	include/uapi/linux/ax25.h
 F:	net/ax25/
@@ -14769,7 +14769,7 @@ NETROM NETWORK LAYER
 M:	Ralf Baechle <ralf@linux-mips.org>
 L:	linux-hams@vger.kernel.org
 S:	Maintained
-W:	http://www.linux-ax25.org/
+W:	https://linux-ax25.in-berlin.de
 F:	include/net/netrom.h
 F:	include/uapi/linux/netrom.h
 F:	net/netrom/
@@ -18620,7 +18620,7 @@ ROSE NETWORK LAYER
 M:	Ralf Baechle <ralf@linux-mips.org>
 L:	linux-hams@vger.kernel.org
 S:	Maintained
-W:	http://www.linux-ax25.org/
+W:	https://linux-ax25.in-berlin.de
 F:	include/net/rose.h
 F:	include/uapi/linux/rose.h
 F:	net/rose/

View File

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*

View File

@@ -207,7 +207,7 @@ static void xen_power_off(void)
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
-	xen_hvm_evtchn_do_upcall();
+	xen_evtchn_do_upcall();
 
 	return IRQ_HANDLED;
 }

View File

@@ -255,7 +255,7 @@ config PPC
 	select HAVE_KPROBES
 	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
-	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT
+	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT && (!ARCH_USING_PATCHABLE_FUNCTION_ENTRY || (!CC_IS_GCC || GCC_VERSION >= 110100))
 	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)

View File

@@ -230,13 +230,15 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	int i;
 
+	preempt_disable();
+
 	for (i = 0; i < nr_wp_slots(); i++) {
 		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);
 
 		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
 			goto reset;
 	}
-	return;
+	goto out;
 
 reset:
 	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
@@ -245,6 +247,9 @@ reset:
 		__set_breakpoint(i, info);
 		info->perf_single_step = false;
 	}
+
+out:
+	preempt_enable();
 }
 
 static bool is_larx_stcx_instr(int type)
@@ -363,6 +368,11 @@ static void handle_p10dd1_spurious_exception(struct perf_event **bp,
 	}
 }
 
+/*
+ * Handle a DABR or DAWR exception.
+ *
+ * Called in atomic context.
+ */
 int hw_breakpoint_handler(struct die_args *args)
 {
 	bool err = false;
@@ -490,6 +500,8 @@ NOKPROBE_SYMBOL(hw_breakpoint_handler);
 
 /*
  * Handle single-step exceptions following a DABR hit.
+ *
+ * Called in atomic context.
  */
 static int single_step_dabr_instruction(struct die_args *args)
 {
@@ -541,6 +553,8 @@ NOKPROBE_SYMBOL(single_step_dabr_instruction);
 
 /*
  * Handle debug exception notifications.
+ *
+ * Called in atomic context.
  */
 int hw_breakpoint_exceptions_notify(
 		struct notifier_block *unused, unsigned long val, void *data)

View File

@@ -131,8 +131,13 @@ void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
 			 int *type, int *size, unsigned long *ea)
 {
 	struct instruction_op op;
+	int err;
 
-	if (__get_user_instr(*instr, (void __user *)regs->nip))
+	pagefault_disable();
+	err = __get_user_instr(*instr, (void __user *)regs->nip);
+	pagefault_enable();
+
+	if (err)
 		return;
 
 	analyse_instr(&op, regs, *instr);

View File

@@ -1512,23 +1512,11 @@ static void do_program_check(struct pt_regs *regs)
 			return;
 		}
 
-		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && user_mode(regs)) {
-			ppc_inst_t insn;
-
-			if (get_user_instr(insn, (void __user *)regs->nip)) {
-				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-				return;
-			}
-
-			if (ppc_inst_primary_opcode(insn) == 31 &&
-			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
-				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
-				return;
-			}
+		/* User mode considers other cases after enabling IRQs */
+		if (!user_mode(regs)) {
+			_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+			return;
 		}
-
-		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-		return;
 	}
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	if (reason & REASON_TM) {
@@ -1561,16 +1549,44 @@ static void do_program_check(struct pt_regs *regs)
 	/*
 	 * If we took the program check in the kernel skip down to sending a
-	 * SIGILL. The subsequent cases all relate to emulating instructions
-	 * which we should only do for userspace. We also do not want to enable
-	 * interrupts for kernel faults because that might lead to further
-	 * faults, and loose the context of the original exception.
+	 * SIGILL. The subsequent cases all relate to user space, such as
+	 * emulating instructions which we should only do for user space. We
+	 * also do not want to enable interrupts for kernel faults because that
+	 * might lead to further faults, and loose the context of the original
+	 * exception.
 	 */
 	if (!user_mode(regs))
 		goto sigill;
 
 	interrupt_cond_local_irq_enable(regs);
 
+	/*
+	 * (reason & REASON_TRAP) is mostly handled before enabling IRQs,
+	 * except get_user_instr() can sleep so we cannot reliably inspect the
+	 * current instruction in that context. Now that we know we are
+	 * handling a user space trap and can sleep, we can check if the trap
+	 * was a hashchk failure.
+	 */
+	if (reason & REASON_TRAP) {
+		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
+			ppc_inst_t insn;
+
+			if (get_user_instr(insn, (void __user *)regs->nip)) {
+				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+				return;
+			}
+
+			if (ppc_inst_primary_opcode(insn) == 31 &&
+			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
+				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
+				return;
+			}
+		}
+
+		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
+		return;
+	}
+
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
 	 * but there seems to be a hardware bug on the 405GP (RevD)
 	 * that means ESR is sometimes set incorrectly - either to

View File

@@ -1418,7 +1418,7 @@ static int h_24x7_event_init(struct perf_event *event)
 	}
 
 	domain = event_get_domain(event);
-	if (domain >= HV_PERF_DOMAIN_MAX) {
+	if (domain == 0 || domain >= HV_PERF_DOMAIN_MAX) {
 		pr_devel("invalid domain %d\n", domain);
 		return -EINVAL;
 	}

View File

@@ -2,6 +2,7 @@
 menuconfig PPC_82xx
 	bool "82xx-based boards (PQ II)"
 	depends on PPC_BOOK3S_32
+	select FSL_SOC
 
 if PPC_82xx
@@ -9,7 +10,6 @@ config EP8248E
 	bool "Embedded Planet EP8248E (a.k.a. CWH-PPC-8248N-VE)"
 	select CPM2
 	select PPC_INDIRECT_PCI if PCI
-	select FSL_SOC
 	select PHYLIB if NETDEVICES
 	select MDIO_BITBANG if PHYLIB
 	help
@@ -22,7 +22,6 @@ config MGCOGE
 	bool "Keymile MGCOGE"
 	select CPM2
 	select PPC_INDIRECT_PCI if PCI
-	select FSL_SOC
 	help
 	  This enables support for the Keymile MGCOGE board.

View File

@@ -105,7 +105,7 @@ asm volatile(ALTERNATIVE( \
  * |   31 - 25   | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001      01001     rs1       000       00000    0001011
  * dcache.cva rs1 (clean, virtual address)
- *   0000001      00100     rs1       000       00000    0001011
+ *   0000001      00101     rs1       000       00000    0001011
  *
  * dcache.cipa rs1 (clean then invalidate, physical address)
  * |   31 - 25   | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
@@ -118,7 +118,7 @@ asm volatile(ALTERNATIVE( \
  *   0000000      11001     00000     000       00000    0001011
  */
 #define THEAD_inval_A0	".long 0x0265000b"
-#define THEAD_clean_A0	".long 0x0245000b"
+#define THEAD_clean_A0	".long 0x0255000b"
 #define THEAD_flush_A0	".long 0x0275000b"
 #define THEAD_SYNC_S	".long 0x0190000b"
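As a cross-check of the corrected opcode (our arithmetic, not part of the commit): packing the dcache.cva fields with rs1 = a0 (x10 = 0b01010) reproduces the new constant, and the stale 00100 field is exactly what yielded the old 0x0245000b:

    0000001 00101 01010 000 00000 0001011
    = 0000 0010 0101 0101 0000 0000 0000 1011
    = 0x0255000b  (THEAD_clean_A0)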

View File

@@ -98,7 +98,13 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
 	kbuf.image = image;
 	kbuf.buf_min = lowest_paddr;
 	kbuf.buf_max = ULONG_MAX;
-	kbuf.buf_align = PAGE_SIZE;
+
+	/*
+	 * Current riscv boot protocol requires 2MB alignment for
+	 * RV64 and 4MB alignment for RV32
+	 *
+	 */
+	kbuf.buf_align = PMD_SIZE;
 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
 	kbuf.top_down = false;

View File

@@ -1945,6 +1945,7 @@ config EFI
 	select UCS2_STRING
 	select EFI_RUNTIME_WRAPPERS
 	select ARCH_USE_MEMREMAP_PROT
+	select EFI_RUNTIME_MAP if KEXEC_CORE
 	help
 	  This enables the kernel to use EFI runtime services that are
 	  available (such as the EFI variable services).
@@ -2020,7 +2021,6 @@ config EFI_MAX_FAKE_MEM
 config EFI_RUNTIME_MAP
 	bool "Export EFI runtime maps to sysfs" if EXPERT
 	depends on EFI
-	default KEXEC_CORE
 	help
 	  Export EFI runtime memory regions to /sys/firmware/efi/runtime-map.
 	  That memory map is required by the 2nd kernel to set up EFI virtual

View File

@@ -59,6 +59,14 @@ static void *alloc_pgt_page(void *context)
 		return NULL;
 	}
 
+	/* Consumed more tables than expected? */
+	if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
+		debug_putstr("pgt_buf running low in " __FILE__ "\n");
+		debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
+		debug_putaddr(pages->pgt_buf_offset);
+		debug_putaddr(pages->pgt_buf_size);
+	}
+
 	entry = pages->pgt_buf + pages->pgt_buf_offset;
 	pages->pgt_buf_offset += PAGE_SIZE;
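Because pgt_buf_offset advances by exactly PAGE_SIZE per call, the equality test above fires once, at the allocation that crosses the threshold; allocation itself continues until the buffer is exhausted. A restatement of the hunk's logic, using the constants from the boot.h hunk below:

    pgt_buf_offset: 0, 4096, ..., 27*4096, 28*4096 == BOOT_PGT_SIZE_WARN -> warn once
    further calls keep allocating until pgt_buf_offset reaches pgt_buf_size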

View File

@@ -294,7 +294,7 @@ static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 
 	inc_irq_stat(irq_hv_callback_count);
 
-	xen_hvm_evtchn_do_upcall();
+	xen_evtchn_do_upcall();
 
 	set_irq_regs(old_regs);
 }

View File

@@ -40,23 +40,40 @@
 #ifdef CONFIG_X86_64
 # define BOOT_STACK_SIZE	0x4000
 
-# define BOOT_INIT_PGT_SIZE	(6*4096)
-# ifdef CONFIG_RANDOMIZE_BASE
 /*
- * Assuming all cross the 512GB boundary:
- * 1 page for level4
- * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
- * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
- * Total is 19 pages.
+ * Used by decompressor's startup_32() to allocate page tables for identity
+ * mapping of the 4G of RAM in 4-level paging mode:
+ * - 1 level4 table;
+ * - 1 level3 table;
+ * - 4 level2 table that maps everything with 2M pages;
+ *
+ * The additional level5 table needed for 5-level paging is allocated from
+ * trampoline_32bit memory.
  */
-# ifdef CONFIG_X86_VERBOSE_BOOTUP
-#  define BOOT_PGT_SIZE	(19*4096)
-# else /* !CONFIG_X86_VERBOSE_BOOTUP */
-#  define BOOT_PGT_SIZE	(17*4096)
-# endif
-# else /* !CONFIG_RANDOMIZE_BASE */
-#  define BOOT_PGT_SIZE	BOOT_INIT_PGT_SIZE
-# endif
+# define BOOT_INIT_PGT_SIZE	(6*4096)
+
+/*
+ * Total number of page tables kernel_add_identity_map() can allocate,
+ * including page tables consumed by startup_32().
+ *
+ * Worst-case scenario:
+ * - 5-level paging needs 1 level5 table;
+ * - KASLR needs to map kernel, boot_params, cmdline and randomized kernel,
+ *   assuming all of them cross 256T boundary:
+ *   + 4*2 level4 table;
+ *   + 4*2 level3 table;
+ *   + 4*2 level2 table;
+ * - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM):
+ *   + 1 level4 table;
+ *   + 1 level3 table;
+ *   + 1 level2 table;
+ * Total: 28 tables
+ *
+ * Add 4 spare table in case decompressor touches anything beyond what is
+ * accounted above. Warn if it happens.
+ */
+# define BOOT_PGT_SIZE_WARN	(28*4096)
+# define BOOT_PGT_SIZE		(32*4096)
 
 #else /* !CONFIG_X86_64 */
 # define BOOT_STACK_SIZE	0x1000
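The worst case enumerated in the new comment sums up as follows (our restatement of the hunk's arithmetic):

    1           level5 table (5-level paging)
    4 * 2 = 8   level4 tables (kernel, boot_params, cmdline, randomized
                kernel; each may straddle a boundary, hence 2 per level)
    4 * 2 = 8   level3 tables
    4 * 2 = 8   level2 tables
    1 + 1 + 1   tables mapping the first 2M of video RAM
    ----------
    28 tables (BOOT_PGT_SIZE_WARN), plus 4 spare = 32 pages (BOOT_PGT_SIZE)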

View File

@@ -91,19 +91,6 @@ static inline void efi_fpu_end(void)
 #ifdef CONFIG_X86_32
 #define EFI_X86_KERNEL_ALLOC_LIMIT		(SZ_512M - 1)
 
-#define arch_efi_call_virt_setup()					\
-({									\
-	efi_fpu_begin();						\
-	firmware_restrict_branch_speculation_start();			\
-})
-
-#define arch_efi_call_virt_teardown()					\
-({									\
-	firmware_restrict_branch_speculation_end();			\
-	efi_fpu_end();							\
-})
-
 #else /* !CONFIG_X86_32 */
 #define EFI_X86_KERNEL_ALLOC_LIMIT		EFI_ALLOC_LIMIT
@@ -116,14 +103,6 @@ extern bool efi_disable_ibt_for_runtime;
 	__efi_call(__VA_ARGS__);					\
 })
 
-#define arch_efi_call_virt_setup()					\
-({									\
-	efi_sync_low_kernel_mappings();					\
-	efi_fpu_begin();						\
-	firmware_restrict_branch_speculation_start();			\
-	efi_enter_mm();							\
-})
-
 #undef arch_efi_call_virt
 #define arch_efi_call_virt(p, f, args...) ({				\
 	u64 ret, ibt = ibt_save(efi_disable_ibt_for_runtime);		\
@@ -132,13 +111,6 @@ extern bool efi_disable_ibt_for_runtime;
 	ret;								\
 })
 
-#define arch_efi_call_virt_teardown()					\
-({									\
-	efi_leave_mm();							\
-	firmware_restrict_branch_speculation_end();			\
-	efi_fpu_end();							\
-})
-
 #ifdef CONFIG_KASAN
 /*
  * CONFIG_KASAN may redefine memset to __memset. __memset function is present
@@ -168,8 +140,8 @@ extern void efi_delete_dummy_variable(void);
 extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
 extern void efi_free_boot_services(void);
 
-void efi_enter_mm(void);
-void efi_leave_mm(void);
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);
 
 /* kexec external ABI */
 struct efi_setup_data {

View File

@@ -8,6 +8,14 @@
 #undef notrace
 #define notrace __attribute__((no_instrument_function))
 
+#ifdef CONFIG_64BIT
+/*
+ * The generic version tends to create spurious ENDBR instructions under
+ * certain conditions.
+ */
+#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })
+#endif
+
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 #endif /* CONFIG_X86_32 */
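For context, the generic fallback (in include/linux/instruction_pointer.h at the time; quoted from memory, so treat as an assumption) takes the address of a local label, and under CONFIG_X86_KERNEL_IBT a label whose address escapes can draw an ENDBR, which the override above avoids by reading %rip directly:

    /* generic _THIS_IP_: take the address of a local label */
    #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })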

View File

@@ -9,13 +9,6 @@ struct paravirt_patch_site {
 	u8 type;		/* type of this instruction */
 	u8 len;			/* length of original instruction */
 };
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE,
-	PARAVIRT_LAZY_MMU,
-	PARAVIRT_LAZY_CPU,
-};
 #endif
 
 #ifdef CONFIG_PARAVIRT
@@ -549,14 +542,6 @@ int paravirt_disable_iospace(void);
 	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
 		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
-void paravirt_start_context_switch(struct task_struct *prev);
-void paravirt_end_context_switch(struct task_struct *next);
-
-void paravirt_enter_lazy_mmu(void);
-void paravirt_leave_lazy_mmu(void);
-void paravirt_flush_lazy_mmu(void);
-
 void _paravirt_nop(void);
 void paravirt_BUG(void);
 unsigned long paravirt_ret0(void);

View File

@@ -36,6 +36,7 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
+#include <asm/bug.h>
 #include <asm/processor.h>
 
 #define XEN_SIGNATURE "XenVMMXenVMM"
@@ -63,4 +64,40 @@ void __init xen_pvh_init(struct boot_params *boot_params);
 void __init mem_map_via_hcall(struct boot_params *boot_params_p);
 #endif
 
+/* Lazy mode for batching updates / context switch */
+enum xen_lazy_mode {
+	XEN_LAZY_NONE,
+	XEN_LAZY_MMU,
+	XEN_LAZY_CPU,
+};
+
+DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode);
+DECLARE_PER_CPU(unsigned int, xen_lazy_nesting);
+
+static inline void enter_lazy(enum xen_lazy_mode mode)
+{
+	enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode);
+
+	if (mode == old_mode) {
+		this_cpu_inc(xen_lazy_nesting);
+		return;
+	}
+
+	BUG_ON(old_mode != XEN_LAZY_NONE);
+	this_cpu_write(xen_lazy_mode, mode);
+}
+
+static inline void leave_lazy(enum xen_lazy_mode mode)
+{
+	BUG_ON(this_cpu_read(xen_lazy_mode) != mode);
+
+	if (this_cpu_read(xen_lazy_nesting) == 0)
+		this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
+	else
+		this_cpu_dec(xen_lazy_nesting);
+}
+
+enum xen_lazy_mode xen_get_lazy_mode(void);
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
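A quick illustration of the nesting rule these helpers implement (our sketch, not part of the commit): re-entering the current mode bumps a counter instead of tripping the BUG_ON(), and only the outermost leave_lazy() drops the CPU back to XEN_LAZY_NONE:

    /* hypothetical call sequence on one CPU */
    enter_lazy(XEN_LAZY_MMU);   /* mode: NONE -> MMU, nesting stays 0 */
    enter_lazy(XEN_LAZY_MMU);   /* same mode: nesting 0 -> 1 */
    leave_lazy(XEN_LAZY_MMU);   /* nesting 1 -> 0, mode still MMU */
    leave_lazy(XEN_LAZY_MMU);   /* nesting 0: mode MMU -> NONE */
    enter_lazy(XEN_LAZY_CPU);   /* fine from NONE; from MMU it would BUG_ON() */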

View File

@@ -1533,7 +1533,7 @@ static void __init build_socket_tables(void)
 {
 	struct uv_gam_range_entry *gre = uv_gre_table;
 	int nums, numn, nump;
-	int cpu, i, lnid;
+	int i, lnid, apicid;
 	int minsock = _min_socket;
 	int maxsock = _max_socket;
 	int minpnode = _min_pnode;
@@ -1584,15 +1584,14 @@ static void __init build_socket_tables(void)
 
 	/* Set socket -> node values: */
 	lnid = NUMA_NO_NODE;
-	for_each_possible_cpu(cpu) {
-		int nid = cpu_to_node(cpu);
-		int apicid, sockid;
+	for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) {
+		int nid = __apicid_to_node[apicid];
+		int sockid;
 
-		if (lnid == nid)
+		if ((nid == NUMA_NO_NODE) || (lnid == nid))
 			continue;
 		lnid = nid;
 
-		apicid = per_cpu(x86_cpu_to_apicid, cpu);
 		sockid = apicid >> uv_cpuid.socketid_shift;
 
 		if (_socket_to_node[sockid - minsock] == SOCK_EMPTY)

View File

@@ -143,66 +143,7 @@ int paravirt_disable_iospace(void)
 	return request_resource(&ioport_resource, &reserve_ioports);
 }
 
-static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
-
-static inline void enter_lazy(enum paravirt_lazy_mode mode)
-{
-	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-
-	this_cpu_write(paravirt_lazy_mode, mode);
-}
-
-static void leave_lazy(enum paravirt_lazy_mode mode)
-{
-	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
-
-	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
-}
-
-void paravirt_enter_lazy_mmu(void)
-{
-	enter_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_leave_lazy_mmu(void)
-{
-	leave_lazy(PARAVIRT_LAZY_MMU);
-}
-
-void paravirt_flush_lazy_mmu(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-
-	preempt_enable();
-}
-
 #ifdef CONFIG_PARAVIRT_XXL
-void paravirt_start_context_switch(struct task_struct *prev)
-{
-	BUG_ON(preemptible());
-
-	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
-	}
-	enter_lazy(PARAVIRT_LAZY_CPU);
-}
-
-void paravirt_end_context_switch(struct task_struct *next)
-{
-	BUG_ON(preemptible());
-
-	leave_lazy(PARAVIRT_LAZY_CPU);
-
-	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
-		arch_enter_lazy_mmu_mode();
-}
-
 static noinstr void pv_native_write_cr2(unsigned long val)
 {
 	native_write_cr2(val);
@@ -229,14 +170,6 @@ static noinstr void pv_native_safe_halt(void)
 }
 #endif
 
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
-	if (in_interrupt())
-		return PARAVIRT_LAZY_NONE;
-
-	return this_cpu_read(paravirt_lazy_mode);
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 #ifdef CONFIG_PARAVIRT_XXL

View File

@@ -579,7 +579,6 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 }
 
-#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_CLUSTER) || defined(CONFIG_SCHED_MC)
 static inline int x86_sched_itmt_flags(void)
 {
 	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
@@ -603,7 +602,14 @@ static int x86_cluster_flags(void)
 	return cpu_cluster_flags() | x86_sched_itmt_flags();
 }
 #endif
-#endif
+
+static int x86_die_flags(void)
+{
+	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+		return x86_sched_itmt_flags();
+
+	return 0;
+}
 
 /*
  * Set if a package/die has multiple NUMA nodes inside.
@@ -640,7 +646,7 @@ static void __init build_sched_topology(void)
 	 */
 	if (!x86_has_numa_in_package) {
 		x86_topology[i++] = (struct sched_domain_topology_level){
-			cpu_cpu_mask, SD_INIT_NAME(DIE)
+			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE)
 		};
 	}

View File

@@ -56,7 +56,6 @@ SYM_FUNC_END(__put_user_1)
 EXPORT_SYMBOL(__put_user_1)
 
 SYM_FUNC_START(__put_user_nocheck_1)
-	ENDBR
 	ASM_STAC
 2:	movb %al,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -76,7 +75,6 @@ SYM_FUNC_END(__put_user_2)
 EXPORT_SYMBOL(__put_user_2)
 
 SYM_FUNC_START(__put_user_nocheck_2)
-	ENDBR
 	ASM_STAC
 4:	movw %ax,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -96,7 +94,6 @@ SYM_FUNC_END(__put_user_4)
 EXPORT_SYMBOL(__put_user_4)
 
 SYM_FUNC_START(__put_user_nocheck_4)
-	ENDBR
 	ASM_STAC
 6:	movl %eax,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -119,7 +116,6 @@ SYM_FUNC_END(__put_user_8)
 EXPORT_SYMBOL(__put_user_8)
 
 SYM_FUNC_START(__put_user_nocheck_8)
-	ENDBR
 	ASM_STAC
 9:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32

View File

@@ -140,3 +140,15 @@ void __init efi_runtime_update_mappings(void)
 		}
 	}
 }
+
+void arch_efi_call_virt_setup(void)
+{
+	efi_fpu_begin();
+	firmware_restrict_branch_speculation_start();
+}
+
+void arch_efi_call_virt_teardown(void)
+{
+	firmware_restrict_branch_speculation_end();
+	efi_fpu_end();
+}

View File

@@ -474,19 +474,34 @@ void __init efi_dump_pagetable(void)
  * can not change under us.
  * It should be ensured that there are no concurrent calls to this function.
  */
-void efi_enter_mm(void)
+static void efi_enter_mm(void)
 {
 	efi_prev_mm = current->active_mm;
 	current->active_mm = &efi_mm;
 	switch_mm(efi_prev_mm, &efi_mm, NULL);
 }
 
-void efi_leave_mm(void)
+static void efi_leave_mm(void)
 {
 	current->active_mm = efi_prev_mm;
 	switch_mm(&efi_mm, efi_prev_mm, NULL);
 }
 
+void arch_efi_call_virt_setup(void)
+{
+	efi_sync_low_kernel_mappings();
+	efi_fpu_begin();
+	firmware_restrict_branch_speculation_start();
+	efi_enter_mm();
+}
+
+void arch_efi_call_virt_teardown(void)
+{
+	efi_leave_mm();
+	firmware_restrict_branch_speculation_end();
+	efi_fpu_end();
+}
+
 static DEFINE_SPINLOCK(efi_runtime_lock);
 
 /*

View File

@@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
 # optimization flags.
 KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
 
+# When LTO is enabled, llvm emits many text sections, which is not supported
+# by kexec. Remove -flto=* flags.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+
 # When linking purgatory.ro with -r unresolved symbols are not checked,
 # also link a purgatory.chk binary without -r to check for unresolved symbols.
 PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib

View File

@@ -138,7 +138,7 @@ void __init xen_efi_init(struct boot_params *boot_params)
 	if (efi_systab_xen == NULL)
 		return;
 
-	strncpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
+	strscpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
 	       sizeof(boot_params->efi_info.efi_loader_signature));
 	boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
 	boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);
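Background on this class of conversion (standard kernel string API semantics, not specific to this hunk): strncpy() leaves the destination unterminated when the source fills it, while strscpy() always NUL-terminates and reports truncation; here "Xen" fits the fixed-size signature field either way, so the switch simply retires a strncpy user:

    char buf[4];

    strncpy(buf, "Xenx", sizeof(buf));             /* buf not NUL-terminated */
    ssize_t n = strscpy(buf, "Xenx", sizeof(buf)); /* buf == "Xen", n == -E2BIG */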

View File

@@ -32,7 +32,7 @@ EXPORT_SYMBOL_GPL(hypercall_page);
  * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
  * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
  * but during boot it is switched to point to xen_vcpu_info.
- * The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events.
+ * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
  */
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

View File

@@ -136,7 +136,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
 
 	inc_irq_stat(irq_hv_callback_count);
 
-	xen_hvm_evtchn_do_upcall();
+	xen_evtchn_do_upcall();
 
 	set_irq_regs(old_regs);
 }

View File

@@ -101,6 +101,17 @@ struct tls_descs {
 	struct desc_struct desc[3];
 };
 
+DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
+DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);
+
+enum xen_lazy_mode xen_get_lazy_mode(void)
+{
+	if (in_interrupt())
+		return XEN_LAZY_NONE;
+
+	return this_cpu_read(xen_lazy_mode);
+}
+
 /*
  * Updating the 3 TLS descriptors in the GDT on every task switch is
  * surprisingly expensive so we avoid updating them if they haven't
@@ -362,10 +373,25 @@ static noinstr unsigned long xen_get_debugreg(int reg)
 	return HYPERVISOR_get_debugreg(reg);
 }
 
+static void xen_start_context_switch(struct task_struct *prev)
+{
+	BUG_ON(preemptible());
+
+	if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
+	}
+	enter_lazy(XEN_LAZY_CPU);
+}
+
 static void xen_end_context_switch(struct task_struct *next)
 {
+	BUG_ON(preemptible());
+
 	xen_mc_flush();
-	paravirt_end_context_switch(next);
+	leave_lazy(XEN_LAZY_CPU);
+	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
+		arch_enter_lazy_mmu_mode();
 }
 
 static unsigned long xen_store_tr(void)
@@ -472,7 +498,7 @@ static void xen_set_ldt(const void *addr, unsigned entries)
 
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 }
 
 static void xen_load_gdt(const struct desc_ptr *dtr)
@@ -568,7 +594,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	 * exception between the new %fs descriptor being loaded and
 	 * %fs being effectively cleared at __switch_to().
 	 */
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+	if (xen_get_lazy_mode() == XEN_LAZY_CPU)
 		loadsegment(fs, 0);
 
 	xen_mc_batch();
@@ -577,7 +603,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 	load_TLS_descriptor(t, cpu, 1);
 	load_TLS_descriptor(t, cpu, 2);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 }
 
 static void xen_load_gs_index(unsigned int idx)
@@ -909,7 +935,7 @@ static void xen_load_sp0(unsigned long sp0)
 	mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
@@ -973,7 +999,7 @@ static void xen_write_cr0(unsigned long cr0)
 
 	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 }
 
 static void xen_write_cr4(unsigned long cr4)
@@ -1156,7 +1182,7 @@ static const typeof(pv_ops) xen_cpu_ops __initconst = {
 #endif
 		.io_delay = xen_io_delay,
 
-		.start_context_switch = paravirt_start_context_switch,
+		.start_context_switch = xen_start_context_switch,
 		.end_context_switch = xen_end_context_switch,
 	},
 };

View File

@@ -236,7 +236,7 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	u.val = pmd_val_ma(val);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -270,7 +270,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 {
 	struct mmu_update u;
 
-	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
+	if (xen_get_lazy_mode() != XEN_LAZY_MMU)
 		return false;
 
 	xen_mc_batch();
@@ -279,7 +279,7 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 	u.val = pte_val_ma(pteval);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	return true;
 }
@@ -325,7 +325,7 @@ void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 }
 
 /* Assume pteval_t is equivalent to all the other *val_t types. */
@@ -419,7 +419,7 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	u.val = pud_val_ma(val);
 	xen_extend_mmu_update(&u);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -499,7 +499,7 @@ static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
 	__xen_set_p4d_hyper(ptr, val);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -531,7 +531,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 	if (user_ptr)
 		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 }
 
 #if CONFIG_PGTABLE_LEVELS >= 5
@@ -1245,7 +1245,7 @@ static noinline void xen_flush_tlb(void)
 	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -1265,7 +1265,7 @@ static void xen_flush_tlb_one_user(unsigned long addr)
 	op->arg1.linear_addr = addr & PAGE_MASK;
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }
@@ -1302,7 +1302,7 @@ static void xen_flush_tlb_multi(const struct cpumask *cpus,
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 }
 
 static unsigned long xen_read_cr3(void)
@@ -1361,7 +1361,7 @@ static void xen_write_cr3(unsigned long cr3)
 	else
 		__xen_write_cr3(false, 0);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
+	xen_mc_issue(XEN_LAZY_CPU);  /* interrupts restored */
 }
 
 /*
@@ -1396,7 +1396,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
 
 	__xen_write_cr3(true, cr3);
 
-	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
+	xen_mc_issue(XEN_LAZY_CPU);  /* interrupts restored */
 }
 
 static int xen_pgd_alloc(struct mm_struct *mm)
@@ -1557,7 +1557,7 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
-		xen_mc_issue(PARAVIRT_LAZY_MMU);
+		xen_mc_issue(XEN_LAZY_MMU);
 	}
 }
@@ -1587,7 +1587,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 
 		__set_pfn_prot(pfn, PAGE_KERNEL);
 
-		xen_mc_issue(PARAVIRT_LAZY_MMU);
+		xen_mc_issue(XEN_LAZY_MMU);
 
 		ClearPagePinned(page);
 	}
@@ -1804,7 +1804,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	 */
 	xen_mc_batch();
 	__xen_write_cr3(true, __pa(init_top_pgt));
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	xen_mc_issue(XEN_LAZY_CPU);
 
 	/* We can't that easily rip out L3 and L2, as the Xen pagetables are
 	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
@@ -2083,6 +2083,23 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
+static void xen_enter_lazy_mmu(void)
+{
+	enter_lazy(XEN_LAZY_MMU);
+}
+
+static void xen_flush_lazy_mmu(void)
+{
+	preempt_disable();
+
+	if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
 static void __init xen_post_allocator_init(void)
 {
 	pv_ops.mmu.set_pte = xen_set_pte;
@@ -2107,7 +2124,7 @@ static void xen_leave_lazy_mmu(void)
 {
 	preempt_disable();
 	xen_mc_flush();
-	paravirt_leave_lazy_mmu();
+	leave_lazy(XEN_LAZY_MMU);
 	preempt_enable();
 }
@@ -2166,9 +2183,9 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
 	.exit_mmap = xen_exit_mmap,
 
 	.lazy_mode = {
-		.enter = paravirt_enter_lazy_mmu,
+		.enter = xen_enter_lazy_mmu,
 		.leave = xen_leave_lazy_mmu,
-		.flush = paravirt_flush_lazy_mmu,
+		.flush = xen_flush_lazy_mmu,
 	},
 
 	.set_fixmap = xen_set_fixmap,
@@ -2385,7 +2402,7 @@ static noinline void xen_flush_tlb_all(void)
 	op->cmd = MMUEXT_TLB_FLUSH_ALL;
 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
+	xen_mc_issue(XEN_LAZY_MMU);
 
 	preempt_enable();
 }

View File

@@ -26,7 +26,7 @@ static inline void xen_mc_batch(void)
 
 	/* need to disable interrupts until this entry is complete */
 	local_irq_save(flags);
-	trace_xen_mc_batch(paravirt_get_lazy_mode());
+	trace_xen_mc_batch(xen_get_lazy_mode());
 	__this_cpu_write(xen_mc_irq_flags, flags);
 }
 
@@ -44,7 +44,7 @@ static inline void xen_mc_issue(unsigned mode)
 {
 	trace_xen_mc_issue(mode);
 
-	if ((paravirt_get_lazy_mode() & mode) == 0)
+	if ((xen_get_lazy_mode() & mode) == 0)
 		xen_mc_flush();
 
 	/* restore flags saved in xen_mc_batch */

View File

@@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
 	struct blk_mq_tags **new_tags;
 	int i;
 
-	if (set->nr_hw_queues >= new_nr_hw_queues) {
-		for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
-			__blk_mq_free_map_and_rqs(set, i);
+	if (set->nr_hw_queues >= new_nr_hw_queues)
 		goto done;
-	}
 
 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
 				GFP_KERNEL, set->numa_node);
@@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
 	struct request_queue *q;
 	LIST_HEAD(head);
-	int prev_nr_hw_queues;
+	int prev_nr_hw_queues = set->nr_hw_queues;
+	int i;
 
 	lockdep_assert_held(&set->tag_list_lock);
 
@@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister_hctxs(q);
 	}
 
-	prev_nr_hw_queues = set->nr_hw_queues;
 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
 		goto reregister;
 
@@ -4781,6 +4778,10 @@ switch_back:
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+
+	/* Free the excess tags when nr_hw_queues shrink. */
+	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+		__blk_mq_free_map_and_rqs(set, i);
 }
 
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)

View File

@@ -492,7 +492,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
 }
 
 static int thermal_get_trend(struct thermal_zone_device *thermal,
-			     struct thermal_trip *trip,
+			     const struct thermal_trip *trip,
 			     enum thermal_trend *trend)
 {
 	struct acpi_thermal *tz = thermal_zone_device_priv(thermal);

View File

@@ -1883,6 +1883,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else
 		dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
 
+	if (!(hpriv->cap & HOST_CAP_PART))
+		host->flags |= ATA_HOST_NO_PART;
+
+	if (!(hpriv->cap & HOST_CAP_SSC))
+		host->flags |= ATA_HOST_NO_SSC;
+
+	if (!(hpriv->cap2 & HOST_CAP2_SDS))
+		host->flags |= ATA_HOST_NO_DEVSLP;
+
 	if (pi.flags & ATA_FLAG_EM)
 		ahci_reset_em(host);

View File

@@ -1256,6 +1256,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
 	return sprintf(buf, "%d\n", emp->blink_policy);
 }
 
+static void ahci_port_clear_pending_irq(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	u32 tmp;
+
+	/* clear SError */
+	tmp = readl(port_mmio + PORT_SCR_ERR);
+	dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
+	writel(tmp, port_mmio + PORT_SCR_ERR);
+
+	/* clear port IRQ */
+	tmp = readl(port_mmio + PORT_IRQ_STAT);
+	dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
+	if (tmp)
+		writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+	writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
+}
+
 static void ahci_port_init(struct device *dev, struct ata_port *ap,
 			   int port_no, void __iomem *mmio,
 			   void __iomem *port_mmio)
@@ -1270,18 +1290,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
 	if (rc)
 		dev_warn(dev, "%s (%d)\n", emsg, rc);
 
-	/* clear SError */
-	tmp = readl(port_mmio + PORT_SCR_ERR);
-	dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
-	writel(tmp, port_mmio + PORT_SCR_ERR);
-
-	/* clear port IRQ */
-	tmp = readl(port_mmio + PORT_IRQ_STAT);
-	dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
-	if (tmp)
-		writel(tmp, port_mmio + PORT_IRQ_STAT);
-
-	writel(1 << port_no, mmio + HOST_IRQ_STAT);
+	ahci_port_clear_pending_irq(ap);
 
 	/* mark esata ports */
 	tmp = readl(port_mmio + PORT_CMD);
@@ -1603,6 +1612,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
 	tf.status = ATA_BUSY;
 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
 
+	ahci_port_clear_pending_irq(ap);
+
 	rc = sata_link_hardreset(link, timing, deadline, online,
 				 ahci_check_ready);

View File

@@ -4783,11 +4783,8 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 	 * been aborted by the device due to a limit timeout using the policy
 	 * 0xD. For these commands, invoke EH to get the command sense data.
 	 */
-	if (qc->result_tf.status & ATA_SENSE &&
-	    ((ata_is_ncq(qc->tf.protocol) &&
-	      dev->flags & ATA_DFLAG_CDL_ENABLED) ||
-	     (!ata_is_ncq(qc->tf.protocol) &&
-	      ata_id_sense_reporting_enabled(dev->id)))) {
+	if (qc->flags & ATA_QCFLAG_HAS_CDL &&
+	    qc->result_tf.status & ATA_SENSE) {
 		/*
 		 * Tell SCSI EH to not overwrite scmd->result even if this
 		 * command is finished with result SAM_STAT_GOOD.

View File

@@ -2796,23 +2796,13 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		}
 	}
 
-	/*
-	 * Some controllers can't be frozen very well and may set spurious
-	 * error conditions during reset. Clear accumulated error
-	 * information and re-thaw the port if frozen. As reset is the
-	 * final recovery action and we cross check link onlineness against
-	 * device classification later, no hotplug event is lost by this.
-	 */
+	/* clear cached SError */
 	spin_lock_irqsave(link->ap->lock, flags);
-	memset(&link->eh_info, 0, sizeof(link->eh_info));
+	link->eh_info.serror = 0;
 	if (slave)
-		memset(&slave->eh_info, 0, sizeof(link->eh_info));
-	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+		slave->eh_info.serror = 0;
 	spin_unlock_irqrestore(link->ap->lock, flags);
 
-	if (ata_port_is_frozen(ap))
-		ata_eh_thaw_port(ap);
-
 	/*
 	 * Make sure onlineness and classification result correspond.
 	 * Hotplug could have happened during reset and some

View File

@@ -396,10 +396,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 	case ATA_LPM_MED_POWER_WITH_DIPM:
 	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
 	case ATA_LPM_MIN_POWER:
-		if (ata_link_nr_enabled(link) > 0)
-			/* no restrictions on LPM transitions */
+		if (ata_link_nr_enabled(link) > 0) {
+			/* assume no restrictions on LPM transitions */
 			scontrol &= ~(0x7 << 8);
-		else {
+
+			/*
+			 * If the controller does not support partial, slumber,
+			 * or devsleep, then disallow these transitions.
+			 */
+			if (link->ap->host->flags & ATA_HOST_NO_PART)
+				scontrol |= (0x1 << 8);
+
+			if (link->ap->host->flags & ATA_HOST_NO_SSC)
+				scontrol |= (0x2 << 8);
+
+			if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
+				scontrol |= (0x4 << 8);
+		} else {
 			/* empty port, power off */
 			scontrol &= ~0xf;
 			scontrol |= (0x1 << 2);
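For reference (standard SATA SControl layout, not introduced by this hunk), bits 11:8 form the IPM field, and each value OR-ed in above forbids one power state:

    SControl[11:8] (IPM):  0x0  no restrictions
                           0x1  transitions to Partial disallowed
                           0x2  transitions to Slumber disallowed
                           0x4  transitions to DevSleep disallowed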

View File

@@ -37,7 +37,7 @@ static int comm_read_regr(struct pi_adapter *pi, int cont, int regr)
 {
 	int l, h, r;
 
-	r = regr + cont_map[cont];	
+	r = regr + cont_map[cont];
 
 	switch (pi->mode) {
 	case 0:
@@ -90,7 +90,6 @@ static void comm_connect(struct pi_adapter *pi)
 }
 
 static void comm_disconnect(struct pi_adapter *pi)
 {
-
 	w2(0); w2(0); w2(0); w2(4);
 	w0(pi->saved_r0);
@@ -172,12 +171,12 @@ static void comm_write_block(struct pi_adapter *pi, char *buf, int count)
 			w4l(swab16(((u16 *)buf)[2 * k]) |
 			    swab16(((u16 *)buf)[2 * k + 1]) << 16);
 		break;
 	}
 }
 
 static void comm_log_adapter(struct pi_adapter *pi)
-
-{	char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
+{
+	char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
 
 	dev_info(&pi->dev,
 		 "DataStor Commuter at 0x%x, mode %d (%s), delay %d\n",
View File

@@ -1255,8 +1255,8 @@ static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
 	for (b = 0; b < bytes; ) {
 		for (w = 0, o = 0; b < bytes && w < 4; w++) {
-			o += snprintf(linebuf + o, sizeof(linebuf) - o,
-				      "%08x ", readl(start + b));
+			o += scnprintf(linebuf + o, sizeof(linebuf) - o,
+				       "%08x ", readl(start + b));
 			b += sizeof(u32);
 		}
 		dev_dbg(dev, "%s: %p: %s\n",
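Background on the substitution (kernel vsprintf semantics, stated from general knowledge): snprintf() returns the length the output would have had, so accumulating offsets with o += snprintf(...) can push o past the buffer and turn the next sizeof(linebuf) - o into a bogus size, while scnprintf() returns the number of characters actually stored:

    char buf[8];
    int n;

    n = snprintf(buf, sizeof(buf), "%s", "0123456789");  /* n == 10, exceeds buffer */
    n = scnprintf(buf, sizeof(buf), "%s", "0123456789"); /* n == 7, chars written   */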

View File

@@ -3537,6 +3537,8 @@ int device_add(struct device *dev)
 	/* subsystems can specify simple device enumeration */
 	else if (dev->bus && dev->bus->dev_name)
 		error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+	else
+		error = -EINVAL;
 	if (error)
 		goto name_error;

View File

@@ -67,7 +67,6 @@ config COMEDI_TEST
 
 config COMEDI_PARPORT
 	tristate "Parallel port support"
-	depends on HAS_IOPORT
 	help
 	  Enable support for the standard parallel port.
 	  A cheap and easy way to get a few more digital I/O lines. Steal
@@ -80,7 +79,6 @@ config COMEDI_PARPORT
 config COMEDI_SSV_DNP
 	tristate "SSV Embedded Systems DIL/Net-PC support"
 	depends on X86_32 || COMPILE_TEST
-	depends on HAS_IOPORT
 	help
 	  Enable support for SSV Embedded Systems DIL/Net-PC
@@ -91,7 +89,6 @@ endif # COMEDI_MISC_DRIVERS
 
 menuconfig COMEDI_ISA_DRIVERS
 	bool "Comedi ISA and PC/104 drivers"
-	depends on ISA
 	help
 	  Enable comedi ISA and PC/104 drivers to be built
@@ -103,8 +100,7 @@ if COMEDI_ISA_DRIVERS
 
 config COMEDI_PCL711
 	tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
-	depends on HAS_IOPORT
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
@@ -165,9 +161,8 @@ config COMEDI_PCL730
 
 config COMEDI_PCL812
 	tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
-	depends on HAS_IOPORT
 	select COMEDI_ISADMA if ISA_DMA_API
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
 	  ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@@ -178,9 +173,8 @@ config COMEDI_PCL812
 
 config COMEDI_PCL816
 	tristate "Advantech PCL-814 and PCL-816 ISA card support"
-	depends on HAS_IOPORT
 	select COMEDI_ISADMA if ISA_DMA_API
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -189,9 +183,8 @@ config COMEDI_PCL816
 
 config COMEDI_PCL818
 	tristate "Advantech PCL-718 and PCL-818 ISA card support"
-	depends on HAS_IOPORT
 	select COMEDI_ISADMA if ISA_DMA_API
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for Advantech PCL-818 ISA cards
 	  PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@@ -210,7 +203,7 @@ config COMEDI_PCM3724
 
 config COMEDI_AMPLC_DIO200_ISA
 	tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
-	depends on COMEDI_AMPLC_DIO200
+	select COMEDI_AMPLC_DIO200
 	help
 	  Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
 	  PC272E ISA DIO boards
@@ -262,8 +255,7 @@ config COMEDI_DAC02
 
 config COMEDI_DAS16M1
 	tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
-	depends on HAS_IOPORT
-	depends on COMEDI_8254
+	select COMEDI_8254
 	select COMEDI_8255
 	help
 	  Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
@@ -273,7 +265,7 @@ config COMEDI_DAS16M1
 
 config COMEDI_DAS08_ISA
 	tristate "DAS-08 compatible ISA and PC/104 card support"
-	depends on COMEDI_DAS08
+	select COMEDI_DAS08
 	help
 	  Enable support for Keithley Metrabyte/ComputerBoards DAS08
 	  and compatible ISA and PC/104 cards:
@@ -286,9 +278,8 @@ config COMEDI_DAS08_ISA
 
 config COMEDI_DAS16
 	tristate "DAS-16 compatible ISA and PC/104 card support"
-	depends on HAS_IOPORT
 	select COMEDI_ISADMA if ISA_DMA_API
-	depends on COMEDI_8254
+	select COMEDI_8254
 	select COMEDI_8255
 	help
 	  Enable support for Keithley Metrabyte/ComputerBoards DAS16
@@ -305,8 +296,7 @@ config COMEDI_DAS16
 
 config COMEDI_DAS800
 	tristate "DAS800 and compatible ISA card support"
-	depends on HAS_IOPORT
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
 	  Keithley Metrabyte DAS-800, DAS-801, DAS-802
@@ -318,9 +308,8 @@ config COMEDI_DAS800
 
 config COMEDI_DAS1800
 	tristate "DAS1800 and compatible ISA card support"
-	depends on HAS_IOPORT
 	select COMEDI_ISADMA if ISA_DMA_API
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for DAS1800 and compatible ISA cards
 	  Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
@@ -334,8 +323,7 @@ config COMEDI_DAS1800
 
 config COMEDI_DAS6402
 	tristate "DAS6402 and compatible ISA card support"
-	depends on HAS_IOPORT
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for DAS6402 and compatible ISA cards
 	  Computerboards, Keithley Metrabyte DAS6402 and compatibles
@@ -414,8 +402,7 @@ config COMEDI_FL512
 
 config COMEDI_AIO_AIO12_8
 	tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
-	depends on HAS_IOPORT
-	depends on COMEDI_8254
+	select COMEDI_8254
 	select COMEDI_8255
 	help
 	  Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
@@ -469,9 +456,8 @@ config COMEDI_ADQ12B
 
 config COMEDI_NI_AT_A2150
 	tristate "NI AT-A2150 ISA card support"
-	depends on HAS_IOPORT
 	select COMEDI_ISADMA if ISA_DMA_API
-	depends on COMEDI_8254
+	select COMEDI_8254
 	help
 	  Enable support for National Instruments AT-A2150 cards
@@ -480,8 +466,7 @@ config COMEDI_NI_AT_A2150
 
 config COMEDI_NI_AT_AO
tristate "NI AT-AO-6/10 EISA card support" tristate "NI AT-AO-6/10 EISA card support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
help help
Enable support for National Instruments AT-AO-6/10 cards Enable support for National Instruments AT-AO-6/10 cards
@ -512,7 +497,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support" tristate "NI Lab-PC and compatibles ISA support"
depends on COMEDI_NI_LABPC select COMEDI_NI_LABPC
help help
Enable support for National Instruments Lab-PC and compatibles Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+. Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@ -576,7 +561,7 @@ endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers" tristate "Comedi PCI drivers"
depends on PCI && HAS_IOPORT depends on PCI
help help
Enable support for comedi PCI drivers. Enable support for comedi PCI drivers.
@ -725,8 +710,7 @@ config COMEDI_ADL_PCI8164
config COMEDI_ADL_PCI9111 config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support" tristate "ADLink PCI-9111HR support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
help help
Enable support for ADlink PCI9111 cards Enable support for ADlink PCI9111 cards
@ -736,7 +720,7 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118 config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support" tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA depends on HAS_DMA
depends on COMEDI_8254 select COMEDI_8254
help help
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@ -745,8 +729,7 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710 config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support" tristate "Advantech PCI-171x and PCI-1731 support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
help help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711, Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713 and PCI-1731 PCI-1713 and PCI-1731
@ -790,8 +773,7 @@ config COMEDI_ADV_PCI1760
config COMEDI_ADV_PCI_DIO config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support" tristate "Advantech PCI DIO card support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
select COMEDI_8255 select COMEDI_8255
help help
Enable support for Advantech PCI DIO cards Enable support for Advantech PCI DIO cards
@ -804,7 +786,7 @@ config COMEDI_ADV_PCI_DIO
config COMEDI_AMPLC_DIO200_PCI config COMEDI_AMPLC_DIO200_PCI
tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support" tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
depends on COMEDI_AMPLC_DIO200 select COMEDI_AMPLC_DIO200
help help
Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236 Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
and PCIe296 DIO boards. and PCIe296 DIO boards.
@ -832,8 +814,7 @@ config COMEDI_AMPLC_PC263_PCI
config COMEDI_AMPLC_PCI224 config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support" tristate "Amplicon PCI224 and PCI234 support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
help help
Enable support for Amplicon PCI224 and PCI234 AO boards Enable support for Amplicon PCI224 and PCI234 AO boards
@ -842,8 +823,7 @@ config COMEDI_AMPLC_PCI224
config COMEDI_AMPLC_PCI230 config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support" tristate "Amplicon PCI230 and PCI260 support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
select COMEDI_8255 select COMEDI_8255
help help
Enable support for Amplicon PCI230 and PCI260 Multifunction I/O Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
@ -862,7 +842,7 @@ config COMEDI_CONTEC_PCI_DIO
config COMEDI_DAS08_PCI config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support" tristate "DAS-08 PCI support"
depends on COMEDI_DAS08 select COMEDI_DAS08
help help
Enable support for PCI DAS-08 cards. Enable support for PCI DAS-08 cards.
@ -949,8 +929,7 @@ config COMEDI_CB_PCIDAS64
config COMEDI_CB_PCIDAS config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support" tristate "MeasurementComputing PCI-DAS support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
select COMEDI_8255 select COMEDI_8255
help help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS with Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
@ -974,8 +953,7 @@ config COMEDI_CB_PCIDDA
config COMEDI_CB_PCIMDAS config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support" tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
select COMEDI_8255 select COMEDI_8255
help help
Enable support for ComputerBoards/MeasurementComputing PCI Migration Enable support for ComputerBoards/MeasurementComputing PCI Migration
@ -995,8 +973,7 @@ config COMEDI_CB_PCIMDDA
config COMEDI_ME4000 config COMEDI_ME4000
tristate "Meilhaus ME-4000 support" tristate "Meilhaus ME-4000 support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
help help
Enable support for Meilhaus PCI data acquisition cards Enable support for Meilhaus PCI data acquisition cards
ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
@ -1054,7 +1031,7 @@ config COMEDI_NI_670X
config COMEDI_NI_LABPC_PCI config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support" tristate "NI Lab-PC PCI-1200 support"
depends on COMEDI_NI_LABPC select COMEDI_NI_LABPC
help help
Enable support for National Instruments Lab-PC PCI-1200. Enable support for National Instruments Lab-PC PCI-1200.
@ -1076,7 +1053,6 @@ config COMEDI_NI_PCIDIO
config COMEDI_NI_PCIMIO config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support" tristate "NI PCI-MIO-E series and M series support"
depends on HAS_DMA depends on HAS_DMA
depends on HAS_IOPORT
select COMEDI_NI_TIOCMD select COMEDI_NI_TIOCMD
select COMEDI_8255 select COMEDI_8255
help help
@ -1098,8 +1074,7 @@ config COMEDI_NI_PCIMIO
config COMEDI_RTD520 config COMEDI_RTD520
tristate "Real Time Devices PCI4520/DM7520 support" tristate "Real Time Devices PCI4520/DM7520 support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
help help
Enable support for Real Time Devices PCI4520/DM7520 Enable support for Real Time Devices PCI4520/DM7520
@ -1139,8 +1114,7 @@ if COMEDI_PCMCIA_DRIVERS
config COMEDI_CB_DAS16_CS config COMEDI_CB_DAS16_CS
tristate "CB DAS16 series PCMCIA support" tristate "CB DAS16 series PCMCIA support"
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
help help
Enable support for the ComputerBoards/MeasurementComputing PCMCIA Enable support for the ComputerBoards/MeasurementComputing PCMCIA
cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16 cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
@ -1150,7 +1124,7 @@ config COMEDI_CB_DAS16_CS
config COMEDI_DAS08_CS config COMEDI_DAS08_CS
tristate "CB DAS08 PCMCIA support" tristate "CB DAS08 PCMCIA support"
depends on COMEDI_DAS08 select COMEDI_DAS08
help help
Enable support for the ComputerBoards/MeasurementComputing DAS-08 Enable support for the ComputerBoards/MeasurementComputing DAS-08
PCMCIA card PCMCIA card
@ -1160,7 +1134,6 @@ config COMEDI_DAS08_CS
config COMEDI_NI_DAQ_700_CS config COMEDI_NI_DAQ_700_CS
tristate "NI DAQCard-700 PCMCIA support" tristate "NI DAQCard-700 PCMCIA support"
depends on HAS_IOPORT
help help
Enable support for the National Instruments PCMCIA DAQCard-700 DIO Enable support for the National Instruments PCMCIA DAQCard-700 DIO
@ -1169,7 +1142,6 @@ config COMEDI_NI_DAQ_700_CS
config COMEDI_NI_DAQ_DIO24_CS config COMEDI_NI_DAQ_DIO24_CS
tristate "NI DAQ-Card DIO-24 PCMCIA support" tristate "NI DAQ-Card DIO-24 PCMCIA support"
depends on HAS_IOPORT
select COMEDI_8255 select COMEDI_8255
help help
Enable support for the National Instruments PCMCIA DAQ-Card DIO-24 Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
@ -1179,7 +1151,7 @@ config COMEDI_NI_DAQ_DIO24_CS
config COMEDI_NI_LABPC_CS config COMEDI_NI_LABPC_CS
tristate "NI DAQCard-1200 PCMCIA support" tristate "NI DAQCard-1200 PCMCIA support"
depends on COMEDI_NI_LABPC select COMEDI_NI_LABPC
help help
Enable support for the National Instruments PCMCIA DAQCard-1200 Enable support for the National Instruments PCMCIA DAQCard-1200
@ -1188,7 +1160,6 @@ config COMEDI_NI_LABPC_CS
config COMEDI_NI_MIO_CS config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support" tristate "NI DAQCard E series PCMCIA support"
depends on HAS_IOPORT
select COMEDI_NI_TIO select COMEDI_NI_TIO
select COMEDI_8255 select COMEDI_8255
help help
@ -1201,7 +1172,6 @@ config COMEDI_NI_MIO_CS
config COMEDI_QUATECH_DAQP_CS config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support" tristate "Quatech DAQP PCMCIA data capture card support"
depends on HAS_IOPORT
help help
Enable support for the Quatech DAQP PCMCIA data capture cards Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308 DAQP-208 and DAQP-308
@ -1278,14 +1248,12 @@ endif # COMEDI_USB_DRIVERS
config COMEDI_8254 config COMEDI_8254
tristate tristate
depends on HAS_IOPORT
config COMEDI_8255 config COMEDI_8255
tristate tristate
config COMEDI_8255_SA config COMEDI_8255_SA
tristate "Standalone 8255 support" tristate "Standalone 8255 support"
depends on HAS_IOPORT
select COMEDI_8255 select COMEDI_8255
help help
Enable support for 8255 digital I/O as a standalone driver. Enable support for 8255 digital I/O as a standalone driver.
@ -1317,7 +1285,7 @@ config COMEDI_KCOMEDILIB
called kcomedilib. called kcomedilib.
config COMEDI_AMPLC_DIO200 config COMEDI_AMPLC_DIO200
depends on COMEDI_8254 select COMEDI_8254
tristate tristate
config COMEDI_AMPLC_PC236 config COMEDI_AMPLC_PC236
@ -1326,7 +1294,7 @@ config COMEDI_AMPLC_PC236
config COMEDI_DAS08 config COMEDI_DAS08
tristate tristate
depends on COMEDI_8254 select COMEDI_8254
select COMEDI_8255 select COMEDI_8255
config COMEDI_ISADMA config COMEDI_ISADMA
@ -1334,8 +1302,7 @@ config COMEDI_ISADMA
config COMEDI_NI_LABPC config COMEDI_NI_LABPC
tristate tristate
depends on HAS_IOPORT select COMEDI_8254
depends on COMEDI_8254
select COMEDI_8255 select COMEDI_8255
config COMEDI_NI_LABPC_ISADMA config COMEDI_NI_LABPC_ISADMA
@@ -1211,7 +1211,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 		 * without actually having a link.
 		 */
  create:
-		device = kzalloc(sizeof(*device), GFP_KERNEL);
+		device = kzalloc(sizeof(*device), GFP_ATOMIC);
 		if (device == NULL)
 			break;
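The GFP_KERNEL to GFP_ATOMIC switch above (and in the next hunk) matters because these allocations happen in bus-reset handling where sleeping is not allowed. A minimal sketch of the rule, with illustrative lock and struct names that are not the firewire driver's:

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

struct demo_node {
	int color;
};

/* Inside a spinlock-protected (atomic) section the allocator must not
 * sleep, so GFP_ATOMIC is required; GFP_KERNEL may block on reclaim
 * and would trigger "sleeping function called from invalid context". */
static struct demo_node *demo_alloc_atomic(void)
{
	struct demo_node *node;

	spin_lock(&demo_lock);
	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	spin_unlock(&demo_lock);

	return node;
}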
@@ -101,7 +101,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
 {
 	struct fw_node *node;

-	node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL);
+	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
 	if (node == NULL)
 		return NULL;
@@ -1863,15 +1863,15 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
 		return PTR_ERR(adsp2_alg);

 	for (i = 0; i < n_algs; i++) {
-		cs_dsp_info(dsp,
-			    "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
-			    i, be32_to_cpu(adsp2_alg[i].alg.id),
-			    (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
-			    (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
-			    be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
-			    be32_to_cpu(adsp2_alg[i].xm),
-			    be32_to_cpu(adsp2_alg[i].ym),
-			    be32_to_cpu(adsp2_alg[i].zm));
+		cs_dsp_dbg(dsp,
+			   "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+			   i, be32_to_cpu(adsp2_alg[i].alg.id),
+			   (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+			   (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+			   be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+			   be32_to_cpu(adsp2_alg[i].xm),
+			   be32_to_cpu(adsp2_alg[i].ym),
+			   be32_to_cpu(adsp2_alg[i].zm));

 		alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
 						  adsp2_alg[i].alg.id,

@@ -1996,14 +1996,14 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
 		return PTR_ERR(halo_alg);

 	for (i = 0; i < n_algs; i++) {
-		cs_dsp_info(dsp,
-			    "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
-			    i, be32_to_cpu(halo_alg[i].alg.id),
-			    (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
-			    (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
-			    be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
-			    be32_to_cpu(halo_alg[i].xm_base),
-			    be32_to_cpu(halo_alg[i].ym_base));
+		cs_dsp_dbg(dsp,
+			   "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+			   i, be32_to_cpu(halo_alg[i].alg.id),
+			   (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+			   (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+			   be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+			   be32_to_cpu(halo_alg[i].xm_base),
+			   be32_to_cpu(halo_alg[i].ym_base));

 		ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
 						 halo_alg[i].alg.ver,
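Both hunks above only demote the per-algorithm enumeration from info to debug level; the loop logic is unchanged. A rough sketch of the pattern, with a hypothetical driver name:

#include <linux/printk.h>

/* pr_debug() is compiled out unless DEBUG or dynamic debug is enabled,
 * so per-item enumeration no longer floods the normal kernel log.
 * With dynamic debug it can be re-enabled at runtime, e.g.:
 *   echo 'file mydrv.c +p' > /sys/kernel/debug/dynamic_debug/control */
static void mydrv_log_alg(int i, unsigned int id, unsigned int ver)
{
	pr_debug("%d: ID %x v%u.%u.%u\n", i, id,
		 (ver & 0xff0000) >> 16, (ver & 0xff00) >> 8, ver & 0xff);
}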
@@ -62,7 +62,7 @@ efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
 	bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start,
 				   EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE);

-	status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+	status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
 			     sizeof(*unaccepted_table) + bitmap_size,
 			     (void **)&unaccepted_table);
 	if (status != EFI_SUCCESS) {
@@ -1293,7 +1293,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
-bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
 bool amdgpu_device_pcie_dynamic_switching_supported(void);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
 bool amdgpu_device_aspm_support_quirk(void);
@@ -478,7 +478,7 @@ void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *c
 	cu_info->cu_active_number = acu_info.number;
 	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
 	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
-	       sizeof(acu_info.bitmap));
+	       sizeof(cu_info->cu_bitmap));
 	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
 	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
 	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
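The sizeof change above follows the classic rule for copying between arrays that may differ in size: bound the copy by the destination, never by the source alone. A standalone sketch with made-up struct shapes (not the driver's real types):

#include <string.h>

/* Illustrative only: the source bitmap grew to carry multiple GC
 * instances while the destination kept the older, smaller shape. */
struct src_info { unsigned int bitmap[8][4][4]; };
struct dst_info { unsigned int bitmap[4][4]; };

static void copy_cu_bitmap(struct dst_info *dst, const struct src_info *src)
{
	/* sizeof(src->bitmap) here would overflow dst->bitmap by 8x;
	 * sizing the copy by the destination keeps it in bounds. */
	memcpy(dst->bitmap, src->bitmap, sizeof(dst->bitmap));
}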
@@ -980,8 +980,7 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
 					       uint32_t wait_times,
 					       uint32_t grace_period,
 					       uint32_t *reg_offset,
-					       uint32_t *reg_data,
-					       uint32_t inst)
+					       uint32_t *reg_data)
 {
 	*reg_data = wait_times;
@@ -55,5 +55,4 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
 					       uint32_t wait_times,
 					       uint32_t grace_period,
 					       uint32_t *reg_offset,
-					       uint32_t *reg_data,
-					       uint32_t inst);
+					       uint32_t *reg_data);
@@ -1103,8 +1103,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
 					      uint32_t wait_times,
 					      uint32_t grace_period,
 					      uint32_t *reg_offset,
-					      uint32_t *reg_data,
-					      uint32_t inst)
+					      uint32_t *reg_data)
 {
 	*reg_data = wait_times;

@@ -1120,8 +1119,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
 			     SCH_WAVE,
 			     grace_period);

-	*reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
-				       mmCP_IQ_WAIT_TIME2);
+	*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
 }

 void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
@@ -100,5 +100,4 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
 					      uint32_t wait_times,
 					      uint32_t grace_period,
 					      uint32_t *reg_offset,
-					      uint32_t *reg_data,
-					      uint32_t inst);
+					      uint32_t *reg_data);
@@ -1244,32 +1244,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
 	return true;
 }

-/*
- * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
- * Disable S/G on such systems until we have a proper fix.
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
- */
-bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
-{
-	switch (amdgpu_sg_display) {
-	case -1:
-		break;
-	case 0:
-		return false;
-	case 1:
-		return true;
-	default:
-		return false;
-	}
-
-	if ((totalram_pages() << (PAGE_SHIFT - 10)) +
-	    (adev->gmc.real_vram_size / 1024) >= 64000000) {
-		DRM_WARN("Disabling S/G due to >=64GB RAM\n");
-		return false;
-	}
-
-	return true;
-}
-
 /*
  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
  * speed switching. Until we have confirmation from Intel that a specific host
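For reference, the heuristic deleted above compared total memory in KiB: totalram_pages() << (PAGE_SHIFT - 10) converts a page count to KiB, VRAM is divided down from bytes, and 64000000 KiB is roughly 64 GB. A hedged, userspace-style sketch of the same arithmetic with invented numbers:

#include <stdint.h>
#include <stdio.h>

/* With 4 KiB pages, PAGE_SHIFT is 12, so pages << (12 - 10) == pages * 4
 * gives the size in KiB. Values below are purely illustrative. */
int main(void)
{
	uint64_t pages = 16ULL * 1024 * 1024 * 1024 / 4096; /* 16 GiB RAM  */
	uint64_t vram_bytes = 512ULL * 1024 * 1024;         /* 512 MiB VRAM */
	uint64_t total_kib = (pages << (12 - 10)) + vram_bytes / 1024;

	printf("total %llu KiB, threshold 64000000 KiB -> %s\n",
	       (unsigned long long)total_kib,
	       total_kib >= 64000000 ? "disable S/G" : "keep S/G");
	return 0;
}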
@@ -43,6 +43,7 @@
 #define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

 #define AMDGPU_MAX_GC_INSTANCES		8
+#define KGD_MAX_QUEUES			128

 #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES

@@ -257,7 +258,7 @@ struct amdgpu_cu_info {
 	uint32_t number;
 	uint32_t ao_cu_mask;
 	uint32_t ao_cu_bitmap[4][4];
-	uint32_t bitmap[4][4];
+	uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
 };

 struct amdgpu_gfx_ras {
@@ -839,7 +839,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
 		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
 		memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
-		       sizeof(adev->gfx.cu_info.bitmap));
+		       sizeof(dev_info->cu_bitmap));
 		dev_info->vram_type = adev->gmc.vram_type;
 		dev_info->vram_bit_width = adev->gmc.vram_width;
 		dev_info->vce_harvest_config = adev->vce.harvest_config;

@@ -940,12 +940,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		struct atom_context *atom_context;

 		atom_context = adev->mode_info.atom_context;
-		memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
-		memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
-		vbios_info.version = atom_context->version;
-		memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
-		       sizeof(atom_context->vbios_ver_str));
-		memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
+		if (atom_context) {
+			memcpy(vbios_info.name, atom_context->name,
+			       sizeof(atom_context->name));
+			memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
+			       sizeof(atom_context->vbios_pn));
+			vbios_info.version = atom_context->version;
+			memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+			       sizeof(atom_context->vbios_ver_str));
+			memcpy(vbios_info.date, atom_context->date,
+			       sizeof(atom_context->date));
+		}

 		return copy_to_user(out, &vbios_info,
 				    min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
@@ -1052,7 +1052,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 			info->ce_count = obj->err_data.ce_count;

 			if (err_data.ce_count) {
-				if (adev->smuio.funcs &&
+				if (!adev->aid_mask &&
+				    adev->smuio.funcs &&
 				    adev->smuio.funcs->get_socket_id &&
 				    adev->smuio.funcs->get_die_id) {
 					dev_info(adev->dev, "socket: %d, die: %d "

@@ -1072,7 +1073,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 				}
 			}

 			if (err_data.ue_count) {
-				if (adev->smuio.funcs &&
+				if (!adev->aid_mask &&
+				    adev->smuio.funcs &&
 				    adev->smuio.funcs->get_socket_id &&
 				    adev->smuio.funcs->get_die_id) {
 					dev_info(adev->dev, "socket: %d, die: %d "
@@ -81,7 +81,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     unsigned int size)
 {
 	struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
-						   GFP_KERNEL, true, 0);
+						   GFP_KERNEL, false, 0);

 	if (IS_ERR(sa)) {
 		*sa_bo = NULL;
@@ -9449,7 +9449,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
 			gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
 				adev, disable_masks[i * 2 + j]);
 			bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
-			cu_info->bitmap[i][j] = bitmap;
+			cu_info->bitmap[0][i][j] = bitmap;

 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
 				if (bitmap & mask) {
@@ -6368,7 +6368,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
 			 *    SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
 			 *    SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
 			 */
-			cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
+			cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;

 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
 				if (bitmap & mask)
@@ -3577,7 +3577,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
 			gfx_v6_0_set_user_cu_inactive_bitmap(
 				adev, disable_masks[i * 2 + j]);
 			bitmap = gfx_v6_0_get_cu_enabled(adev);
-			cu_info->bitmap[i][j] = bitmap;
+			cu_info->bitmap[0][i][j] = bitmap;

 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
 				if (bitmap & mask) {
@@ -5119,7 +5119,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
 			gfx_v7_0_set_user_cu_inactive_bitmap(
 				adev, disable_masks[i * 2 + j]);
 			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
-			cu_info->bitmap[i][j] = bitmap;
+			cu_info->bitmap[0][i][j] = bitmap;

 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
 				if (bitmap & mask) {
@@ -7121,7 +7121,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
 			gfx_v8_0_set_user_cu_inactive_bitmap(
 				adev, disable_masks[i * 2 + j]);
 			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
-			cu_info->bitmap[i][j] = bitmap;
+			cu_info->bitmap[0][i][j] = bitmap;

 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
 				if (bitmap & mask) {
@@ -1499,7 +1499,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
 			amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);

 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
-				if (cu_info->bitmap[i][j] & mask) {
+				if (cu_info->bitmap[0][i][j] & mask) {
 					if (counter == pg_always_on_cu_num)
 						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
 					if (counter < always_on_cu_num)

@@ -7233,7 +7233,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
 			 *    SE6,SH0 --> bitmap[2][1]
 			 *    SE7,SH0 --> bitmap[3][1]
 			 */
-			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
+			cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;

 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
 				if (bitmap & mask) {
@@ -4259,7 +4259,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
 }

 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
-						u32 bitmap)
+						u32 bitmap, int xcc_id)
 {
 	u32 data;

@@ -4269,15 +4269,15 @@ static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

-	WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
+	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
 }

-static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
+static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
 {
 	u32 data, mask;

-	data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
-	data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
+	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
+	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);

 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

@@ -4290,7 +4290,7 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
 				 struct amdgpu_cu_info *cu_info)
 {
-	int i, j, k, counter, active_cu_number = 0;
+	int i, j, k, counter, xcc_id, active_cu_number = 0;
 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
 	unsigned disable_masks[4 * 4];

@@ -4309,46 +4309,38 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
 		     adev->gfx.config.max_sh_per_se);

 	mutex_lock(&adev->grbm_idx_mutex);
-	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
-		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-			mask = 1;
-			ao_bitmap = 0;
-			counter = 0;
-			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
-			gfx_v9_4_3_set_user_cu_inactive_bitmap(
-				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
-			bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
-
-			/*
-			 * The bitmap(and ao_cu_bitmap) in cu_info structure is
-			 * 4x4 size array, and it's usually suitable for Vega
-			 * ASICs which has 4*2 SE/SH layout.
-			 * But for Arcturus, SE/SH layout is changed to 8*1.
-			 * To mostly reduce the impact, we make it compatible
-			 * with current bitmap array as below:
-			 *    SE4,SH0 --> bitmap[0][1]
-			 *    SE5,SH0 --> bitmap[1][1]
-			 *    SE6,SH0 --> bitmap[2][1]
-			 *    SE7,SH0 --> bitmap[3][1]
-			 */
-			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
-
-			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
-				if (bitmap & mask) {
-					if (counter < adev->gfx.config.max_cu_per_sh)
-						ao_bitmap |= mask;
-					counter++;
+	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
+		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+				mask = 1;
+				ao_bitmap = 0;
+				counter = 0;
+				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
+				gfx_v9_4_3_set_user_cu_inactive_bitmap(
+					adev,
+					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
+					xcc_id);
+				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
+
+				cu_info->bitmap[xcc_id][i][j] = bitmap;
+
+				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+					if (bitmap & mask) {
+						if (counter < adev->gfx.config.max_cu_per_sh)
+							ao_bitmap |= mask;
+						counter++;
+					}
+					mask <<= 1;
 				}
-				mask <<= 1;
+				active_cu_number += counter;
+				if (i < 2 && j < 2)
+					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
 			}
-			active_cu_number += counter;
-			if (i < 2 && j < 2)
-				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
-			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
 		}
+		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+					    xcc_id);
 	}
-	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
-				    0);
 	mutex_unlock(&adev->grbm_idx_mutex);

 	cu_info->number = active_cu_number;
@@ -345,6 +345,9 @@ static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
 		data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
 		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
 	}
+
+	if (amdgpu_sriov_vf(adev))
+		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+			regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }

 static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
@@ -766,7 +766,7 @@ static int soc21_common_hw_init(void *handle)
 	 * for the purpose of expose those registers
 	 * to process space
 	 */
-	if (adev->nbio.funcs->remap_hdp_registers)
+	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
 		adev->nbio.funcs->remap_hdp_registers(adev);

 	/* enable the doorbell aperture */
 	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
@@ -2087,7 +2087,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
 	amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
 	cu->num_simd_per_cu = cu_info.simd_per_cu;
-	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
+	cu->num_simd_cores = cu_info.simd_per_cu *
+			     (cu_info.cu_active_number / kdev->kfd->num_nodes);
 	cu->max_waves_simd = cu_info.max_waves_per_simd;
 	cu->wave_front_size = cu_info.wave_front_size;
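The new division accounts for one physical GPU being partitioned into several KFD nodes: the device-wide CU count is split across nodes before computing SIMD cores. A small worked sketch with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int simd_per_cu = 4;        /* assumed */
	unsigned int cu_active_number = 228; /* whole-GPU count, assumed */
	unsigned int num_nodes = 4;          /* GPU partitioned into 4 nodes */

	/* Per-node SIMD cores: 4 * (228 / 4) = 228, not 4 * 228 = 912. */
	unsigned int num_simd_cores = simd_per_cu * (cu_active_number / num_nodes);

	printf("per-node simd cores: %u\n", num_simd_cores);
	return 0;
}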
@@ -79,6 +79,10 @@ struct crat_header {
 #define CRAT_SUBTYPE_IOLINK_AFFINITY	5
 #define CRAT_SUBTYPE_MAX		6

+/*
+ * Do not change the value of CRAT_SIBLINGMAP_SIZE from 32
+ * as it breaks the ABI.
+ */
 #define CRAT_SIBLINGMAP_SIZE 32

 /*
@@ -1677,8 +1677,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 		dqm->dev->kfd2kgd->build_grace_period_packet_info(
 				dqm->dev->adev,	dqm->wait_times,
 				grace_period, &reg_offset,
-				&dqm->wait_times,
-				ffs(dqm->dev->xcc_mask) - 1);
+				&dqm->wait_times);
 	}

 	dqm_unlock(dqm);
@@ -162,6 +162,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 		return NULL;

 	*doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
+	inx *= 2;

 	pr_debug("Get kernel queue doorbell\n"
 		 "     doorbell offset   == 0x%08X\n"

@@ -176,6 +177,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
 	unsigned int inx;

 	inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+	inx /= 2;

 	mutex_lock(&kfd->doorbell_mutex);
 	__clear_bit(inx, kfd->doorbell_bitmap);
@@ -97,18 +97,22 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
 void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
 		const uint32_t *cu_mask, uint32_t cu_mask_count,
-		uint32_t *se_mask)
+		uint32_t *se_mask, uint32_t inst)
 {
 	struct kfd_cu_info cu_info;
 	uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
 	bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
 	uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
-	int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;
+	int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
+	uint32_t cu_active_per_node;
+	int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
+	int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;

 	amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);

-	if (cu_mask_count > cu_info.cu_active_number)
-		cu_mask_count = cu_info.cu_active_number;
+	cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
+	if (cu_mask_count > cu_active_per_node)
+		cu_mask_count = cu_active_per_node;

 	/* Exceeding these bounds corrupts the stack and indicates a coding error.
 	 * Returning with no CU's enabled will hang the queue, which should be

@@ -141,7 +145,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
 	for (se = 0; se < cu_info.num_shader_engines; se++)
 		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
 			cu_per_sh[se][sh] = hweight32(
-				cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);
+				cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
+				cu_bitmap_sh_mul]);

 	/* Symmetrically map cu_mask to all SEs & SHs:
 	 * se_mask programs up to 2 SH in the upper and lower 16 bits.

@@ -164,20 +169,33 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
 	 * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
 	 * ...
 	 *
+	 * For GFX 9.4.3, the following code only looks at a
+	 * subset of the cu_mask corresponding to the inst parameter.
+	 * If we have n XCCs under one GPU node
+	 * cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
+	 * cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
+	 * ..
+	 * cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
+	 * cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
+	 *
+	 * For example, if there are 6 XCCs under 1 KFD node, this code
+	 * running for each inst, will look at the bits as:
+	 * inst, inst + 6, inst + 12...
+	 *
 	 * First ensure all CUs are disabled, then enable user specified CUs.
 	 */
 	for (i = 0; i < cu_info.num_shader_engines; i++)
 		se_mask[i] = 0;

-	i = 0;
-	for (cu = 0; cu < 16; cu += inc) {
+	i = inst;
+	for (cu = 0; cu < 16; cu += cu_inc) {
 		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
 			for (se = 0; se < cu_info.num_shader_engines; se++) {
 				if (cu_per_sh[se][sh] > cu) {
 					if (cu_mask[i / 32] & (en_mask << (i % 32)))
 						se_mask[se] |= en_mask << (cu + sh * 16);
 					i += inc;
-					if (i == cu_mask_count)
+					if (i >= cu_mask_count)
 						return;
 				}
 			}
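The comment block in the hunk above describes how, with n XCCs behind one KFD node, each XCC instance consumes every n-th bit of the user's cu_mask. A standalone sketch of that striping, with invented parameters:

#include <stdio.h>

/* For instance `inst` of `num_xcc` XCCs, the bits examined are
 * inst, inst + num_xcc, inst + 2 * num_xcc, ... */
static void show_stripe(unsigned int inst, unsigned int num_xcc,
			unsigned int cu_mask_count)
{
	for (unsigned int i = inst; i < cu_mask_count; i += num_xcc)
		printf("XCC%u <- cu_mask bit %u\n", inst, i);
}

int main(void)
{
	show_stripe(0, 6, 18); /* 6 XCCs: bits 0, 6, 12  */
	show_stripe(1, 6, 18); /*         bits 1, 7, 13  */
	return 0;
}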
@@ -138,7 +138,7 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
 void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
 		const uint32_t *cu_mask, uint32_t cu_mask_count,
-		uint32_t *se_mask);
+		uint32_t *se_mask, uint32_t inst);

 int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
 		uint32_t pipe_id, uint32_t queue_id,
@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
 		return;

 	mqd_symmetrically_map_cu_mask(mm,
-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

 	m = get_mqd(mqd);
 	m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
 		return;

 	mqd_symmetrically_map_cu_mask(mm,
-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

 	m = get_mqd(mqd);
 	m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -71,7 +71,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
 	}

 	mqd_symmetrically_map_cu_mask(mm,
-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

 	m->compute_static_thread_mgmt_se0 = se_mask[0];
 	m->compute_static_thread_mgmt_se1 = se_mask[1];

@@ -321,6 +321,43 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 	return 0;
 }

+static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
+{
+	struct v11_compute_mqd *m;
+
+	m = get_mqd(mqd);
+
+	memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
+}
+
+static void restore_mqd(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *qp,
+			const void *mqd_src,
+			const void *ctl_stack_src, const u32 ctl_stack_size)
+{
+	uint64_t addr;
+	struct v11_compute_mqd *m;
+
+	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
+	addr = mqd_mem_obj->gpu_addr;
+
+	memcpy(m, mqd_src, sizeof(*m));
+
+	*mqd = m;
+	if (gart_addr)
+		*gart_addr = addr;
+
+	m->cp_hqd_pq_doorbell_control =
+		qp->doorbell_off <<
+			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+			m->cp_hqd_pq_doorbell_control);
+
+	qp->is_active = 0;
+}
+
 static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
 			struct queue_properties *q)

@@ -458,6 +495,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
 		mqd->mqd_size = sizeof(struct v11_compute_mqd);
 		mqd->get_wave_state = get_wave_state;
 		mqd->mqd_stride = kfd_mqd_stride;
+		mqd->checkpoint_mqd = checkpoint_mqd;
+		mqd->restore_mqd = restore_mqd;
 #if defined(CONFIG_DEBUG_FS)
 		mqd->debugfs_show_mqd = debugfs_show_mqd;
 #endif

@@ -502,6 +541,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
 		mqd->update_mqd = update_mqd_sdma;
 		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
 		mqd->is_occupied = kfd_is_occupied_sdma;
+		mqd->checkpoint_mqd = checkpoint_mqd;
+		mqd->restore_mqd = restore_mqd;
 		mqd->mqd_size = sizeof(struct v11_sdma_mqd);
 		mqd->mqd_stride = kfd_mqd_stride;
 #if defined(CONFIG_DEBUG_FS)
@@ -60,7 +60,7 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
 }

 static void update_cu_mask(struct mqd_manager *mm, void *mqd,
-			struct mqd_update_info *minfo)
+			struct mqd_update_info *minfo, uint32_t inst)
 {
 	struct v9_mqd *m;
 	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

@@ -69,27 +69,36 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
 		return;

 	mqd_symmetrically_map_cu_mask(mm,
-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);

 	m = get_mqd(mqd);

 	m->compute_static_thread_mgmt_se0 = se_mask[0];
 	m->compute_static_thread_mgmt_se1 = se_mask[1];
 	m->compute_static_thread_mgmt_se2 = se_mask[2];
 	m->compute_static_thread_mgmt_se3 = se_mask[3];
-	m->compute_static_thread_mgmt_se4 = se_mask[4];
-	m->compute_static_thread_mgmt_se5 = se_mask[5];
-	m->compute_static_thread_mgmt_se6 = se_mask[6];
-	m->compute_static_thread_mgmt_se7 = se_mask[7];
-
-	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
-		m->compute_static_thread_mgmt_se0,
-		m->compute_static_thread_mgmt_se1,
-		m->compute_static_thread_mgmt_se2,
-		m->compute_static_thread_mgmt_se3,
-		m->compute_static_thread_mgmt_se4,
-		m->compute_static_thread_mgmt_se5,
-		m->compute_static_thread_mgmt_se6,
-		m->compute_static_thread_mgmt_se7);
+	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
+		m->compute_static_thread_mgmt_se4 = se_mask[4];
+		m->compute_static_thread_mgmt_se5 = se_mask[5];
+		m->compute_static_thread_mgmt_se6 = se_mask[6];
+		m->compute_static_thread_mgmt_se7 = se_mask[7];
+
+		pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
+			m->compute_static_thread_mgmt_se0,
+			m->compute_static_thread_mgmt_se1,
+			m->compute_static_thread_mgmt_se2,
+			m->compute_static_thread_mgmt_se3,
+			m->compute_static_thread_mgmt_se4,
+			m->compute_static_thread_mgmt_se5,
+			m->compute_static_thread_mgmt_se6,
+			m->compute_static_thread_mgmt_se7);
+	} else {
+		pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
+			inst, m->compute_static_thread_mgmt_se0,
+			m->compute_static_thread_mgmt_se1,
+			m->compute_static_thread_mgmt_se2,
+			m->compute_static_thread_mgmt_se3);
+	}
 }

@@ -290,7 +299,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
 	if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
 		m->cp_hqd_ctx_save_control = 0;

-	update_cu_mask(mm, mqd, minfo);
+	if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
+		update_cu_mask(mm, mqd, minfo, 0);
 	set_priority(m, q);

 	q->is_active = QUEUE_IS_ACTIVE(*q);

@@ -676,6 +686,8 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
 		m = get_mqd(mqd + size * xcc);
 		update_mqd(mm, m, q, minfo);

+		update_cu_mask(mm, mqd, minfo, xcc);
+
 		if (q->format == KFD_QUEUE_FORMAT_AQL) {
 			switch (xcc) {
 			case 0:
@@ -55,7 +55,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
 		return;

 	mqd_symmetrically_map_cu_mask(mm,
-		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
+		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

 	m = get_mqd(mqd);
 	m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -299,8 +299,7 @@ static int pm_set_grace_period_v9(struct packet_manager *pm,
 			pm->dqm->wait_times,
 			grace_period,
 			&reg_offset,
-			&reg_data,
-			0);
+			&reg_data);

 	if (grace_period == USE_DEFAULT_GRACE_PERIOD)
 		reg_data = pm->dqm->wait_times;
@@ -1466,8 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
 static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
 {
-	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
-	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+	return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
 	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
 }
@ -450,8 +450,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count", sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count); dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count", sysfs_show_32bit_prop(buffer, offs, "simd_count",
dev->gpu ? (dev->node_props.simd_count * dev->gpu ? dev->node_props.simd_count : 0);
NUM_XCC(dev->gpu->xcc_mask)) : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count", sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count); dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count", sysfs_show_32bit_prop(buffer, offs, "caches_count",
@ -1597,14 +1596,17 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info, struct kfd_gpu_cache_info *pcache_info,
struct kfd_cu_info *cu_info, struct kfd_cu_info *cu_info,
int cache_type, unsigned int cu_processor_id) int cache_type, unsigned int cu_processor_id,
struct kfd_node *knode)
{ {
unsigned int cu_sibling_map_mask; unsigned int cu_sibling_map_mask;
int first_active_cu; int first_active_cu;
int i, j, k; int i, j, k, xcc, start, end;
struct kfd_cache_properties *pcache = NULL; struct kfd_cache_properties *pcache = NULL;
cu_sibling_map_mask = cu_info->cu_bitmap[0][0]; start = ffs(knode->xcc_mask) - 1;
end = start + NUM_XCC(knode->xcc_mask);
cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
cu_sibling_map_mask &= cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1); ((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask); first_active_cu = ffs(cu_sibling_map_mask);
@ -1639,16 +1641,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1); cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
k = 0; k = 0;
for (i = 0; i < cu_info->num_shader_engines; i++) { for (xcc = start; xcc < end; xcc++) {
for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) { for (i = 0; i < cu_info->num_shader_engines; i++) {
pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF); for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF); pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF); pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF); pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
k += 4; pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
k += 4;
cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4]; cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1); cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
}
} }
} }
pcache->sibling_map_size = k; pcache->sibling_map_size = k;
@ -1666,7 +1670,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev) static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
{ {
struct kfd_gpu_cache_info *pcache_info = NULL; struct kfd_gpu_cache_info *pcache_info = NULL;
int i, j, k; int i, j, k, xcc, start, end;
int ct = 0; int ct = 0;
unsigned int cu_processor_id; unsigned int cu_processor_id;
int ret; int ret;
@@ -1700,37 +1704,42 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
 	 * then it will consider only one CU from
 	 * the shared unit
 	 */
+	start = ffs(kdev->xcc_mask) - 1;
+	end = start + NUM_XCC(kdev->xcc_mask);
 	for (ct = 0; ct < num_of_cache_types; ct++) {
 		cu_processor_id = gpu_processor_id;
 		if (pcache_info[ct].cache_level == 1) {
-			for (i = 0; i < pcu_info->num_shader_engines; i++) {
-				for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
-					for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
-						ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
-									pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
-									cu_processor_id, k);
-						if (ret < 0)
-							break;
-						if (!ret) {
-							num_of_entries++;
-							list_add_tail(&props_ext->list, &dev->cache_props);
-						}
-						/* Move to next CU block */
-						num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
-							pcu_info->num_cu_per_sh) ?
-							pcache_info[ct].num_cu_shared :
-							(pcu_info->num_cu_per_sh - k);
-						cu_processor_id += num_cu_shared;
+			for (xcc = start; xcc < end; xcc++) {
+				for (i = 0; i < pcu_info->num_shader_engines; i++) {
+					for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
+						for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
+							ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
+										pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
+										cu_processor_id, k);
+							if (ret < 0)
+								break;
+							if (!ret) {
+								num_of_entries++;
+								list_add_tail(&props_ext->list, &dev->cache_props);
+							}
+							/* Move to next CU block */
+							num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
+								pcu_info->num_cu_per_sh) ?
+								pcache_info[ct].num_cu_shared :
+								(pcu_info->num_cu_per_sh - k);
+							cu_processor_id += num_cu_shared;
+						}
 					}
 				}
 			}
 		} else {
 			ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
-					pcu_info, ct, cu_processor_id);
+					pcu_info, ct, cu_processor_id, kdev);
 			if (ret < 0)
 				break;
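
The new start/end pair restricts the walk to the XCC instances actually present in kdev->xcc_mask. Assuming NUM_XCC() is a population count (an inference from this hunk, not a definition shown here), the iteration reduces to the following standalone sketch, which presumes a contiguous mask just as the start/end scheme does:

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    int main(void)
    {
        uint32_t xcc_mask = 0x0C;   /* example: XCC instances 2 and 3 present */
        int start = ffs(xcc_mask) - 1;                   /* lowest set bit -> 2 */
        int end = start + __builtin_popcount(xcc_mask);  /* 2 + 2 -> 4 */

        for (int xcc = start; xcc < end; xcc++)
            printf("visit XCC %d\n", xcc);               /* visits 2, then 3 */
        return 0;
    }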


@@ -89,7 +89,7 @@ struct kfd_mem_properties {
 	struct attribute attr;
 };
 
-#define CACHE_SIBLINGMAP_SIZE 64
+#define CACHE_SIBLINGMAP_SIZE 128
 
 struct kfd_cache_properties {
 	struct list_head list;


@@ -1274,11 +1274,15 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
-	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
-	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
-	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
-	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
-	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
+	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+						   AMDGPU_GPU_PAGE_SHIFT);
+	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+						  AMDGPU_GPU_PAGE_SHIFT);
+	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+						 AMDGPU_GPU_PAGE_SHIFT);
+	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+						AMDGPU_GPU_PAGE_SHIFT);
+	page_table_base.high_part = upper_32_bits(pt_base);
 	page_table_base.low_part = lower_32_bits(pt_base);
 
 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
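
The old arithmetic kept only four high bits of the shifted GART address, so anything at or above bit 48 was silently truncated; the replacement stores full 32-bit halves of the page frame number. A hedged before/after sketch (plain C stand-ins for upper_32_bits()/lower_32_bits(), and an assumed 12-bit AMDGPU_GPU_PAGE_SHIFT):

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12   /* assumed value of AMDGPU_GPU_PAGE_SHIFT */

    static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
        uint64_t gart_start = 0x123456789AB000ULL;      /* a 53-bit address */
        uint64_t pfn = gart_start >> GPU_PAGE_SHIFT;

        uint32_t old_high = (uint32_t)(gart_start >> 44) & 0xF; /* truncates */
        uint32_t new_high = upper_32(pfn);
        uint32_t new_low = lower_32(pfn);

        /* old high=0x3, new high=0x123: the old form lost the upper bits */
        printf("old high=%#x new high=%#x low=%#x\n", old_high, new_high, new_low);
        return 0;
    }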
@@ -1640,8 +1644,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		}
 		break;
 	}
 
-	if (init_data.flags.gpu_vm_support)
-		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
+	if (init_data.flags.gpu_vm_support &&
+	    (amdgpu_sg_display == 0))
+		init_data.flags.gpu_vm_support = false;
 
 	if (init_data.flags.gpu_vm_support)
 		adev->mode_info.gpu_vm_support = true;
@@ -2335,14 +2340,62 @@ static int dm_late_init(void *handle)
 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 }
 
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret;
+	u8 guid[16];
+	u64 tmp64;
+
+	mutex_lock(&mgr->lock);
+	if (!mgr->mst_primary)
+		goto out_fail;
+
+	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+				 DP_MST_EN |
+				 DP_UP_REQ_EN |
+				 DP_UPSTREAM_IS_SRC);
+	if (ret < 0) {
+		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	/* Some hubs forget their guids after they resume */
+	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+	if (ret != 16) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	if (memchr_inv(guid, 0, 16) == NULL) {
+		tmp64 = get_jiffies_64();
+		memcpy(&guid[0], &tmp64, sizeof(u64));
+		memcpy(&guid[8], &tmp64, sizeof(u64));
+
+		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+		if (ret != 16) {
+			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+			goto out_fail;
+		}
+	}
+
+	memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+	mutex_unlock(&mgr->lock);
+}
+
 static void s3_handle_mst(struct drm_device *dev, bool suspend)
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	struct drm_dp_mst_topology_mgr *mgr;
-	int ret;
-	bool need_hotplug = false;
 
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
@@ -2364,18 +2417,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 			if (!dp_is_lttpr_present(aconnector->dc_link))
 				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 
-			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
-			if (ret < 0) {
-				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
-					aconnector->dc_link);
-				need_hotplug = true;
-			}
+			/* TODO: move resume_mst_branch_status() into drm mst resume again
+			 * once topology probing work is pulled out from mst resume into mst
+			 * resume 2nd step. mst resume 2nd step should be called after old
+			 * state getting restored (i.e. drm_atomic_helper_resume()).
+			 */
+			resume_mst_branch_status(mgr);
 		}
 	}
 	drm_connector_list_iter_end(&iter);
-
-	if (need_hotplug)
-		drm_kms_helper_hotplug_event(dev);
 }
 
 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
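
The GUID fallback in resume_mst_branch_status() only needs 16 bytes of per-boot entropy when a hub comes back with an all-zero GUID. A rough userspace analogue (time() standing in for get_jiffies_64(), and an open-coded scan standing in for memchr_inv()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int main(void)
    {
        uint8_t guid[16] = { 0 };   /* the hub "forgot" its GUID over suspend */
        int all_zero = 1;

        /* kernel memchr_inv(guid, 0, 16) == NULL means "all bytes are zero" */
        for (size_t i = 0; i < sizeof(guid); i++) {
            if (guid[i]) {
                all_zero = 0;
                break;
            }
        }

        if (all_zero) {
            /* any per-boot 64-bit value works; the kernel uses jiffies */
            uint64_t seed = (uint64_t)time(NULL);

            memcpy(&guid[0], &seed, sizeof(seed));
            memcpy(&guid[8], &seed, sizeof(seed));
        }

        for (int i = 0; i < 16; i++)
            printf("%02x", guid[i]);
        putchar('\n');
        return 0;
    }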
@@ -2769,7 +2819,8 @@ static int dm_resume(void *handle)
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct dc_state *dc_state;
-	int i, r, j;
+	int i, r, j, ret;
+	bool need_hotplug = false;
 
 	if (amdgpu_in_reset(adev)) {
 		dc_state = dm->cached_dc_state;
@@ -2867,7 +2918,7 @@ static int dm_resume(void *handle)
 			continue;
 
 		/*
-		 * this is the case when traversing through already created
+		 * this is the case when traversing through already created end sink
 		 * MST connectors, should be skipped
 		 */
 		if (aconnector && aconnector->mst_root)
@@ -2927,6 +2978,27 @@ static int dm_resume(void *handle)
 
 	dm->cached_state = NULL;
 
+	/* Do mst topology probing after resuming cached state*/
+	drm_connector_list_iter_begin(ddev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_root)
+			continue;
+
+		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+		if (ret < 0) {
+			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+					aconnector->dc_link);
+			need_hotplug = true;
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+
+	if (need_hotplug)
+		drm_kms_helper_hotplug_event(ddev);
+
 	amdgpu_dm_irq_resume_late(adev);
 
 	amdgpu_dm_smu_write_watermarks_table(adev);
@@ -8073,7 +8145,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		bundle->surface_updates[planes_count].plane_info =
 			&bundle->plane_infos[planes_count];
 
-		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
 			fill_dc_dirty_rects(plane, old_plane_state,
 					    new_plane_state, new_crtc_state,
 					    &bundle->flip_addrs[planes_count],


@@ -620,7 +620,7 @@ struct amdgpu_hdmi_vsdb_info {
 	unsigned int max_refresh_rate_hz;
 
 	/**
-	 * @replay mode: Replay supported
+	 * @replay_mode: Replay supported
 	 */
 	bool replay_mode;
 };


@@ -169,11 +169,23 @@ static void add_link_enc_assignment(
 /* Return first available DIG link encoder. */
 static enum engine_id find_first_avail_link_enc(
 		const struct dc_context *ctx,
-		const struct dc_state *state)
+		const struct dc_state *state,
+		enum engine_id eng_id_requested)
 {
 	enum engine_id eng_id = ENGINE_ID_UNKNOWN;
 	int i;
 
+	if (eng_id_requested != ENGINE_ID_UNKNOWN) {
+		for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+			eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
+			if (eng_id == eng_id_requested)
+				return eng_id;
+		}
+	}
+
+	eng_id = ENGINE_ID_UNKNOWN;
+
 	for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
 		eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
 		if (eng_id != ENGINE_ID_UNKNOWN)
@@ -287,7 +299,7 @@ void link_enc_cfg_link_encs_assign(
 		struct dc_stream_state *streams[],
 		uint8_t stream_count)
 {
-	enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+	enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN;
 	int i;
 	int j;
@@ -377,8 +389,14 @@ void link_enc_cfg_link_encs_assign(
 			 * assigned to that endpoint.
 			 */
 			link_enc = get_link_enc_used_by_link(state, stream->link);
-			if (link_enc == NULL)
-				eng_id = find_first_avail_link_enc(stream->ctx, state);
+			if (link_enc == NULL) {
+				if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+				    stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN)
+					eng_id_req = stream->link->dpia_preferred_eng_id;
+
+				eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req);
+			}
 			else
 				eng_id = link_enc->preferred_engine;
@@ -402,7 +420,9 @@ void link_enc_cfg_link_encs_assign(
 		DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n",
 				__func__,
 				assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
-				assignment.ep_id.link_id.enum_id - 1,
+				assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
+						assignment.ep_id.link_id.enum_id :
+						assignment.ep_id.link_id.enum_id - 1,
 				assignment.eng_id);
 	}
 
 	for (i = 0; i < MAX_PIPES; i++) {
@@ -413,7 +433,9 @@ void link_enc_cfg_link_encs_assign(
 		DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n",
 				__func__,
 				assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
-				assignment.ep_id.link_id.enum_id - 1,
+				assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
+						assignment.ep_id.link_id.enum_id :
+						assignment.ep_id.link_id.enum_id - 1,
 				assignment.eng_id);
 	}
@@ -478,7 +500,6 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc(
 	if (stream)
 		link = stream->link;
 
-	// dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
 	return link;
 }


@@ -1496,6 +1496,7 @@ struct dc_link {
 	 * object creation.
 	 */
 	enum engine_id eng_id;
+	enum engine_id dpia_preferred_eng_id;
 
 	bool test_pattern_enabled;
 	enum dp_test_pattern current_test_pattern;


@@ -964,7 +964,9 @@ void dce110_edp_backlight_control(
 		return;
 	}
 
-	if (link->panel_cntl) {
+	if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
+		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
 		bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);
 
 		if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {


@@ -1032,6 +1032,28 @@ static const struct dce_i2c_mask i2c_masks = {
 		I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
 };
 
+/* ========================================================== */
+/*
+ * DPIA index | Preferred Encoder | Host Router
+ *     0      |         C         |      0
+ *     1      |  First Available  |      0
+ *     2      |         D         |      1
+ *     3      |  First Available  |      1
+ */
+/* ========================================================== */
+
+static const enum engine_id dpia_to_preferred_enc_id_table[] = {
+		ENGINE_ID_DIGC,
+		ENGINE_ID_DIGC,
+		ENGINE_ID_DIGD,
+		ENGINE_ID_DIGD
+};
+
+static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index)
+{
+	return dpia_to_preferred_enc_id_table[dpia_index];
+}
+
 static struct dce_i2c_hw *dcn31_i2c_hw_create(
 	struct dc_context *ctx,
 	uint32_t inst)
@@ -1785,6 +1807,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
 	.update_bw_bounding_box = dcn314_update_bw_bounding_box,
 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
 	.get_panel_config_defaults = dcn314_get_panel_config_defaults,
+	.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
 };
 
 static struct clock_source *dcn30_clock_source_create(
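
The table above hard-codes the comment's mapping: DPIA ports behind host router 0 prefer DIG C, ports behind host router 1 prefer DIG D. A standalone sketch of the same lookup with an added bounds check (the enum values here are illustrative stand-ins, not DC's real numbering):

    #include <stdio.h>

    enum engine_id { ENGINE_ID_UNKNOWN = -1, ENGINE_ID_DIGC = 2, ENGINE_ID_DIGD = 3 };

    static const enum engine_id dpia_to_enc[] = {
        ENGINE_ID_DIGC, ENGINE_ID_DIGC, ENGINE_ID_DIGD, ENGINE_ID_DIGD,
    };

    /* Same lookup as dcn314_get_preferred_eng_id_dpia(), plus a bounds
     * check; the kernel table appears sized to four DPIA ports. */
    static enum engine_id preferred_eng_id(unsigned int dpia_index)
    {
        if (dpia_index >= sizeof(dpia_to_enc) / sizeof(dpia_to_enc[0]))
            return ENGINE_ID_UNKNOWN;
        return dpia_to_enc[dpia_index];
    }

    int main(void)
    {
        for (unsigned int i = 0; i < 5; i++)
            printf("DPIA %u -> preferred encoder %d\n", i, preferred_eng_id(i));
        return 0;
    }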


@@ -65,6 +65,7 @@ struct resource_context;
 struct clk_bw_params;
 
 struct resource_funcs {
+	enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
 	void (*destroy)(struct resource_pool **pool);
 	void (*link_init)(struct dc_link *link);
 	struct panel_cntl*(*panel_cntl_create)(


@@ -791,6 +791,10 @@ static bool construct_dpia(struct dc_link *link,
 	/* Set dpia port index : 0 to number of dpia ports */
 	link->ddc_hw_inst = init_params->connector_index;
 
+	// Assign Dpia preferred eng_id
+	if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia)
+		link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst);
+
 	/* TODO: Create link encoder */
 
 	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;


@@ -31,12 +31,12 @@
 #include <linux/types.h>
 #include <linux/bitmap.h>
 #include <linux/dma-fence.h>
+#include "amdgpu_irq.h"
+#include "amdgpu_gfx.h"
 
 struct pci_dev;
 struct amdgpu_device;
 
-#define KGD_MAX_QUEUES 128
-
 struct kfd_dev;
 struct kgd_mem;
@@ -68,7 +68,7 @@ struct kfd_cu_info {
 	uint32_t wave_front_size;
 	uint32_t max_scratch_slots_per_cu;
 	uint32_t lds_size;
-	uint32_t cu_bitmap[4][4];
+	uint32_t cu_bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
 };
 
 /* For getting GPU local memory information from KGD */
@@ -326,8 +326,7 @@ struct kfd2kgd_calls {
 			uint32_t wait_times,
 			uint32_t grace_period,
 			uint32_t *reg_offset,
-			uint32_t *reg_data,
-			uint32_t inst);
+			uint32_t *reg_data);
 	void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
 			int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
 	void (*program_trap_handler_settings)(struct amdgpu_device *adev,


@@ -336,7 +336,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
 
 	/* Store one-time values in driver PPTable */
 	if (!pptable->Init) {
-		while (retry--) {
+		while (--retry) {
 			ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
 			if (ret)
 				return ret;
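
The one-character change above alters both the trip count and the final value of retry: `while (retry--)` tests before decrementing and exits with retry at -1, while `while (--retry)` exits with retry at 0, so a later `if (!retry)` exhaustion check (presumably what follows this hunk) can actually fire. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        int retry;

        retry = 3;
        while (retry--)
            ;                                            /* body runs 3 times */
        printf("post-decrement: retry = %d\n", retry);   /* prints -1 */

        retry = 3;
        while (--retry)
            ;                                            /* body runs 2 times */
        printf("pre-decrement:  retry = %d\n", retry);   /* prints 0 */
        return 0;
    }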


@@ -2203,6 +2203,7 @@ static int drm_mode_create_colorspace_property(struct drm_connector *connector,
 /**
  * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
  * @connector: connector to create the Colorspace property on.
+ * @supported_colorspaces: bitmap of supported color spaces
  *
  * Called by a driver the first time it's needed, must be attached to desired
  * HDMI connectors.
@@ -2227,6 +2228,7 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
 /**
  * drm_mode_create_dp_colorspace_property - create dp colorspace property
  * @connector: connector to create the Colorspace property on.
+ * @supported_colorspaces: bitmap of supported color spaces
  *
  * Called by a driver the first time it's needed, must be attached to desired
  * DP connectors.


@@ -56,7 +56,7 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
 	struct drm_gem_object *obj;
 	unsigned long index;
 
-	drm_exec_for_each_locked_object(exec, index, obj) {
+	drm_exec_for_each_locked_object_reverse(exec, index, obj) {
 		dma_resv_unlock(obj->resv);
 		drm_gem_object_put(obj);
 	}


@@ -3540,6 +3540,27 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
 	return map_aux_ch(devdata->i915, devdata->child.aux_channel);
 }
 
+bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
+{
+	struct drm_i915_private *i915;
+	u8 aux_channel;
+	int count = 0;
+
+	if (!devdata || !devdata->child.aux_channel)
+		return false;
+
+	i915 = devdata->i915;
+	aux_channel = devdata->child.aux_channel;
+
+	list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+		if (intel_bios_encoder_supports_dp(devdata) &&
+		    aux_channel == devdata->child.aux_channel)
+			count++;
+	}
+
+	return count > 1;
+}
+
 int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
 {
 	if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
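
intel_bios_dp_has_shared_aux_ch() reports an AUX channel as shared when more than one DP-capable VBT child device claims it. A simplified stand-in for that walk, over a plain array instead of the i915 list:

    #include <stdio.h>

    struct child { int supports_dp; int aux_ch; };

    /* "Shared" means at least two DP-capable children claim the channel. */
    static int aux_ch_is_shared(const struct child *kids, int n, int aux_ch)
    {
        int count = 0;

        for (int i = 0; i < n; i++) {
            if (kids[i].supports_dp && kids[i].aux_ch == aux_ch)
                count++;
        }
        return count > 1;
    }

    int main(void)
    {
        struct child kids[] = {
            { 1, 0 },   /* eDP on AUX A */
            { 1, 0 },   /* second DP port also declared on AUX A */
            { 1, 1 },   /* DP on AUX B */
        };

        printf("AUX A shared: %d\n", aux_ch_is_shared(kids, 3, 0));  /* 1 */
        printf("AUX B shared: %d\n", aux_ch_is_shared(kids, 3, 1));  /* 0 */
        return 0;
    }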


@@ -273,6 +273,7 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
 int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
 int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
 int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata);
 int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
 int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
 int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);


@@ -5512,8 +5512,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	/*
 	 * VBT and straps are liars. Also check HPD as that seems
 	 * to be the most reliable piece of information available.
+	 *
+	 * ... expect on devices that forgot to hook HPD up for eDP
+	 * (eg. Acer Chromebook C710), so we'll check it only if multiple
+	 * ports are attempting to use the same AUX CH, according to VBT.
 	 */
-	if (!intel_digital_port_connected(encoder)) {
+	if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
+	    !intel_digital_port_connected(encoder)) {
 		/*
 		 * If this fails, presume the DPCD answer came
 		 * from some other port using the same AUX CH.

Some files were not shown because too many files have changed in this diff.