Linux 6.7-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmVsT/geHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGUiMH/0dKqVU0d9Dt1OA8
 vYX7PZi51piRjYCOBr1jdK2Ovkucmgu48O2/oGlfBHmFkCmMeladRIMDMidS3Z52
 2YlILyX5LUUrWfS2441cT/16AVds7VXUZV+8TccalBKLuJHQWVR+ifntSLTihO6c
 EcvEBpdy1HO0nSw70rGhipMZm9+K8F+JBYc9ews/3ylexC4AeUzIET69YP9/q+Ne
 yYbH0TyJYtm+cDN1IFmyJcJ0CtjdKXGvgNGW6Klq8jJA/aiPn2lRlsGw+0SThkkc
 DhVGm5aBYFMP/iIvXNDSKrwHArdTE79d/jiyhlzuThLvaT7aFcpnbP5pD5G4Ds0V
 2M+4Faw=
 =Qz5G
 -----END PGP SIGNATURE-----

Merge tag 'v6.7-rc4' into media_stage

Linux 6.7-rc4

This is needed for a vsp1 fix that upcoming media patches depend on.

Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Hans Verkuil 2023-12-03 16:35:17 +01:00
commit 073249b876
551 changed files with 6533 additions and 5614 deletions


@ -59,15 +59,6 @@ Description:
brightness. Reading this file when no hw brightness change
event has happened will return an ENODATA error.
What: /sys/class/leds/<led>/color
Date: June 2023
KernelVersion: 6.5
Description:
Color of the LED.
This is a read-only file. Reading this file returns the color
of the LED as a string (e.g: "red", "green", "multicolor").
What: /sys/class/leds/<led>/trigger
Date: March 2006
KernelVersion: 2.6.17


@ -375,9 +375,9 @@ Developer web site of Loongson and LoongArch (Software and Documentation):
Documentation of LoongArch ISA:
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-CN.pdf (in Chinese)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-CN.pdf (in Chinese)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-EN.pdf (in English)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-EN.pdf (in English)
Documentation of LoongArch ELF psABI:


@ -9,7 +9,7 @@ title: NXP S32G2 pin controller
maintainers:
- Ghennadi Procopciuc <Ghennadi.Procopciuc@oss.nxp.com>
- Chester Lin <clin@suse.com>
- Chester Lin <chester62515@gmail.com>
description: |
S32G2 pinmux is implemented in SIUL2 (System Integration Unit Lite2),


@ -36,7 +36,11 @@ properties:
vdd-supply:
description:
VDD power supply to the hub
3V3 power supply to the hub
vdd2-supply:
description:
1V2 power supply to the hub
peer-hub:
$ref: /schemas/types.yaml#/definitions/phandle
@ -62,6 +66,7 @@ allOf:
properties:
reset-gpios: false
vdd-supply: false
vdd2-supply: false
peer-hub: false
i2c-bus: false
else:


@ -521,8 +521,8 @@ examples:
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
<GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
<GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
"dm_hs_phy_irq", "dp_hs_phy_irq";


@ -41,7 +41,7 @@ examples:
- |
usb {
phys = <&usb2_phy1>, <&usb3_phy1>;
phy-names = "usb";
phy-names = "usb2", "usb3";
#address-cells = <1>;
#size-cells = <0>;


@ -91,6 +91,10 @@ compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs):
- git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
For more information, please also refer to the documentation site:
- https://erofs.docs.kernel.org
Bugs and patches are welcome, please kindly help us and send to the following
linux-erofs mailing list:


@ -193,9 +193,23 @@ Review timelines
Generally speaking, the patches get triaged quickly (in less than
48h). But be patient, if your patch is active in patchwork (i.e. it's
listed on the project's patch list) the chances it was missed are close to zero.
Asking the maintainer for status updates on your
patch is a good way to ensure your patch is ignored or pushed to the
bottom of the priority list.
The high volume of development on netdev makes reviewers move on
from discussions relatively quickly. New comments and replies
are very unlikely to arrive after a week of silence. If a patch
is no longer active in patchwork and the thread went idle for more
than a week - clarify the next steps and/or post the next version.
For RFC postings specifically, if nobody responded in a week - reviewers
either missed the posting or have no strong opinions. If the code is ready,
repost as a PATCH.
Emails saying just "ping" or "bump" are considered rude. If you can't figure
out the status of the patch from patchwork or where the discussion has
landed - describe your best guess and ask if it's correct. For example::
I don't understand what the next steps are. Person X seems to be unhappy
with A, should I do B and repost the patches?
.. _Changes requested:


@ -338,9 +338,9 @@ Loongson与LoongArch的开发者网站软件与文档资源
LoongArch指令集架构的文档
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-CN.pdf (中文版)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-CN.pdf (中文版)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-EN.pdf (英文版)
https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-EN.pdf (英文版)
LoongArch的ELF psABI文档


@ -5076,7 +5076,6 @@ CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <samitolvanen@google.com>
M: Kees Cook <keescook@chromium.org>
R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
L: llvm@lists.linux.dev
S: Supported
B: https://github.com/ClangBuiltLinux/linux/issues
@ -5091,8 +5090,9 @@ F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
R: Tom Rix <trix@redhat.com>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Bill Wendling <morbo@google.com>
R: Justin Stitt <justinstitt@google.com>
L: llvm@lists.linux.dev
S: Supported
W: https://clangbuiltlinux.github.io/
@ -5242,7 +5242,6 @@ F: drivers/platform/x86/compal-laptop.c
COMPILER ATTRIBUTES
M: Miguel Ojeda <ojeda@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
S: Maintained
F: include/linux/compiler_attributes.h
@ -7855,6 +7854,7 @@ R: Yue Hu <huyue2@coolpad.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
W: https://erofs.docs.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
F: Documentation/ABI/testing/sysfs-fs-erofs
F: Documentation/filesystems/erofs.rst
@ -11024,7 +11024,6 @@ F: drivers/net/wireless/intel/iwlwifi/
INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER
M: Jithu Joseph <jithu.joseph@intel.com>
R: Maurice Ma <maurice.ma@intel.com>
S: Maintained
W: https://slimbootloader.github.io/security/firmware-update.html
F: drivers/platform/x86/intel/wmi/sbl-fw-update.c
@ -11516,7 +11515,6 @@ F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <masahiroy@kernel.org>
R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Nicolas Schier <nicolas@fjasle.eu>
L: linux-kbuild@vger.kernel.org
S: Maintained
@ -13778,7 +13776,6 @@ F: drivers/net/ethernet/mellanox/mlxfw/
MELLANOX HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
@ -14387,7 +14384,6 @@ F: drivers/platform/surface/surface_gpe.c
MICROSOFT SURFACE HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
@ -14994,6 +14990,7 @@ M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
S: Maintained
P: Documentation/process/maintainer-netdev.rst
Q: https://patchwork.kernel.org/project/netdevbpf/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
@ -15045,6 +15042,7 @@ M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
S: Maintained
P: Documentation/process/maintainer-netdev.rst
Q: https://patchwork.kernel.org/project/netdevbpf/list/
B: mailto:netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@ -15055,6 +15053,7 @@ F: Documentation/networking/
F: Documentation/process/maintainer-netdev.rst
F: Documentation/userspace-api/netlink/
F: include/linux/in.h
F: include/linux/indirect_call_wrapper.h
F: include/linux/net.h
F: include/linux/netdevice.h
F: include/net/
@ -17947,6 +17946,8 @@ L: iommu@lists.linux.dev
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
F: drivers/iommu/arm/arm-smmu/arm-smmu-qcom*
F: drivers/iommu/msm_iommu*
QUALCOMM IPC ROUTER (QRTR) DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@ -22087,6 +22088,7 @@ F: drivers/watchdog/tqmx86_wdt.c
TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Masami Hiramatsu <mhiramat@kernel.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
L: linux-kernel@vger.kernel.org
L: linux-trace-kernel@vger.kernel.org
S: Maintained
@ -23681,7 +23683,6 @@ F: drivers/platform/x86/x86-android-tablets/
X86 PLATFORM DRIVERS
M: Hans de Goede <hdegoede@redhat.com>
M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
Q: https://patchwork.kernel.org/project/platform-driver-x86/list/


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -484,7 +484,8 @@ static int __init xen_guest_init(void)
* for secondary CPUs as they are brought up.
* For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
*/
xen_vcpu_info = alloc_percpu(struct vcpu_info);
xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
1 << fls(sizeof(struct vcpu_info) - 1));
if (xen_vcpu_info == NULL)
return -ENOMEM;
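For context on the hunk above: the second argument to __alloc_percpu() is an alignment, and 1 << fls(sizeof(struct vcpu_info) - 1) rounds the structure size up to the next power of two, so the per-CPU vcpu_info cannot straddle a page boundary. A minimal userspace sketch of that rounding (plain C; fls_ul() is a hypothetical stand-in for the kernel's fls(), this is not kernel code):

    #include <stdio.h>

    /* Stand-in for the kernel's fls(): 1-based index of the highest set bit, 0 for 0. */
    static int fls_ul(unsigned long x)
    {
            return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
    }

    /* Round a size up to the next power of two, mirroring 1 << fls(size - 1). */
    static unsigned long vcpu_info_align(unsigned long size)
    {
            return 1UL << fls_ul(size - 1);
    }

    int main(void)
    {
            printf("%lu\n", vcpu_info_align(40));  /* prints 64 */
            printf("%lu\n", vcpu_info_align(64));  /* prints 64 */
            return 0;
    }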


@ -158,7 +158,7 @@ endif
all: $(notdir $(KBUILD_IMAGE))
vmlinuz.efi: Image
Image vmlinuz.efi: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@


@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
extern bool rodata_enabled;
extern bool rodata_full;
if (arg && !strcmp(arg, "full")) {
if (!arg)
return false;
if (!strcmp(arg, "full")) {
rodata_enabled = rodata_full = true;
return true;
}
if (!strcmp(arg, "off")) {
rodata_enabled = rodata_full = false;
return true;
}
if (!strcmp(arg, "on")) {
rodata_enabled = true;
rodata_full = true;
rodata_full = false;
return true;
}


@ -1839,6 +1839,10 @@ static int __init __kpti_install_ng_mappings(void *__unused)
static void __init kpti_install_ng_mappings(void)
{
/* Check whether KPTI is going to be used */
if (!cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return;
/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not


@ -29,8 +29,8 @@ bool can_set_direct_map(void)
*
* KFENCE pool requires page-granular mapping if initialized late.
*/
return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
arm64_kfence_can_set_direct_map();
return rodata_full || debug_pagealloc_enabled() ||
arm64_kfence_can_set_direct_map();
}
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
* If we are manipulating read-only permissions, apply the same
* change to the linear mapping of the pages that back this VM area.
*/
if (rodata_enabled &&
rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
pgprot_val(clear_mask) == PTE_RDONLY)) {
for (i = 0; i < area->nr_pages; i++) {
__change_memory_common((u64)page_address(area->pages[i]),


@ -68,6 +68,7 @@ LDFLAGS_vmlinux += -static -n -nostdlib
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += $(call cc-option,-mexplicit-relocs)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdirect-access-external-data)
KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
@ -142,6 +143,8 @@ vdso-install-y += arch/loongarch/vdso/vdso.so.dbg
all: $(notdir $(KBUILD_IMAGE))
vmlinuz.efi: vmlinux.efi
vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@


@ -609,8 +609,7 @@
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
.pushsection ".la_abs", "aw", %progbits
768:
.dword 768b-766b
.dword 766b
.dword \sym
.popsection
#endif


@ -40,13 +40,13 @@ static __always_inline unsigned long __percpu_##op(void *ptr, \
switch (size) { \
case 4: \
__asm__ __volatile__( \
"am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
"am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
: [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \
: [val] "r" (val)); \
break; \
case 8: \
__asm__ __volatile__( \
"am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
"am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
: [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \
: [val] "r" (val)); \
break; \
@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
static __always_inline unsigned long __percpu_read(void *ptr, int size)
static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
{
unsigned long ret;
@ -100,7 +100,7 @@ static __always_inline unsigned long __percpu_read(void *ptr, int size)
return ret;
}
static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
@ -132,8 +132,7 @@ static __always_inline void __percpu_write(void *ptr, unsigned long val, int siz
}
}
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
int size)
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:


@ -25,7 +25,7 @@ extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len
#ifdef CONFIG_RELOCATABLE
struct rela_la_abs {
long offset;
long pc;
long symvalue;
};


@ -52,7 +52,7 @@ static inline void __init relocate_absolute(long random_offset)
for (p = begin; (void *)p < end; p++) {
long v = p->symvalue;
uint32_t lu12iw, ori, lu32id, lu52id;
union loongarch_instruction *insn = (void *)p - p->offset;
union loongarch_instruction *insn = (void *)p->pc;
lu12iw = (v >> 12) & 0xfffff;
ori = v & 0xfff;
@ -102,6 +102,14 @@ static inline __init unsigned long get_random_boot(void)
return hash;
}
static int __init nokaslr(char *p)
{
pr_info("KASLR is disabled.\n");
return 0; /* Print a notice and silence the boot warning */
}
early_param("nokaslr", nokaslr);
static inline __init bool kaslr_disabled(void)
{
char *str;


@ -58,21 +58,6 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
return 0;
}
static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
{
unsigned long timer_config;
raw_spin_lock(&state_lock);
timer_config = csr_read64(LOONGARCH_CSR_TCFG);
timer_config &= ~CSR_TCFG_EN;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
return 0;
}
static int constant_set_state_periodic(struct clock_event_device *evt)
{
unsigned long period;
@ -92,6 +77,16 @@ static int constant_set_state_periodic(struct clock_event_device *evt)
static int constant_set_state_shutdown(struct clock_event_device *evt)
{
unsigned long timer_config;
raw_spin_lock(&state_lock);
timer_config = csr_read64(LOONGARCH_CSR_TCFG);
timer_config &= ~CSR_TCFG_EN;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
return 0;
}
@ -161,7 +156,7 @@ int constant_clockevent_init(void)
cd->rating = 320;
cd->cpumask = cpumask_of(cpu);
cd->set_state_oneshot = constant_set_state_oneshot;
cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
cd->set_state_oneshot_stopped = constant_set_state_shutdown;
cd->set_state_periodic = constant_set_state_periodic;
cd->set_state_shutdown = constant_set_state_shutdown;
cd->set_next_event = constant_timer_next_event;


@ -13,13 +13,13 @@ struct page *dmw_virt_to_page(unsigned long kaddr)
{
return pfn_to_page(virt_to_pfn(kaddr));
}
EXPORT_SYMBOL_GPL(dmw_virt_to_page);
EXPORT_SYMBOL(dmw_virt_to_page);
struct page *tlb_virt_to_page(unsigned long kaddr)
{
return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
}
EXPORT_SYMBOL_GPL(tlb_virt_to_page);
EXPORT_SYMBOL(tlb_virt_to_page);
pgd_t *pgd_alloc(struct mm_struct *mm)
{


@ -115,9 +115,12 @@ config ARCH_HAS_ILOG2_U64
default n
config GENERIC_BUG
bool
default y
def_bool y
depends on BUG
select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
config GENERIC_BUG_RELATIVE_POINTERS
bool
config GENERIC_HWEIGHT
bool


@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* Alternative SMP implementation. */
#define ALTERNATIVE(cond, replacement) "!0:" \
".section .altinstructions, \"aw\" !" \
".section .altinstructions, \"a\" !" \
".align 4 !" \
".word (0b-4-.) !" \
".hword 1, " __stringify(cond) " !" \
".word " __stringify(replacement) " !" \
@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* to replace one single instructions by a new instruction */
#define ALTERNATIVE(from, to, cond, replacement)\
.section .altinstructions, "aw" ! \
.section .altinstructions, "a" ! \
.align 4 ! \
.word (from - .) ! \
.hword (to - from)/4, cond ! \
.word replacement ! \
@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* to replace multiple instructions by new code */
#define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
.section .altinstructions, "aw" ! \
.section .altinstructions, "a" ! \
.align 4 ! \
.word (from - .) ! \
.hword -num_instructions, cond ! \
.word (new_instr_ptr - .) ! \


@ -574,6 +574,7 @@
*/
#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
.section __ex_table,"aw" ! \
.align 4 ! \
.word (fault_addr - .), (except_addr - .) ! \
.previous


@ -17,24 +17,27 @@
#define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff"
#define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */
#if defined(CONFIG_64BIT)
#define ASM_WORD_INSN ".dword\t"
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
# define __BUG_REL(val) ".word " __stringify(val) " - ."
#else
#define ASM_WORD_INSN ".word\t"
# define __BUG_REL(val) ".word " __stringify(val)
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define BUG() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"\t.align 4\n" \
"2:\t" __BUG_REL(1b) "\n" \
"\t" __BUG_REL(%c0) "\n" \
"\t.short %1, %2\n" \
"\t.blockz %3-2*4-2*2\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
unreachable(); \
} while(0)
@ -51,10 +54,12 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"\t.align 4\n" \
"2:\t" __BUG_REL(1b) "\n" \
"\t" __BUG_REL(%c0) "\n" \
"\t.short %1, %2\n" \
"\t.blockz %3-2*4-2*2\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (BUGFLAG_WARNING|(flags)), \
@ -65,10 +70,11 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b\n" \
"\t.short %c0\n" \
"\t.org 2b+%c1\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"\t.align %2\n" \
"2:\t" __BUG_REL(1b) "\n" \
"\t.short %0\n" \
"\t.blockz %1-4-2\n" \
"\t.popsection" \
: : "i" (BUGFLAG_WARNING|(flags)), \
"i" (sizeof(struct bug_entry)) ); \


@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
: : "i" (&((char *)key)[branch]), "i" (sizeof(long))
: : l_yes);
return false;
l_yes:
@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
asm_volatile_goto("1:\n\t"
"b,n %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
: : "i" (&((char *)key)[branch]), "i" (sizeof(long))
: : l_yes);
return false;
l_yes:


@ -55,7 +55,7 @@
})
#ifdef CONFIG_SMP
# define __lock_aligned __section(".data..lock_aligned")
# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
#endif
#endif /* __PARISC_LDCW_H */


@ -41,6 +41,7 @@ struct exception_table_entry {
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
".section __ex_table,\"aw\"\n" \
".align 4\n" \
".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"


@ -75,7 +75,6 @@
/* We now return you to your regularly scheduled HPUX. */
#define ENOSYM 215 /* symbol does not exist in executable */
#define ENOTSOCK 216 /* Socket operation on non-socket */
#define EDESTADDRREQ 217 /* Destination address required */
#define EMSGSIZE 218 /* Message too long */
@ -101,7 +100,6 @@
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */


@ -130,6 +130,7 @@ SECTIONS
RO_DATA(8)
/* unwind info */
. = ALIGN(4);
.PARISC.unwind : {
__start___unwind = .;
*(.PARISC.unwind)


@ -23,6 +23,15 @@
#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
#define __REST_1FPVSR(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
REST_FPR(n,base); \
b 3f; \
2: REST_VSR(n,c,base); \
3:
#define __REST_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: SAVE_32VSRS(n,c,base); \
3:
#else
#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
SAVE_32FPVSRS(0, R4, R3)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
REST_1FPVSR(0, R4, R3)
blr
EXPORT_SYMBOL(store_fp_state)
@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
2: SAVE_32FPVSRS(0, R4, R6)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r6)
REST_1FPVSR(0, R4, R6)
blr


@ -1198,11 +1198,11 @@ void kvmppc_save_user_regs(void)
usermsr = current->thread.regs->msr;
/* Caller has enabled FP/VEC/VSX/TM in MSR */
if (usermsr & MSR_FP)
save_fpu(current);
__giveup_fpu(current);
if (usermsr & MSR_VEC)
save_altivec(current);
__giveup_altivec(current);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (usermsr & MSR_TM) {


@ -33,6 +33,7 @@ _GLOBAL(store_vr_state)
mfvscr v0
li r4, VRSTATE_VSCR
stvx v0, r4, r3
lvx v0, 0, r3
blr
EXPORT_SYMBOL(store_vr_state)
@ -109,6 +110,7 @@ _GLOBAL(save_altivec)
mfvscr v0
li r4,VRSTATE_VSCR
stvx v0,r4,r7
lvx v0,0,r7
blr
#ifdef CONFIG_VSX


@ -228,7 +228,6 @@ typedef struct thread_struct thread_struct;
execve_tail(); \
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct seq_file;


@ -666,6 +666,7 @@ static int __init ipl_init(void)
&ipl_ccw_attr_group_lpar);
break;
case IPL_TYPE_ECKD:
case IPL_TYPE_ECKD_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
break;
case IPL_TYPE_FCP:


@ -279,12 +279,6 @@ static int paicrypt_event_init(struct perf_event *event)
if (IS_ERR(cpump))
return PTR_ERR(cpump);
/* Event initialization sets last_tag to 0. When later on the events
* are deleted and re-added, do not reset the event count value to zero.
* Events are added, deleted and re-added when 2 or more events
* are active at the same time.
*/
event->hw.last_tag = 0;
event->destroy = paicrypt_event_destroy;
if (a->sample_period) {
@ -318,6 +312,11 @@ static void paicrypt_start(struct perf_event *event, int flags)
{
u64 sum;
/* Event initialization sets last_tag to 0. When later on the events
* are deleted and re-added, do not reset the event count value to zero.
* Events are added, deleted and re-added when 2 or more events
* are active at the same time.
*/
if (!event->hw.last_tag) {
event->hw.last_tag = 1;
sum = paicrypt_getall(event); /* Get current value */


@ -260,7 +260,6 @@ static int paiext_event_init(struct perf_event *event)
rc = paiext_alloc(a, event);
if (rc)
return rc;
event->hw.last_tag = 0;
event->destroy = paiext_event_destroy;
if (a->sample_period) {


@ -4660,7 +4660,7 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
if (pmu->intel_cap.pebs_output_pt_available)
pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
else
pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT;
pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
intel_pmu_check_event_constraints(pmu->event_constraints,
pmu->num_counters,


@ -15,6 +15,7 @@
#include <linux/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/e820/api.h>
#include <asm/sev.h>
#include <asm/ibt.h>
#include <asm/hypervisor.h>
@ -286,15 +287,31 @@ static int hv_cpu_die(unsigned int cpu)
static int __init hv_pci_init(void)
{
int gen2vm = efi_enabled(EFI_BOOT);
bool gen2vm = efi_enabled(EFI_BOOT);
/*
* For Generation-2 VM, we exit from pci_arch_init() by returning 0.
* The purpose is to suppress the harmless warning:
* A Generation-2 VM doesn't support legacy PCI/PCIe, so both
* raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() ->
* pcibios_init() doesn't call pcibios_resource_survey() ->
* e820__reserve_resources_late(); as a result, any emulated persistent
* memory of E820_TYPE_PRAM (12) via the kernel parameter
* memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be
* detected by register_e820_pmem(). Fix this by directly calling
* e820__reserve_resources_late() here: e820__reserve_resources_late()
* depends on e820__reserve_resources(), which has been called earlier
* from setup_arch(). Note: e820__reserve_resources_late() also adds
* any memory of E820_TYPE_PMEM (7) into iomem_resource, and
* acpi_nfit_register_region() -> acpi_nfit_insert_resource() ->
* region_intersects() returns REGION_INTERSECTS, so the memory of
* E820_TYPE_PMEM won't get added twice.
*
* We return 0 here so that pci_arch_init() won't print the warning:
* "PCI: Fatal: No config space access function found"
*/
if (gen2vm)
if (gen2vm) {
e820__reserve_resources_late();
return 0;
}
/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
return 1;


@ -104,8 +104,6 @@ struct cont_desc {
size_t size;
};
static u32 ucode_new_rev;
/*
* Microcode patch container file is prepended to the initrd in cpio
* format. See Documentation/arch/x86/microcode.rst
@ -442,12 +440,11 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
*
* Returns true if container found (sets @desc), false otherwise.
*/
static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
struct microcode_amd *mc;
bool ret = false;
u32 rev, dummy;
desc.cpuid_1_eax = cpuid_1_eax;
@ -457,22 +454,15 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
if (!mc)
return ret;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (rev > mc->hdr.patch_id)
if (old_rev > mc->hdr.patch_id)
return ret;
if (!__apply_microcode_amd(mc)) {
ucode_new_rev = mc->hdr.patch_id;
ret = true;
}
return ret;
return !__apply_microcode_amd(mc);
}
static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@ -506,9 +496,12 @@ static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpi
*ret = cp;
}
void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
struct cpio_data cp = { };
u32 dummy;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
/* Needed in load_microcode_amd() */
ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@ -517,7 +510,8 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
if (!(cp.data && cp.size))
return;
early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size))
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
@ -625,10 +619,8 @@ void reload_ucode_amd(unsigned int cpu)
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev < mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
ucode_new_rev = mc->hdr.patch_id;
pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
}
if (!__apply_microcode_amd(mc))
pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
}
}
@ -649,8 +641,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
if (p && (p->patch_id == csig->rev))
uci->mc = p->data;
pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
return 0;
}
@ -691,8 +681,6 @@ static enum ucode_state apply_microcode_amd(int cpu)
rev = mc_amd->hdr.patch_id;
ret = UCODE_UPDATED;
pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
out:
uci->cpu_sig.rev = rev;
c->microcode = rev;
@ -935,11 +923,6 @@ struct microcode_ops * __init init_amd_microcode(void)
pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
return NULL;
}
if (ucode_new_rev)
pr_info_once("microcode updated early to new patch_level=0x%08x\n",
ucode_new_rev);
return &microcode_amd_ops;
}


@ -41,8 +41,6 @@
#include "internal.h"
#define DRIVER_VERSION "2.2"
static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr = true;
@ -77,6 +75,8 @@ static u32 final_levels[] = {
0, /* T-101 terminator */
};
struct early_load_data early_data;
/*
* Check the current patch level on this CPU.
*
@ -155,9 +155,9 @@ void __init load_ucode_bsp(void)
return;
if (intel)
load_ucode_intel_bsp();
load_ucode_intel_bsp(&early_data);
else
load_ucode_amd_bsp(cpuid_1_eax);
load_ucode_amd_bsp(&early_data, cpuid_1_eax);
}
void load_ucode_ap(void)
@ -828,6 +828,11 @@ static int __init microcode_init(void)
if (!microcode_ops)
return -ENODEV;
pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));
if (early_data.new_rev)
pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);
microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
if (IS_ERR(microcode_pdev))
return PTR_ERR(microcode_pdev);
@ -846,8 +851,6 @@ static int __init microcode_init(void)
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
return 0;
out_pdev:


@ -339,16 +339,9 @@ static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
{
struct microcode_intel *mc = uci->mc;
enum ucode_state ret;
u32 cur_rev, date;
u32 cur_rev;
ret = __apply_microcode(uci, mc, &cur_rev);
if (ret == UCODE_UPDATED) {
date = mc->hdr.date;
pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff);
}
return ret;
return __apply_microcode(uci, mc, &cur_rev);
}
static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
@ -413,13 +406,17 @@ static int __init save_builtin_microcode(void)
early_initcall(save_builtin_microcode);
/* Load microcode on BSP from initrd or builtin blobs */
void __init load_ucode_intel_bsp(void)
void __init load_ucode_intel_bsp(struct early_load_data *ed)
{
struct ucode_cpu_info uci;
ed->old_rev = intel_get_microcode_revision();
uci.mc = get_microcode_blob(&uci, false);
if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED)
ucode_patch_va = UCODE_BSP_LOADED;
ed->new_rev = uci.cpu_sig.rev;
}
void load_ucode_intel_ap(void)


@ -37,6 +37,12 @@ struct microcode_ops {
use_nmi : 1;
};
struct early_load_data {
u32 old_rev;
u32 new_rev;
};
extern struct early_load_data early_data;
extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path);
@ -92,14 +98,14 @@ extern bool dis_ucode_ldr;
extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(unsigned int family);
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(unsigned int family) { }
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
@ -108,12 +114,12 @@ static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */
#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(void);
void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(void) { }
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }


@ -262,11 +262,14 @@ static uint32_t __init ms_hyperv_platform(void)
static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
{
static atomic_t nmi_cpu = ATOMIC_INIT(-1);
unsigned int old_cpu, this_cpu;
if (!unknown_nmi_panic)
return NMI_DONE;
if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
old_cpu = -1;
this_cpu = raw_smp_processor_id();
if (!atomic_try_cmpxchg(&nmi_cpu, &old_cpu, this_cpu))
return NMI_HANDLED;
return NMI_DONE;
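For context on the change above: atomic_try_cmpxchg() returns a bool and, on failure, writes the value it actually found back into old_cpu, so only the first CPU to swap -1 for its own id falls through to return NMI_DONE (letting the unknown-NMI path report/panic on exactly one CPU), while later callers return NMI_HANDLED. A rough userspace sketch of that claim pattern using C11 atomics (hypothetical names, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int nmi_cpu = -1;   /* mirrors the static atomic_t in hv_nmi_unknown() */

    /* First caller to swap -1 -> its CPU id "wins"; everyone else sees the stored id. */
    static bool claim_nmi(int this_cpu)
    {
            int old_cpu = -1;
            return atomic_compare_exchange_strong(&nmi_cpu, &old_cpu, this_cpu);
    }

    int main(void)
    {
            printf("cpu0 claims: %d\n", claim_nmi(0));  /* 1: claimed */
            printf("cpu1 claims: %d\n", claim_nmi(1));  /* 0: already claimed by cpu0 */
            return 0;
    }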


@ -33,9 +33,12 @@ EXPORT_SYMBOL_GPL(hypercall_page);
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info.
* The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
* Make sure that xen_vcpu_info doesn't cross a page boundary by making it
* cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
* which matches the cache line size of 64-bit x86 processors).
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
int err;
struct vcpu_info *vcpup;
BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
/*


@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);
DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
DECLARE_PER_CPU(unsigned long, xen_current_cr3);


@ -425,6 +425,8 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
void bdev_add(struct block_device *bdev, dev_t dev)
{
if (bdev_stable_writes(bdev))
mapping_set_stable_writes(bdev->bd_inode->i_mapping);
bdev->bd_dev = dev;
bdev->bd_inode->i_rdev = dev;
bdev->bd_inode->i_ino = dev;


@ -577,6 +577,7 @@ static void blkg_destroy_all(struct gendisk *disk)
struct request_queue *q = disk->queue;
struct blkcg_gq *blkg, *n;
int count = BLKG_DESTROY_BATCH_SIZE;
int i;
restart:
spin_lock_irq(&q->queue_lock);
@ -602,6 +603,18 @@ static void blkg_destroy_all(struct gendisk *disk)
}
}
/*
* Mark policy deactivated since policy offline has been done, and
* the free is scheduled, so future blkcg_deactivate_policy() can
* be bypassed
*/
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
if (pol)
__clear_bit(pol->plid, q->blkcg_pols);
}
q->root_blkg = NULL;
spin_unlock_irq(&q->queue_lock);
}


@ -249,8 +249,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
{
struct blkcg_gq *blkg;
WARN_ON_ONCE(!rcu_read_lock_held());
if (blkcg == &blkcg_root)
return q->root_blkg;


@ -501,9 +501,17 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
/* Older lvm-tools actually trigger this */
if (bio->bi_bdev->bd_ro_warned)
return;
bio->bi_bdev->bd_ro_warned = true;
/*
* Use ioctl to set underlying disk of raid/dm to read-only
* will trigger this.
*/
pr_warn("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
}
}


@ -1512,14 +1512,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
static bool blk_is_flush_data_rq(struct request *rq)
{
return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
}
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
/*
* If we find a request that isn't idle we know the queue is busy
* as it's checked in the iter.
* Return false to stop the iteration.
*
* In case of queue quiesce, if one flush data request is completed,
* don't count it as inflight given the flush sequence is suspended,
* and the original flush data request is invisible to driver, just
* like other pending requests because of quiesce
*/
if (blk_mq_request_started(rq)) {
if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
blk_is_flush_data_rq(rq) &&
blk_mq_request_completed(rq))) {
bool *busy = priv;
*busy = true;


@ -163,38 +163,15 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
* @q: the queue of the device
*
* Description:
* For historical reasons, this routine merely calls blk_set_runtime_active()
* to do the real work of restarting the queue. It does this regardless of
* whether the device's runtime-resume succeeded; even if it failed the
* Restart the queue of a runtime suspended device. It does this regardless
* of whether the device's runtime-resume succeeded; even if it failed the
* driver or error handler will need to communicate with the device.
*
* This function should be called near the end of the device's
* runtime_resume callback.
* runtime_resume callback to correct queue runtime PM status and re-enable
* peeking requests from the queue.
*/
void blk_post_runtime_resume(struct request_queue *q)
{
blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
/**
* blk_set_runtime_active - Force runtime status of the queue to be active
* @q: the queue of the device
*
* If the device is left runtime suspended during system suspend the resume
* hook typically resumes the device and corrects runtime status
* accordingly. However, that does not affect the queue runtime PM status
* which is still "suspended". This prevents processing requests from the
* queue.
*
* This function can be used in driver's resume hook to correct queue
* runtime PM status and re-enable peeking requests from the queue. It
* should be called before first request is added to the queue.
*
* This function is also called by blk_post_runtime_resume() for
* runtime resumes. It does everything necessary to restart the queue.
*/
void blk_set_runtime_active(struct request_queue *q)
{
int old_status;
@ -211,4 +188,4 @@ void blk_set_runtime_active(struct request_queue *q)
if (old_status != RPM_ACTIVE)
blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_set_runtime_active);
EXPORT_SYMBOL(blk_post_runtime_resume);


@ -615,6 +615,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@ -659,6 +660,7 @@ static struct attribute *queue_attrs[] = {
NULL,
};
/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
&queue_requests_entry.attr,
&elv_iosched_entry.attr,


@ -1320,6 +1320,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
rcu_read_lock();
/*
* Update has_rules[] flags for the updated tg's subtree. A tg is
* considered to have rules if either the tg itself or any of its
@ -1347,6 +1348,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
this_tg->latency_target = max(this_tg->latency_target,
parent_tg->latency_target);
}
rcu_read_unlock();
/*
* We're already holding queue_lock and know @tg is valid. Let's


@ -502,6 +502,16 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
return ret;
}
static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
{
ivpu_boot_dpu_active_drive(vdev, false);
ivpu_boot_pwr_island_isolation_drive(vdev, true);
ivpu_boot_pwr_island_trickle_drive(vdev, false);
ivpu_boot_pwr_island_drive(vdev, false);
return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
}
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
@ -600,25 +610,17 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
int ret = 0;
if (IVPU_WA(punit_disabled))
return 0;
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
if (ivpu_boot_pwr_domain_disable(vdev)) {
ivpu_err(vdev, "Failed to disable power domain\n");
ret = -EIO;
}
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
ret = -EIO;
}
return ret;
}
@ -651,10 +653,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_hw_37xx_reset(vdev);
if (ret)
ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@ -722,11 +720,11 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
ivpu_err(vdev, "Failed to reset the VPU\n");
if (!ivpu_hw_37xx_is_idle(vdev))
ivpu_warn(vdev, "VPU not idle during power down\n");
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
if (ivpu_hw_37xx_reset(vdev)) {
ivpu_err(vdev, "Failed to reset VPU\n");
ret = -EIO;
}


@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
*state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
return 0;
@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
unsigned long long level;
int offset;
@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
static int
video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
int level;
if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
@ -1125,7 +1122,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = data;
data->device_id = device_id;
data->video = video;
@ -1747,8 +1743,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
device->backlight->props.brightness =
acpi_video_get_brightness(device->backlight);
device->cooling_dev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
device->cooling_dev = thermal_cooling_device_register("LCD", device,
&video_cooling_ops);
if (IS_ERR(device->cooling_dev)) {
/*
* Set cooling_dev to NULL so we don't crash trying to free it.
@ -2031,7 +2027,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
* HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
* evaluated to have functional panel brightness control.
*/
acpi_device_fix_up_power_extended(device);
acpi_device_fix_up_power_children(device);
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),


@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
/**
* acpi_device_fix_up_power_children - Force a device's children into D0.
* @adev: Parent device object whose children's power state is to be fixed up.
*
* Call acpi_device_fix_up_power() for @adev's children so long as they
* are reported as present and enabled.
*/
void acpi_device_fix_up_power_children(struct acpi_device *adev)
{
acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
int state;


@ -592,7 +592,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
} else


@ -447,6 +447,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
},
},
{
/* Asus ExpertBook B1402CVA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
},
},
{
/* Asus ExpertBook B1502CBA */
.matches = {


@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
int err;
const struct iommu_ops *ops;
/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
/*
* If we already translated the fwspec there is nothing left to do,
* return the iommu_ops.
*/
ops = acpi_iommu_fwspec_ops(dev);
if (ops)
if (ops) {
mutex_unlock(&iommu_probe_device_lock);
return ops;
}
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
mutex_unlock(&iommu_probe_device_lock);
/*
* If we have reason to believe the IOMMU driver missed the initial


@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
* Ask the sd driver to issue START STOP UNIT on runtime suspend
* and resume and shutdown only. For system level suspend/resume,
* devices power state is handled directly by libata EH.
* Given that disks are always spun up on system resume, also
* make sure that the sd driver forces runtime suspended disks
* to be resumed to correctly reflect the power state of the
* device.
*/
sdev->manage_runtime_start_stop = true;
sdev->manage_shutdown = true;
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
sdev->force_runtime_start_on_system_start = 1;
}
/*


@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
if (pnp_port_valid(idev, 1)) {
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
if (!ctl_addr)
return -ENOMEM;
ap->ioaddr.altstatus_addr = ctl_addr;
ap->ioaddr.ctl_addr = ctl_addr;
ap->ops = &isapnp_port_ops;


@ -67,6 +67,7 @@ struct nbd_sock {
struct recv_thread_args {
struct work_struct work;
struct nbd_device *nbd;
struct nbd_sock *nsock;
int index;
};
@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
}
}
static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
{
if (refcount_inc_not_zero(&nbd->config_refs)) {
/*
* Add smp_mb__after_atomic to ensure that reading nbd->config_refs
* and reading nbd->config is ordered. The pair is the barrier in
* nbd_alloc_and_init_config(), avoid nbd->config_refs is set
* before nbd->config.
*/
smp_mb__after_atomic();
return nbd->config;
}
return NULL;
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
return BLK_EH_DONE;
}
if (!refcount_inc_not_zero(&nbd->config_refs)) {
config = nbd_get_config_unlocked(nbd);
if (!config) {
cmd->status = BLK_STS_TIMEOUT;
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
goto done;
}
config = nbd->config;
if (config->num_connections > 1 ||
(config->num_connections == 1 && nbd->tag_set.timeout)) {
@ -489,15 +506,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
return BLK_EH_DONE;
}
/*
* Send or receive packet. Return a positive value on success and
* negtive value on failue, and never return 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
struct iov_iter *iter, int msg_flags, int *sent)
{
struct nbd_config *config = nbd->config;
struct socket *sock = config->socks[index]->sock;
int result;
struct msghdr msg;
unsigned int noreclaim_flag;
@ -540,6 +551,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
return result;
}
/*
* Send or receive packet. Return a positive value on success and
* negtive value on failure, and never return 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
{
struct nbd_config *config = nbd->config;
struct socket *sock = config->socks[index]->sock;
return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
}
/*
* Different settings for sk->sk_sndtimeo can result in different return values
* if there is a signal pending when we enter sendmsg, because reasons?
@ -696,7 +720,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
return 0;
}
static int nbd_read_reply(struct nbd_device *nbd, int index,
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
reply->magic = 0;
iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
if (result < 0) {
if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct request_queue *q = nbd->disk->queue;
struct nbd_sock *nsock;
struct nbd_sock *nsock = args->nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
struct nbd_reply reply;
if (nbd_read_reply(nbd, args->index, &reply))
if (nbd_read_reply(nbd, nsock->sock, &reply))
break;
/*
@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
percpu_ref_put(&q->q_usage_counter);
}
nsock = config->socks[args->index];
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_sock *nsock;
int ret;
if (!refcount_inc_not_zero(&nbd->config_refs)) {
config = nbd_get_config_unlocked(nbd);
if (!config) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
return -EINVAL;
}
config = nbd->config;
if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
INIT_WORK(&args->work, recv_work);
args->index = i;
args->nbd = nbd;
args->nsock = nsock;
nsock->cookie++;
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
args->nbd = nbd;
args->nsock = config->socks[i];
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
@ -1530,17 +1555,20 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
return error;
}
static struct nbd_config *nbd_alloc_config(void)
static int nbd_alloc_and_init_config(struct nbd_device *nbd)
{
struct nbd_config *config;
if (WARN_ON(nbd->config))
return -EINVAL;
if (!try_module_get(THIS_MODULE))
return ERR_PTR(-ENODEV);
return -ENODEV;
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
if (!config) {
module_put(THIS_MODULE);
return ERR_PTR(-ENOMEM);
return -ENOMEM;
}
atomic_set(&config->recv_threads, 0);
@ -1548,12 +1576,24 @@ static struct nbd_config *nbd_alloc_config(void)
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
return config;
nbd->config = config;
/*
* Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
* its pair is the barrier in nbd_get_config_unlocked().
* So nbd_get_config_unlocked() won't see nbd->config as null after
* refcount_inc_not_zero() succeed.
*/
smp_mb__before_atomic();
refcount_set(&nbd->config_refs, 1);
return 0;
}
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
struct nbd_device *nbd;
struct nbd_config *config;
int ret = 0;
mutex_lock(&nbd_index_mutex);
@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
ret = -ENXIO;
goto out;
}
if (!refcount_inc_not_zero(&nbd->config_refs)) {
struct nbd_config *config;
config = nbd_get_config_unlocked(nbd);
if (!config) {
mutex_lock(&nbd->config_lock);
if (refcount_inc_not_zero(&nbd->config_refs)) {
mutex_unlock(&nbd->config_lock);
goto out;
}
config = nbd_alloc_config();
if (IS_ERR(config)) {
ret = PTR_ERR(config);
ret = nbd_alloc_and_init_config(nbd);
if (ret) {
mutex_unlock(&nbd->config_lock);
goto out;
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
} else if (nbd_disconnected(nbd->config)) {
} else if (nbd_disconnected(config)) {
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
}
@ -1990,22 +2028,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
pr_err("nbd%d already in use\n", index);
return -EBUSY;
}
if (WARN_ON(nbd->config)) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
return -EINVAL;
}
config = nbd_alloc_config();
if (IS_ERR(config)) {
ret = nbd_alloc_and_init_config(nbd);
if (ret) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
pr_err("couldn't allocate config\n");
return PTR_ERR(config);
return ret;
}
nbd->config = config;
refcount_set(&nbd->config_refs, 1);
set_bit(NBD_RT_BOUND, &config->runtime_flags);
config = nbd->config;
set_bit(NBD_RT_BOUND, &config->runtime_flags);
ret = nbd_genl_size_set(info, nbd);
if (ret)
goto out;
@ -2208,7 +2241,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
mutex_unlock(&nbd_index_mutex);
if (!refcount_inc_not_zero(&nbd->config_refs)) {
config = nbd_get_config_unlocked(nbd);
if (!config) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
nbd_put(nbd);
@ -2216,7 +2250,6 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&nbd->config_lock);
config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->pid) {
dev_err(nbd_to_dev(nbd),

View file

@ -1464,19 +1464,13 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
return BLK_STS_OK;
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
sector_t nr_sectors, enum req_op op)
static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
blk_status_t sts;
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
sts = null_handle_throttled(cmd);
if (sts != BLK_STS_OK)
return sts;
}
if (op == REQ_OP_FLUSH) {
cmd->error = errno_to_blk_status(null_handle_flush(nullb));
goto out;
@ -1493,7 +1487,6 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
out:
nullb_complete_cmd(cmd);
return BLK_STS_OK;
}
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
@ -1724,8 +1717,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->fake_timeout = should_timeout_request(rq) ||
blk_should_fake_timeout(rq->q);
blk_mq_start_request(rq);
if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path, and the
@ -1738,6 +1729,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
blk_status_t sts = null_handle_throttled(cmd);
if (sts != BLK_STS_OK)
return sts;
}
blk_mq_start_request(rq);
if (is_poll) {
spin_lock(&nq->poll_lock);
list_add_tail(&rq->queuelist, &nq->poll_list);
@ -1747,7 +1747,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
if (cmd->fake_timeout)
return BLK_STS_OK;
return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
return BLK_STS_OK;
}
static void null_queue_rqs(struct request **rqlist)

View file

@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
return 0;
}
@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
return 0;
}
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
u32 max_limit_perf, min_limit_perf;
struct amd_cpudata *cpudata = policy->driver_data;
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
WRITE_ONCE(cpudata->min_limit_freq, policy->min);
return 0;
}
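A quick worked example of the scaling above (numbers are illustrative, not taken from the patch): with highest_perf = 166, cpudata->max_freq = 4200000 kHz and policy->max = 3600000 kHz, max_limit_perf = 3600000 * 166 / 4200000 = 142, so capping the policy to ~86% of the maximum frequency clamps requests to roughly 86% of the highest performance level; min_limit_perf is derived the same way from policy->min.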
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!cpudata->max_freq)
return -ENODEV;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
cap_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf;
@ -518,7 +542,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
return amd_pstate_update_freq(policy, target_freq, true);
if (!amd_pstate_update_freq(policy, target_freq, true))
return target_freq;
return policy->cur;
}
static void amd_pstate_adjust_perf(unsigned int cpu,
@ -532,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
struct amd_cpudata *cpudata = policy->driver_data;
unsigned int target_freq;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
max_freq = READ_ONCE(cpudata->max_freq);
@ -745,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
/* Initial processor data capability frequencies */
cpudata->max_freq = max_freq;
cpudata->min_freq = min_freq;
cpudata->max_limit_freq = max_freq;
cpudata->min_limit_freq = min_freq;
cpudata->nominal_freq = nominal_freq;
cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
@ -850,11 +882,16 @@ static ssize_t show_energy_performance_available_preferences(
{
int i = 0;
int offset = 0;
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
return sysfs_emit_at(buf, offset, "%s\n",
energy_perf_strings[EPP_INDEX_PERFORMANCE]);
while (energy_perf_strings[i] != NULL)
offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
sysfs_emit_at(buf, offset, "\n");
offset += sysfs_emit_at(buf, offset, "\n");
return offset;
}
@ -1183,16 +1220,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static void amd_pstate_epp_init(unsigned int cpu)
static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
u32 max_perf, min_perf;
u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
u64 value;
s16 epp;
max_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
value = READ_ONCE(cpudata->cppc_req_cached);
@ -1210,9 +1256,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(0);
if (cpudata->epp_policy == cpudata->policy)
goto skip_epp;
cpudata->epp_policy = cpudata->policy;
/* Get BIOS pre-defined epp value */
@ -1222,7 +1265,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
* This return value can only be negative for shared_memory
* systems where EPP register read/write not supported.
*/
goto skip_epp;
return;
}
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@ -1236,8 +1279,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_set_epp(cpudata, epp);
skip_epp:
cpufreq_cpu_put(policy);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@ -1252,7 +1293,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
amd_pstate_epp_init(policy->cpu);
amd_pstate_epp_update_limit(policy);
return 0;
}

View file

@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)

View file

@ -23,8 +23,10 @@
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
@ -55,6 +57,7 @@ struct qcom_cpufreq_match_data {
struct qcom_cpufreq_drv_cpu {
int opp_token;
struct device **virt_devs;
};
struct qcom_cpufreq_drv {
@ -424,6 +427,30 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
.get_version = qcom_cpufreq_ipq8074_name_version,
};
static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
{
const char * const *name = drv->data->genpd_names;
int i;
if (!drv->cpus[cpu].virt_devs)
return;
for (i = 0; *name; i++, name++)
device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
}
static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
{
const char * const *name = drv->data->genpd_names;
int i;
if (!drv->cpus[cpu].virt_devs)
return;
for (i = 0; *name; i++, name++)
pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
}
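Both helpers above walk drv->data->genpd_names, which is expected to be a NULL-terminated array of power-domain names, and index virt_devs in lockstep; that is why the loops test *name rather than carrying a separate count. A minimal illustration of such a table (the identifier below is made up, not taken from this driver):

	static const char * const example_genpd_names[] = { "cpr", NULL };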
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
@ -478,6 +505,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
of_node_put(np);
for_each_possible_cpu(cpu) {
struct device **virt_devs = NULL;
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@ -498,7 +526,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (drv->data->genpd_names) {
config.genpd_names = drv->data->genpd_names;
config.virt_devs = NULL;
config.virt_devs = &virt_devs;
}
if (config.supported_hw || config.genpd_names) {
@ -509,6 +537,27 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
goto free_opp;
}
}
if (virt_devs) {
const char * const *name = config.genpd_names;
int i, j;
for (i = 0; *name; i++, name++) {
ret = pm_runtime_resume_and_get(virt_devs[i]);
if (ret) {
dev_err(cpu_dev, "failed to resume %s: %d\n",
*name, ret);
/* Rollback previous PM runtime calls */
name = config.genpd_names;
for (j = 0; *name && j < i; j++, name++)
pm_runtime_put(virt_devs[j]);
goto free_opp;
}
}
drv->cpus[cpu].virt_devs = virt_devs;
}
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@ -522,8 +571,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
for_each_possible_cpu(cpu)
for_each_possible_cpu(cpu) {
qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
return ret;
}
@ -534,15 +585,31 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu)
for_each_possible_cpu(cpu) {
qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
}
static int qcom_cpufreq_suspend(struct device *dev)
{
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
unsigned int cpu;
for_each_possible_cpu(cpu)
qcom_cpufreq_suspend_virt_devs(drv, cpu);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL);
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
.remove_new = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
.pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
},
};

View file

@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage &&
dma_fence_is_later(fence, old)) ||
dma_fence_is_later_or_same(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
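dma_fence_is_later_or_same() is not shown in this hunk; upstream it is a trivial inline added alongside this fix, roughly:

	static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
						      struct dma_fence *f2)
	{
		return f1 == f2 || dma_fence_is_later(f1, f2);
	}

so an existing slot may now also be replaced when the incoming fence is the very same fence object, not only a strictly later one on the same context.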

View file

@ -1093,9 +1093,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_PIN_ID_GET);
if (!hdr)
if (!hdr) {
nlmsg_free(msg);
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
ret = dpll_msg_add_pin_handle(msg, pin);
@ -1123,8 +1124,10 @@ int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_PIN_GET);
if (!hdr)
if (!hdr) {
nlmsg_free(msg);
return -EMSGSIZE;
}
ret = dpll_cmd_pin_get_one(msg, pin, info->extack);
if (ret) {
nlmsg_free(msg);
@ -1256,8 +1259,10 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_DEVICE_ID_GET);
if (!hdr)
if (!hdr) {
nlmsg_free(msg);
return -EMSGSIZE;
}
dpll = dpll_device_find_from_nlattr(info);
if (!IS_ERR(dpll)) {
@ -1284,8 +1289,10 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_DEVICE_GET);
if (!hdr)
if (!hdr) {
nlmsg_free(msg);
return -EMSGSIZE;
}
ret = dpll_device_get_one(dpll, msg, info->extack);
if (ret) {

View file

@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
fw_unit_attributes,
&unit->attribute_group);
if (device_register(&unit->device) < 0)
goto skip_unit;
fw_device_get(device);
continue;
skip_unit:
kfree(unit);
if (device_register(&unit->device) < 0) {
put_device(&unit->device);
continue;
}
}
}

View file

@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->use_10_for_rw = 1;
if (sbp2_param_exclusive_login) {
sdev->manage_system_start_stop = true;
sdev->manage_runtime_start_stop = true;
sdev->manage_shutdown = true;
sdev->manage_system_start_stop = 1;
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
}
if (sdev->type == TYPE_ROM)

View file

@ -101,7 +101,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
* overlap on physical address level.
*/
list_for_each_entry(entry, &accepting_list, list) {
if (entry->end < range.start)
if (entry->end <= range.start)
continue;
if (entry->start >= range.end)
continue;
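The comparison change treats accepted ranges as half-open intervals [start, end). As a reminder of the underlying check (variable names illustrative), two half-open ranges overlap exactly when

	a.start < b.end && b.start < a.end

so an entry whose end equals range.start no longer counts as overlapping and is correctly skipped.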

View file

@ -547,7 +547,7 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
struct amdgpu_device *adev = dst, *peer_adev;
int num_links;
if (adev->asic_type != CHIP_ALDEBARAN)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
return 0;
if (src)

View file

@ -638,6 +638,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
if (!adev->didt_rreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@ -694,6 +697,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
if (!adev->didt_wreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

View file

@ -4538,6 +4538,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
r = amdgpu_dpm_notify_rlc_state(adev, false);
if (r)
return r;
return 0;
}

View file

@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
adev->have_disp_power_ref = true;
return ret;
}
/* if we have no active crtcs, then drop the power ref
* we got before
/* if we have no active crtcs, then go to
* drop the power ref we got before
*/
if (!active && adev->have_disp_power_ref) {
pm_runtime_put_autosuspend(dev->dev);
if (!active && adev->have_disp_power_ref)
adev->have_disp_power_ref = false;
}
out:
/* drop the power reference we got coming in here */
pm_runtime_put_autosuspend(dev->dev);

View file

@ -2263,6 +2263,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
/*
* For runpm implemented via BACO, PMFW will handle the
* timing for BACO in and out:

View file

@ -181,6 +181,9 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
if (!bo->ttm)
return AMDGPU_BO_INVALID_OFFSET;
if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;

View file

@ -1527,10 +1527,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
offset = (bo->tbo.resource->start << PAGE_SHIFT) +
amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
if (bo->tbo.resource->mem_type == TTM_PL_TT)
offset = amdgpu_gmc_agp_addr(&bo->tbo);
if (offset == AMDGPU_BO_INVALID_OFFSET)
offset = (bo->tbo.resource->start << PAGE_SHIFT) +
amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}

View file

@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
control->i2c_address = EEPROM_I2C_MADDR_0;
return true;
case IP_VERSION(13, 0, 0):
if (strnstr(atom_ctx->vbios_pn, "D707",
sizeof(atom_ctx->vbios_pn)))
control->i2c_address = EEPROM_I2C_MADDR_0;
else
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
control->i2c_address = EEPROM_I2C_MADDR_4;

View file

@ -959,10 +959,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
bo->resource->start = addr >> PAGE_SHIFT;
if (addr != AMDGPU_BO_INVALID_OFFSET)
return 0;
}
/* allocate GART space */
placement.num_placement = 1;

View file

@ -89,6 +89,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
@ -304,6 +308,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
soc15_program_register_sequence(adev,
golden_settings_gc_11_0,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
@ -419,7 +427,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
cpu_ptr = &adev->wb.wb[index];
r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;

View file

@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;

View file

@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;

View file

@ -297,8 +297,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;

View file

@ -259,17 +259,17 @@ const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = {
static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
{
/* uint32_t def, data;
uint32_t def, data;
def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
*/
}
static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,

View file

@ -611,11 +611,6 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
dev_info(adev->dev, "RAS controller interrupt triggered "
"by NBIF error\n");
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
amdgpu_ras_reset_gpu(adev);
}
amdgpu_ras_error_data_fini(&err_data);

View file

@ -1161,6 +1161,11 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x46;
/* GC 9.4.3 uses MMIO register region hole at a different offset */
if (!amdgpu_sriov_vf(adev)) {
adev->rmmio_remap.reg_offset = 0x1A000;
adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
}
break;
default:
/* FIXME: not supported yet */

View file

@ -1128,7 +1128,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
struct kfd_dev *dev = adev->kfd.dev;
uint32_t i;
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3))
return dev->nodes[0];
for (i = 0; i < dev->num_nodes; i++)

View file

@ -169,16 +169,43 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
return 0;
}
static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
struct process_queue_node *pqn)
{
struct kfd_node *dev;
struct kfd_process_device *pdd;
dev = pqn->q->device;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return;
}
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info, pqn->q->gws);
pdd->qpd.num_gws = 0;
}
if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
}
}
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
if (pqn->q && pqn->q->gws &&
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!pqn->q->device->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
pqn->q->gws);
if (pqn->q)
pqm_clean_queue_resource(pqm, pqn);
kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
list_del(&pqn->process_queue_list);
@ -461,22 +488,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
goto err_destroy_queue;
}
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info,
pqn->q->gws);
pdd->qpd.num_gws = 0;
}
if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev,
pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
}
pqm_clean_queue_resource(pqm, pqn);
uninit_queue(pqn->q);
}

View file

@ -6267,7 +6267,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
dm_new_state->underscan_enable = val;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
dm_new_state->abm_level = val;
dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
ret = 0;
}
@ -6312,7 +6312,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
*val = dm_state->underscan_enable;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
*val = dm_state->abm_level;
*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
dm_state->abm_level : 0;
ret = 0;
}
@ -6385,7 +6386,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
state->abm_level = amdgpu_dm_abm_level;
state->abm_level = amdgpu_dm_abm_level ?:
ABM_LEVEL_IMMEDIATE_DISABLE;
__drm_atomic_helper_connector_reset(connector, &state->base);
}

View file

@ -334,7 +334,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -342,7 +342,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -350,7 +350,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -358,7 +358,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,

View file

@ -232,6 +232,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
/* DTBCLK is fixed, so set a default if unspecified. */
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@ -265,8 +269,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
dcn35_smu_set_dtbclk(clk_mgr, true);
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
}
/* check that we're not already in D0 */
@ -314,17 +320,12 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
if (!new_clocks->dtbclk_en) {
new_clocks->ref_dtbclk_khz = 600000;
}
/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
if (!dc->debug.disable_dtb_ref_clk_switch &&
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
/* DCCG requires KHz precision for DTBCLK */
dcn35_smu_set_dtbclk(clk_mgr, true);
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
}
if (dpp_clock_lowered) {
@ -443,32 +444,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
}
@ -480,32 +481,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
}
@ -515,11 +516,6 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
static struct dcn35_ss_info_table ss_info_table = {
.ss_divider = 1000,
.ss_percentage = {0, 0, 375, 375, 375}
};
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@ -653,27 +649,47 @@ static unsigned int convert_wck_ratio(uint8_t wck_ratio)
return 1;
}
static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
{
return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
}
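For reference, calc_dram_speed_mts() turns a memory p-state entry into an effective data rate: with an illustrative UClk of 1000 MHz and a WCK ratio that convert_wck_ratio() maps to 2, the result is 1000 * 2 * 2 = 4000 MT/s.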
static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
DpmClocks_t_dcn35 *clock_table)
{
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
uint32_t max_pstate = 0, max_uclk = 0, max_fclk = 0;
uint32_t min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
int i;
/* Determine min/max p-state values. */
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
if (is_valid_clock_value(clock_table->MemPstateTable[i].UClk) &&
clock_table->MemPstateTable[i].UClk > max_uclk) {
max_uclk = clock_table->MemPstateTable[i].UClk;
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
max_dram_speed_mts = dram_speed_mts;
max_pstate = i;
}
}
/* We expect the table to contain at least one valid Uclk entry. */
ASSERT(is_valid_clock_value(max_uclk));
min_dram_speed_mts = max_dram_speed_mts;
min_pstate = max_pstate;
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
min_dram_speed_mts = dram_speed_mts;
min_pstate = i;
}
}
/* We expect the table to contain at least one valid P-state entry. */
ASSERT(clock_table->NumMemPstatesEnabled &&
is_valid_clock_value(max_dram_speed_mts) &&
is_valid_clock_value(min_dram_speed_mts));
/* dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
@ -683,47 +699,46 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
max_dppclk = find_max_clk_value(clock_table->DppClocks,
clock_table->NumDispClkLevelsEnabled);
} else {
/* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
if (clock_table->NumFclkLevelsEnabled <= NUM_FCLK_DPM_LEVELS)
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq,
clock_table->NumFclkLevelsEnabled);
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
uint32_t min_uclk = clock_table->MemPstateTable[0].UClk;
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
int j;
for (j = 1; j < clock_table->NumMemPstatesEnabled; j++) {
if (is_valid_clock_value(clock_table->MemPstateTable[j].UClk) &&
clock_table->MemPstateTable[j].UClk < min_uclk &&
clock_table->MemPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
min_uclk = clock_table->MemPstateTable[j].UClk;
min_pstate = j;
}
}
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
break;
break;
bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
/* Now update clocks we do read */
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
clock_table->MemPstateTable[min_pstate].WckRatio);
}
bw_params->clk_table.entries[i].wck_ratio =
convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);
/* Dcfclk and Fclk are tied, but at a different ratio */
bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
}
/* Make sure to include at least one entry at highest pstate */
if (max_pstate != min_pstate || i == 0) {
if (i > MAX_NUM_DPM_LVL - 1)
i = MAX_NUM_DPM_LVL - 1;
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
@ -739,6 +754,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
bw_params->clk_table.num_entries = i--;
/* Make sure all highest clocks are included*/
bw_params->clk_table.entries[i].socclk_mhz =
find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].dispclk_mhz =
@ -757,6 +773,11 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;
/*
* Set any 0 clocks to max default setting. Not an issue for
* power since we aren't doing switching in such case anyway
*/
for (i = 0; i < bw_params->clk_table.num_entries; i++) {
if (!bw_params->clk_table.entries[i].fclk_mhz) {
bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
@ -965,21 +986,6 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
uint32_t clock_source;
struct dc_context *ctx = clk_mgr->base.ctx;
REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
if (clk_mgr->dprefclk_ss_percentage != 0) {
clk_mgr->ss_on_dprefclk = true;
clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
}
}
void dcn35_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn35 *clk_mgr,
@ -1043,17 +1049,11 @@ void dcn35_clk_mgr_construct(
dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = dcn35_smu_get_dtbclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
if (!clk_mgr->base.base.clks.ref_dtbclk_khz)
dcn35_smu_set_dtbclk(&clk_mgr->base, true);
clk_mgr->base.base.clks.dtbclk_en = true;
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
dcn35_read_ss_info_from_lut(&clk_mgr->base);
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
@ -1129,7 +1129,6 @@ void dcn35_clk_mgr_construct(
ctx->dc->debug.disable_dpp_power_gate = false;
ctx->dc->debug.disable_hubp_power_gate = false;
ctx->dc->debug.disable_dsc_power_gate = false;
ctx->dc->debug.disable_hpo_power_gate = false;
} else {
/*let's reset the config control flag*/
ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/

View file

@ -874,6 +874,7 @@ struct dc_debug_options {
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
int minimum_z8_residency_time;
int minimum_z10_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
@ -1608,7 +1609,6 @@ struct dc_link {
enum edp_revision edp_revision;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
struct backlight_settings backlight_settings;
struct psr_settings psr_settings;
struct replay_settings replay_settings;

View file

@ -991,10 +991,6 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
struct backlight_settings {
uint32_t backlight_millinits;
};
/* PSR feature flags */
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink

View file

@ -871,7 +871,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
.minimum_z8_residency_time = 2000,
.minimum_z8_residency_time = 2100,
.psr_skip_crtc_disable = true,
.replay_skip_crtc_disabled = true,
.disable_dmcu = true,

View file

@ -261,6 +261,7 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
uint32_t power_forceon;
bool block_enabled;
if (pg_cntl->ctx->dc->debug.ignore_pg ||
@ -277,6 +278,10 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
return;
}
REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
if (power_forceon)
return;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@ -304,6 +309,7 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
uint32_t power_forceon;
bool block_enabled;
if (pg_cntl->ctx->dc->debug.ignore_pg ||
@ -319,6 +325,10 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
return;
}
REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
if (power_forceon)
return;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

View file

@ -1712,6 +1712,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
out = dml2_validate(dc, context, fast_validate);
if (fast_validate)
return out;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
return out;
}
@ -1857,7 +1864,7 @@ static bool dcn35_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.use_default_clock_table = false;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {

Some files were not shown because too many files have changed in this diff.