Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

drivers/net/ethernet/stmicro/stmmac/common.h
  38cc3c6dcc ("net: stmmac: protect updates of 64-bit statistics counters")
  fd5a6a7131 ("net: stmmac: est: Per Tx-queue error count for HLBF")
  c5c3e1bfc9 ("net: stmmac: Offload queueMaxSDU from tc-taprio")

drivers/net/wireless/microchip/wilc1000/netdev.c
  c901388028 ("wifi: fill in MODULE_DESCRIPTION()s for wilc1000")
  328efda22a ("wifi: wilc1000: do not realloc workqueue everytime an interface is added")

net/unix/garbage.c
  11498715f2 ("af_unix: Remove io_uring code for GC.")
  1279f9d9de ("af_unix: Call kfree_skb() for dead unix_(sk)->oob_skb in GC.")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 3be042cf46
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2024-02-08 15:20:37 -08:00

427 changed files with 4978 additions and 3037 deletions


@ -1,4 +1,4 @@
What: /sys/devices/.../hwmon/hwmon<i>/in0_input
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/in0_input
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -6,7 +6,7 @@ Description: RO. Current Voltage in millivolt.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_max
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -20,7 +20,7 @@ Description: RW. Card reactive sustained (PL1/Tau) power limit in microwatts.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_rated_max
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -28,7 +28,7 @@ Description: RO. Card default power limit (default TDP setting).
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max_interval
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -37,7 +37,7 @@ Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_crit
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -50,7 +50,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/curr1_crit
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@ -63,7 +63,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
Only supported for particular Intel i915 graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/energy1_input
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org


@ -1,4 +1,4 @@
What: /sys/devices/.../hwmon/hwmon<i>/power1_max
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -12,7 +12,7 @@ Description: RW. Card reactive sustained (PL1) power limit in microwatts.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -20,7 +20,7 @@ Description: RO. Card default power limit (default TDP setting).
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -33,7 +33,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -44,7 +44,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
the operating frequency if the power averaged over a window
exceeds this limit.
What: /sys/devices/.../hwmon/hwmon<i>/in0_input
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in0_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -52,7 +52,7 @@ Description: RO. Current Voltage in millivolt.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@ -60,7 +60,7 @@ Description: RO. Energy input of device in microjoules.
Only supported for particular Intel xe graphics platforms.
What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
Date: October 2023
KernelVersion: 6.6
Contact: intel-xe@lists.freedesktop.org
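
Both hwmon ABI files above move the i915 and xe attributes from the generic /sys/devices/... prefix to driver-scoped /sys/bus/pci/drivers/<driver>/... paths. As a minimal sketch of consuming the relocated ABI (not part of this merge; the PCI address and hwmon index below are hypothetical and must be discovered at runtime), userspace could read the sustained power limit like this:

/*
 * Hedged illustration only: read power1_max (microwatts, RW) from the
 * driver-scoped i915 hwmon path documented above. "0000:03:00.0" and
 * "hwmon2" are placeholders; glob the driver directory to find the real ones.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/drivers/i915/0000:03:00.0/hwmon/hwmon2/power1_max";
	FILE *f = fopen(path, "r");
	long long uw;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lld", &uw) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("PL1 sustained power limit: %lld uW (%.1f W)\n", uw, uw / 1e6);
	return 0;
}

The xe attributes are read the same way under /sys/bus/pci/drivers/xe/.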


@ -22,6 +22,7 @@ properties:
- const: allwinner,sun6i-a31-spdif
- const: allwinner,sun8i-h3-spdif
- const: allwinner,sun50i-h6-spdif
- const: allwinner,sun50i-h616-spdif
- items:
- const: allwinner,sun8i-a83t-spdif
- const: allwinner,sun8i-h3-spdif
@ -62,6 +63,8 @@ allOf:
enum:
- allwinner,sun6i-a31-spdif
- allwinner,sun8i-h3-spdif
- allwinner,sun50i-h6-spdif
- allwinner,sun50i-h616-spdif
then:
required:
@ -73,7 +76,7 @@ allOf:
contains:
enum:
- allwinner,sun8i-h3-spdif
- allwinner,sun50i-h6-spdif
- allwinner,sun50i-h616-spdif
then:
properties:


@ -448,17 +448,17 @@ Function-specific configfs interface
The function name to use when creating the function directory is "ncm".
The NCM function provides these attributes in its function directory:
=============== ==================================================
ifname network device interface name associated with this
function instance
qmult queue length multiplier for high and super speed
host_addr MAC address of host's end of this
Ethernet over USB link
dev_addr MAC address of device's end of this
Ethernet over USB link
max_segment_size Segment size required for P2P connections. This
will set MTU to (max_segment_size - 14 bytes)
=============== ==================================================
======================= ==================================================
ifname network device interface name associated with this
function instance
qmult queue length multiplier for high and super speed
host_addr MAC address of host's end of this
Ethernet over USB link
dev_addr MAC address of device's end of this
Ethernet over USB link
max_segment_size Segment size required for P2P connections. This
will set MTU to 14 bytes
======================= ==================================================
and after creating the functions/ncm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
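
The table above documents the ncm function's configfs attributes. As a hedged sketch of exercising that interface from userspace (the gadget name "g1", instance "usb0" and the value 8192 are made-up, and configfs is assumed mounted at /sys/kernel/config), creating the function and setting its attributes could look like:

/*
 * Hedged sketch only: create an ncm function instance via configfs and set
 * the attributes listed above. A full gadget also needs configs/, strings/
 * and a UDC to be bound, which is omitted here.
 */
#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>

#define GADGET "/sys/kernel/config/usb_gadget/g1"
#define NCM    GADGET "/functions/ncm.usb0"

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -errno;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* mkdir on configfs instantiates the gadget and the function. */
	mkdir(GADGET, 0755);
	mkdir(NCM, 0755);

	write_attr(NCM "/qmult", "10");              /* queue length multiplier */
	write_attr(NCM "/max_segment_size", "8192"); /* adjusts the link MTU */

	return 0;
}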


@ -4172,14 +4172,14 @@ F: drivers/firmware/broadcom/tee_bnxt_fw.c
F: drivers/net/ethernet/broadcom/bnxt/
F: include/linux/firmware/broadcom/tee_bnxt_fw.h
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
M: Arend van Spriel <aspriel@gmail.com>
M: Franky Lin <franky.lin@broadcom.com>
M: Hante Meuleman <hante.meuleman@broadcom.com>
BROADCOM BRCM80211 IEEE802.11 WIRELESS DRIVERS
M: Arend van Spriel <arend.vanspriel@broadcom.com>
L: linux-wireless@vger.kernel.org
L: brcm80211@lists.linux.dev
L: brcm80211-dev-list.pdl@broadcom.com
S: Supported
F: drivers/net/wireless/broadcom/brcm80211/
F: include/linux/platform_data/brcmfmac.h
BROADCOM BRCMSTB GPIO DRIVER
M: Doug Berger <opendmb@gmail.com>
@ -10094,7 +10094,7 @@ L: linux-i2c@vger.kernel.org
S: Maintained
W: https://i2c.wiki.kernel.org/
Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andi.shyti/linux.git
F: Documentation/devicetree/bindings/i2c/
F: drivers/i2c/algos/
F: drivers/i2c/busses/
@ -11130,7 +11130,6 @@ S: Supported
F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
M: Gregory Greenman <gregory.greenman@intel.com>
M: Miri Korenblit <miriam.rachel.korenblit@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
@ -16866,9 +16865,8 @@ F: Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
F: drivers/pci/controller/pcie-xilinx-cpm.c
PCI ENDPOINT SUBSYSTEM
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
M: Krzysztof Wilczyński <kw@linux.com>
R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
R: Kishon Vijay Abraham I <kishon@kernel.org>
L: linux-pci@vger.kernel.org
S: Supported
@ -18444,7 +18442,7 @@ S: Supported
F: drivers/infiniband/sw/rdmavt
RDS - RELIABLE DATAGRAM SOCKETS
M: Santosh Shilimkar <santosh.shilimkar@oracle.com>
M: Allison Henderson <allison.henderson@oracle.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
L: rds-devel@oss.oracle.com (moderated for non-subscribers)


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -195,7 +195,7 @@ vdso_prepare: prepare0
include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
ifdef CONFIG_COMPAT_VDSO
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
arch/arm64/kernel/vdso32/vdso.so
endif
endif


@ -17,9 +17,6 @@
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
#ifdef CONFIG_COMPAT_VDSO
#include <generated/vdso32-offsets.h>
#endif
#define VDSO_SYMBOL(base, name) \
({ \


@ -77,9 +77,9 @@ obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
# We need to prevent the SCS patching code from patching itself. Using
# -mbranch-protection=none here to avoid the patchable PAC opcodes from being
# generated triggers an issue with full LTO on Clang, which stops emitting PAC
# instructions altogether. So instead, omit the unwind tables used by the
# patching code, so it will not be able to locate its own PAC instructions.
CFLAGS_patch-scs.o += -fno-asynchronous-unwind-tables -fno-unwind-tables
# instructions altogether. So disable LTO as well for the compilation unit.
CFLAGS_patch-scs.o += -mbranch-protection=none
CFLAGS_REMOVE_patch-scs.o += $(CC_FLAGS_LTO)
# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so


@ -127,9 +127,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
$(call if_changed,vdsosym)
# Strip rule for vdso.so
$(obj)/vdso.so: OBJCOPYFLAGS := -S
$(obj)/vdso.so: $(obj)/vdso32.so.dbg FORCE
@ -166,9 +163,3 @@ quiet_cmd_vdsoas = AS32 $@
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(obj)/$(munge) $< $@
# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO)
gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
# The AArch64 nm should be able to read an AArch32 binary
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@


@ -12,6 +12,7 @@ config LOONGARCH
select ARCH_DISABLE_KASAN_INLINE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_FORTIFY_SOURCE
@ -99,6 +100,7 @@ config LOONGARCH
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@ -632,23 +634,6 @@ config RANDOMIZE_BASE_MAX_OFFSET
This is limited by the size of the lower address memory, 256MB.
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
endmenu
config ARCH_SELECT_MEMORY_MODEL
@ -667,10 +652,6 @@ config ARCH_SPARSEMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/mm/numa.rst> for more.
config ARCH_ENABLE_THP_MIGRATION
def_bool y
depends on TRANSPARENT_HUGEPAGE
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG


@ -32,8 +32,10 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
#define MAX_CORE_PIC 256
extern struct list_head acpi_wakeup_device_list;
extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
extern int __init parse_acpi_topology(void);


@ -29,11 +29,9 @@ int disabled_cpus;
u64 acpi_saved_sp;
#define MAX_CORE_PIC 256
#define PREFIX "ACPI: "
struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{


@ -44,6 +44,9 @@ void *kasan_mem_to_shadow(const void *addr)
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
unsigned long offset = 0;
if (maddr >= FIXADDR_START)
return (void *)(kasan_early_shadow_page);
maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:


@ -2,6 +2,7 @@
# Objects to go into the VDSO.
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# Include the generic Makefile to check the built vdso.


@ -1287,20 +1287,20 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
int ret = -EINVAL;
/* At first attach the ownership is already set */
if (!domain)
return 0;
if (!grp)
return -ENODEV;
table_group = iommu_group_get_iommudata(grp);
ret = table_group->ops->take_ownership(table_group);
/*
* The domain being set to PLATFORM from earlier
* BLOCKED. The table_group ownership has to be released.
*/
table_group->ops->release_ownership(table_group);
iommu_group_put(grp);
return ret;
return 0;
}
static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
@ -1312,13 +1312,32 @@ static struct iommu_domain spapr_tce_platform_domain = {
.ops = &spapr_tce_platform_domain_ops,
};
static struct iommu_domain spapr_tce_blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
static int
spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
struct device *dev)
{
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
int ret = -EINVAL;
/*
* FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
* also sets the dma_api ops
*/
.ops = &spapr_tce_platform_domain_ops,
table_group = iommu_group_get_iommudata(grp);
ret = table_group->ops->take_ownership(table_group);
iommu_group_put(grp);
return ret;
}
static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
.attach_dev = spapr_tce_blocked_iommu_attach_dev,
};
static struct iommu_domain spapr_tce_blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &spapr_tce_blocked_domain_ops,
};
static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)


@ -139,6 +139,33 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZIHPM,
KVM_RISCV_ISA_EXT_SMSTATEEN,
KVM_RISCV_ISA_EXT_ZICOND,
KVM_RISCV_ISA_EXT_ZBC,
KVM_RISCV_ISA_EXT_ZBKB,
KVM_RISCV_ISA_EXT_ZBKC,
KVM_RISCV_ISA_EXT_ZBKX,
KVM_RISCV_ISA_EXT_ZKND,
KVM_RISCV_ISA_EXT_ZKNE,
KVM_RISCV_ISA_EXT_ZKNH,
KVM_RISCV_ISA_EXT_ZKR,
KVM_RISCV_ISA_EXT_ZKSED,
KVM_RISCV_ISA_EXT_ZKSH,
KVM_RISCV_ISA_EXT_ZKT,
KVM_RISCV_ISA_EXT_ZVBB,
KVM_RISCV_ISA_EXT_ZVBC,
KVM_RISCV_ISA_EXT_ZVKB,
KVM_RISCV_ISA_EXT_ZVKG,
KVM_RISCV_ISA_EXT_ZVKNED,
KVM_RISCV_ISA_EXT_ZVKNHA,
KVM_RISCV_ISA_EXT_ZVKNHB,
KVM_RISCV_ISA_EXT_ZVKSED,
KVM_RISCV_ISA_EXT_ZVKSH,
KVM_RISCV_ISA_EXT_ZVKT,
KVM_RISCV_ISA_EXT_ZFH,
KVM_RISCV_ISA_EXT_ZFHMIN,
KVM_RISCV_ISA_EXT_ZIHINTNTL,
KVM_RISCV_ISA_EXT_ZVFH,
KVM_RISCV_ISA_EXT_ZVFHMIN,
KVM_RISCV_ISA_EXT_ZFA,
KVM_RISCV_ISA_EXT_MAX,
};


@ -42,15 +42,42 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZBC),
KVM_ISA_EXT_ARR(ZBKB),
KVM_ISA_EXT_ARR(ZBKC),
KVM_ISA_EXT_ARR(ZBKX),
KVM_ISA_EXT_ARR(ZBS),
KVM_ISA_EXT_ARR(ZFA),
KVM_ISA_EXT_ARR(ZFH),
KVM_ISA_EXT_ARR(ZFHMIN),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
KVM_ISA_EXT_ARR(ZICNTR),
KVM_ISA_EXT_ARR(ZICOND),
KVM_ISA_EXT_ARR(ZICSR),
KVM_ISA_EXT_ARR(ZIFENCEI),
KVM_ISA_EXT_ARR(ZIHINTNTL),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZIHPM),
KVM_ISA_EXT_ARR(ZKND),
KVM_ISA_EXT_ARR(ZKNE),
KVM_ISA_EXT_ARR(ZKNH),
KVM_ISA_EXT_ARR(ZKR),
KVM_ISA_EXT_ARR(ZKSED),
KVM_ISA_EXT_ARR(ZKSH),
KVM_ISA_EXT_ARR(ZKT),
KVM_ISA_EXT_ARR(ZVBB),
KVM_ISA_EXT_ARR(ZVBC),
KVM_ISA_EXT_ARR(ZVFH),
KVM_ISA_EXT_ARR(ZVFHMIN),
KVM_ISA_EXT_ARR(ZVKB),
KVM_ISA_EXT_ARR(ZVKG),
KVM_ISA_EXT_ARR(ZVKNED),
KVM_ISA_EXT_ARR(ZVKNHA),
KVM_ISA_EXT_ARR(ZVKNHB),
KVM_ISA_EXT_ARR(ZVKSED),
KVM_ISA_EXT_ARR(ZVKSH),
KVM_ISA_EXT_ARR(ZVKT),
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@ -92,13 +119,40 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZBA:
case KVM_RISCV_ISA_EXT_ZBB:
case KVM_RISCV_ISA_EXT_ZBC:
case KVM_RISCV_ISA_EXT_ZBKB:
case KVM_RISCV_ISA_EXT_ZBKC:
case KVM_RISCV_ISA_EXT_ZBKX:
case KVM_RISCV_ISA_EXT_ZBS:
case KVM_RISCV_ISA_EXT_ZFA:
case KVM_RISCV_ISA_EXT_ZFH:
case KVM_RISCV_ISA_EXT_ZFHMIN:
case KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_RISCV_ISA_EXT_ZICOND:
case KVM_RISCV_ISA_EXT_ZICSR:
case KVM_RISCV_ISA_EXT_ZIFENCEI:
case KVM_RISCV_ISA_EXT_ZIHINTNTL:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZIHPM:
case KVM_RISCV_ISA_EXT_ZKND:
case KVM_RISCV_ISA_EXT_ZKNE:
case KVM_RISCV_ISA_EXT_ZKNH:
case KVM_RISCV_ISA_EXT_ZKR:
case KVM_RISCV_ISA_EXT_ZKSED:
case KVM_RISCV_ISA_EXT_ZKSH:
case KVM_RISCV_ISA_EXT_ZKT:
case KVM_RISCV_ISA_EXT_ZVBB:
case KVM_RISCV_ISA_EXT_ZVBC:
case KVM_RISCV_ISA_EXT_ZVFH:
case KVM_RISCV_ISA_EXT_ZVFHMIN:
case KVM_RISCV_ISA_EXT_ZVKB:
case KVM_RISCV_ISA_EXT_ZVKG:
case KVM_RISCV_ISA_EXT_ZVKNED:
case KVM_RISCV_ISA_EXT_ZVKNHA:
case KVM_RISCV_ISA_EXT_ZVKNHB:
case KVM_RISCV_ISA_EXT_ZVKSED:
case KVM_RISCV_ISA_EXT_ZVKSH:
case KVM_RISCV_ISA_EXT_ZVKT:
return false;
/* Extensions which can be disabled using Smstateen */
case KVM_RISCV_ISA_EXT_SSAIA:


@ -68,7 +68,7 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
local_flush_tlb_range_asid(start, end, PAGE_SIZE, FLUSH_TLB_NO_ASID);
local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}
static void __ipi_flush_tlb_all(void *info)


@ -676,8 +676,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.crypto.pqap_hook) {
pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
ret = pqap_hook(vcpu);
if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
kvm_s390_set_psw_cc(vcpu, 3);
if (!ret) {
if (vcpu->run->s.regs.gprs[1] & 0x00ff0000)
kvm_s390_set_psw_cc(vcpu, 3);
else
kvm_s390_set_psw_cc(vcpu, 0);
}
up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
return ret;
}


@ -1235,7 +1235,6 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
if (IS_ERR(gmap))
return PTR_ERR(gmap);
gmap->private = vcpu->kvm;
vcpu->kvm->stat.gmap_shadow_create++;
WRITE_ONCE(vsie_page->gmap, gmap);
return 0;


@ -1691,6 +1691,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
return ERR_PTR(-ENOMEM);
new->mm = parent->mm;
new->parent = gmap_get(parent);
new->private = parent->private;
new->orig_asce = asce;
new->edat_level = edat_level;
new->initialized = false;


@ -10,13 +10,14 @@ enum cc_vendor {
CC_VENDOR_INTEL,
};
extern enum cc_vendor cc_vendor;
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
extern enum cc_vendor cc_vendor;
void cc_set_mask(u64 mask);
u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
#else
#define cc_vendor (CC_VENDOR_NONE)
static inline u64 cc_mkenc(u64 val)
{
return val;


@ -1145,6 +1145,8 @@ struct kvm_hv {
unsigned int synic_auto_eoi_used;
struct kvm_hv_syndbg hv_syndbg;
bool xsaves_xsavec_checked;
};
#endif


@ -434,7 +434,8 @@ static void __init sev_map_percpu_data(void)
{
int cpu;
if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
if (cc_vendor != CC_VENDOR_AMD ||
!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
return;
for_each_possible_cpu(cpu) {


@ -1322,6 +1322,56 @@ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
return false;
}
#define KVM_HV_WIN2016_GUEST_ID 0x1040a00003839
#define KVM_HV_WIN2016_GUEST_ID_MASK (~GENMASK_ULL(23, 16)) /* mask out the service version */
/*
* Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in !XSAVES && XSAVEC
* configuration.
* Such configuration can result from, for example, AMD Erratum 1386 workaround.
*
* Print a notice so users aren't left wondering what's suddenly gone wrong.
*/
static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_hv *hv = to_kvm_hv(kvm);
/* Check again under the hv_lock. */
if (hv->xsaves_xsavec_checked)
return;
if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) !=
KVM_HV_WIN2016_GUEST_ID)
return;
hv->xsaves_xsavec_checked = true;
/* UP configurations aren't affected */
if (atomic_read(&kvm->online_vcpus) < 2)
return;
if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) ||
!guest_cpuid_has(vcpu, X86_FEATURE_XSAVEC))
return;
pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC. "
"If it fails to boot try disabling XSAVEC in the VM config.\n");
}
void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
if (!vcpu->arch.hyperv_enabled ||
hv->xsaves_xsavec_checked)
return;
mutex_lock(&hv->hv_lock);
__kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
mutex_unlock(&hv->hv_lock);
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{


@ -182,6 +182,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);
void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu);
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
@ -267,6 +269,7 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock) {}
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)


@ -1782,6 +1782,10 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
kvm_mmu_reset_context(vcpu);
if (!static_cpu_has(X86_FEATURE_XSAVES) &&
(efer & EFER_SVME))
kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
return 0;
}
@ -7016,6 +7020,9 @@ set_identity_unlock:
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
r = -ENOENT;
if (!pic_in_kernel(kvm))
goto create_pit_unlock;
r = -ENOMEM;
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)


@ -49,6 +49,7 @@
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-ioprio.h"
struct dentry *blk_debugfs_root;
@ -833,6 +834,14 @@ end_io:
}
EXPORT_SYMBOL(submit_bio_noacct);
static void bio_set_ioprio(struct bio *bio)
{
/* Nobody set ioprio so far? Initialize it based on task's nice value */
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
* submit_bio - submit a bio to the block device layer for I/O
* @bio: The &struct bio which describes the I/O
@ -855,6 +864,7 @@ void submit_bio(struct bio *bio)
count_vm_events(PGPGOUT, bio_sectors(bio));
}
bio_set_ioprio(bio);
submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
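
The hunk above moves bio_set_ioprio() into submit_bio() so every bio, not only blk-mq ones, gets an I/O priority: if the submitter left the class at IOPRIO_CLASS_NONE, the bio inherits one derived from the task's nice value, and blkcg_set_ioprio() applies the cgroup policy on top. As a hedged aside (not part of this change), a process can avoid the nice-value fallback by setting an explicit priority with the raw ioprio_set() syscall, for example:

/*
 * Hedged userspace illustration: set a best-effort I/O priority for the
 * calling process so submitted bios no longer hit the IOPRIO_CLASS_NONE
 * fallback shown above. glibc has no wrapper, hence syscall().
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/ioprio.h>

int main(void)
{
	int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0); /* best-effort, level 0 */

	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0) {
		perror("ioprio_set");
		return 1;
	}
	printf("ioprio is now 0x%lx\n",
	       syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
	return 0;
}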


@ -40,7 +40,6 @@
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-ioprio.h"
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
@ -2944,14 +2943,6 @@ static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
return true;
}
static void bio_set_ioprio(struct bio *bio)
{
/* Nobody set ioprio so far? Initialize it based on task's nice value */
if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
@ -2976,7 +2967,6 @@ void blk_mq_submit_bio(struct bio *bio)
blk_status_t ret;
bio = blk_queue_bounce(bio, q);
bio_set_ioprio(bio);
if (plug) {
rq = rq_list_peek(&plug->cached_rq);


@ -91,13 +91,13 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if (!(msg->msg_flags & MSG_MORE)) {
err = hash_alloc_result(sk, ctx);
if (err)
goto unlock_free;
goto unlock_free_result;
ahash_request_set_crypt(&ctx->req, NULL,
ctx->result, 0);
err = crypto_wait_req(crypto_ahash_final(&ctx->req),
&ctx->wait);
if (err)
goto unlock_free;
goto unlock_free_result;
}
goto done_more;
}
@ -170,6 +170,7 @@ unlock:
unlock_free:
af_alg_free_sg(&ctx->sgl);
unlock_free_result:
hash_free_result(sk, ctx);
ctx->more = false;
goto unlock;


@ -148,6 +148,9 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
goto out_free_inst;
if (inst->alg.co.statesize)
goto out_free_inst;
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;


@ -478,6 +478,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
{
WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
/* (e)poll-based threads require an explicit wakeup signal when
* queuing their own work; they rely on these events to consume
* messages without I/O block. Without it, threads risk waiting
* indefinitely without handling the work.
*/
if (thread->looper & BINDER_LOOPER_STATE_POLL &&
thread->pid == current->pid && !thread->process_todo)
wake_up_interruptible_sync(&thread->wait);
thread->process_todo = true;
}


@ -606,13 +606,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* ASMedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
{ PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
{ PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
{ PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
{ PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */


@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card)
vc->scq = alloc_scq(card, vc->class);
if (!vc->scq) {
printk("%s: can't get SCQ.\n", card->name);
kfree(card->vcs[0]);
card->vcs[0] = NULL;
return -ENOMEM;
}


@ -534,10 +534,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
static int __sev_platform_shutdown_locked(int *error)
{
struct sev_device *sev = psp_master->sev_data;
struct psp_device *psp = psp_master;
struct sev_device *sev;
int ret;
if (!sev || sev->state == SEV_STATE_UNINIT)
if (!psp || !psp->sev_data)
return 0;
sev = psp->sev_data;
if (sev->state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);


@ -168,10 +168,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
if (vmf->pgoff > buffer->pagecount)
return VM_FAULT_SIGBUS;
vmf->page = buffer->pages[vmf->pgoff];
get_page(vmf->page);
return 0;
return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}
static const struct vm_operations_struct dma_heap_vm_ops = {
@ -185,6 +182,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;


@ -222,8 +222,14 @@ struct atdma_sg {
* @vd: pointer to the virtual dma descriptor.
* @atchan: pointer to the atmel dma channel.
* @total_len: total transaction byte count
* @sg_len: number of sg entries.
* @sglen: number of sg entries.
* @sg: array of sgs.
* @boundary: number of transfers to perform before the automatic address increment operation
* @dst_hole: value to add to the destination address when the boundary has been reached
* @src_hole: value to add to the source address when the boundary has been reached
* @memset_buffer: buffer used for the memset operation
* @memset_paddr: physical address of the buffer used for the memset operation
* @memset_vaddr: virtual address of the buffer used for the memset operation
*/
struct at_desc {
struct virt_dma_desc vd;
@ -245,7 +251,10 @@ struct at_desc {
/*-- Channels --------------------------------------------------------*/
/**
* atc_status - information bits stored in channel status flag
* enum atc_status - information bits stored in channel status flag
*
* @ATC_IS_PAUSED: If channel is pauses
* @ATC_IS_CYCLIC: If channel is cyclic
*
* Manipulated with atomic operations.
*/
@ -282,7 +291,6 @@ struct at_dma_chan {
u32 save_cfg;
u32 save_dscr;
struct dma_slave_config dma_sconfig;
bool cyclic;
struct at_desc *desc;
};
@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @dma_device: dmaengine dma_device object members
* @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels availlable in a mask
* @lli_pool: hw lli table
* @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
*/
struct at_dma {
@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
/**
* atc_get_llis_residue - Get residue for a hardware linked list transfer
* @atchan: pointer to an atmel hdmac channel.
* @desc: pointer to the descriptor for which the residue is calculated.
* @residue: residue to be set to dma_tx_state.
*
* Calculate the residue by removing the length of the Linked List Item (LLI)
* already transferred from the total length. To get the current LLI we can use
@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
* reach. This algorithm is very unlikely not to find a stable value for DSCR.
* @atchan: pointer to an atmel hdmac channel.
* @desc: pointer to the descriptor for which the residue is calculated.
* @residue: residue to be set to dma_tx_state.
* Returns 0 on success, -errno otherwise.
*
* Returns: %0 on success, -errno otherwise.
*/
static int atc_get_llis_residue(struct at_dma_chan *atchan,
struct at_desc *desc, u32 *residue)
@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @residue: residue to be updated.
* Return 0 on success, -errono otherwise.
*
* Return: %0 on success, -errno otherwise.
*/
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
u32 *residue)
@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
* return - the number of allocated descriptors
* Return: the number of allocated descriptors
*/
static int atc_alloc_chan_resources(struct dma_chan *chan)
{


@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
if (!dpaa2_chan->fd_pool)
goto err;
dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
sizeof(struct dpaa2_fl_entry),
sizeof(struct dpaa2_fl_entry), 0);
dpaa2_chan->fl_pool =
dma_pool_create("fl_pool", dev,
sizeof(struct dpaa2_fl_entry) * 3,
sizeof(struct dpaa2_fl_entry), 0);
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
sizeof(struct dpaa2_qdma_sd_d),
sizeof(struct dpaa2_qdma_sd_d) * 2,
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;


@ -514,11 +514,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
dma_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
queue_size[i],
&queue_temp->bus_addr,
GFP_KERNEL);
dmam_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
queue_size[i],
&queue_temp->bus_addr,
GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@ -563,15 +563,14 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
status_head->cq = dma_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
status_size,
&status_head->bus_addr,
GFP_KERNEL);
if (!status_head->cq) {
devm_kfree(&pdev->dev, status_head);
status_head->cq = dmam_alloc_coherent(&pdev->dev,
sizeof(struct fsl_qdma_format) *
status_size,
&status_head->bus_addr,
GFP_KERNEL);
if (!status_head->cq)
return NULL;
}
status_head->n_cq = status_size;
status_head->virt_head = status_head->cq;
status_head->virt_tail = status_head->cq;
@ -1268,8 +1267,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static void fsl_qdma_remove(struct platform_device *pdev)
{
int i;
struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@ -1277,12 +1274,6 @@ static void fsl_qdma_remove(struct platform_device *pdev)
fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
for (i = 0; i < fsl_qdma->block_number; i++) {
status = fsl_qdma->status[i];
dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
status->n_cq, status->cq, status->bus_addr);
}
}
static const struct of_device_id fsl_qdma_dt_ids[] = {


@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
if (!irq_name) {
ret = -ENOMEM;
goto err_disable_pm;
}
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {


@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
u8 status;
if (!vd)
return;
@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
/* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
/* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
result->result = DMA_TRANS_NOERROR;
/* Propagate TR Response errors to the client */
status = d->hwdesc[0].tr_resp_base->status;
if (status)
result->result = DMA_TRANS_ABORTED;
else
result->result = DMA_TRANS_NOERROR;
}
}
}


@ -141,11 +141,31 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
static const struct drm_client_funcs kfd_client_funcs = {
.unregister = drm_client_release,
};
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
int ret;
if (!adev->kfd.init_complete)
return 0;
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
&kfd_client_funcs);
if (ret) {
dev_err(adev->dev, "Failed to init DRM client: %d\n",
ret);
return ret;
}
drm_client_register(&adev->kfd.client);
return 0;
}
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
int last_valid_bit;
int ret;
amdgpu_amdkfd_gpuvm_init_mem_limits();
@ -164,12 +184,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.enable_mes = adev->enable_mes,
};
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", &kfd_client_funcs);
if (ret) {
dev_err(adev->dev, "Failed to init DRM client: %d\n", ret);
return;
}
/* this is going to have a few of the MSBs set that we need to
* clear
*/
@ -208,10 +222,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
&gpu_resources);
if (adev->kfd.init_complete)
drm_client_register(&adev->kfd.client);
else
drm_client_release(&adev->kfd.client);
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;


@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm,
struct svm_range_bo *svm_bo);
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,


@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
if (!amdgpu_ring_sched_ready(ring))
continue;
/* stop secheduler and drain ring. */


@ -2085,21 +2085,35 @@ out:
return ret;
}
void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
struct kfd_mem_attachment *entry;
struct amdgpu_vm *vm;
int ret;
vm = drm_priv_to_vm(drm_priv);
mutex_lock(&mem->lock);
ret = amdgpu_bo_reserve(mem->bo, true);
if (ret)
goto out;
list_for_each_entry(entry, &mem->attachments, list) {
if (entry->bo_va->base.vm == vm)
kfd_mem_dmaunmap_attachment(mem, entry);
if (entry->bo_va->base.vm != vm)
continue;
if (entry->bo_va->base.bo->tbo.ttm &&
!entry->bo_va->base.bo->tbo.ttm->sg)
continue;
kfd_mem_dmaunmap_attachment(mem, entry);
}
amdgpu_bo_unreserve(mem->bo);
out:
mutex_unlock(&mem->lock);
return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(


@ -1678,7 +1678,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_stop(&ring->sched);
}
@ -1694,7 +1694,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_start(&ring->sched);
}
@ -1916,8 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
ring = adev->rings[val];
if (!ring || !ring->funcs->preempt_ib ||
!drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring) ||
!ring->funcs->preempt_ib)
return -EINVAL;
/* the last preemption failed */


@ -4121,23 +4121,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
} else {
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
r = psp_gpu_reset(adev);
break;
default:
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
break;
}
tmp = amdgpu_reset_method;
/* It should do a default reset when loading or reloading the driver,
* regardless of the module parameter reset_method.
*/
amdgpu_reset_method = AMD_RESET_METHOD_NONE;
r = amdgpu_asic_reset(adev);
amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@ -5031,7 +5021,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
spin_lock(&ring->sched.job_list_lock);
@ -5170,7 +5160,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
/* Clear job fence from fence drv to avoid force_completion
@ -5637,7 +5627,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@ -5706,7 +5696,7 @@ skip_hw_reset:
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
@ -6061,7 +6051,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, NULL);
@ -6189,7 +6179,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !drm_sched_wqueue_ready(&ring->sched))
if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);


@ -2255,6 +2255,10 @@ retry_init:
if (ret)
goto err_pci;
ret = amdgpu_amdkfd_drm_client_create(adev);
if (ret)
goto err_pci;
/*
* 1. don't init fbdev on hw without DCE
* 2. don't init fbdev if there are no connectors


@ -635,6 +635,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
ring->name);
ring->sched.ready = !r;
return r;
}
@ -717,3 +718,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
{
if (!ring)
return false;
if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
return false;
return true;
}


@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif


@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(mmIH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}


@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);


@ -4027,8 +4027,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = 0;
adev->gfx.mec2_fw = NULL;
}
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
gfx_v10_0_check_fw_write_wait(adev);
out:


@ -107,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};
static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007),
SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a),
SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f)
};
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@ -304,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
case IP_VERSION(11, 5, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_11_5_0,
(const u32)ARRAY_SIZE(golden_settings_gc_11_5_0));
break;
default:
break;
}


@ -915,8 +915,8 @@ static int gmc_v6_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v6_0_hw_fini(void *handle)


@ -1099,8 +1099,8 @@ static int gmc_v7_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v7_0_hw_fini(void *handle)


@ -1219,8 +1219,8 @@ static int gmc_v8_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
static int gmc_v8_0_hw_fini(void *handle)


@ -2341,8 +2341,8 @@ static int gmc_v9_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
return 0;
}
/**


@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);


@ -418,6 +418,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}


@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}


@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}


@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}


@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
}


@ -2017,22 +2017,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
return ret;
}
/**
* vcn_v4_0_set_interrupt_state - set VCN block interrupt state
*
* @adev: amdgpu_device pointer
* @source: interrupt sources
* @type: interrupt types
* @state: interrupt states
*
* Set VCN block interrupt state
*/
static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
unsigned type, enum amdgpu_interrupt_state state)
{
return 0;
}
/**
* vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
*
@ -2097,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
.set = vcn_v4_0_set_interrupt_state,
.process = vcn_v4_0_process_interrupt,
};


@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle)
vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
}
return 0;
@ -1668,22 +1666,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s
return ret;
}
/**
* vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state
*
* @adev: amdgpu_device pointer
* @source: interrupt sources
* @type: interrupt types
* @state: interrupt states
*
* Set VCN block interrupt state
*/
static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
unsigned type, enum amdgpu_interrupt_state state)
{
return 0;
}
/**
* vcn_v4_0_5_process_interrupt - process VCN block interrupt
*
@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = {
.set = vcn_v4_0_5_set_interrupt_state,
.process = vcn_v4_0_5_process_interrupt,
};


@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}


@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
* can be detected.
*/
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}


@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9eef807, 0x876dff6d,
0x0000ffff, 0x87fe7e7e,
0x87ea6a6a, 0xb9faf802,
0xbe80226c, 0xbf810000,
0xbe80226c, 0xbf9b0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
@ -2065,7 +2065,7 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@ -2500,7 +2500,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x876dff6d, 0x0000ffff,
0x87fe7e7e, 0x87ea6a6a,
0xb9faf802, 0xbe80226c,
0xbf810000, 0xbf9f0000,
0xbf9b0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
};
@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xb8eef802, 0xbf0d866e,
0xbfa20002, 0xb97af802,
0xbe80486c, 0xb97af802,
0xbe804a6c, 0xbfb00000,
0xbe804a6c, 0xbfb10000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
0xbe801f6c, 0xbf810000,
0xbe801f6c, 0xbf9b0000,
};


@ -1104,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV:
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
s_endpgm
s_endpgm_saved
end
function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)


@ -921,7 +921,7 @@ L_RESTORE:
/* the END */
/**************************************************************************/
L_END_PGM:
s_endpgm
s_endpgm_saved
end


@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
if (err)
goto sync_memory_failed;
}
mutex_unlock(&p->mutex);


@ -574,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
addr = prange->start << PAGE_SHIFT;
addr = migrate->start;
src = (uint64_t *)(scratch + npages);
dst = scratch;


@ -1488,10 +1488,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = adev_to_drm(kfd->adev);
struct drm_device *ddev;
if (node->xcp)
ddev = node->xcp->ddev;
else
ddev = adev_to_drm(node->adev);
return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,


@ -9187,6 +9187,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
if (!dummy_updates) {
DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];


@ -437,32 +437,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@ -474,32 +474,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}


@ -203,12 +203,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.transmitter = init_data->transmitter;
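Note: the reordering above (and the matching change in the dcn35 constructor in the following hunks) matters because "enc10->base.features = *enc_features;" is a whole-struct copy, so any flag bit set before that assignment is silently overwritten. Below is a minimal stand-alone sketch of the hazard, using hypothetical struct names rather than the real dc types.

#include <stdio.h>

struct features { unsigned int dp_is_usb_c : 1; unsigned int hdmi_6gb_en : 1; };
struct encoder  { struct features features; };

int main(void)
{
	struct features defaults = { .hdmi_6gb_en = 1 };	/* stands in for *enc_features */
	struct encoder enc = { 0 };

	/* Wrong order: the flag is lost when the defaults are copied over it. */
	enc.features.dp_is_usb_c = 1;
	enc.features = defaults;
	printf("before fix: dp_is_usb_c = %u\n", (unsigned)enc.features.dp_is_usb_c);	/* 0 */

	/* Fixed order: copy the defaults first, then set connector-specific flags. */
	enc.features = defaults;
	enc.features.dp_is_usb_c = 1;
	printf("after fix:  dp_is_usb_c = %u\n", (unsigned)enc.features.dp_is_usb_c);	/* 1 */
	return 0;
}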


@ -184,8 +184,6 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@ -240,6 +238,8 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
if (enc10->base.connector.id == CONNECTOR_ID_USBC)
enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,


@ -1112,7 +1112,7 @@ struct pipe_slice_table {
struct pipe_ctx *pri_pipe;
struct dc_plane_state *plane;
int slice_count;
} mpc_combines[MAX_SURFACES];
} mpc_combines[MAX_PLANES];
int mpc_combine_count;
};
@ -2753,7 +2753,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};


@ -164,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
.sr_exit_z8_time_us = 210.0,
.sr_enter_plus_exit_z8_time_us = 320.0,
.fclk_change_latency_us = 24.0,


@ -341,9 +341,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}
if (dml2->config.bbox_overrides.clks_table.num_states)
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
/* Override from passed values, if available */
for (i = 0; i < p->in_states->num_states; i++) {
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
@ -400,6 +397,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
@ -793,35 +791,28 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
/*TODO no support for mpc combine, need rework - should calculate scaling params based on plane+stream*/
static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, const struct dc_state *context)
static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context)
{
int i;
struct scaler_data data = { 0 };
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
memset(temp_pipe, 0, sizeof(struct pipe_ctx));
for (i = 0; i < MAX_PIPES; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state == in && !pipe->prev_odm_pipe) {
const struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
temp_pipe->stream = pipe->stream;
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
data = context->res_ctx.pipe_ctx[i].plane_res.scl_data;
while (next_pipe) {
data.h_active += next_pipe->plane_res.scl_data.h_active;
data.recout.width += next_pipe->plane_res.scl_data.recout.width;
if (in->rotation == ROTATION_ANGLE_0 || in->rotation == ROTATION_ANGLE_180) {
data.viewport.width += next_pipe->plane_res.scl_data.viewport.width;
} else {
data.viewport.height += next_pipe->plane_res.scl_data.viewport.height;
}
next_pipe = next_pipe->next_odm_pipe;
}
resource_build_scaling_params(temp_pipe);
break;
}
}
ASSERT(i < MAX_PIPES);
return data;
return temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
@ -866,7 +857,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->ScalerEnabled[location] = false;
}
static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, const struct dc_state *context)
static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context)
{
const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context);


@ -1183,9 +1183,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
if (dccg) {
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,


@ -2790,18 +2790,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
}
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
phyd32clk = get_phyd32clk_src(link);
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
phyd32clk = get_phyd32clk_src(link);
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
} else {
if (dccg->funcs->enable_symclk_se)
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,


@ -469,6 +469,8 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
/* solely used for build scalar data in dml2 */
struct pipe_ctx temp_pipe;
};
struct dce_bw_output {


@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;


@ -734,7 +734,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
smu->smu_baco.state = SMU_BACO_STATE_NONE;
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@ -1954,31 +1954,10 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
static int smu_reset_mp1_state(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if ((!adev->in_runpm) && (!adev->in_suspend) &&
(!amdgpu_in_reset(adev)))
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
break;
default:
break;
}
return ret;
}
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@ -1996,15 +1975,7 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
ret = smu_smc_hw_cleanup(smu);
if (ret)
return ret;
ret = smu_reset_mp1_state(smu);
if (ret)
return ret;
return 0;
return smu_smc_hw_cleanup(smu);
}
static void smu_late_fini(void *handle)


@ -424,7 +424,6 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
SMU_BACO_STATE_NONE,
};
struct smu_baco_context {


@ -2748,13 +2748,7 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_PrepareMp1ForUnload,
0x55, NULL);
if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
ret = smu_v13_0_disable_pmfw_state(smu);
ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */
@ -2950,7 +2944,7 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
switch (adev->ip_versions[MP1_HWIP][0]) {
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
return smu->smc_fw_version >= 0x004e6300;
case IP_VERSION(13, 0, 10):


@ -2505,13 +2505,7 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_PrepareMp1ForUnload,
0x55, NULL);
if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
ret = smu_v13_0_disable_pmfw_state(smu);
ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */


@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
cancel_work_sync(&fctx->uevent_work);
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
@ -145,12 +146,13 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc
return drop;
}
static int
nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
static void
nouveau_fence_uevent_work(struct work_struct *work)
{
struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
uevent_work);
unsigned long flags;
int ret = NVIF_EVENT_KEEP;
int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
if (!list_empty(&fctx->pending)) {
@ -160,11 +162,20 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
if (nouveau_fence_update(chan, fctx))
ret = NVIF_EVENT_DROP;
drop = 1;
}
spin_unlock_irqrestore(&fctx->lock, flags);
if (drop)
nvif_event_block(&fctx->event);
return ret;
spin_unlock_irqrestore(&fctx->lock, flags);
}
static int
nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
schedule_work(&fctx->uevent_work);
return NVIF_EVENT_KEEP;
}
void
@ -178,6 +189,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
} args;
int ret;
INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
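Note: the change above moves fence completion handling out of the nvif event callback, which may run in an atomic/notifier context, into a work item running in process context, and the new cancel_work_sync() call in nouveau_fence_context_del() guarantees the worker has finished before the context is freed. A rough sketch of that defer-and-cancel pattern in kernel C, using generic names instead of the nouveau types:

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct ctx {
	struct work_struct work;
	spinlock_t lock;
	/* ... state touched by both the event path and the worker ... */
};

static void ctx_work(struct work_struct *work)
{
	struct ctx *c = container_of(work, struct ctx, work);

	/* Runs in process context: safe to do the heavier processing here. */
	spin_lock_irq(&c->lock);
	/* ... walk and complete pending items ... */
	spin_unlock_irq(&c->lock);
}

/* Called from the event path: only queue the work, do no real processing. */
static void ctx_event(struct ctx *c)
{
	schedule_work(&c->work);
}

static void ctx_init(struct ctx *c)
{
	INIT_WORK(&c->work, ctx_work);
	spin_lock_init(&c->lock);
}

static void ctx_del(struct ctx *c)
{
	/* Make sure no worker is still running before the context goes away. */
	cancel_work_sync(&c->work);
}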


@ -44,6 +44,7 @@ struct nouveau_fence_chan {
u32 context;
char name[32];
struct work_struct uevent_work;
struct nvif_event event;
int notify_ref, dead, killed;
};


@ -1078,7 +1078,6 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
if (IS_ERR(rpc))
return PTR_ERR(rpc);
rpc->size = sizeof(*rpc);
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
@ -1094,6 +1093,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
strings += name_len;
str_offset += name_len;
}
rpc->size = str_offset;
return nvkm_gsp_rpc_wr(gsp, rpc, false);
}
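Note: the hunk above defers the rpc->size assignment until after the name strings have been appended, so the reported payload length covers the variable-length string area rather than only sizeof(*rpc). A rough sketch of that layout pattern with hypothetical types (not the actual GSP registry structures):

#include <stddef.h>
#include <string.h>

struct entry { unsigned int name_offset; unsigned int value; };

struct registry {
	unsigned int size;		/* total payload size, strings included */
	unsigned int num_entries;
	struct entry entries[];		/* entry table, followed by the name strings */
};

/* Hypothetical helper: lay out the entries and their names, then record the real size. */
static void registry_finalize(struct registry *rpc, const char *const *names, unsigned int n)
{
	size_t str_offset = sizeof(*rpc) + n * sizeof(rpc->entries[0]);
	unsigned int i;

	rpc->num_entries = n;
	for (i = 0; i < n; i++) {
		size_t len = strlen(names[i]) + 1;

		rpc->entries[i].name_offset = str_offset;
		memcpy((char *)rpc + str_offset, names[i], len);
		str_offset += len;
	}
	/* Setting this before the loop would undercount the payload. */
	rpc->size = str_offset;
}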


@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client)
* not the shared IOMMU domain, don't try to attach it to a different
* domain. This allows using the IOMMU-backed DMA API.
*/
if (domain && domain != tegra->domain)
if (domain && domain->type != IOMMU_DOMAIN_IDENTITY &&
domain != tegra->domain)
return 0;
if (tegra->domain) {


@ -94,6 +94,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
goto err_free;
}
dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
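Note: the "?:" in the added line is the GNU C conditional operator with an omitted middle operand: a ?: b evaluates a once and yields it if non-zero, otherwise b, so the segment size falls back to UINT_MAX whenever dma_max_mapping_size() returns 0. A tiny user-space illustration of the operator (GCC/Clang extension), with a stand-in for the DMA helper:

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for dma_max_mapping_size(); 0 here means "no limit reported". */
static unsigned long long query_limit(void)
{
	return 0;
}

int main(void)
{
	/* GNU extension: a ?: b is equivalent to a ? a : b, but a is evaluated only once. */
	unsigned long long seg = query_limit() ?: UINT_MAX;

	printf("max segment size: %llu\n", seg);	/* prints UINT_MAX here */
	return 0;
}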


@ -50,8 +50,8 @@
#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffu << 16)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffffu << 0)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
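Note: the "u" suffixes added in this header and in the GuC ABI hunks that follow keep the mask constants unsigned. Without them, 0xffff << 16 overflows a signed int (undefined in ISO C, in practice wrapping to a negative value) and 0x1 << 31 shifts into the sign bit, and such a negative mask sign-extends when widened to 64 bits in later arithmetic. A small stand-alone illustration of the sign-extension hazard, with hypothetical macro names:

#include <stdint.h>
#include <stdio.h>

#define FENCE_MASK_SIGNED	(0xffff  << 16)	/* int: overflows, wraps negative */
#define FENCE_MASK_UNSIGNED	(0xffffu << 16)	/* unsigned int: stays 0xffff0000 */

int main(void)
{
	uint64_t msg = 0;

	/* The signed mask sign-extends to 0xffffffffffff0000 when widened to 64 bits. */
	printf("signed:   0x%016llx\n", (unsigned long long)(msg | FENCE_MASK_SIGNED));
	/* The unsigned mask widens to 0x00000000ffff0000 as intended. */
	printf("unsigned: 0x%016llx\n", (unsigned long long)(msg | FENCE_MASK_UNSIGNED));
	return 0;
}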


@ -242,8 +242,8 @@ struct slpc_shared_data {
(HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \
HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xffu << 8)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
#endif


@ -82,11 +82,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
#define GUC_CTB_HDR_LEN 1u
#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
#define GUC_CTB_MSG_MAX_LEN 256u
#define GUC_CTB_MSG_0_FENCE (0xffff << 16)
#define GUC_CTB_MSG_0_FORMAT (0xf << 12)
#define GUC_CTB_MSG_0_FENCE (0xffffu << 16)
#define GUC_CTB_MSG_0_FORMAT (0xfu << 12)
#define GUC_CTB_FORMAT_HXG 0u
#define GUC_CTB_MSG_0_RESERVED (0xf << 8)
#define GUC_CTB_MSG_0_NUM_DWORDS (0xff << 0)
#define GUC_CTB_MSG_0_RESERVED (0xfu << 8)
#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0)
/**
* DOC: CTB HXG Message


@ -31,9 +31,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
#define GUC_KLV_0_KEY (0xffff << 16)
#define GUC_KLV_0_LEN (0xffff << 0)
#define GUC_KLV_n_VALUE (0xffffffff << 0)
#define GUC_KLV_0_KEY (0xffffu << 16)
#define GUC_KLV_0_LEN (0xffffu << 0)
#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs


@ -40,18 +40,18 @@
*/
#define GUC_HXG_MSG_MIN_LEN 1u
#define GUC_HXG_MSG_0_ORIGIN (0x1 << 31)
#define GUC_HXG_MSG_0_ORIGIN (0x1u << 31)
#define GUC_HXG_ORIGIN_HOST 0u
#define GUC_HXG_ORIGIN_GUC 1u
#define GUC_HXG_MSG_0_TYPE (0x7 << 28)
#define GUC_HXG_MSG_0_TYPE (0x7u << 28)
#define GUC_HXG_TYPE_REQUEST 0u
#define GUC_HXG_TYPE_EVENT 1u
#define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
#define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
#define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
#define GUC_HXG_TYPE_RESPONSE_SUCCESS 7u
#define GUC_HXG_MSG_0_AUX (0xfffffff << 0)
#define GUC_HXG_MSG_n_PAYLOAD (0xffffffff << 0)
#define GUC_HXG_MSG_0_AUX (0xfffffffu << 0)
#define GUC_HXG_MSG_n_PAYLOAD (0xffffffffu << 0)
/**
* DOC: HXG Request
@ -85,8 +85,8 @@
*/
#define GUC_HXG_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfff << 16)
#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffff << 0)
#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfffu << 16)
#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffffu << 0)
#define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/**
@ -117,8 +117,8 @@
*/
#define GUC_HXG_EVENT_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfff << 16)
#define GUC_HXG_EVENT_MSG_0_ACTION (0xffff << 0)
#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfffu << 16)
#define GUC_HXG_EVENT_MSG_0_ACTION (0xffffu << 0)
#define GUC_HXG_EVENT_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/**
@ -188,8 +188,8 @@
*/
#define GUC_HXG_FAILURE_MSG_LEN GUC_HXG_MSG_MIN_LEN
#define GUC_HXG_FAILURE_MSG_0_HINT (0xfff << 16)
#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffff << 0)
#define GUC_HXG_FAILURE_MSG_0_HINT (0xfffu << 16)
#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffffu << 0)
/**
* DOC: HXG Response
