Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

drivers/net/ethernet/sfc/tc.c
  fa165e1949 ("sfc: don't unregister flow_indr if it was never registered")
  3bf969e88a ("sfc: add MAE table machinery for conntrack table")
https://lore.kernel.org/all/20230818112159.7430e9b4@canb.auug.org.au/

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 7ff57803d2
Jakub Kicinski, 2023-08-18 12:44:22 -07:00
287 changed files with 2302 additions and 1262 deletions
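
To see how the sfc/tc.c conflict noted above was resolved, the merge commit itself can be inspected; a quick sketch (assumes a checkout of the netdev tree, and git >= 2.36 for --remerge-diff):

    # combined diff of the conflicted path against both parents
    git show 7ff57803d2 -- drivers/net/ethernet/sfc/tc.c
    # re-create the textual conflict and show how it was resolved
    git log -1 --remerge-diff 7ff57803d2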


@ -538,6 +538,8 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
Sibi Sankar <quic_sibis@quicinc.com> <sibis@codeaurora.org>
Sid Manning <quic_sidneym@quicinc.com> <sidneym@codeaurora.org>
Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
Simon Horman <horms@kernel.org> <simon.horman@corigine.com>
Simon Horman <horms@kernel.org> <simon.horman@netronome.com>
Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>


@ -216,7 +216,6 @@ properties:
description: Whether to enable burnout current for EXT1.
adi,ext1-burnout-current-nanoamp:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Burnout current in nanoamps to be applied to EXT1.
enum: [0, 50, 500, 1000, 10000]
@ -233,7 +232,6 @@ properties:
description: Whether to enable burnout current for EXT2.
adi,ext2-burnout-current-nanoamp:
$ref: /schemas/types.yaml#/definitions/uint32
description: Burnout current in nanoamps to be applied to EXT2.
enum: [0, 50, 500, 1000, 10000]
default: 0
@ -249,7 +247,6 @@ properties:
description: Whether to enable burnout current for VIOUT.
adi,viout-burnout-current-nanoamp:
$ref: /schemas/types.yaml#/definitions/uint32
description: Burnout current in nanoamps to be applied to VIOUT.
enum: [0, 1000, 10000]
default: 0


@ -178,10 +178,10 @@ nf_conntrack_sctp_timeout_established - INTEGER (seconds)
Default is set to (hb_interval * path_max_retrans + rto_max)
nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
default 0.3
default 3
nf_conntrack_sctp_timeout_shutdown_recd - INTEGER (seconds)
default 0.3
default 3
nf_conntrack_sctp_timeout_shutdown_ack_sent - INTEGER (seconds)
default 3
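
For reference, these conntrack timeouts are runtime-tunable; a minimal sketch, assuming the nf_conntrack module is loaded so the keys exist under net.netfilter:

    # read the current value (seconds)
    sysctl net.netfilter.nf_conntrack_sctp_timeout_shutdown_sent
    # pin it to the new default shown above
    sysctl -w net.netfilter.nf_conntrack_sctp_timeout_shutdown_recd=3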


@ -9394,7 +9394,6 @@ F: drivers/crypto/hisilicon/sgl.c
F: include/linux/hisi_acc_qm.h
HISILICON ROCE DRIVER
M: Haoyue Xu <xuhaoyue1@hisilicon.com>
M: Junxian Huang <huangjunxian6@hisilicon.com>
L: linux-rdma@vger.kernel.org
S: Maintained
@ -12499,6 +12498,7 @@ F: net/mctp/
MAPLE TREE
M: Liam R. Howlett <Liam.Howlett@oracle.com>
L: maple-tree@lists.infradead.org
L: linux-mm@kvack.org
S: Supported
F: Documentation/core-api/maple_tree.rst
@ -16312,6 +16312,7 @@ F: drivers/pci/controller/dwc/pci-exynos.c
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
@ -22499,7 +22500,6 @@ L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/block/virtio_blk.c
F: drivers/scsi/virtio_scsi.c
F: drivers/vhost/scsi.c
F: include/uapi/linux/virtio_blk.h
F: include/uapi/linux/virtio_scsi.h
@ -22598,6 +22598,16 @@ F: include/linux/vhost_iotlb.h
F: include/uapi/linux/vhost.h
F: kernel/vhost_task.c
VIRTIO HOST (VHOST-SCSI)
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
M: Mike Christie <michael.christie@oracle.com>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/vhost/scsi.c
VIRTIO I2C DRIVER
M: Conghui Chen <conghui.chen@intel.com>
M: Viresh Kumar <viresh.kumar@linaro.org>


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 5
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -47,12 +47,6 @@ unsigned long __get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifndef CONFIG_SMP
/* Nothing to prefetch. */
#define spin_lock_prefetch(lock) do { } while (0)
#endif
extern inline void prefetch(const void *ptr)
{
@ -64,11 +58,4 @@ extern inline void prefetchw(const void *ptr)
__builtin_prefetch(ptr, 1, 3);
}
#ifdef CONFIG_SMP
extern inline void spin_lock_prefetch(const void *ptr)
{
__builtin_prefetch(ptr, 1, 3);
}
#endif
#endif /* __ASM_ALPHA_PROCESSOR_H */


@ -385,8 +385,7 @@ setup_memory(void *kernel_end)
#endif /* CONFIG_BLK_DEV_INITRD */
}
int __init
page_is_ram(unsigned long pfn)
int page_is_ram(unsigned long pfn)
{
struct memclust_struct * cluster;
struct memdesc_struct * memdesc;


@ -359,14 +359,6 @@ static inline void prefetchw(const void *ptr)
asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}
#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
asm volatile(ARM64_LSE_ATOMIC_INSN(
"prfm pstl1strm, %a0",
"nop") : : "p" (ptr));
}
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);


@ -634,7 +634,6 @@ ia64_imva (void *addr)
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE L1_CACHE_BYTES
static inline void
@ -649,8 +648,6 @@ prefetchw (const void *x)
ia64_lfetch_excl(ia64_lfhint_none, x);
}
#define spin_lock_prefetch(x) prefetchw(x)
extern unsigned long boot_option_idle_override;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,


@ -58,8 +58,6 @@
#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
#define ARCH_HAS_SPINLOCK_PREFETCH 1
#define spin_lock_prefetch(x) prefetch(x)
#define PREFETCH_STRIDE 128
#ifdef __OCTEON__


@ -2,7 +2,7 @@
#
config LIGHTWEIGHT_SPINLOCK_CHECK
bool "Enable lightweight spinlock checks"
depends on SMP && !DEBUG_SPINLOCK
depends on DEBUG_KERNEL && SMP && !DEBUG_SPINLOCK
default y
help
Add checks with low performance impact to the spinlock functions


@ -117,7 +117,7 @@ char *strchr(const char *s, int c)
return NULL;
}
int puts(const char *s)
static int puts(const char *s)
{
const char *nuline = s;
@ -172,7 +172,7 @@ static int print_num(unsigned long num, int base)
return 0;
}
int printf(const char *fmt, ...)
static int printf(const char *fmt, ...)
{
va_list args;
int i = 0;
@ -204,13 +204,13 @@ void abort(void)
}
#undef malloc
void *malloc(size_t size)
static void *malloc(size_t size)
{
return malloc_gzip(size);
}
#undef free
void free(void *ptr)
static void free(void *ptr)
{
return free_gzip(ptr);
}
@ -278,7 +278,7 @@ static void parse_elf(void *output)
free(phdrs);
}
unsigned long decompress_kernel(unsigned int started_wide,
asmlinkage unsigned long __visible decompress_kernel(unsigned int started_wide,
unsigned int command_line,
const unsigned int rd_start,
const unsigned int rd_end)


@ -14,6 +14,8 @@
#define dma_outb outb
#define dma_inb inb
extern unsigned long pcxl_dma_start;
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.


@ -12,6 +12,10 @@ extern void mcount(void);
extern unsigned long sys_call_table[];
extern unsigned long return_address(unsigned int);
struct ftrace_regs;
extern void ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr, unsigned long org_sp_gr3,
struct ftrace_regs *fregs);
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_caller(void);


@ -7,8 +7,6 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */
static inline void arch_spin_val_check(int lock_val)
{
if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))


@ -4,6 +4,10 @@
#define __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46
#define SPINLOCK_BREAK_INSN 0x0000c006 /* break 6,6 */
#ifndef __ASSEMBLY__
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
@ -27,6 +31,8 @@ typedef struct {
volatile unsigned int counter;
} arch_rwlock_t;
#endif /* __ASSEMBLY__ */
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
.counter = __ARCH_RW_LOCK_UNLOCKED__ }


@ -25,6 +25,7 @@
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>
#include <asm/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
@ -406,7 +407,7 @@
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,3f
b \fault
stw \spc,0(\tmp)
stw \tmp1,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2: LDREG 0(\ptp),\pte
@ -415,24 +416,22 @@
.endm
/* Release page_table_lock without reloading lock address.
Note that the values in the register spc are limited to
NR_SPACE_IDS (262144). Thus, the stw instruction always
stores a nonzero value even when register spc is 64 bits.
We use an ordered store to ensure all prior accesses are
performed prior to releasing the lock. */
.macro ptl_unlock0 spc,tmp
.macro ptl_unlock0 spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98: or,COND(=) %r0,\spc,%r0
stw,ma \spc,0(\tmp)
98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
or,COND(=) %r0,\spc,%r0
stw,ma \tmp2,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release page_table_lock. */
.macro ptl_unlock1 spc,tmp
.macro ptl_unlock1 spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98: get_ptl \tmp
ptl_unlock0 \spc,\tmp
ptl_unlock0 \spc,\tmp,\tmp2
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
@ -1125,7 +1124,7 @@ dtlb_miss_20w:
idtlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1151,7 +1150,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1185,7 +1184,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1218,7 +1217,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1247,7 +1246,7 @@ dtlb_miss_20:
idtlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1275,7 +1274,7 @@ nadtlb_miss_20:
idtlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1320,7 +1319,7 @@ itlb_miss_20w:
iitlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1344,7 +1343,7 @@ naitlb_miss_20w:
iitlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1378,7 +1377,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1402,7 +1401,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1432,7 +1431,7 @@ itlb_miss_20:
iitlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1452,7 +1451,7 @@ naitlb_miss_20:
iitlbt pte,prot
ptl_unlock1 spc,t0
ptl_unlock1 spc,t0,t1
rfir
nop
@ -1482,7 +1481,7 @@ dbit_trap_20w:
idtlbt pte,prot
ptl_unlock0 spc,t0
ptl_unlock0 spc,t0,t1
rfir
nop
#else
@ -1508,7 +1507,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock0 spc,t0
ptl_unlock0 spc,t0,t1
rfir
nop
@ -1528,7 +1527,7 @@ dbit_trap_20:
idtlbt pte,prot
ptl_unlock0 spc,t0
ptl_unlock0 spc,t0,t1
rfir
nop
#endif


@ -74,8 +74,8 @@
static DEFINE_SPINLOCK(pdc_lock);
#endif
unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8);
unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8);
static unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8);
static unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8);
#ifdef CONFIG_64BIT
#define WIDE_FIRMWARE 0x1
@ -334,7 +334,7 @@ int __pdc_cpu_rendezvous(void)
/**
* pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state
*/
void pdc_cpu_rendezvous_lock(void)
void pdc_cpu_rendezvous_lock(void) __acquires(&pdc_lock)
{
spin_lock(&pdc_lock);
}
@ -342,7 +342,7 @@ void pdc_cpu_rendezvous_lock(void)
/**
* pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state
*/
void pdc_cpu_rendezvous_unlock(void)
void pdc_cpu_rendezvous_unlock(void) __releases(&pdc_lock)
{
spin_unlock(&pdc_lock);
}


@ -53,7 +53,7 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
static ftrace_func_t ftrace_func;
void notrace __hot ftrace_function_trampoline(unsigned long parent,
asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr,
unsigned long org_sp_gr3,
struct ftrace_regs *fregs)


@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/libgcc.h>
#include <linux/string.h>
EXPORT_SYMBOL(memset);
@ -92,12 +93,6 @@ EXPORT_SYMBOL($$divI_12);
EXPORT_SYMBOL($$divI_14);
EXPORT_SYMBOL($$divI_15);
extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);


@ -39,7 +39,7 @@ static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;
extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char *pcxl_res_map;
static int pcxl_res_hint;
@ -381,7 +381,7 @@ pcxl_dma_init(void)
pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
get_order(pcxl_res_size));
memset(pcxl_res_map, 0, pcxl_res_size);
proc_gsc_root = proc_mkdir("gsc", NULL);
proc_gsc_root = proc_mkdir("bus/gsc", NULL);
if (!proc_gsc_root)
printk(KERN_WARNING
"pcxl_dma_init: Unable to create gsc /proc dir entry\n");


@ -354,10 +354,8 @@ static int __init pdt_initcall(void)
return -ENODEV;
kpdtd_task = kthread_run(pdt_mainloop, NULL, "kpdtd");
if (IS_ERR(kpdtd_task))
return PTR_ERR(kpdtd_task);
return 0;
return PTR_ERR_OR_ZERO(kpdtd_task);
}
late_initcall(pdt_initcall);


@ -57,7 +57,7 @@ struct rdr_tbl_ent {
static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
static DEFINE_SPINLOCK(perf_lock);
struct parisc_device *cpu_device __read_mostly;
static struct parisc_device *cpu_device __read_mostly;
/* RDRs to write for PCX-W */
static const int perf_rdrs_W[] =


@ -26,6 +26,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/smp.h>
#include <asm/pdcpat.h>
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>


@ -40,11 +40,6 @@
static char __initdata command_line[COMMAND_LINE_SIZE];
/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
static void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
@ -196,48 +191,6 @@ const struct seq_operations cpuinfo_op = {
.show = show_cpuinfo
};
static void __init parisc_proc_mkdir(void)
{
/*
** Can't call proc_mkdir() until after proc_root_init() has been
** called by start_kernel(). In other words, this code can't
** live in arch/.../setup.c because start_parisc() calls
** start_kernel().
*/
switch (boot_cpu_data.cpu_type) {
case pcxl:
case pcxl2:
if (NULL == proc_gsc_root)
{
proc_gsc_root = proc_mkdir("bus/gsc", NULL);
}
break;
case pcxt_:
case pcxu:
case pcxu_:
case pcxw:
case pcxw_:
case pcxw2:
if (NULL == proc_runway_root)
{
proc_runway_root = proc_mkdir("bus/runway", NULL);
}
break;
case mako:
case mako2:
if (NULL == proc_mckinley_root)
{
proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
}
break;
default:
/* FIXME: this was added to prevent the compiler
* complaining about missing pcx, pcxs and pcxt
* I'm assuming they have neither gsc nor runway */
break;
}
}
static struct resource central_bus = {
.name = "Central Bus",
.start = F_EXTEND(0xfff80000),
@ -294,7 +247,6 @@ static int __init parisc_init(void)
{
u32 osid = (OS_ID_LINUX << 16);
parisc_proc_mkdir();
parisc_init_resources();
do_device_inventory(); /* probe for hardware */


@ -423,7 +423,7 @@ static void check_syscallno_in_delay_branch(struct pt_regs *regs)
regs->gr[31] -= 8; /* delayed branching */
/* Get assembler opcode of code in delay branch */
uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
uaddr = (u32 __user *) ((regs->gr[31] & ~3) + 4);
err = get_user(opcode, uaddr);
if (err)
return;


@ -27,17 +27,12 @@
#include <linux/elf-randomize.h>
/*
* Construct an artificial page offset for the mapping based on the virtual
* Construct an artificial page offset for the mapping based on the physical
* address of the kernel file mapping variable.
* If filp is zero the calculated pgoff value aliases the memory of the given
* address. This is useful for io_uring where the mapping shall alias a kernel
* address and a userspace address where both the kernel and the userspace
* access the same memory region.
*/
#define GET_FILP_PGOFF(filp, addr) \
((filp ? (((unsigned long) filp->f_mapping) >> 8) \
& ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL) \
+ (addr >> PAGE_SHIFT))
#define GET_FILP_PGOFF(filp) \
(filp ? (((unsigned long) filp->f_mapping) >> 8) \
& ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
static unsigned long shared_align_offset(unsigned long filp_pgoff,
unsigned long pgoff)
@ -117,7 +112,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
filp_pgoff = GET_FILP_PGOFF(filp, addr);
filp_pgoff = GET_FILP_PGOFF(filp);
if (flags & MAP_FIXED) {
/* Even MAP_FIXED mappings must reside within TASK_SIZE */


@ -39,6 +39,7 @@ registers).
#include <asm/assembly.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/spinlock_types.h>
#include <linux/linkage.h>
@ -66,6 +67,16 @@ registers).
stw \reg1, 0(%sr2,\reg2)
.endm
/* raise exception if spinlock content is not zero or
* __ARCH_SPIN_LOCK_UNLOCKED_VAL */
.macro spinlock_check spin_val,tmpreg
#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg
andcm,= \spin_val, \tmpreg, %r0
.word SPINLOCK_BREAK_INSN
#endif
.endm
.text
.import syscall_exit,code
@ -508,7 +519,8 @@ lws_start:
lws_exit_noerror:
lws_pagefault_enable %r1,%r21
stw,ma %r20, 0(%sr2,%r20)
ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
stw,ma %r21, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
b lws_exit
copy %r0, %r21
@ -521,7 +533,8 @@ lws_wouldblock:
lws_pagefault:
lws_pagefault_enable %r1,%r21
stw,ma %r20, 0(%sr2,%r20)
ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
stw,ma %r21, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 3(%r0),%r28
b lws_exit
@ -619,6 +632,7 @@ lws_compare_and_swap:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@ -772,6 +786,7 @@ cas2_lock_start:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@ -1001,6 +1016,7 @@ atomic_xchg_start:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@ -1199,6 +1215,7 @@ atomic_store_start:
/* Try to acquire the lock */
LDCW 0(%sr2,%r20), %r28
spinlock_check %r28, %r21
comclr,<> %r0, %r28, %r0
b,n lws_wouldblock
@ -1330,7 +1347,7 @@ ENTRY(lws_lock_start)
/* lws locks */
.rept 256
/* Keep locks aligned at 16-bytes */
.word 1
.word __ARCH_SPIN_LOCK_UNLOCKED_VAL
.word 0
.word 0
.word 0


@ -11,6 +11,7 @@
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <asm/unaligned.h>
#include <asm/hardirq.h>
#include <asm/traps.h>


@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/libgcc.h>
union ull_union {
unsigned long long ull;
@ -9,7 +10,7 @@ union ull_union {
} ui;
};
int __ucmpdi2(unsigned long long a, unsigned long long b)
word_type __ucmpdi2(unsigned long long a, unsigned long long b)
{
union ull_union au = {.ull = a};
union ull_union bu = {.ull = b};


@ -192,31 +192,31 @@ int fixup_exception(struct pt_regs *regs)
* For implementation see handle_interruption() in traps.c
*/
static const char * const trap_description[] = {
[1] "High-priority machine check (HPMC)",
[2] "Power failure interrupt",
[3] "Recovery counter trap",
[5] "Low-priority machine check",
[6] "Instruction TLB miss fault",
[7] "Instruction access rights / protection trap",
[8] "Illegal instruction trap",
[9] "Break instruction trap",
[10] "Privileged operation trap",
[11] "Privileged register trap",
[12] "Overflow trap",
[13] "Conditional trap",
[14] "FP Assist Exception trap",
[15] "Data TLB miss fault",
[16] "Non-access ITLB miss fault",
[17] "Non-access DTLB miss fault",
[18] "Data memory protection/unaligned access trap",
[19] "Data memory break trap",
[20] "TLB dirty bit trap",
[21] "Page reference trap",
[22] "Assist emulation trap",
[25] "Taken branch trap",
[26] "Data memory access rights trap",
[27] "Data memory protection ID trap",
[28] "Unaligned data reference trap",
[1] = "High-priority machine check (HPMC)",
[2] = "Power failure interrupt",
[3] = "Recovery counter trap",
[5] = "Low-priority machine check",
[6] = "Instruction TLB miss fault",
[7] = "Instruction access rights / protection trap",
[8] = "Illegal instruction trap",
[9] = "Break instruction trap",
[10] = "Privileged operation trap",
[11] = "Privileged register trap",
[12] = "Overflow trap",
[13] = "Conditional trap",
[14] = "FP Assist Exception trap",
[15] = "Data TLB miss fault",
[16] = "Non-access ITLB miss fault",
[17] = "Non-access DTLB miss fault",
[18] = "Data memory protection/unaligned access trap",
[19] = "Data memory break trap",
[20] = "TLB dirty bit trap",
[21] = "Page reference trap",
[22] = "Assist emulation trap",
[25] = "Taken branch trap",
[26] = "Data memory access rights trap",
[27] = "Data memory protection ID trap",
[28] = "Unaligned data reference trap",
};
const char *trap_name(unsigned long code)


@ -523,10 +523,6 @@ void mark_rodata_ro(void)
void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);
#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __ro_after_init;
#endif
void __init mem_init(void)
{
/* Do sanity checks on IPC (compat) structures */


@ -27,7 +27,7 @@
*/
void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
void __iomem *addr;
uintptr_t addr;
struct vm_struct *area;
unsigned long offset, last_addr;
pgprot_t pgprot;
@ -79,10 +79,9 @@ void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
if (!area)
return NULL;
addr = (void __iomem *) area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
vunmap(addr);
addr = (uintptr_t) area->addr;
if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
vunmap(area->addr);
return NULL;
}


@ -393,7 +393,6 @@ int validate_sp_size(unsigned long sp, struct task_struct *p,
*/
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void prefetch(const void *x)
{
@ -411,8 +410,6 @@ static inline void prefetchw(const void *x)
__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
#define spin_lock_prefetch(x) prefetchw(x)
/* asm stubs */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);


@ -37,6 +37,10 @@ static inline void flush_dcache_page(struct page *page)
#define flush_icache_user_page(vma, pg, addr, len) \
flush_icache_mm(vma->vm_mm, 0)
#ifdef CONFIG_64BIT
#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
#endif
#ifndef CONFIG_SMP
#define flush_icache_all() local_flush_icache_all()


@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* Relaxed I/O memory access primitives. These follow the Device memory
* ordering rules but do not guarantee any ordering relative to Normal memory
* accesses. These are defined to order the indicated access (either a read or
* write) with all other I/O memory accesses. Since the platform specification
* defines that all I/O regions are strongly ordered on channel 2, no explicit
* fences are required to enforce this ordering.
* write) with all other I/O memory accesses to the same peripheral. Since the
* platform specification defines that all I/O regions are strongly ordered on
* channel 0, no explicit fences are required to enforce this ordering.
*/
/* FIXME: These are now the same as asm-generic */
#define __io_rbr() do {} while (0)
@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#endif
/*
* I/O memory access primitives. Reads are ordered relative to any
* following Normal memory access. Writes are ordered relative to any prior
* Normal memory access. The memory barriers here are necessary as RISC-V
* I/O memory access primitives. Reads are ordered relative to any following
* Normal memory read and delay() loop. Writes are ordered relative to any
* prior Normal memory write. The memory barriers here are necessary as RISC-V
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory")
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory")
#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })


@ -188,6 +188,8 @@ extern struct pt_alloc_ops pt_ops __initdata;
#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)
extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)


@ -3,12 +3,14 @@
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
extern bool pgtable_l4_enabled, pgtable_l5_enabled;
#define IOREMAP_MAX_ORDER (PUD_SHIFT)
#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
return true;
return pgtable_l4_enabled || pgtable_l5_enabled;
}
#define arch_vmap_pmd_supported arch_vmap_pmd_supported


@ -17,6 +17,11 @@
#include <asm/smp.h>
#include <asm/pgtable.h>
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return phys_id == cpuid_to_hartid_map(cpu);
}
/*
* Returns the hart ID of the given device tree node, or -ENODEV if the node
* isn't an enabled and valid RISC-V hart node.


@ -281,7 +281,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
kbuf.buffer = initrd;
kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = false;
kbuf.top_down = true;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
* sym, instead of searching the whole relsec.
*/
case R_RISCV_PCREL_HI20:
case R_RISCV_CALL_PLT:
case R_RISCV_CALL:
*(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
ENCODE_UJTYPE_IMM(val - addr);


@ -61,11 +61,6 @@ int riscv_hartid_to_cpuid(unsigned long hartid)
return -ENOENT;
}
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return phys_id == cpuid_to_hartid_map(cpu);
}
static void ipi_stop(void)
{
set_cpu_online(smp_processor_id(), false);


@ -26,12 +26,13 @@
#include <linux/kfence.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/ptdump.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
@ -214,8 +215,13 @@ static void __init setup_bootmem(void)
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
phys_ram_end = memblock_end_of_DRAM();
/*
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
*/
if (!IS_ENABLED(CONFIG_XIP_KERNEL))
phys_ram_base = memblock_start_of_DRAM();
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
/*
* In 64-bit, any use of __va/__pa before this point is wrong as we


@ -22,7 +22,6 @@
* region is not and then we have to go down to the PUD level.
*/
extern pgd_t early_pg_dir[PTRS_PER_PGD];
pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;


@ -213,7 +213,6 @@ unsigned long __get_wchan(struct task_struct *task);
*/
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void prefetch(const void *x)
{
@ -239,8 +238,6 @@ static inline void prefetchw(const void *x)
: "r" (x));
}
#define spin_lock_prefetch(x) prefetchw(x)
#define HAVE_ARCH_PICK_MMAP_LAYOUT
int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);


@ -63,7 +63,14 @@ void load_stage2_idt(void)
set_idt_entry(X86_TRAP_PF, boot_page_fault);
#ifdef CONFIG_AMD_MEM_ENCRYPT
set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
/*
* Clear the second stage #VC handler in case guest types
* needing #VC have not been detected.
*/
if (sev_status & BIT(1))
set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
else
set_idt_entry(X86_TRAP_VC, NULL);
#endif
load_boot_idt(&boot_idt_desc);


@ -405,10 +405,13 @@ void sev_enable(struct boot_params *bp)
bp->cc_blob_address = 0;
/*
* Setup/preliminary detection of SNP. This will be sanity-checked
* against CPUID/MSR values later.
* Do an initial SEV capability check before snp_init(), which
* loads the CPUID page; the same checks done afterwards are
* independent of the hypervisor and are trustworthy.
*
* If the HV fakes SEV support, the guest will crash'n'burn
* which is good enough.
*/
snp = snp_init(bp);
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
@ -429,6 +432,36 @@ void sev_enable(struct boot_params *bp)
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1)))
return;
/*
* Setup/preliminary detection of SNP. This will be sanity-checked
* against CPUID/MSR values later.
*/
snp = snp_init(bp);
/* Now repeat the checks with the SNP CPUID table. */
/* Recheck the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return;
/*
* Recheck for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1))) {
if (snp)
error("SEV-SNP support indicated by CC blob, but not CPUID.");


@ -299,8 +299,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
if (end >= DEFAULT_MAP_WINDOW)
end = DEFAULT_MAP_WINDOW;
end -= len;
if (end > start) {


@ -15,6 +15,7 @@
#include <asm/mpspec.h>
#include <asm/x86_init.h>
#include <asm/cpufeature.h>
#include <asm/irq_vectors.h>
#ifdef CONFIG_ACPI_APEI
# include <asm/pgtable_types.h>
@ -31,6 +32,7 @@ extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;
extern bool acpi_int_src_ovr[NR_IRQS_LEGACY];
extern u8 acpi_sci_flags;
extern u32 acpi_sci_override_gsi;


@ -21,7 +21,7 @@
#define FUNCTION_PADDING
#endif
#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && !defined(BULID_VDSO)
#if (CONFIG_FUNCTION_ALIGNMENT > 8) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
# define __FUNC_ALIGN __ALIGN; FUNCTION_PADDING
#else
# define __FUNC_ALIGN __ALIGN


@ -586,7 +586,6 @@ extern char ignore_fpu_irq;
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifdef CONFIG_X86_32
# define BASE_PREFETCH ""
@ -620,11 +619,6 @@ static __always_inline void prefetchw(const void *x)
"m" (*(const char *)x));
}
static inline void spin_lock_prefetch(const void *x)
{
prefetchw(x);
}
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
TOP_OF_KERNEL_STACK_PADDING)


@ -56,7 +56,7 @@
#define GDT_ENTRY_INVALID_SEG 0
#ifdef CONFIG_X86_32
#if defined(CONFIG_X86_32) && !defined(BUILD_VDSO32_64)
/*
* The layout of the per-CPU GDT under Linux:
*


@ -52,6 +52,7 @@ int acpi_lapic;
int acpi_ioapic;
int acpi_strict;
int acpi_disable_cmcff;
bool acpi_int_src_ovr[NR_IRQS_LEGACY];
/* ACPI SCI override configuration */
u8 acpi_sci_flags __initdata;
@ -588,6 +589,9 @@ acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
acpi_table_print_madt_entry(&header->common);
if (intsrc->source_irq < NR_IRQS_LEGACY)
acpi_int_src_ovr[intsrc->source_irq] = true;
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
acpi_sci_ioapic_setup(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,


@ -73,6 +73,7 @@ static const int amd_erratum_1054[] =
static const int amd_zenbleed[] =
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
static const int amd_div0[] =


@ -722,14 +722,9 @@ void submit_bio_noacct(struct bio *bio)
struct block_device *bdev = bio->bi_bdev;
struct request_queue *q = bdev_get_queue(bdev);
blk_status_t status = BLK_STS_IOERR;
struct blk_plug *plug;
might_sleep();
plug = blk_mq_plug(bio);
if (plug && plug->nowait)
bio->bi_opf |= REQ_NOWAIT;
/*
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue does not support NOWAIT.
@ -1059,7 +1054,6 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
plug->rq_count = 0;
plug->multiple_queues = false;
plug->has_elevator = false;
plug->nowait = false;
INIT_LIST_HEAD(&plug->cb_list);
/*


@ -3301,11 +3301,12 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
if (qos[QOS_MIN] > qos[QOS_MAX])
goto einval;
if (enable) {
if (enable && !ioc->enabled) {
blk_stat_enable_accounting(disk->queue);
blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
ioc->enabled = true;
} else {
} else if (!enable && ioc->enabled) {
blk_stat_disable_accounting(disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
ioc->enabled = false;
}


@ -358,13 +358,14 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
task_io_account_write(bio->bi_iter.bi_size);
}
if (iocb->ki_flags & IOCB_NOWAIT)
bio->bi_opf |= REQ_NOWAIT;
if (iocb->ki_flags & IOCB_HIPRI) {
bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
bio->bi_opf |= REQ_POLLED;
submit_bio(bio);
WRITE_ONCE(iocb->private, bio);
} else {
if (iocb->ki_flags & IOCB_NOWAIT)
bio->bi_opf |= REQ_NOWAIT;
submit_bio(bio);
}
return -EIOCBQUEUED;


@ -173,6 +173,9 @@ static void internal_free_pages_locked(struct ivpu_bo *bo)
{
unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
for (i = 0; i < npages; i++)
put_page(bo->pages[i]);
@ -587,6 +590,11 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
if (bo->flags & DRM_IVPU_BO_WC)
set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
else if (bo->flags & DRM_IVPU_BO_UNCACHED)
set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);
prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
if (!bo->kvaddr) {


@ -392,18 +392,31 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
struct qaic_manage_trans_dma_xfer *in_trans,
struct ioctl_resources *resources, struct dma_xfer *xfer)
{
u64 xfer_start_addr, remaining, end, total;
unsigned long need_pages;
struct page **page_list;
unsigned long nr_pages;
struct sg_table *sgt;
u64 xfer_start_addr;
int ret;
int i;
xfer_start_addr = in_trans->addr + resources->xferred_dma_size;
if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
return -EINVAL;
need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) -
resources->xferred_dma_size, PAGE_SIZE);
if (in_trans->size < resources->xferred_dma_size)
return -EINVAL;
remaining = in_trans->size - resources->xferred_dma_size;
if (remaining == 0)
return 0;
if (check_add_overflow(xfer_start_addr, remaining, &end))
return -EINVAL;
total = remaining + offset_in_page(xfer_start_addr);
if (total >= SIZE_MAX)
return -EINVAL;
need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
nr_pages = need_pages;
@ -435,7 +448,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
offset_in_page(xfer_start_addr),
in_trans->size - resources->xferred_dma_size, GFP_KERNEL);
remaining, GFP_KERNEL);
if (ret) {
ret = -ENOMEM;
goto free_sgt;
@ -566,9 +579,6 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
QAIC_MANAGE_EXT_MSG_LENGTH)
return -ENOMEM;
if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size)
return -EINVAL;
xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
if (!xfer)
return -ENOMEM;


@ -1021,6 +1021,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
bo->dbc = dbc;
srcu_read_unlock(&dbc->ch_lock, rcu_id);
drm_gem_object_put(obj);
kfree(slice_ent);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);


@ -470,6 +470,45 @@ static const struct dmi_system_id asus_laptop[] = {
{ }
};
static const struct dmi_system_id tongfang_gm_rg[] = {
{
.ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
},
},
{ }
};
static const struct dmi_system_id maingear_laptop[] = {
{
.ident = "MAINGEAR Vector Pro 2 15",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
}
},
{
.ident = "MAINGEAR Vector Pro 2 17",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
},
},
{ }
};
static const struct dmi_system_id pcspecialist_laptop[] = {
{
.ident = "PCSpecialist Elimina Pro 16 M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
DMI_MATCH(DMI_PRODUCT_NAME, "Elimina Pro 16 M"),
},
},
{ }
};
static const struct dmi_system_id lg_laptop[] = {
{
.ident = "LG Electronics 17U70P",
@ -493,6 +532,9 @@ struct irq_override_cmp {
static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
{ maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
{ pcspecialist_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
{ lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
};
@ -512,6 +554,28 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
return entry->override;
}
#ifdef CONFIG_X86
/*
* Always use the MADT override info, except for the i8042 PS/2 ctrl
* IRQs (1 and 12). For these the DSDT IRQ settings should sometimes
* be used otherwise PS/2 keyboards / mice will not work.
*/
if (gsi != 1 && gsi != 12)
return true;
/* If the override comes from an INT_SRC_OVR MADT entry, honor it. */
if (acpi_int_src_ovr[gsi])
return true;
/*
* IRQ override isn't needed on modern AMD Zen systems and
* this override breaks active low IRQs on AMD Ryzen 6000 and
* newer systems. Skip it.
*/
if (boot_cpu_has(X86_FEATURE_ZEN))
return false;
#endif
return true;
}


@ -1714,6 +1714,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"BSG1160", },
{"BSG2150", },
{"CSC3551", },
{"CSC3556", },
{"INT33FE", },
{"INT3515", },
/* Non-conforming _HID for Cirrus Logic already released */


@ -6617,6 +6617,7 @@ err_init_binder_device_failed:
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
binder_alloc_shrinker_exit();
return ret;
}


@ -1087,6 +1087,12 @@ int binder_alloc_shrinker_init(void)
return ret;
}
void binder_alloc_shrinker_exit(void)
{
unregister_shrinker(&binder_shrinker);
list_lru_destroy(&binder_alloc_lru);
}
/**
* check_buffer() - verify that buffer/offset is safe to access
* @alloc: binder_alloc for this proc


@ -129,6 +129,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_shrinker_exit(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,


@ -532,7 +532,7 @@ CPU_SHOW_VULN_FALLBACK(srbds);
CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gather_data_sampling);
CPU_SHOW_VULN_FALLBACK(gds);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@ -546,7 +546,7 @@ static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gather_data_sampling, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,


@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
static void zram_bio_read(struct zram *zram, struct bio *bio)
{
struct bvec_iter iter;
struct bio_vec bv;
unsigned long start_time;
unsigned long start_time = bio_start_io_acct(bio);
struct bvec_iter iter = bio->bi_iter;
start_time = bio_start_io_acct(bio);
bio_for_each_segment(bv, bio, iter) {
do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
struct bio_vec bv = bio_iter_iovec(bio, iter);
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_reads);
@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
}
bio_advance_iter_single(bio, &iter, bv.bv_len);
} while (iter.bi_size);
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}
static void zram_bio_write(struct zram *zram, struct bio *bio)
{
struct bvec_iter iter;
struct bio_vec bv;
unsigned long start_time;
unsigned long start_time = bio_start_io_acct(bio);
struct bvec_iter iter = bio->bi_iter;
start_time = bio_start_io_acct(bio);
bio_for_each_segment(bv, bio, iter) {
do {
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
struct bio_vec bv = bio_iter_iovec(bio, iter);
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_writes);
@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
}
bio_advance_iter_single(bio, &iter, bv.bv_len);
} while (iter.bi_size);
bio_end_io_acct(bio, start_time);
bio_endio(bio);
}


@ -89,7 +89,7 @@ static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
tpm_tis_flush(iobase);
}
static int interrupts = -1;
static int interrupts;
module_param(interrupts, int, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
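
With the default flipped from -1 to 0 above, interrupts now stay off unless explicitly requested; a hedged usage sketch for opting back in:

    # modular driver
    modprobe tpm_tis interrupts=1
    # or, for a built-in driver, on the kernel command line
    tpm_tis.interrupts=1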
@ -183,7 +183,7 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
.ident = "UPX-TGL",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
DMI_MATCH(DMI_PRODUCT_VERSION, "UPX-TGL"),
DMI_MATCH(DMI_PRODUCT_NAME, "UPX-TGL01"),
},
},
{}


@ -3,13 +3,6 @@
# Counter devices
#
menuconfig COUNTER
tristate "Counter support"
help
This enables counter device support through the Generic Counter
interface. You only need to enable this, if you also want to enable
one or more of the counter device drivers below.
config I8254
tristate
select COUNTER
@ -25,6 +18,13 @@ config I8254
If built as a module its name will be i8254.
menuconfig COUNTER
tristate "Counter support"
help
This enables counter device support through the Generic Counter
interface. You only need to enable this, if you also want to enable
one or more of the counter device drivers below.
if COUNTER
config 104_QUAD_8


@ -1012,8 +1012,8 @@ static int amd_pstate_update_status(const char *buf, size_t size)
return 0;
}
static ssize_t show_status(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
@ -1024,7 +1024,7 @@ static ssize_t show_status(struct kobject *kobj,
return ret;
}
static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
static ssize_t status_store(struct device *a, struct device_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
@ -1043,7 +1043,7 @@ cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
define_one_global_rw(status);
static DEVICE_ATTR_RW(status);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
@ -1062,7 +1062,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
};
static struct attribute *pstate_global_attributes[] = {
&status.attr,
&dev_attr_status.attr,
NULL
};
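
Since status is now a regular device attribute, it is still exercised through sysfs; a sketch, with the path and mode names as documented in the amd-pstate admin guide (values include disable, passive, and active):

    # read the current operating mode
    cat /sys/devices/system/cpu/amd_pstate/status
    # switch to the EPP-based driver (needs root)
    echo active > /sys/devices/system/cpu/amd_pstate/status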


@ -120,20 +120,6 @@ static void psci_pd_remove(void)
}
}
static bool psci_pd_try_set_osi_mode(void)
{
int ret;
if (!psci_has_osi_support())
return false;
ret = psci_set_osi_mode(true);
if (ret)
return false;
return true;
}
static void psci_cpuidle_domain_sync_state(struct device *dev)
{
/*
@ -152,15 +138,12 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *node;
bool use_osi;
bool use_osi = psci_has_osi_support();
int ret = 0, pd_count = 0;
if (!np)
return -ENODEV;
/* If OSI mode is supported, let's try to enable it. */
use_osi = psci_pd_try_set_osi_mode();
/*
* Parse child nodes for the "#power-domain-cells" property and
* initialize a genpd/genpd-of-provider pair when it's found.
@ -170,33 +153,37 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
continue;
ret = psci_pd_init(node, use_osi);
if (ret)
goto put_node;
if (ret) {
of_node_put(node);
goto exit;
}
pd_count++;
}
/* Bail out if not using the hierarchical CPU topology. */
if (!pd_count)
goto no_pd;
return 0;
/* Link genpd masters/subdomains to model the CPU topology. */
ret = dt_idle_pd_init_topology(np);
if (ret)
goto remove_pd;
/* let's try to enable OSI. */
ret = psci_set_osi_mode(use_osi);
if (ret)
goto remove_pd;
pr_info("Initialized CPU PM domain topology using %s mode\n",
use_osi ? "OSI" : "PC");
return 0;
put_node:
of_node_put(node);
remove_pd:
dt_idle_pd_remove_topology(np);
psci_pd_remove();
exit:
pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
if (use_osi)
psci_set_osi_mode(false);
return ret;
}


@ -152,6 +152,30 @@ int dt_idle_pd_init_topology(struct device_node *np)
return 0;
}
int dt_idle_pd_remove_topology(struct device_node *np)
{
struct device_node *node;
struct of_phandle_args child, parent;
int ret;
for_each_child_of_node(np, node) {
if (of_parse_phandle_with_args(node, "power-domains",
"#power-domain-cells", 0, &parent))
continue;
child.np = node;
child.args_count = 0;
ret = of_genpd_remove_subdomain(&parent, &child);
of_node_put(parent.np);
if (ret) {
of_node_put(node);
return ret;
}
}
return 0;
}
struct device *dt_idle_attach_cpu(int cpu, const char *name)
{
struct device *dev;


@ -14,6 +14,8 @@ struct generic_pm_domain *dt_idle_pd_alloc(struct device_node *np,
int dt_idle_pd_init_topology(struct device_node *np);
int dt_idle_pd_remove_topology(struct device_node *np);
struct device *dt_idle_attach_cpu(int cpu, const char *name);
void dt_idle_detach_cpu(struct device *dev);
@ -36,6 +38,11 @@ static inline int dt_idle_pd_init_topology(struct device_node *np)
return 0;
}
static inline int dt_idle_pd_remove_topology(struct device_node *np)
{
return 0;
}
static inline struct device *dt_idle_attach_cpu(int cpu, const char *name)
{
return NULL;


@ -429,6 +429,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->set_config = gpio_sim_set_config;
gc->to_irq = gpio_sim_to_irq;
gc->free = gpio_sim_free;
gc->can_sleep = true;
ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)


@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#define WS16C48_EXTENT 10
#define WS16C48_EXTENT 11
#define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)
static unsigned int base[MAX_NUM_WS16C48];


@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);


@ -295,7 +295,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (!p->gang_size) {
ret = -EINVAL;
goto free_partial_kdata;
goto free_all_kdata;
}
for (i = 0; i < p->gang_size; ++i) {


@ -1458,6 +1458,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}
/*
* On APUs with >= 64GB, white flickering has been observed w/ SG enabled.
* Disable S/G on such systems until we have a proper fix.
* https://gitlab.freedesktop.org/drm/amd/-/issues/2354
* https://gitlab.freedesktop.org/drm/amd/-/issues/2735
*/
bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
{
switch (amdgpu_sg_display) {
case -1:
break;
case 0:
return false;
case 1:
return true;
default:
return false;
}
if ((totalram_pages() << (PAGE_SHIFT - 10)) +
(adev->gmc.real_vram_size / 1024) >= 64000000) {
DRM_WARN("Disabling S/G due to >=64GB RAM\n");
return false;
}
return true;
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
@ -3696,10 +3722,11 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
if (amdgpu_mcbp == 1)
adev->gfx.mcbp = true;
if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
(adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
adev->gfx.num_gfx_rings)
else if (amdgpu_mcbp == 0)
adev->gfx.mcbp = false;
else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
(adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
adev->gfx.num_gfx_rings)
adev->gfx.mcbp = true;
if (amdgpu_sriov_vf(adev))
@ -4367,6 +4394,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);


@ -551,6 +551,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
return 0;
}
/**
* amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
* fence driver interrupts need to be restored.
*
* @ring: the ring to be checked
*
* Interrupts for rings that belong to GFX IP don't need to be restored
* when the target power state is s0ix.
*
* Return true if need to restore interrupts, false otherwise.
*/
static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool is_gfx_power_domain = false;
switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_SDMA:
/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
is_gfx_power_domain = true;
break;
case AMDGPU_RING_TYPE_GFX:
case AMDGPU_RING_TYPE_COMPUTE:
case AMDGPU_RING_TYPE_KIQ:
case AMDGPU_RING_TYPE_MES:
is_gfx_power_domain = true;
break;
default:
break;
}
return !(adev->in_s0ix && is_gfx_power_domain);
}
/**
* amdgpu_fence_driver_hw_fini - tear down the fence driver
* for all possible rings.
@ -579,7 +614,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_force_completion(ring);
if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
ring->fence_drv.irq_src)
ring->fence_drv.irq_src &&
amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
@ -655,7 +691,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
continue;
/* enable the interrupt */
if (ring->fence_drv.irq_src)
if (ring->fence_drv.irq_src &&
amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}

View File

@ -692,15 +692,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
if (adev->in_s0ix) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
}
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {


@ -397,7 +397,7 @@ void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
if (amdgpu_mcbp_scan(mux) > 0)
amdgpu_mcbp_trigger_preempt(mux);
return;


@ -239,8 +239,13 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
for (i = 1; i < MAX_XCP; i++) {
ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
if (ret)
if (ret == -ENOSPC) {
dev_warn(adev->dev,
"Skip xcp node #%d when out of drm node resource.", i);
return 0;
} else if (ret) {
return ret;
}
/* Redirect all IOCTLs to the primary device */
adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
@ -328,6 +333,9 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
return 0;
for (i = 1; i < MAX_XCP; i++) {
if (!adev->xcp_mgr->xcp[i].ddev)
break;
ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
if (ret)
return ret;
@ -345,6 +353,9 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
return;
for (i = 1; i < MAX_XCP; i++) {
if (!adev->xcp_mgr->xcp[i].ddev)
break;
p_ddev = adev->xcp_mgr->xcp[i].ddev;
drm_dev_unplug(p_ddev);
p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;

View File

@ -471,8 +471,12 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 1505) &&
(adev->gfx.pfp_fw_version >= 1600) &&
(adev->gfx.mec_fw_version >= 512))
adev->gfx.cp_gfx_shadow = true;
(adev->gfx.mec_fw_version >= 512)) {
if (amdgpu_sriov_vf(adev))
adev->gfx.cp_gfx_shadow = true;
else
adev->gfx.cp_gfx_shadow = false;
}
break;
default:
adev->gfx.cp_gfx_shadow = false;

View File

@ -137,14 +137,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
int ret;
int retry_loop;
/* Wait for bootloader to signify that it is ready having bit 31 of
* C2PMSG_35 set to 1. All other bits are expected to be cleared.
* If there is an error in processing command, bits[7:0] will be set.
* This is applicable for PSP v13.0.6 and newer.
*/
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp,
SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000,
0x80000000,
false);
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000, 0xffffffff, false);
if (ret == 0)
return 0;
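
The functional change is the mask argument: 0xffffffff instead of 0x80000000. Assuming psp_wait_for() keeps its usual semantics of polling until (reg & mask) == value, a standalone worked example with a hypothetical register value shows why this matters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical C2PMSG_35 value: ready bit 31 set, but an
	 * error code 0x42 latched in bits[7:0]. */
	uint32_t reg = 0x80000042;

	/* old mask: error bits masked away, wait reports success */
	bool old_ok = (reg & 0x80000000u) == 0x80000000u;
	/* new mask: whole register must equal 0x80000000, error caught */
	bool new_ok = (reg & 0xffffffffu) == 0x80000000u;

	printf("old mask passes: %d, new mask passes: %d\n", old_ok, new_ok);
	return 0;
}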

View File

@@ -1543,11 +1543,7 @@ static bool kfd_ignore_crat(void)
if (ignore_crat)
return true;
#ifndef KFD_SUPPORT_IOMMU_V2
ret = true;
#else
ret = false;
#endif
return ret;
}

View File

@@ -194,11 +194,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
kfd_device_info_set_event_interrupt_class(kfd);
/* Raven */
if (gc_version == IP_VERSION(9, 1, 0) ||
gc_version == IP_VERSION(9, 2, 2))
kfd->device_info.needs_iommu_device = true;
if (gc_version < IP_VERSION(11, 0, 0)) {
/* Navi2x+, Navi1x+ */
if (gc_version == IP_VERSION(10, 3, 6))
@@ -233,10 +228,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
asic_type != CHIP_TONGA)
kfd->device_info.supports_cwsr = true;
if (asic_type == CHIP_KAVERI ||
asic_type == CHIP_CARRIZO)
kfd->device_info.needs_iommu_device = true;
if (asic_type != CHIP_HAWAII && !vf)
kfd->device_info.needs_pci_atomics = true;
}
@@ -249,7 +240,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
uint32_t gfx_target_version = 0;
switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
gfx_target_version = 70000;
@@ -262,7 +252,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!vf)
f2g = &gfx_v8_kfd2kgd;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_HAWAII:
gfx_target_version = 70001;
@@ -298,7 +287,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 90000;
f2g = &gfx_v9_kfd2kgd;
break;
#ifdef KFD_SUPPORT_IOMMU_V2
/* Raven */
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
@@ -306,7 +294,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!vf)
f2g = &gfx_v9_kfd2kgd;
break;
#endif
/* Vega12 */
case IP_VERSION(9, 2, 1):
gfx_target_version = 90004;
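
The kgd2kfd_probe() hunks above all serve one purpose: with needs_iommu_device gone from kfd_device_info_init(), the KFD_SUPPORT_IOMMU_V2 guards around the Kaveri/Carrizo cases and the Raven GC 9.1.0/9.2.2 cases are dropped, so those ASICs now probe unconditionally.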

View File

@@ -2538,18 +2538,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
}
switch (dev->adev->asic_type) {
case CHIP_CARRIZO:
device_queue_manager_init_vi(&dqm->asic_ops);
break;
case CHIP_KAVERI:
device_queue_manager_init_cik(&dqm->asic_ops);
break;
case CHIP_HAWAII:
device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
break;
case CHIP_CARRIZO:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:

View File

@@ -1965,7 +1965,14 @@ int kfd_topology_add_device(struct kfd_node *gpu)
const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
gpu_id = kfd_generate_gpu_id(gpu);
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
if (gpu->xcp && !gpu->xcp->ddev) {
dev_warn(gpu->adev->dev,
"Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
gpu_id);
return 0;
} else {
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
}
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,

View File

@@ -1638,9 +1638,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
if (init_data.flags.gpu_vm_support &&
(amdgpu_sg_display == 0))
init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;

View File

@@ -1320,7 +1320,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
if (!res_pool->funcs->remove_stream_from_ctx ||
if (res_pool->funcs->remove_stream_from_ctx &&
res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return -EINVAL;
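
The operator flip fixes an inverted guard: with a NULL remove_stream_from_ctx hook the old condition short-circuited to true and the whole computation bailed with -EINVAL, while the new one treats a missing hook as nothing to remove:

/* hook == NULL:
 *   old: !NULL || ...   evaluates true  -> spurious -EINVAL
 *   new: NULL && ...    evaluates false -> fall through, continue
 */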

View File

@@ -777,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
/* ensure that the panel is detected */
ASSERT(edp_hpd_high);
if (!edp_hpd_high)
DC_LOG_DC("%s: wait timed out!\n", __func__);
}
void dce110_edp_power_control(

View File

@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,

View File

@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
cur_rom_en = 1;
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
cur_rom_en = 1;
}
}
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,

View File

@@ -1581,9 +1581,9 @@ static int smu_disable_dpms(struct smu_context *smu)
/*
* For SMU 13.0.4/11, PMFW will handle the features disablement properly
* for gpu reset case. Driver involvement is unnecessary.
* for gpu reset and S0i3 cases. Driver involvement is unnecessary.
*/
if (amdgpu_in_reset(adev)) {
if (amdgpu_in_reset(adev) || adev->in_s0ix) {
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):

View File

@@ -588,7 +588,9 @@ err0_out:
return -ENOMEM;
}
static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
bool use_metrics_v3,
bool use_metrics_v2)
{
struct smu_table_context *smu_table= &smu->smu_table;
SmuMetricsExternal_t *metrics_ext =
@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
uint32_t throttler_status = 0;
int i;
if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4900)) {
if (use_metrics_v3) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
(metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
} else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) {
} else if (use_metrics_v2) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
(metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
*value = sienna_cichlid_get_throttler_status_locked(smu);
*value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
break;
case METRICS_CURR_FANSPEED:
*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
gpu_metrics->indep_throttle_status =
smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
sienna_cichlid_throttler_map);
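
The helper now takes the metrics-version selection from its callers instead of re-deriving it from the firmware version on every call. A sketch of the caller-side derivation, using the thresholds visible in the removed lines above (the exact placement in the callers is an assumption):

	bool use_metrics_v3 = (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
			      (smu->smc_fw_version >= 0x3A4900);
	bool use_metrics_v2 = !use_metrics_v3 &&
			      (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
			      (smu->smc_fw_version >= 0x3A4300);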

View File

@@ -332,10 +332,13 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *pptable = smu->smu_table.driver_pptable;
#if 0
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&pptable->SkuTable.OverDriveLimitsMin;
#endif
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -347,18 +350,30 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->maco_support = true;
/*
* We are in the transition to a new OD mechanism.
* Disable the OD feature support for SMU13 temporarily.
* TODO: get this reverted when new OD mechanism online
*/
#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
#else
smu->od_enabled = false;
#endif
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
smu->adev->pm.no_fan =
!(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
return 0;
}
@@ -1140,7 +1155,6 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
uint32_t gen_speed, lane_width;
int i, curr_freq, size = 0;
int32_t min_value, max_value;
@@ -1256,7 +1270,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
(lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
"*" : "");
break;
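
The removed link_width[] table and this one-line change are the same fix: pcie_table->pcie_lane[i] already holds the encoded width level that DECODE_LANE_WIDTH() expects (6 denotes x16, as the string column above shows), so translating it through link_width[] first effectively decoded it twice and the "*" marker for the active link width could land on the wrong entry.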

View File

@@ -81,9 +81,10 @@
#define EPSILON 1
#define smnPCIE_ESM_CTRL 0x193D0
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define MAX_LINK_WIDTH 6
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
@@ -708,16 +709,19 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
*value = UINT_MAX;
@@ -1966,6 +1970,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
struct amdgpu_device *adev = smu->adev;
int ret = 0, inst0, xcc0;
MetricsTable_t *metrics;
u16 link_width_level;
inst0 = adev->sdma.instance[0].aid_id;
xcc0 = GET_INST(GC, 0);
@@ -1993,9 +1998,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->average_socket_power =
SMUQ10_TO_UINT(metrics->SocketPower);
/* Energy is reported in 15.625mJ units */
gpu_metrics->energy_accumulator =
SMUQ10_TO_UINT(metrics->SocketEnergyAcc);
/* Energy counter reported in 15.259uJ (2^-16) units */
gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
gpu_metrics->current_gfxclk =
SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
@@ -2017,8 +2021,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->throttle_status = 0;
if (!(adev->flags & AMD_IS_APU)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
if (link_width_level > MAX_LINK_WIDTH)
link_width_level = 0;
gpu_metrics->pcie_link_width =
smu_v13_0_6_get_current_pcie_link_width_level(smu);
DECODE_LANE_WIDTH(link_width_level);
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
}
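
Two unit fixes land in this hunk, plus a range clamp. A short worked example, assuming SMUQ10_TO_UINT() drops the 10 fractional bits of a Q10 fixed-point value and SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (both assumptions, matching common amdgpu definitions):

/* Temperature: a Q10 reading of 76800 -> 76800 >> 10 = 75 degC,
 * then * 1000 = 75000 millidegrees, the granularity hwmon expects. */

/* Energy: one counter tick = 2^-16 J = 15.2588 uJ (the "15.259uJ"
 * in the new comment), so the raw SocketEnergyAcc counter is stored
 * as-is; the old SMUQ10_TO_UINT() conversion divided it by 1024 and
 * under-reported accumulated energy. */

The clamp on link_width_level makes an out-of-range level decode as 0 instead of feeding a bogus value beyond MAX_LINK_WIDTH (6) into DECODE_LANE_WIDTH().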

View File

@@ -323,10 +323,12 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *smc_pptable = table_context->driver_pptable;
BoardTable_t *BoardTable = &smc_pptable->BoardTable;
#if 0
const OverDriveLimits_t * const overdrive_upperlimits =
&smc_pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&smc_pptable->SkuTable.OverDriveLimitsMin;
#endif
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -338,18 +340,22 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
smu_baco->maco_support = true;
#if 0
if (!overdrive_lowerlimits->FeatureCtrlMask ||
!overdrive_upperlimits->FeatureCtrlMask)
smu->od_enabled = false;
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
#else
smu->od_enabled = false;
#endif
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
return 0;
}

View File

@@ -2517,9 +2517,11 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
};
int int_status[3], i;
if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
if (it6505->enable_drv_hold || !it6505->powered)
return IRQ_HANDLED;
pm_runtime_get_sync(dev);
int_status[0] = it6505_read(it6505, INT_STATUS_01);
int_status[1] = it6505_read(it6505, INT_STATUS_02);
int_status[2] = it6505_read(it6505, INT_STATUS_03);
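
The handler previously bailed out whenever the device was not already runtime-active (pm_runtime_get_if_in_use() <= 0), which could drop real interrupts; it now only bails while the bridge is unpowered, and otherwise takes a synchronous runtime-PM reference with pm_runtime_get_sync() before reading the status registers.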

View File

@@ -774,9 +774,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_NO_HSA |
MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
MIPI_DSI_MODE_NO_EOT_PACKET;
MIPI_DSI_MODE_VIDEO_HSE;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {

View File

@@ -3456,6 +3456,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
connector->base.id, connector->name);
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
connector->base.id, connector->name);
}
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3502,27 +3506,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
case DRM_EDID_PT_ANALOG_CSYNC:
case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
connector->base.id, connector->name);
mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
break;
case DRM_EDID_PT_DIGITAL_CSYNC:
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
connector->base.id, connector->name);
mode->flags |= DRM_MODE_FLAG_CSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
break;
case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
break;
}
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
}
set_size:
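
Net effect of the two EDID hunks: drm_mode_detailed() no longer tries to translate the analog/digital composite-sync variants into DRM_MODE_FLAG_CSYNC flags; a timing without DRM_EDID_PT_SEPARATE_SYNC now just gets a debug note, and sync polarity is always derived from the hsync/vsync positive bits.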

Some files were not shown because too many files have changed in this diff