Merge branch 'linus' into locking/core, to resolve conflict

Conflicts:
	arch/arm/include/asm/percpu.h

As Stephen Rothwell noted, there's a conflict between this commit
in locking/core:

  a21ee6055c ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")

and this fresh upstream commit:

  aa54ea903a ("ARM: percpu.h: fix build error")

a21ee6055c is a simpler solution to the dependency problem and doesn't
further increase header hell - so this conflict resolution effectively
reverts aa54ea903a and uses the a21ee6055c solution.
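
For reference, the per-cpu approach of a21ee6055c boils down to roughly
the following (a sketch, not the exact patch):

  DECLARE_PER_CPU(int, hardirqs_enabled);
  DECLARE_PER_CPU(int, hardirq_context);

  #define lockdep_hardirqs_enabled()  this_cpu_read(hardirqs_enabled)
  #define lockdep_hardirq_context()   this_cpu_read(hardirq_context)

Keeping this state in per-cpu variables rather than in task_struct avoids
the header entanglement that aa54ea903a was working around.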

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 28cff52eae
Ingo Molnar, 2020-07-31 11:58:05 +02:00
350 changed files with 3725 additions and 3279 deletions


@ -198,6 +198,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com>
Mitesh shah <mshah@teja.com>


@ -16,7 +16,16 @@ Description: Allow the root user to disable/enable in runtime the clock
gating mechanism in Gaudi. Due to how Gaudi is built, the
clock gating needs to be disabled in order to access the
registers of the TPC and MME engines. This is sometimes needed
during debug by the user and hence the user needs this option
during debug by the user and hence the user needs this option.
The user can supply a bitmask value in which each bit represents
a different engine whose clock gating feature is to be disabled/enabled.
The bitmask is composed of 20 bits:
0 - 7 : DMA channels
8 - 11 : MME engines
12 - 19 : TPC engines
The bit position of a specific engine can be determined
using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values
are defined in the uapi habanalabs.h file, in enum gaudi_engine_id.
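
A minimal sketch of building such a bitmask (assuming the
GAUDI_ENGINE_ID_* values follow the 0-7/8-11/12-19 layout above):

  /* select DMA channel 0 (bit 0) and TPC engine 0 (bit 12) */
  unsigned int mask = (1u << GAUDI_ENGINE_ID_DMA_0) |
                      (1u << GAUDI_ENGINE_ID_TPC_0);  /* mask == 0x1001 */

The resulting value is what the user writes to this debugfs entry.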
What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
Date: Jan 2019


@ -47,6 +47,9 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: Phandle to the device SRAM
iommus:
maxItems: 1
memory-region:
description:
CMA pool to use for buffers allocation instead of the default


@ -378,6 +378,8 @@ examples:
- |
sound {
compatible = "simple-audio-card";
#address-cells = <1>;
#size-cells = <0>;
simple-audio-card,name = "rsnd-ak4643";
simple-audio-card,format = "left_j";
@ -391,10 +393,12 @@ examples:
"ak4642 Playback", "DAI1 Playback";
dpcmcpu: simple-audio-card,cpu@0 {
reg = <0>;
sound-dai = <&rcar_sound 0>;
};
simple-audio-card,cpu@1 {
reg = <1>;
sound-dai = <&rcar_sound 1>;
};
@ -418,6 +422,8 @@ examples:
- |
sound {
compatible = "simple-audio-card";
#address-cells = <1>;
#size-cells = <0>;
simple-audio-card,routing =
"pcm3168a Playback", "DAI1 Playback",
@ -426,6 +432,7 @@ examples:
"pcm3168a Playback", "DAI4 Playback";
simple-audio-card,dai-link@0 {
reg = <0>;
format = "left_j";
bitclock-master = <&sndcpu0>;
frame-master = <&sndcpu0>;
@ -439,22 +446,23 @@ examples:
};
simple-audio-card,dai-link@1 {
reg = <1>;
format = "i2s";
bitclock-master = <&sndcpu1>;
frame-master = <&sndcpu1>;
convert-channels = <8>; /* TDM Split */
sndcpu1: cpu@0 {
sndcpu1: cpu0 {
sound-dai = <&rcar_sound 1>;
};
cpu@1 {
cpu1 {
sound-dai = <&rcar_sound 2>;
};
cpu@2 {
cpu2 {
sound-dai = <&rcar_sound 3>;
};
cpu@3 {
cpu3 {
sound-dai = <&rcar_sound 4>;
};
codec {
@ -466,6 +474,7 @@ examples:
};
simple-audio-card,dai-link@2 {
reg = <2>;
format = "i2s";
bitclock-master = <&sndcpu2>;
frame-master = <&sndcpu2>;


@ -23,6 +23,7 @@ PTP hardware clock infrastructure for Linux
+ Ancillary clock features
- Time stamp external events
- Period output signals configurable from user space
- Low Pass Filter (LPF) access from user space
- Synchronization of the Linux system time via the PPS subsystem
PTP hardware clock kernel API
@ -94,3 +95,14 @@ Supported hardware
- Auxiliary Slave/Master Mode Snapshot (optional interrupt)
- Target Time (optional interrupt)
* Renesas (IDT) ClockMatrix™
- Up to 4 independent PHC channels
- Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2)
- Programmable output periodic signals
- Programmable inputs can time stamp external triggers
- Driver and/or hardware configuration through firmware (idtcm.bin)
- LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2))
- Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs)
- Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional)
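
A minimal sketch of how such LPF-backed phase control surfaces through
the PHC API, assuming the driver implements the adjphase callback of
struct ptp_clock_info (the ".adjPhase" referenced above), which user
space reaches via clock_adjtime() on the PTP clock device:

  static int example_adjphase(struct ptp_clock_info *ptp, s32 phase_ns)
  {
          /* program the requested phase offset into the hardware LPF */
          return 0;
  }

  static struct ptp_clock_info example_caps = {
          .owner    = THIS_MODULE,
          .name     = "example-phc",
          .adjphase = example_adjphase,
  };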


@ -26,7 +26,7 @@ Usage
1) Device creation & deletion
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847.
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
This creates a bareudp tunnel device which tunnels L3 traffic with ethertype
0x8847 (MPLS traffic). The destination port of the UDP header will be set to
@ -34,14 +34,21 @@ Usage
b) ip link delete bareudp0
2) Device creation with multiple proto mode enabled
2) Device creation with multiproto mode enabled
There are two ways to create a bareudp device for MPLS & IP with multiproto mode
enabled.
The multiproto mode allows bareudp tunnels to handle several protocols of the
same family. It is currently only available for IP and MPLS. This mode has to
be enabled explicitly with the "multiproto" flag.
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto
b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls
For an IPv4 tunnel the multiproto mode allows the tunnel to also handle
IPv6.
b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto
For MPLS, the multiproto mode allows the tunnel to handle both unicast
and multicast MPLS packets.
3) Device Usage


@ -6956,6 +6956,7 @@ M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <festevam@gmail.com>
R: Shengjiu Wang <shengjiu.wang@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
@ -9305,6 +9306,17 @@ F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
KCOV
R: Dmitry Vyukov <dvyukov@google.com>
R: Andrey Konovalov <andreyknvl@google.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kcov.rst
F: include/linux/kcov.h
F: include/uapi/linux/kcov.h
F: kernel/kcov.c
F: scripts/Makefile.kcov
KCSAN
M: Marco Elver <elver@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
@ -11240,7 +11252,7 @@ S: Maintained
F: drivers/crypto/atmel-ecc.*
MICROCHIP I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-at91-*.c
@ -11333,17 +11345,17 @@ F: drivers/iio/adc/at91-sama5d2_adc.c
F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h
MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Claudiu Beznea <claudiu.beznea@microchip.com>
S: Supported
F: drivers/power/reset/at91-sama5d2_shdwc.c
MICROCHIP SPI DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Tudor Ambarus <tudor.ambarus@microchip.com>
S: Supported
F: drivers/spi/spi-atmel.*
MICROCHIP SSC DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel-ssc.c
@ -14862,6 +14874,7 @@ F: drivers/s390/block/dasd*
F: include/linux/dasd_mod.h
S390 IOMMU (PCI)
M: Matthew Rosato <mjrosato@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@ -567,7 +567,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
ifneq ($(CROSS_COMPILE),)
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
@ -1754,7 +1754,7 @@ PHONY += descend $(build-dirs)
descend: $(build-dirs)
$(build-dirs): prepare
$(Q)$(MAKE) $(build)=$@ \
single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \
single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \
need-builtin=1 need-modorder=1
clean-dirs := $(addprefix _clean_, $(clean-dirs))


@ -342,7 +342,8 @@
comphy: phy@18300 {
compatible = "marvell,armada-380-comphy";
reg = <0x18300 0x100>;
reg-names = "comphy", "conf";
reg = <0x18300 0x100>, <0x18460 4>;
#address-cells = <1>;
#size-cells = <0>;


@ -397,7 +397,7 @@
pinctrl_usbotg: usbotggrp {
fsl,pins = <
MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
>;
};
@ -409,6 +409,7 @@
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
>;
};


@ -99,7 +99,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";


@ -213,7 +213,7 @@
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
status = "okay";
};


@ -402,7 +402,7 @@
&gbe0 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii-id";
phy-mode = "rgmii-rxid";
status = "okay";
};


@ -198,7 +198,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};


@ -117,7 +117,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};


@ -181,7 +181,7 @@
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};


@ -137,7 +137,7 @@ export TEXT_OFFSET
core-y += arch/arm64/
libs-y := arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
boot := arch/arm64/boot


@ -161,6 +161,7 @@
resets = <&ccu RST_BUS_VE>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
allwinner,sram = <&ve_sram 1>;
iommus = <&iommu 3>;
};
gpu: gpu@1800000 {


@ -454,10 +454,7 @@
status = "okay";
phy-mode = "2500base-x";
phys = <&cp1_comphy5 2>;
fixed-link {
speed = <2500>;
full-duplex;
};
managed = "in-band-status";
};
&cp1_spi1 {


@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
ifneq ($(COMPAT_GCC_TOOLCHAIN),)
CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)


@ -210,6 +210,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
#define atomic64_set_release(v, i) atomic64_set((v), (i))
static __inline__ s64
atomic64_read(const atomic64_t *v)
{


@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
}
__cmpxchg_called_with_bad_pointer();
return old;


@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)prev;
}
u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
{
unsigned long flags;
u8 prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return prev;
}
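
With the size==1 case wired into __cmpxchg() above, byte-sized users now
resolve to __cmpxchg_u8() instead of tripping
__cmpxchg_called_with_bad_pointer(). A hypothetical caller:

  u8 flag = 0;
  u8 prev = cmpxchg(&flag, 0, 1);  /* size 1 dispatches to __cmpxchg_u8() */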


@ -95,19 +95,40 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
phys_addr_t start;
unsigned long size;
if (initrd_start >= initrd_end) {
pr_info("initrd not found or empty");
goto disable;
}
if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
pr_err("initrd extends beyond end of memory");
/* Ignore the virtual address computed during device tree parsing */
initrd_start = initrd_end = 0;
if (!phys_initrd_size)
return;
/*
* Round the memory region to page boundaries as per free_initrd_mem()
* This allows us to detect whether the pages overlapping the initrd
* are in use, but more importantly, reserves the entire set of pages
* as we don't want these pages allocated for other purposes.
*/
start = round_down(phys_initrd_start, PAGE_SIZE);
size = phys_initrd_size + (phys_initrd_start - start);
size = round_up(size, PAGE_SIZE);
if (!memblock_is_region_memory(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
(u64)start, size);
goto disable;
}
size = initrd_end - initrd_start;
memblock_reserve(__pa_symbol(initrd_start), size);
if (memblock_is_region_reserved(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
(u64)start, size);
goto disable;
}
memblock_reserve(start, size);
/* Now convert initrd to virtual addresses */
initrd_start = (unsigned long)__va(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@ -126,33 +147,36 @@ void __init setup_bootmem(void)
{
struct memblock_region *reg;
phys_addr_t mem_size = 0;
phys_addr_t total_mem = 0;
phys_addr_t mem_start, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
/* Find the memory region containing the kernel */
for_each_memblock(memory, reg) {
phys_addr_t end = reg->base + reg->size;
if (reg->base <= vmlinux_start && vmlinux_end <= end) {
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
/*
* Remove memblock from the end of usable area to the
* end of region
*/
if (reg->base + mem_size < end)
memblock_remove(reg->base + mem_size,
end - reg->base - mem_size);
}
end = reg->base + reg->size;
if (!total_mem)
mem_start = reg->base;
if (reg->base <= vmlinux_start && vmlinux_end <= end)
BUG_ON(reg->size == 0);
total_mem = total_mem + reg->size;
}
BUG_ON(mem_size == 0);
/*
* Remove memblock from the end of usable area to the
* end of region
*/
mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
if (mem_start + mem_size < end)
memblock_remove(mem_start + mem_size,
end - mem_start - mem_size);
/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
set_max_mapnr(PFN_DOWN(mem_size));
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = max_pfn;
set_max_mapnr(max_low_pfn);
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();


@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
(__pa(((uintptr_t) kasan_early_shadow_pmd))),
__pgprot(_PAGE_TABLE)));
flush_tlb_all();
local_flush_tlb_all();
}
static void __init populate(void *start, void *end)
@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
__pgprot(_PAGE_TABLE)));
flush_tlb_all();
local_flush_tlb_all();
memset(start, 0, end - start);
}


@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR),
CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
NULL,


@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@ -33,13 +34,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
#if CONFIG_PGTABLE_LEVELS > 2
#define __pmd_free_tlb(tlb, pmdp, addr) \
do { \
struct page *page = virt_to_page(pmdp); \
pgtable_pmd_page_dtor(page); \
tlb_remove_page((tlb), page); \
} while (0);
#endif
#endif /* __ASM_SH_PGALLOC_H */


@ -199,7 +199,7 @@ syscall_trace_entry:
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
mov.l 2f, r10 ! Number of syscalls
mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3
bf syscall_call
mov #-ENOSYS, r0
@ -353,7 +353,7 @@ ENTRY(system_call)
tst r9, r8
bf syscall_trace_entry
!
mov.l 2f, r8 ! Number of syscalls
mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!
@ -392,7 +392,7 @@ syscall_exit:
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
2: .long NR_syscalls
6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave


@ -39,6 +39,7 @@
#define BT_MBI_UNIT_PMC 0x04
#define BT_MBI_UNIT_GFX 0x06
#define BT_MBI_UNIT_SMI 0x0C
#define BT_MBI_UNIT_CCK 0x14
#define BT_MBI_UNIT_USB 0x43
#define BT_MBI_UNIT_SATA 0xA3
#define BT_MBI_UNIT_PCIE 0xA6


@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable,
printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
unsigned int nbytes)
{
if (!user_mode(regs))
return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
/*
* Make sure userspace isn't trying to trick us into dumping kernel
* memory by pointing the userspace instruction pointer at it.
*/
if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
return -EINVAL;
return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}
/*
* There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
*
@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
bool bad_ip;
/*
* Make sure userspace isn't trying to trick us into dumping kernel
* memory by pointing the userspace instruction pointer at it.
*/
bad_ip = user_mode(regs) &&
__chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue,
OPCODE_BUFSIZE)) {
if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
printk("%sCode: Bad RIP value.\n", loglvl);
} else {
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"


@ -1074,7 +1074,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
copy_part(offsetof(struct fxregs_state, st_space), 128,
&xsave->i387.st_space, &kbuf, &offset_start, &count);
if (header.xfeatures & XFEATURE_MASK_SSE)
copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
copy_part(xstate_offsets[XFEATURE_SSE], 256,
&xsave->i387.xmm_space, &kbuf, &offset_start, &count);
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:


@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
* or a page fault), which can make frame pointers
* unreliable.
*/
if (IS_ENABLED(CONFIG_FRAME_POINTER))
return -EINVAL;
}
@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (unwind_error(&state))
return -EINVAL;
/* Success path for non-user tasks, i.e. kthreads and idle tasks */
if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
return 0;
}


@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
/*
* Find the orc_entry associated with the text address.
*
* Decrement call return addresses by one so they work for sibling
* calls and calls to noreturn functions.
* For a call frame (as opposed to a signal frame), state->ip points to
* the instruction after the call. That instruction's stack layout
* could be different from the call instruction's layout, for example
* if the call was to a noreturn function. So get the ORC data for the
* call instruction itself.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
if (!orc) {
@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
state->sp = task->thread.sp;
state->bp = READ_ONCE_NOCHECK(frame->bp);
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
state->signal = (void *)state->ip == ret_from_fork;
}
if (get_stack_info((unsigned long *)state->sp, state->task,


@ -358,6 +358,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
. = ALIGN(PAGE_SIZE);
*(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);


@ -57,7 +57,7 @@ static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(dst, len))
if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)


@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
mmap_read_unlock(mm);
mmput(mm);
mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);


@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
return next;
/* When no more children in primary, continue with secondary */
if (!IS_ERR_OR_NULL(fwnode->secondary))
if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
next = fwnode_get_next_child_node(fwnode->secondary, child);
return next;


@ -2865,6 +2865,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
return error;
}
/*
* Ignore timers tagged with no-reset and no-idle. These are likely in use,
* for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
* are needed, we could also look at the timer register configuration.
*/
static int sysc_check_active_timer(struct sysc *ddata)
{
if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
ddata->cap->type != TI_SYSC_OMAP4_TIMER)
return 0;
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
return -EBUSY;
return 0;
}
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@ -2921,6 +2939,10 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
return error;
error = sysc_check_active_timer(ddata);
if (error)
return error;
error = sysc_get_clocks(ddata);
if (error)
return error;


@ -814,7 +814,8 @@ static struct inode *devmem_inode;
#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
struct inode *inode = READ_ONCE(devmem_inode);
/* pairs with smp_store_release() in devmem_init_inode() */
struct inode *inode = smp_load_acquire(&devmem_inode);
/*
* Check that the initialization has completed. Losing the race
@ -1028,8 +1029,11 @@ static int devmem_init_inode(void)
return rc;
}
/* publish /dev/mem initialized */
WRITE_ONCE(devmem_inode, inode);
/*
* Publish /dev/mem initialized.
* Pairs with smp_load_acquire() in revoke_devmem().
*/
smp_store_release(&devmem_inode, inode);
return 0;
}
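
The change above is the standard publish/consume pairing. Generically (a
sketch, with init_foo() standing in for whatever initialization must be
visible before the pointer is published):

  struct foo *ptr;

  void publisher(struct foo *f)
  {
          init_foo(f);                 /* all initializing stores first... */
          smp_store_release(&ptr, f);  /* ...then publish the pointer */
  }

  struct foo *consumer(void)
  {
          /* paired acquire: returns NULL or a fully initialized object */
          return smp_load_acquire(&ptr);
  }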


@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&


@ -19,7 +19,7 @@
/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
#define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \
SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2)
#define DMTIMER_RESET_WAIT 100000
@ -44,6 +44,8 @@ struct dmtimer_systimer {
u8 ctrl;
u8 wakeup;
u8 ifctrl;
struct clk *fck;
struct clk *ick;
unsigned long rate;
};
@ -298,16 +300,20 @@ static void __init dmtimer_systimer_select_best(void)
}
/* Interface clocks are only available on some SoC variants */
static int __init dmtimer_systimer_init_clock(struct device_node *np,
static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
struct device_node *np,
const char *name,
unsigned long *rate)
{
struct clk *clock;
unsigned long r;
bool is_ick = false;
int error;
is_ick = !strncmp(name, "ick", 3);
clock = of_clk_get_by_name(np, name);
if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3))
if ((PTR_ERR(clock) == -EINVAL) && is_ick)
return 0;
else if (IS_ERR(clock))
return PTR_ERR(clock);
@ -320,6 +326,11 @@ static int __init dmtimer_systimer_init_clock(struct device_node *np,
if (!r)
return -ENODEV;
if (is_ick)
t->ick = clock;
else
t->fck = clock;
*rate = r;
return 0;
@ -339,7 +350,10 @@ static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
{
writel_relaxed(0, t->base + t->sysc);
if (!dmtimer_systimer_revision1(t))
return;
writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
}
static int __init dmtimer_systimer_setup(struct device_node *np,
@ -366,13 +380,13 @@ static int __init dmtimer_systimer_setup(struct device_node *np,
pr_err("%s: clock source init failed: %i\n", __func__, error);
/* For ti-sysc, we have timer clocks at the parent module level */
error = dmtimer_systimer_init_clock(np->parent, "fck", &rate);
error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
if (error)
goto err_unmap;
t->rate = rate;
error = dmtimer_systimer_init_clock(np->parent, "ick", &rate);
error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
if (error)
goto err_unmap;
@ -496,12 +510,18 @@ static void omap_clockevent_idle(struct clock_event_device *evt)
struct dmtimer_systimer *t = &clkevt->t;
dmtimer_systimer_disable(t);
clk_disable(t->fck);
}
static void omap_clockevent_unidle(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
int error;
error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
@ -570,8 +590,8 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
3, /* Timer internal resynch latency */
0xffffffff);
if (of_device_is_compatible(np, "ti,am33xx") ||
of_device_is_compatible(np, "ti,am43")) {
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
dev->suspend = omap_clockevent_idle;
dev->resume = omap_clockevent_unidle;
}
@ -616,12 +636,18 @@ static void dmtimer_clocksource_suspend(struct clocksource *cs)
clksrc->loadval = readl_relaxed(t->base + t->counter);
dmtimer_systimer_disable(t);
clk_disable(t->fck);
}
static void dmtimer_clocksource_resume(struct clocksource *cs)
{
struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
struct dmtimer_systimer *t = &clksrc->t;
int error;
error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(clksrc->loadval, t->base + t->counter);
@ -653,8 +679,8 @@ static int __init dmtimer_clocksource_init(struct device_node *np)
dev->mask = CLOCKSOURCE_MASK(32);
dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
if (of_device_is_compatible(np, "ti,am33xx") ||
of_device_is_compatible(np, "ti,am43")) {
/* Unlike for clockevent, legacy code sets suspend only for am4 */
if (of_machine_is_compatible("ti,am43")) {
dev->suspend = dmtimer_clocksource_suspend;
dev->resume = dmtimer_clocksource_resume;
}


@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
case PF_INET:
if (likely(!inet_sk(sk)->inet_rcv_saddr))
return ndev;
ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:


@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
&record_type);
if (err)
goto out_err;
/* Avoid appending tls handshake, alert to tls data */
if (skb)
tx_skb_finalize(skb);
}
recordsz = size;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = record_type;
if (skb)
ULP_SKB_CB(skb)->ulp.tls.type = record_type;
}
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||


@ -356,10 +356,7 @@ static struct pstore_info efi_pstore_info = {
static __init int efivars_pstore_init(void)
{
if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
return 0;
if (!efivars_kobject())
if (!efivars_kobject() || !efivar_supports_writes())
return 0;
if (efivars_pstore_disable)


@ -176,11 +176,13 @@ static struct efivar_operations generic_ops;
static int generic_ops_register(void)
{
generic_ops.get_variable = efi.get_variable;
generic_ops.set_variable = efi.set_variable;
generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
generic_ops.set_variable = efi.set_variable;
generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
}
return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}
@ -382,7 +384,8 @@ static int __init efisubsys_init(void)
return -ENOMEM;
}
if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
efivar_ssdt_load();
error = generic_ops_register();
if (error)
@ -416,7 +419,8 @@ static int __init efisubsys_init(void)
err_remove_group:
sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);


@ -680,11 +680,8 @@ int efivars_sysfs_init(void)
struct kobject *parent_kobj = efivars_kobject();
int error = 0;
if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
return -ENODEV;
/* No efivars has been registered yet */
if (!parent_kobj)
if (!parent_kobj || !efivar_supports_writes())
return 0;
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,


@ -6,8 +6,7 @@
# enabled, even if doing so doesn't break the build.
#
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small \
$(call cc-option,-maccumulate-outgoing-args)
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \


@ -44,7 +44,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
*addr = ALIGN((unsigned long)alloc_addr, align);
if (slack > 0) {
int l = (alloc_addr % align) / EFI_PAGE_SIZE;
int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE;
if (l) {
efi_bs_call(free_pages, alloc_addr, slack - l + 1);


@ -121,23 +121,6 @@ static unsigned long get_dram_base(void)
return membase;
}
/*
* This function handles the architecture specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
* must be reserved. On failure it is required to free all
* allocations it has made.
*/
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
unsigned long dram_base,
efi_loaded_image_t *image);
asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
unsigned long fdt_addr,
unsigned long fdt_size);
/*
* EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
* that is described in the PE/COFF header. Most of the code is the same


@ -776,6 +776,22 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long *load_size,
unsigned long soft_limit,
unsigned long hard_limit);
/*
* This function handles the architecture specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
* must be reserved. On failure it is required to free all
* allocations it has made.
*/
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
unsigned long dram_base,
efi_loaded_image_t *image);
asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
unsigned long fdt_addr,
unsigned long fdt_size);
void efi_handle_post_ebs_state(void);


@ -8,6 +8,7 @@
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <asm/efi.h>
#include <asm/e820/types.h>
@ -361,8 +362,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
int options_size = 0;
efi_status_t status;
char *cmdline_ptr;
unsigned long ramdisk_addr;
unsigned long ramdisk_size;
efi_system_table = sys_table_arg;
@ -390,8 +389,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr = &boot_params->hdr;
/* Copy the second sector to boot_params */
memcpy(&hdr->jump, image_base + 512, 512);
/* Copy the setup header from the second sector to boot_params */
memcpy(&hdr->jump, image_base + 512,
sizeof(struct setup_header) - offsetof(struct setup_header, jump));
/*
* Fill out some of the header fields ourselves because the


@ -1229,3 +1229,9 @@ out:
return rv;
}
EXPORT_SYMBOL_GPL(efivars_unregister);
int efivar_supports_writes(void)
{
return __efivars && __efivars->ops->set_variable;
}
EXPORT_SYMBOL_GPL(efivar_supports_writes);


@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
if (err)
goto err_register;
if (err) {
kobject_put(&entry->kobj);
return err;
}
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
err_add_raw:
kobject_del(&entry->kobj);
err_register:
kfree(entry);
return err;
}


@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev)
* on this port and minimum soft reset pulse width has elapsed.
* Driver polls port_soft_reset_ack to determine if reset done by HW.
*/
if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
dev_err(&pdev->dev, "timeout, fail to reset device\n");
return -ETIMEDOUT;


@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
struct dfl_fpga_cdev *cdev = drvdata->cdev;
int ret = 0;
if (!num_vfs) {
/*
@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
dfl_fpga_cdev_config_ports_pf(cdev);
} else {
int ret;
/*
* before enable SRIOV, put released ports into VF access mode
* first of all.


@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
struct drm_amdgpu_info_device dev_info;
uint64_t vm_size;
memset(&dev_info, 0, sizeof(dev_info));
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
dev_info.external_rev = adev->external_rev_id;


@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* the same resource. If we have a new DC context as part of
* the DM atomic state from validation we need to free it and
* retain the existing one instead.
*
* Furthermore, since the DM atomic state only contains the DC
* context and can safely be annulled, we can free the state
* and clear the associated private object now to free
* some memory and avoid a possible use-after-free later.
*/
struct dm_atomic_state *new_dm_state, *old_dm_state;
new_dm_state = dm_atomic_get_new_state(state);
old_dm_state = dm_atomic_get_old_state(state);
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
if (new_dm_state && old_dm_state) {
if (new_dm_state->context)
dc_release_state(new_dm_state->context);
if (obj->funcs == adev->dm.atomic_obj.funcs) {
int j = state->num_private_objs-1;
new_dm_state->context = old_dm_state->context;
dm_atomic_destroy_state(obj,
state->private_objs[i].state);
if (old_dm_state->context)
dc_retain_state(old_dm_state->context);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
* before the array can safely be truncated.
*/
if (i != j)
state->private_objs[i] =
state->private_objs[j];
state->private_objs[j].ptr = NULL;
state->private_objs[j].state = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
state->num_private_objs = j;
break;
}
}
}


@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
else
else {
vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
(dep_table->entries[i - 1].vddc -
(uint16_t)VDDC_VDDCI_DELTA));
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
}
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;


@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;


@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);


@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
struct drm_panel *panel;
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
return -EINVAL;
}
ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
&panel_bridge);
if (ret)


@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
unsigned int y;
for (y = clip->y1; y < clip->y2; y++) {
memcpy(dst, src, len);
if (!fb_helper->dev->mode_config.fbdev_use_iomem)
memcpy(dst, src, len);
else
memcpy_toio((void __iomem *)dst, src, len);
src += fb->pitches[0];
dst += fb->pitches[0];
}


@ -871,9 +871,6 @@ err:
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_put_unlocked(obj);
if (ret)
return ret;
goto err;
args->handle = handle;
args->size = obj->size;
return 0;
err:
drm_gem_object_put_unlocked(obj);
return ret;
}
/**


@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
}
}
tr.len = chunk;
tr.len = chunk * 2;
len -= chunk;
ret = spi_sync(spi, &m);


@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
* configurations by passing the endpoints explicitly to
* drm_of_lvds_get_dual_link_pixel_order().
*/
if (!current_pt || pixels_type != current_pt) {
of_node_put(remote_port);
if (!current_pt || pixels_type != current_pt)
return -EINVAL;
}
}
return pixels_type;


@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip)
int lima_pp_bcast_resume(struct lima_ip *ip)
{
/* PP has been reset by individual PP resume */
ip->data.async_reset = false;
return 0;
}


@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
if (!mcde->video_mode)
/* Send a single frame using software sync */
mcde_display_send_one_frame(mcde);
if (!mcde->video_mode) {
/*
* Send a single frame using software sync if the flow
* is not active yet.
*/
if (mcde->flow_active == 0)
mcde_display_send_one_frame(mcde);
}
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*


@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (core->assign_windows) {
core->func->wndw.owner(core);
core->func->update(core, interlock, false);
nv50_disp_atomic_commit_core(state, interlock);
core->assign_windows = false;
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev)
if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
if (disp->disp->object.oclass >= GF110_DISP)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;


@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm,
uint32_t *tile_mode,
uint8_t *kind)
{
struct nouveau_display *disp = nouveau_display(drm->dev);
BUG_ON(!tile_mode || !kind);
if (modifier == DRM_FORMAT_MOD_LINEAR) {
@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm,
* Extract the block height and kind from the corresponding
* modifier fields. See drm_fourcc.h for details.
*/
if ((modifier & (0xffull << 12)) == 0ull) {
/* Legacy modifier. Translate to this dev's 'kind.' */
modifier |= disp->format_modifiers[0] & (0xffull << 12);
}
*tile_mode = (uint32_t)(modifier & 0xF);
*kind = (uint8_t)((modifier >> 12) & 0xFF);
@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
}
}
static const u64 legacy_modifiers[] = {
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
DRM_FORMAT_MOD_INVALID
};
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,
@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
(disp->format_modifiers[mod] != modifier);
mod++);
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
for (mod = 0;
(legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
(legacy_modifiers[mod] != modifier);
mod++);
if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
}
nouveau_decode_mod(drm, modifier, tile_mode, kind);


@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;
mode_cmd.width = sizes->surface_width;
@ -590,6 +590,7 @@ fini:
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
drm->fbcon = NULL;
return ret;
}


@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
{
struct nvkm_ior *ior;
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
!ior->asy.outp && ior->arm.outp == outp)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
/*XXX: For various complicated reasons, we can't outright switch
* the boot-time OR on the first modeset without some fairly
* invasive changes.
*
* The systems that were fixed by modifying the OR selection
* code to account for HDA support shouldn't regress here as
* the HDA-enabled ORs match the relevant output's pad macro
* index, and the firmware seems to select an OR this way.
*
* This warning is to make it obvious if that proves wrong.
*/
WARN_ON(hda && !ior->func->hda.hpd);
return nvkm_outp_acquire_ior(outp, user, ior);
}
}
/* If we don't need HDA, first try to acquire an OR that doesn't
* support it to leave free the ones that do.
*/


@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = {
static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.clock = 157000,
.hdisplay = 1200,
.hsync_start = 1200 + 80,
.hsync_end = 1200 + 80 + 24,
.htotal = 1200 + 80 + 24 + 36,
.hsync_start = 1200 + 60,
.hsync_end = 1200 + 60 + 24,
.htotal = 1200 + 60 + 24 + 56,
.vdisplay = 1920,
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,


@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = {
.height = 165,
},
.delay = {
.hpd_absent_delay = 200,
/*
* When power is first given to the panel there's a short
* spike on the HPD line. It was explained that this spike
* lasts until the TCON data download is complete. On
* one system this was measured at 8 ms. We'll put 15 ms
* in the prepare delay just to be safe and take it away
* from the hpd_absent_delay (which would otherwise be 200 ms)
* to handle this. That means:
* - If HPD isn't hooked up you still have 200 ms delay.
* - If HPD is hooked up we won't try to look at it for the
* first 15 ms.
*/
.prepare = 15,
.hpd_absent_delay = 185,
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,


@ -260,7 +260,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
unsigned long reg;
reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
if (reg & SUN4I_HDMI_HPD_HIGH) {
if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}


@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
/* Read data if receive data valid is set */
while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
CDNS_I2C_SR_RXDV) {
/*
* Clear hold bit that was set for FIFO control if
* RX data left is less than FIFO depth, unless
* repeated start is selected.
*/
if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
!id->bus_hold_flag)
cdns_i2c_clear_bus_hold(id);
if (id->recv_count > 0) {
*(id->p_recv_buf)++ =
cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
id->recv_count--;
id->curr_recv_count--;
/*
* Clear hold bit that was set for FIFO control
* if RX data left is less than or equal to
* FIFO DEPTH unless repeated start is selected
*/
if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
!id->bus_hold_flag)
cdns_i2c_clear_bus_hold(id);
} else {
dev_err(id->adap.dev.parent,
"xfer_size reg rollover. xfer aborted!\n");
@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
if (id->send_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
else
ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */


@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_READ, m_param);
if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_READ, m_param);
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);


@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
synchronize_irq(priv->irq);
priv->slave = NULL;
@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {


@ -3676,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
return ret;
}
cm_id_priv->id.state = IB_CM_IDLE;
spin_lock_irq(&cm.lock);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
spin_unlock_irq(&cm.lock);
return 0;
}


@ -649,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
{
struct ib_uverbs_file *ufile = attrs->ufile;
/* alloc_commit consumes the uobj kref */
uobj->uapi_object->type_class->alloc_commit(uobj);
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
spin_lock_irq(&ufile->uobjects_lock);
@ -661,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
/* matches atomic_set(-1) in alloc_uobj */
atomic_set(&uobj->usecnt, 0);
/* alloc_commit consumes the uobj kref */
uobj->uapi_object->type_class->alloc_commit(uobj);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
}


@ -3954,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
return 0;
}
static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
const struct ib_qp_attr *attr)
{
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
return IB_MTU_4096;
return attr->path_mtu;
}
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
@ -3965,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
enum ib_mtu mtu;
u8 port_num;
u64 *mtts;
u8 *dmac;
@ -4062,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
/* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
mtu = get_mtu(ibqp, attr);
if (attr_mask & IB_QP_PATH_MTU) {
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, mtu);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
}
#define MAX_LP_MSG_LEN 65536
/* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
else if (attr_mask & IB_QP_PATH_MTU)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,


@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = length;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;


@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
/*
* All work on the prefetch list must be completed, xa_erase() prevented
* new work from being created.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
/*
* At this point it is forbidden for any other thread to enter
* pagefault_mr() on this imr. It is already forbidden to call
* pagefault_mr() on an implicit child. Due to this, additions to
* implicit_children are prevented.
*/
/*
* Block destroy_unused_implicit_child_mr() from incrementing
* num_deferred_work.
*/
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
* num_deferred_work can only be incremented inside the odp_srcu, or
* under xa_lock while the child is in the xarray. Thus at this point
* it is only decreasing, and all work holding it is now on the wq.
* Wait for any concurrent destroy_unused_implicit_child_mr() to
* complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
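
Note: the destroy path now waits on num_deferred_work twice, once so prefetch work drains before the children are erased, and once more for any destroy_unused_implicit_child_mr() that raced with the erase. The underlying primitive is a counter plus a waitqueue; a userspace sketch with a condition variable standing in for wait_event()/wake_up() (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int deferred;	/* kernel side: atomic_t + wait queue */

static void work_begin(void)
{
	pthread_mutex_lock(&lock);
	deferred++;
	pthread_mutex_unlock(&lock);
}

static void work_end(void)
{
	pthread_mutex_lock(&lock);
	if (--deferred == 0)
		pthread_cond_broadcast(&cond);	/* wake_up(&q_deferred_work) */
	pthread_mutex_unlock(&lock);
}

static void drain(void)	/* wait_event(q, !atomic_read(&deferred)) */
{
	pthread_mutex_lock(&lock);
	while (deferred)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	work_end();
	return NULL;
}

int main(void)	/* build with -pthread */
{
	pthread_t t;

	work_begin();
	pthread_create(&t, NULL, worker, NULL);
	drain();	/* returns only after the worker has run */
	pthread_join(t, NULL);
	puts("drained");
	return 0;
}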

View File

@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
xa_lock(&table->array);
xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
xa_unlock_irq(&table->array);
return srq;
}

View File

@ -243,6 +243,7 @@ static int aggregate_requests(struct icc_node *node)
{
struct icc_provider *p = node->provider;
struct icc_req *r;
u32 avg_bw, peak_bw;
node->avg_bw = 0;
node->peak_bw = 0;
@ -251,9 +252,14 @@ static int aggregate_requests(struct icc_node *node)
p->pre_aggregate(node);
hlist_for_each_entry(r, &node->req_list, req_node) {
if (!r->enabled)
continue;
p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
if (r->enabled) {
avg_bw = r->avg_bw;
peak_bw = r->peak_bw;
} else {
avg_bw = 0;
peak_bw = 0;
}
p->aggregate(node, r->tag, avg_bw, peak_bw,
&node->avg_bw, &node->peak_bw);
}
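
Note: disabled requests are no longer skipped; they are aggregated with zero bandwidth, so providers that keep per-request or per-tag state in pre_aggregate()/aggregate() still see every request. A toy aggregator showing the effect (sum-of-averages / max-of-peaks is illustrative, not what every provider does):

#include <stdio.h>

struct req { int enabled; unsigned int avg_bw, peak_bw; };

static void aggregate(unsigned int avg, unsigned int peak,
		      unsigned int *node_avg, unsigned int *node_peak)
{
	*node_avg += avg;		/* sum of averages */
	if (peak > *node_peak)
		*node_peak = peak;	/* max of peaks */
}

int main(void)
{
	struct req reqs[] = { {1, 100, 200}, {0, 400, 800}, {1, 50, 300} };
	unsigned int node_avg = 0, node_peak = 0;

	for (unsigned int i = 0; i < 3; i++) {
		/* disabled requests still go through, but with 0/0 */
		unsigned int avg = reqs[i].enabled ? reqs[i].avg_bw : 0;
		unsigned int peak = reqs[i].enabled ? reqs[i].peak_bw : 0;

		aggregate(avg, peak, &node_avg, &node_peak);
	}
	printf("avg %u peak %u\n", node_avg, node_peak);	/* avg 150 peak 300 */
	return 0;
}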

View File

@ -197,13 +197,13 @@ DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS,
DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);

View File

@ -65,6 +65,7 @@ struct qcom_iommu_domain {
struct mutex init_mutex; /* Protects iommu pointer */
struct iommu_domain domain;
struct qcom_iommu_dev *iommu;
struct iommu_fwspec *fwspec;
};
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
return dev_iommu_priv_get(dev);
}
static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
struct qcom_iommu_dev *qcom_iommu = d->iommu;
if (!qcom_iommu)
return NULL;
return qcom_iommu->ctxs[asid - 1];
@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
static void qcom_iommu_tlb_sync(void *cookie)
{
struct iommu_fwspec *fwspec;
struct device *dev = cookie;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
unsigned int val, ret;
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
static void qcom_iommu_tlb_inv_context(void *cookie)
{
struct device *dev = cookie;
struct iommu_fwspec *fwspec;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
}
@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
struct device *dev = cookie;
struct iommu_fwspec *fwspec;
struct qcom_iommu_domain *qcom_domain = cookie;
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i, reg;
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
fwspec = dev_iommu_fwspec_get(dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
size_t s = size;
iova = (iova >> 12) << 12;
@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
};
qcom_domain->iommu = qcom_iommu;
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
qcom_domain->fwspec = fwspec;
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
if (!pgtbl_ops) {
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
ret = -ENOMEM;
@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
domain->geometry.force_aperture = true;
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
if (!ctx->secure_init) {
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

View File

@ -2420,7 +2420,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@ -2481,7 +2481,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
if (unlikely(dm_suspended(ic->ti)))
if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);

View File

@ -143,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
@ -2408,6 +2409,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@ -2766,7 +2768,9 @@ retry:
if (r)
goto out_unlock;
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@ -2863,7 +2867,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@ -3024,6 +3030,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
static int dm_post_suspending_md(struct mapped_device *md)
{
return test_bit(DMF_POST_SUSPENDING, &md->flags);
}
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@ -3040,6 +3051,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti)
{
return dm_post_suspending_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));
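
Note: dm_post_suspending() lets a target's background worker tell "postsuspend is in progress" apart from "fully suspended": DMF_POST_SUSPENDING is set immediately before dm_table_postsuspend_targets() and cleared afterwards on the resume-capable paths, which is exactly the window the dm-integrity hunks above now test. A compact sketch of the flag window (the harness is illustrative; only the flag name mirrors the diff):

#include <stdbool.h>
#include <stdio.h>

#define DMF_POST_SUSPENDING 8	/* bit number, as in the hunk above */

static unsigned long flags;

static bool dm_post_suspending(void)
{
	return flags & (1ul << DMF_POST_SUSPENDING);
}

static void worker(void)
{
	/* a background writer scheduled in this window backs off instead
	 * of touching metadata that postsuspend is about to quiesce */
	if (dm_post_suspending()) {
		puts("worker: post-suspending, bail out");
		return;
	}
	puts("worker: device live, do work");
}

int main(void)
{
	worker();					/* device live */
	flags |= 1ul << DMF_POST_SUSPENDING;		/* set_bit before postsuspend */
	worker();					/* worker backs off */
	flags &= ~(1ul << DMF_POST_SUSPENDING);		/* clear_bit after */
	return 0;
}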

View File

@ -499,11 +499,19 @@ static int validate_queue_index(struct hl_device *hdev,
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
/* This must be checked here to prevent out-of-bounds access to
* hw_queues_props array
*/
if (chunk->queue_index >= HL_MAX_QUEUES) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
}
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if ((chunk->queue_index >= HL_MAX_QUEUES) ||
(hw_queue_prop->type == QUEUE_TYPE_NA)) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
dev_err(hdev->dev, "Queue index %d is not applicable\n",
chunk->queue_index);
return -EINVAL;
}
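
Note: the combined condition was split because the old code computed &asic->hw_queues_props[chunk->queue_index] before validating queue_index, so the range check has to come first, before the array is touched at all. The generic shape of the pattern (array and values illustrative):

#include <stdio.h>

#define NQUEUES 8

static const int queue_type[NQUEUES] = { 1, 1, 0, 1, 0, 0, 1, 1 };

static int validate(unsigned int idx)
{
	if (idx >= NQUEUES)		/* 1. range check first ... */
		return -1;
	if (queue_type[idx] == 0)	/* 2. ... only then index the array */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", validate(3), validate(2), validate(99));	/* 0 -1 -1 */
	return 0;
}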

View File

@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_reg = i2c_reg;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, (long *) val);
0, (long *) val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
if (*ppos)
return 0;
sprintf(tmp_buf, "%d\n", hdev->clock_gating);
sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
strlen(tmp_buf) + 1);
@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
u64 value;
ssize_t rc;
if (atomic_read(&hdev->in_reset)) {
@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
return 0;
}
rc = kstrtouint_from_user(buf, count, 10, &value);
rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
if (value) {
hdev->clock_gating = 1;
if (hdev->asic_funcs->enable_clock_gating)
hdev->asic_funcs->enable_clock_gating(hdev);
} else {
if (hdev->asic_funcs->disable_clock_gating)
hdev->asic_funcs->disable_clock_gating(hdev);
hdev->clock_gating = 0;
}
hdev->clock_gating_mask = value;
hdev->asic_funcs->set_clock_gating(hdev);
return count;
}
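
Note: the debugfs file now takes a 64-bit hex bitmask (kstrtoull_from_user() with base 16) instead of a 0/1 decimal, matching the engine bit layout documented in debugfs-driver-habanalabs (bits 0-7 DMA, 8-11 MME, 12-19 TPC). A userspace sketch of the same parse-and-test logic using strtoull (the echoed value is just an example):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* e.g. what "echo 0x10f > clk_gate" would hand the driver:
	 * DMA channels 0-3 plus the first MME engine */
	const char *user_buf = "0x10f";
	uint64_t mask = strtoull(user_buf, NULL, 16);

	for (int engine = 0; engine < 20; engine++)
		if (mask & (1ULL << engine))
			printf("clock gating enabled for engine %d\n", engine);

	printf("mask = 0x%" PRIx64 "\n", mask);
	return 0;
}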

View File

@ -608,7 +608,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
hdev->in_debug = 0;
if (!hdev->hard_reset_pending)
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
goto out;
}

View File

@ -61,7 +61,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
@ -144,7 +144,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, &result);
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@ -183,7 +183,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@ -204,7 +204,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
sizeof(test_pkt), 0, &result);
if (!rc) {
if (result != ARMCP_PACKET_FENCE_VAL)
@ -248,7 +248,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
sizeof(hb_pkt), 0, &result);
if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
rc = -EIO;

View File

@ -80,6 +80,7 @@
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
@ -98,6 +99,11 @@
#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
BIT(GAUDI_ENGINE_ID_MME_0) |\
BIT(GAUDI_ENGINE_ID_MME_2) |\
GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
[GAUDI_PCI_DMA_1] = 0,
[GAUDI_PCI_DMA_2] = 1,
[GAUDI_PCI_DMA_3] = 5,
[GAUDI_HBM_DMA_1] = 2,
[GAUDI_HBM_DMA_2] = 3,
[GAUDI_HBM_DMA_3] = 4,
[GAUDI_HBM_DMA_4] = 6,
[GAUDI_HBM_DMA_5] = 7
[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
[GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5,
[GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
[GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
[GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6,
[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7
};
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
@ -1819,7 +1825,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_rate_limiter(hdev);
gaudi_disable_clock_gating(hdev);
hdev->asic_funcs->disable_clock_gating(hdev);
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
@ -2531,46 +2537,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev)
WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}
static void gaudi_enable_clock_gating(struct hl_device *hdev)
static void gaudi_set_clock_gating(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 qman_offset;
int i;
if (!hdev->clock_gating)
return;
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)
return;
/* In case we are in a debug session, don't enable the clock gate
* as it may interfere
*/
if (hdev->in_debug)
return;
for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i]))))
continue;
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_UPPER_CP_CGM_PWR_GATE_EN);
}
for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i]))))
continue;
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME0_QM_CGM_CFG,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
continue;
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
QMAN_CGM1_PWR_GATE_EN);
WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
@ -2663,7 +2678,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_stop_hbm_dma_qmans(hdev);
gaudi_stop_pci_dma_qmans(hdev);
gaudi_disable_clock_gating(hdev);
hdev->asic_funcs->disable_clock_gating(hdev);
msleep(wait_timeout_ms);
@ -3003,7 +3018,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_tpc_qmans(hdev);
gaudi_enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
gaudi_enable_timestamp(hdev);
@ -3112,7 +3127,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_HBM_DMA | HW_CAP_PLL |
HW_CAP_MMU |
HW_CAP_SRAM_SCRAMBLER |
HW_CAP_HBM_SCRAMBLER);
HW_CAP_HBM_SCRAMBLER |
HW_CAP_CLK_GATE);
memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
}
@ -3463,6 +3480,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
return 0;
}
if (!timeout)
timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
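
Note: a timeout of 0 now means "use the ASIC default" (GAUDI_MSG_TO_CPU_TIMEOUT_USEC here, GOYA_MSG_TO_CPU_TIMEOUT_USEC in the goya hunk further down), which is why the callers throughout this series switch from HL_DEVICE_TIMEOUT_USEC to 0. The convention in isolation (values illustrative):

#include <stdio.h>

#define DEFAULT_TIMEOUT_USEC 4000000	/* 4s, the per-ASIC default */

static long send_msg(long timeout_usec)
{
	if (!timeout_usec)		/* 0 selects the ASIC default */
		timeout_usec = DEFAULT_TIMEOUT_USEC;
	return timeout_usec;
}

int main(void)
{
	printf("%ld %ld\n", send_msg(0), send_msg(1000000));	/* 4000000 1000000 */
	return 0;
}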
@ -3865,6 +3885,12 @@ static int gaudi_validate_cb(struct hl_device *hdev,
rc = -EPERM;
break;
case PACKET_WREG_BULK:
dev_err(hdev->dev,
"User not allowed to use WREG_BULK\n");
rc = -EPERM;
break;
case PACKET_LOAD_AND_EXE:
rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
(struct packet_load_and_exe *) user_pkt);
@ -3880,7 +3906,6 @@ static int gaudi_validate_cb(struct hl_device *hdev,
break;
case PACKET_WREG_32:
case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_REPEAT:
@ -4521,13 +4546,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
*val = RREG32(addr - CFG_BASE);
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
*val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
@ -4563,13 +4593,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
WREG32(addr - CFG_BASE, val);
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
@ -4605,7 +4640,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
@ -4615,6 +4654,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = (((u64) val_h) << 32) | val_l;
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
*val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
@ -4651,7 +4691,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
@ -4660,6 +4704,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
WREG32(addr + sizeof(u32) - CFG_BASE,
upper_32_bits(val));
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
@ -4881,7 +4926,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
@ -5262,7 +5307,7 @@ static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
}
if (disable_clock_gating) {
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
}
@ -5749,7 +5794,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
/* Clear interrupts */
WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@ -6265,7 +6310,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
if (s)
seq_puts(s, "\n");
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@ -6366,7 +6411,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
dev_err(hdev->dev,
"Timeout while waiting for TPC%d icache prefetch\n",
tpc_id);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
return -EIO;
}
@ -6395,7 +6440,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
1000,
kernel_timeout);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
if (rc) {
@ -6736,7 +6781,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
.send_heartbeat = gaudi_send_heartbeat,
.enable_clock_gating = gaudi_enable_clock_gating,
.set_clock_gating = gaudi_set_clock_gating,
.disable_clock_gating = gaudi_disable_clock_gating,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
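
Note: gaudi_set_clock_gating() replaces the old all-or-nothing enable with a per-engine decision: gating is turned on only where BIT_ULL(engine_id) is set in hdev->clock_gating_mask, and gaudi_dma_assignment[] is switched to GAUDI_ENGINE_ID_DMA_* values precisely so the mask bit and the user-visible engine ID line up. The selection logic reduced to a sketch (engine IDs illustrative, following the documented bit layout):

#include <stdint.h>
#include <stdio.h>

/* bit positions mirror the documented layout: 0-7 DMA, 8-11 MME, 12-19 TPC */
enum { ENGINE_ID_MME_0 = 8, ENGINE_ID_MAX = 20 };

static void set_clock_gating(uint64_t mask)
{
	for (int id = 0; id < ENGINE_ID_MAX; id++) {
		if (!(mask & (1ULL << id)))
			continue;	/* bit clear: leave gating disabled */
		printf("enable clock gating on engine %d\n", id);
	}
}

int main(void)
{
	/* all DMA channels plus the first MME engine */
	set_clock_gating(0xffULL | (1ULL << ENGINE_ID_MME_0));
	return 0;
}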

View File

@ -88,6 +88,7 @@
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@ -2830,6 +2831,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
return 0;
}
if (!timeout)
timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@ -4431,8 +4435,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
HL_DEVICE_TIMEOUT_USEC, &result);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@ -4464,8 +4468,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, &result);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@ -5028,14 +5032,14 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
static void goya_enable_clock_gating(struct hl_device *hdev)
static void goya_set_clock_gating(struct hl_device *hdev)
{
/* clock gating not supported in Goya */
}
static void goya_disable_clock_gating(struct hl_device *hdev)
{
/* clock gating not supported in Goya */
}
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
@ -5259,7 +5263,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
.enable_clock_gating = goya_enable_clock_gating,
.set_clock_gating = goya_set_clock_gating,
.disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,

View File

@ -578,8 +578,9 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
* @enable_clock_gating: enable clock gating for reducing power consumption.
* @disable_clock_gating: disable clock for accessing registers on HBW.
@set_clock_gating: enable/disable clock gating per engine, according to
the clock gating mask in hdev
* @disable_clock_gating: disable clock gating completely
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
@ -587,7 +588,11 @@ enum hl_pll_frequency {
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @send_cpu_message: send buffer to ArmCP.
@send_cpu_message: send message to F/W. If the message times out, the
driver will eventually reset the device. The timeout can
be set by the calling function, or it can be 0, in which
case the default timeout of the specific ASIC is used
* @get_hw_state: retrieve the H/W state
* @pci_bars_map: Map PCI BARs.
* @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
@ -680,7 +685,7 @@ struct hl_asic_funcs {
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
void (*enable_clock_gating)(struct hl_device *hdev);
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
@ -1398,6 +1403,9 @@ struct hl_device_idle_busy_ts {
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, the driver will restore
* this value and update the F/W after the re-initialization
@clock_gating_mask: bitmask that represents which engines have clock
gating enabled. See debugfs-driver-habanalabs for
details.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @cs_active_cnt: number of active command submissions on this device (active
@ -1425,7 +1433,6 @@ struct hl_device_idle_busy_ts {
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @mmu_huge_page_opt: is MMU huge pages optimization enabled.
* @clock_gating: is clock gating enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
* @in_debug: is device under debug. This, together with fpriv_list, enforces
@ -1493,6 +1500,7 @@ struct hl_device {
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
int cs_active_cnt;
@ -1514,7 +1522,6 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 pmmu_huge_range;
u8 init_done;
u8 clock_gating;
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;

View File

@ -232,7 +232,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->fw_loading = 1;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
hdev->clock_gating = 1;
hdev->clock_gating_mask = ULONG_MAX;
hdev->reset_pcilink = 0;
hdev->axi_drain = 0;

View File

@ -10,7 +10,6 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
int hl_build_hwmon_channel_info(struct hl_device *hdev,
@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -400,7 +399,7 @@ int hl_get_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,

View File

@ -9,9 +9,6 @@
#include <linux/pci.h>
#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct armcp_packet pkt;
@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, &result);
0, &result);
if (rc) {
dev_err(hdev->dev,
@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, &result);
0, &result);
if (rc) {
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);

View File

@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
for (div = 1; div < 256; div *= 2) {
for (div = 2; div < 256; div *= 2) {
if ((parent / div) <= clock)
break;
}
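
Note: starting the divider search at 2 skips the div = 1 (pass-through) setting, which this controller cannot use, so the first candidate already divides the parent clock. The search reduced to a sketch:

#include <stdio.h>

static unsigned int pick_div(unsigned long parent, unsigned long clock)
{
	unsigned int div;

	for (div = 2; div < 256; div *= 2)	/* div = 1 (bypass) not usable */
		if (parent / div <= clock)
			break;
	return div;
}

int main(void)
{
	/* 200 MHz parent, 50 MHz target -> div 4 */
	printf("div = %u\n", pick_div(200000000UL, 50000000UL));
	return 0;
}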

View File

@ -5053,15 +5053,19 @@ int bond_create(struct net *net, const char *name)
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
rtnl_unlock();
return res;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
rtnl_unlock();
if (res < 0)
free_netdev(bond_dev);
return res;
return 0;
}
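
Note: the error path now frees the netdev and drops the lock right where register_netdevice() fails, so netif_carrier_off() and bond_work_init_all() can no longer run on a half-registered device. The shape of the fix (locking shown as comments; names illustrative):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for register_netdevice(); returns -1 to simulate failure */
static int register_dev(void *dev)
{
	(void)dev;
	return -1;
}

static int create(void)
{
	void *dev = malloc(64);

	/* rtnl_lock(); */
	if (register_dev(dev) < 0) {
		free(dev);	/* clean up immediately ... */
		/* rtnl_unlock(); */
		return -1;	/* ... before any post-registration init */
	}
	/* carrier-off / work init would run only on success */
	/* rtnl_unlock(); */
	free(dev);		/* demo only: keep the sketch leak-free */
	return 0;
}

int main(void)
{
	printf("create() = %d\n", create());
	return 0;
}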
static int __net_init bond_net_init(struct net *net)

Some files were not shown because too many files have changed in this diff Show More