Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2023-11-30 16:10:40 -08:00
commit 975f2d73a9
257 changed files with 2795 additions and 3769 deletions


@@ -59,15 +59,6 @@ Description:
 		brightness. Reading this file when no hw brightness change
 		event has happened will return an ENODATA error.
 
-What:		/sys/class/leds/<led>/color
-Date:		June 2023
-KernelVersion:	6.5
-Description:
-		Color of the LED.
-		This is a read-only file. Reading this file returns the color
-		of the LED as a string (e.g: "red", "green", "multicolor").
-
 What:		/sys/class/leds/<led>/trigger
 Date:		March 2006
 KernelVersion:	2.6.17


@@ -9,7 +9,7 @@ title: NXP S32G2 pin controller
 
 maintainers:
   - Ghennadi Procopciuc <Ghennadi.Procopciuc@oss.nxp.com>
-  - Chester Lin <clin@suse.com>
+  - Chester Lin <chester62515@gmail.com>
 
 description: |
   S32G2 pinmux is implemented in SIUL2 (System Integration Unit Lite2),


@@ -36,7 +36,11 @@ properties:
 
   vdd-supply:
     description:
-      VDD power supply to the hub
+      3V3 power supply to the hub
+
+  vdd2-supply:
+    description:
+      1V2 power supply to the hub
 
   peer-hub:
     $ref: /schemas/types.yaml#/definitions/phandle
@@ -62,6 +66,7 @@ allOf:
       properties:
         reset-gpios: false
         vdd-supply: false
+        vdd2-supply: false
         peer-hub: false
         i2c-bus: false
     else:


@@ -521,8 +521,8 @@ examples:
         interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
                      <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
-                     <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
-                     <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
+                     <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
+                     <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
         interrupt-names = "hs_phy_irq", "ss_phy_irq",
                           "dm_hs_phy_irq", "dp_hs_phy_irq";


@@ -41,7 +41,7 @@ examples:
   - |
     usb {
         phys = <&usb2_phy1>, <&usb3_phy1>;
-        phy-names = "usb";
+        phy-names = "usb2", "usb3";
         #address-cells = <1>;
         #size-cells = <0>;


@@ -22056,6 +22056,7 @@ F:	drivers/watchdog/tqmx86_wdt.c
 
 TRACING
 M:	Steven Rostedt <rostedt@goodmis.org>
 M:	Masami Hiramatsu <mhiramat@kernel.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 L:	linux-kernel@vger.kernel.org
 L:	linux-trace-kernel@vger.kernel.org
 S:	Maintained


@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*


@@ -484,7 +484,8 @@ static int __init xen_guest_init(void)
 	 * for secondary CPUs as they are brought up.
 	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
 	 */
-	xen_vcpu_info = alloc_percpu(struct vcpu_info);
+	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+				       1 << fls(sizeof(struct vcpu_info) - 1));
 	if (xen_vcpu_info == NULL)
 		return -ENOMEM;


@@ -158,7 +158,7 @@ endif
 
 all:	$(notdir $(KBUILD_IMAGE))
 
+vmlinuz.efi: Image
 Image vmlinuz.efi: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@


@@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
 	extern bool rodata_enabled;
 	extern bool rodata_full;
 
-	if (arg && !strcmp(arg, "full")) {
+	if (!arg)
+		return false;
+
+	if (!strcmp(arg, "full")) {
+		rodata_enabled = rodata_full = true;
+		return true;
+	}
+
+	if (!strcmp(arg, "off")) {
+		rodata_enabled = rodata_full = false;
+		return true;
+	}
+
+	if (!strcmp(arg, "on")) {
 		rodata_enabled = true;
-		rodata_full = true;
+		rodata_full = false;
 		return true;
 	}


@@ -29,8 +29,8 @@ bool can_set_direct_map(void)
 	 *
 	 * KFENCE pool requires page-granular mapping if initialized late.
 	 */
-	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
+	return rodata_full || debug_pagealloc_enabled() ||
 	       arm64_kfence_can_set_direct_map();
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	 * If we are manipulating read-only permissions, apply the same
 	 * change to the linear mapping of the pages that back this VM area.
 	 */
-	if (rodata_enabled &&
-	    rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
 			   pgprot_val(clear_mask) == PTE_RDONLY)) {
 		for (i = 0; i < area->nr_pages; i++) {
 			__change_memory_common((u64)page_address(area->pages[i]),


@@ -115,9 +115,12 @@ config ARCH_HAS_ILOG2_U64
 	default n
 
 config GENERIC_BUG
-	bool
-	default y
+	def_bool y
 	depends on BUG
+	select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
+
+config GENERIC_BUG_RELATIVE_POINTERS
+	bool
 
 config GENERIC_HWEIGHT
 	bool


@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
 
 /* Alternative SMP implementation. */
 #define ALTERNATIVE(cond, replacement)	"!0:"	\
-	".section .altinstructions, \"aw\"	!"	\
+	".section .altinstructions, \"a\"	!"	\
+	".align 4	!"	\
 	".word (0b-4-.)	!"	\
 	".hword 1, " __stringify(cond) "	!"	\
 	".word " __stringify(replacement) "	!"	\
@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
 
 /* to replace one single instructions by a new instruction */
 #define ALTERNATIVE(from, to, cond, replacement)\
-	.section .altinstructions, "aw"	!	\
+	.section .altinstructions, "a"	!	\
+	.align 4	!	\
 	.word (from - .)	!	\
 	.hword (to - from)/4, cond	!	\
 	.word replacement	!	\
@@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
 
 /* to replace multiple instructions by new code */
 #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
-	.section .altinstructions, "aw"	!	\
+	.section .altinstructions, "a"	!	\
+	.align 4	!	\
 	.word (from - .)	!	\
 	.hword -num_instructions, cond	!	\
 	.word (new_instr_ptr - .)	!	\


@@ -574,6 +574,7 @@
 	 */
 #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)	\
 	.section __ex_table,"aw"	!	\
+	.align 4	!	\
 	.word (fault_addr - .), (except_addr - .)	!	\
 	.previous


@@ -17,24 +17,27 @@
 #define PARISC_BUG_BREAK_ASM	"break 0x1f, 0x1fff"
 #define PARISC_BUG_BREAK_INSN	0x03ffe01f  /* PARISC_BUG_BREAK_ASM */
 
-#if defined(CONFIG_64BIT)
-#define ASM_WORD_INSN	".dword\t"
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+# define __BUG_REL(val) ".word " __stringify(val) " - ."
 #else
-#define ASM_WORD_INSN	".word\t"
+# define __BUG_REL(val) ".word " __stringify(val)
 #endif
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define BUG()							\
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"aw\"\n"	\
-			     "2:\t" ASM_WORD_INSN "1b, %c0\n"	\
-			     "\t.short %c1, %c2\n"		\
-			     "\t.org 2b+%c3\n"			\
+			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.align 4\n"			\
+			     "2:\t" __BUG_REL(1b) "\n"		\
+			     "\t" __BUG_REL(%c0) "\n"		\
+			     "\t.short %1, %2\n"		\
+			     "\t.blockz %3-2*4-2*2\n"		\
 			     "\t.popsection"			\
 			     : : "i" (__FILE__), "i" (__LINE__),	\
 				 "i" (0), "i" (sizeof(struct bug_entry)) ); \
 		unreachable();					\
 	} while(0)
@@ -51,10 +54,12 @@
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"aw\"\n"	\
-			     "2:\t" ASM_WORD_INSN "1b, %c0\n"	\
-			     "\t.short %c1, %c2\n"		\
-			     "\t.org 2b+%c3\n"			\
+			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.align 4\n"			\
+			     "2:\t" __BUG_REL(1b) "\n"		\
+			     "\t" __BUG_REL(%c0) "\n"		\
+			     "\t.short %1, %2\n"		\
+			     "\t.blockz %3-2*4-2*2\n"		\
 			     "\t.popsection"			\
 			     : : "i" (__FILE__), "i" (__LINE__),	\
 				 "i" (BUGFLAG_WARNING|(flags)),	\
@@ -65,10 +70,11 @@
 	do {							\
 		asm volatile("\n"				\
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"aw\"\n"	\
-			     "2:\t" ASM_WORD_INSN "1b\n"	\
-			     "\t.short %c0\n"			\
-			     "\t.org 2b+%c1\n"			\
+			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.align %2\n"			\
+			     "2:\t" __BUG_REL(1b) "\n"		\
+			     "\t.short %0\n"			\
+			     "\t.blockz %1-4-2\n"		\
 			     "\t.popsection"			\
 			     : : "i" (BUGFLAG_WARNING|(flags)),	\
 				 "i" (sizeof(struct bug_entry)) );	\


@@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 	asm_volatile_goto("1:\n\t"
 			 "nop\n\t"
 			 ".pushsection __jump_table, \"aw\"\n\t"
+			 ".align %1\n\t"
 			 ".word 1b - ., %l[l_yes] - .\n\t"
 			 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
 			 ".popsection\n\t"
-			 : : "i" (&((char *)key)[branch]) : : l_yes);
+			 : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
+			 : : l_yes);
 
 	return false;
 l_yes:
@@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 	asm_volatile_goto("1:\n\t"
 			 "b,n %l[l_yes]\n\t"
 			 ".pushsection __jump_table, \"aw\"\n\t"
+			 ".align %1\n\t"
 			 ".word 1b - ., %l[l_yes] - .\n\t"
 			 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
 			 ".popsection\n\t"
-			 : : "i" (&((char *)key)[branch]) : : l_yes);
+			 : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
+			 : : l_yes);
 
 	return false;
 l_yes:


@@ -55,7 +55,7 @@
 })
 
 #ifdef CONFIG_SMP
-# define __lock_aligned __section(".data..lock_aligned")
+# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
 #endif
 
 #endif /* __PARISC_LDCW_H */


@@ -41,6 +41,7 @@ struct exception_table_entry {
 
 #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
 	".section __ex_table,\"aw\"\n" \
+	".align 4\n" \
 	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
 	".previous\n"


@@ -75,7 +75,6 @@
 
 /* We now return you to your regularly scheduled HPUX. */
 
-#define ENOSYM		215	/* symbol does not exist in executable */
 #define ENOTSOCK	216	/* Socket operation on non-socket */
 #define EDESTADDRREQ	217	/* Destination address required */
 #define EMSGSIZE	218	/* Message too long */
@@ -101,7 +100,6 @@
 #define ETIMEDOUT	238	/* Connection timed out */
 #define ECONNREFUSED	239	/* Connection refused */
 #define EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
-#define EREMOTERELEASE	240	/* Remote peer released connection */
 #define EHOSTDOWN	241	/* Host is down */
 #define EHOSTUNREACH	242	/* No route to host */


@@ -130,6 +130,7 @@ SECTIONS
 	RO_DATA(8)
 
 	/* unwind info */
+	. = ALIGN(4);
 	.PARISC.unwind : {
 		__start___unwind = .;
 		*(.PARISC.unwind)


@@ -228,7 +228,6 @@ typedef struct thread_struct thread_struct;
 	execve_tail();					\
 } while (0)
 
-/* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
 struct seq_file;


@@ -666,6 +666,7 @@ static int __init ipl_init(void)
 					      &ipl_ccw_attr_group_lpar);
 		break;
 	case IPL_TYPE_ECKD:
+	case IPL_TYPE_ECKD_DUMP:
 		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
 		break;
 	case IPL_TYPE_FCP:


@@ -279,12 +279,6 @@ static int paicrypt_event_init(struct perf_event *event)
 	if (IS_ERR(cpump))
 		return PTR_ERR(cpump);
 
-	/* Event initialization sets last_tag to 0. When later on the events
-	 * are deleted and re-added, do not reset the event count value to zero.
-	 * Events are added, deleted and re-added when 2 or more events
-	 * are active at the same time.
-	 */
-	event->hw.last_tag = 0;
 	event->destroy = paicrypt_event_destroy;
 
 	if (a->sample_period) {
@@ -318,6 +312,11 @@ static void paicrypt_start(struct perf_event *event, int flags)
 {
 	u64 sum;
 
+	/* Event initialization sets last_tag to 0. When later on the events
+	 * are deleted and re-added, do not reset the event count value to zero.
+	 * Events are added, deleted and re-added when 2 or more events
+	 * are active at the same time.
+	 */
 	if (!event->hw.last_tag) {
 		event->hw.last_tag = 1;
 		sum = paicrypt_getall(event);	/* Get current value */


@@ -260,7 +260,6 @@ static int paiext_event_init(struct perf_event *event)
 	rc = paiext_alloc(a, event);
 	if (rc)
 		return rc;
-	event->hw.last_tag = 0;
 	event->destroy = paiext_event_destroy;
 
 	if (a->sample_period) {


@@ -4660,7 +4660,7 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
 	if (pmu->intel_cap.pebs_output_pt_available)
 		pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
 	else
-		pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT;
+		pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
 
 	intel_pmu_check_event_constraints(pmu->event_constraints,
 					  pmu->num_counters,


@@ -104,8 +104,6 @@ struct cont_desc {
 	size_t		     size;
 };
 
-static u32 ucode_new_rev;
-
 /*
  * Microcode patch container file is prepended to the initrd in cpio
  * format. See Documentation/arch/x86/microcode.rst
@@ -442,12 +440,11 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
  *
  * Returns true if container found (sets @desc), false otherwise.
  */
-static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
+static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size)
 {
 	struct cont_desc desc = { 0 };
 	struct microcode_amd *mc;
 	bool ret = false;
-	u32 rev, dummy;
 
 	desc.cpuid_1_eax = cpuid_1_eax;
@@ -457,22 +454,15 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
 	if (!mc)
 		return ret;
 
-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
 	/*
 	 * Allow application of the same revision to pick up SMT-specific
 	 * changes even if the revision of the other SMT thread is already
 	 * up-to-date.
 	 */
-	if (rev > mc->hdr.patch_id)
+	if (old_rev > mc->hdr.patch_id)
 		return ret;
 
-	if (!__apply_microcode_amd(mc)) {
-		ucode_new_rev = mc->hdr.patch_id;
-		ret = true;
-	}
-
-	return ret;
+	return !__apply_microcode_amd(mc);
 }
 
 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@@ -506,9 +496,12 @@ static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
 	*ret = cp;
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
 {
 	struct cpio_data cp = { };
+	u32 dummy;
+
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
 
 	/* Needed in load_microcode_amd() */
 	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@@ -517,7 +510,8 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 	if (!(cp.data && cp.size))
 		return;
 
-	early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
+	if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size))
+		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
 }
 
 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
@@ -625,10 +619,8 @@ void reload_ucode_amd(unsigned int cpu)
 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
 	if (rev < mc->hdr.patch_id) {
-		if (!__apply_microcode_amd(mc)) {
-			ucode_new_rev = mc->hdr.patch_id;
-			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
-		}
+		if (!__apply_microcode_amd(mc))
+			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
 	}
 }
@@ -649,8 +641,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 	if (p && (p->patch_id == csig->rev))
 		uci->mc = p->data;
 
-	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
-
 	return 0;
 }
@@ -691,8 +681,6 @@ static enum ucode_state apply_microcode_amd(int cpu)
 	rev = mc_amd->hdr.patch_id;
 	ret = UCODE_UPDATED;
 
-	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
-
 out:
 	uci->cpu_sig.rev = rev;
 	c->microcode = rev;
@@ -935,11 +923,6 @@ struct microcode_ops * __init init_amd_microcode(void)
 		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
 		return NULL;
 	}
-
-	if (ucode_new_rev)
-		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
-			     ucode_new_rev);
-
 	return &microcode_amd_ops;
 }


@@ -41,8 +41,6 @@
 
 #include "internal.h"
 
-#define DRIVER_VERSION	"2.2"
-
 static struct microcode_ops	*microcode_ops;
 bool dis_ucode_ldr = true;
@@ -77,6 +75,8 @@ static u32 final_levels[] = {
 	0, /* T-101 terminator */
 };
 
+struct early_load_data early_data;
+
 /*
  * Check the current patch level on this CPU.
  *
@@ -155,9 +155,9 @@ void __init load_ucode_bsp(void)
 		return;
 
 	if (intel)
-		load_ucode_intel_bsp();
+		load_ucode_intel_bsp(&early_data);
 	else
-		load_ucode_amd_bsp(cpuid_1_eax);
+		load_ucode_amd_bsp(&early_data, cpuid_1_eax);
 }
 
 void load_ucode_ap(void)
@@ -828,6 +828,11 @@ static int __init microcode_init(void)
 	if (!microcode_ops)
 		return -ENODEV;
 
+	pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));
+
+	if (early_data.new_rev)
+		pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);
+
 	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
 	if (IS_ERR(microcode_pdev))
 		return PTR_ERR(microcode_pdev);
@@ -846,8 +851,6 @@ static int __init microcode_init(void)
 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
 			  mc_cpu_online, mc_cpu_down_prep);
 
-	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
-
 	return 0;
 
  out_pdev:


@@ -339,16 +339,9 @@ static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
 static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc = uci->mc;
-	enum ucode_state ret;
-	u32 cur_rev, date;
+	u32 cur_rev;
 
-	ret = __apply_microcode(uci, mc, &cur_rev);
-	if (ret == UCODE_UPDATED) {
-		date = mc->hdr.date;
-		pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
-			     cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff);
-	}
-	return ret;
+	return __apply_microcode(uci, mc, &cur_rev);
 }
 
 static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
@@ -413,13 +406,17 @@ static int __init save_builtin_microcode(void)
 early_initcall(save_builtin_microcode);
 
 /* Load microcode on BSP from initrd or builtin blobs */
-void __init load_ucode_intel_bsp(void)
+void __init load_ucode_intel_bsp(struct early_load_data *ed)
 {
 	struct ucode_cpu_info uci;
 
+	ed->old_rev = intel_get_microcode_revision();
+
 	uci.mc = get_microcode_blob(&uci, false);
 	if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED)
 		ucode_patch_va = UCODE_BSP_LOADED;
+
+	ed->new_rev = uci.cpu_sig.rev;
 }
 
 void load_ucode_intel_ap(void)


@@ -37,6 +37,12 @@ struct microcode_ops {
 				use_nmi		: 1;
 };
 
+struct early_load_data {
+	u32 old_rev;
+	u32 new_rev;
+};
+
+extern struct early_load_data early_data;
 extern struct ucode_cpu_info ucode_cpu_info[];
 struct cpio_data find_microcode_in_initrd(const char *path);
@@ -92,14 +98,14 @@ extern bool dis_ucode_ldr;
 extern bool force_minrev;
 
 #ifdef CONFIG_CPU_SUP_AMD
-void load_ucode_amd_bsp(unsigned int family);
+void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
 void load_ucode_amd_ap(unsigned int family);
 int save_microcode_in_initrd_amd(unsigned int family);
 void reload_ucode_amd(unsigned int cpu);
 struct microcode_ops *init_amd_microcode(void);
 void exit_amd_microcode(void);
 #else /* CONFIG_CPU_SUP_AMD */
-static inline void load_ucode_amd_bsp(unsigned int family) { }
+static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
 static inline void load_ucode_amd_ap(unsigned int family) { }
 static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 static inline void reload_ucode_amd(unsigned int cpu) { }
@@ -108,12 +114,12 @@ static inline void exit_amd_microcode(void) { }
 #endif /* !CONFIG_CPU_SUP_AMD */
 
 #ifdef CONFIG_CPU_SUP_INTEL
-void load_ucode_intel_bsp(void);
+void load_ucode_intel_bsp(struct early_load_data *ed);
 void load_ucode_intel_ap(void);
 void reload_ucode_intel(void);
 struct microcode_ops *init_intel_microcode(void);
 #else /* CONFIG_CPU_SUP_INTEL */
-static inline void load_ucode_intel_bsp(void) { }
+static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
 static inline void load_ucode_intel_ap(void) { }
 static inline void reload_ucode_intel(void) { }
 static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }


@@ -425,6 +425,8 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 
 void bdev_add(struct block_device *bdev, dev_t dev)
 {
+	if (bdev_stable_writes(bdev))
+		mapping_set_stable_writes(bdev->bd_inode->i_mapping);
 	bdev->bd_dev = dev;
 	bdev->bd_inode->i_rdev = dev;
 	bdev->bd_inode->i_ino = dev;


@@ -577,6 +577,7 @@ static void blkg_destroy_all(struct gendisk *disk)
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *blkg, *n;
 	int count = BLKG_DESTROY_BATCH_SIZE;
+	int i;
 
 restart:
 	spin_lock_irq(&q->queue_lock);
@@ -602,6 +603,18 @@ static void blkg_destroy_all(struct gendisk *disk)
 		}
 	}
 
+	/*
+	 * Mark policy deactivated since policy offline has been done, and
+	 * the free is scheduled, so future blkcg_deactivate_policy() can
+	 * be bypassed
+	 */
+	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+		struct blkcg_policy *pol = blkcg_policy[i];
+
+		if (pol)
+			__clear_bit(pol->plid, q->blkcg_pols);
+	}
+
 	q->root_blkg = NULL;
 	spin_unlock_irq(&q->queue_lock);
 }


@@ -249,8 +249,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
 {
 	struct blkcg_gq *blkg;
 
-	WARN_ON_ONCE(!rcu_read_lock_held());
-
 	if (blkcg == &blkcg_root)
 		return q->root_blkg;


@@ -163,38 +163,15 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
  * @q: the queue of the device
  *
  * Description:
- *    For historical reasons, this routine merely calls blk_set_runtime_active()
- *    to do the real work of restarting the queue.  It does this regardless of
- *    whether the device's runtime-resume succeeded; even if it failed the
+ *    Restart the queue of a runtime suspended device. It does this regardless
+ *    of whether the device's runtime-resume succeeded; even if it failed the
  *    driver or error handler will need to communicate with the device.
  *
  *    This function should be called near the end of the device's
- *    runtime_resume callback.
+ *    runtime_resume callback to correct queue runtime PM status and re-enable
+ *    peeking requests from the queue.
  */
 void blk_post_runtime_resume(struct request_queue *q)
-{
-	blk_set_runtime_active(q);
-}
-EXPORT_SYMBOL(blk_post_runtime_resume);
-
-/**
- * blk_set_runtime_active - Force runtime status of the queue to be active
- * @q: the queue of the device
- *
- * If the device is left runtime suspended during system suspend the resume
- * hook typically resumes the device and corrects runtime status
- * accordingly. However, that does not affect the queue runtime PM status
- * which is still "suspended". This prevents processing requests from the
- * queue.
- *
- * This function can be used in driver's resume hook to correct queue
- * runtime PM status and re-enable peeking requests from the queue. It
- * should be called before first request is added to the queue.
- *
- * This function is also called by blk_post_runtime_resume() for
- * runtime resumes. It does everything necessary to restart the queue.
- */
-void blk_set_runtime_active(struct request_queue *q)
 {
 	int old_status;
 
@@ -211,4 +188,4 @@ void blk_set_runtime_active(struct request_queue *q)
 	if (old_status != RPM_ACTIVE)
 		blk_clear_pm_only(q);
 }
-EXPORT_SYMBOL(blk_set_runtime_active);
+EXPORT_SYMBOL(blk_post_runtime_resume);


@@ -1320,6 +1320,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
 		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
 		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
 
+	rcu_read_lock();
 	/*
 	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
 	 * considered to have rules if either the tg itself or any of its
@@ -1347,6 +1348,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
 			this_tg->latency_target = max(this_tg->latency_target,
 					parent_tg->latency_target);
 		}
+	rcu_read_unlock();
 
 	/*
 	 * We're already holding queue_lock and know @tg is valid.  Let's


@@ -502,6 +502,16 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
 	return ret;
 }
 
+static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
+{
+	ivpu_boot_dpu_active_drive(vdev, false);
+	ivpu_boot_pwr_island_isolation_drive(vdev, true);
+	ivpu_boot_pwr_island_trickle_drive(vdev, false);
+	ivpu_boot_pwr_island_drive(vdev, false);
+
+	return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
+}
+
 static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
 {
 	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
@@ -600,25 +610,17 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
 
 static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
 {
-	int ret;
-	u32 val;
-
-	if (IVPU_WA(punit_disabled))
-		return 0;
+	int ret = 0;
 
-	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-	if (ret) {
-		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
-		return ret;
+	if (ivpu_boot_pwr_domain_disable(vdev)) {
+		ivpu_err(vdev, "Failed to disable power domain\n");
+		ret = -EIO;
 	}
 
-	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
-	val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
-	REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
-
-	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-	if (ret)
-		ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+	if (ivpu_pll_disable(vdev)) {
+		ivpu_err(vdev, "Failed to disable PLL\n");
+		ret = -EIO;
+	}
 
 	return ret;
 }
@@ -651,10 +653,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
 {
 	int ret;
 
-	ret = ivpu_hw_37xx_reset(vdev);
-	if (ret)
-		ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
-
 	ret = ivpu_hw_37xx_d0i3_disable(vdev);
 	if (ret)
 		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -722,11 +720,11 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
 {
 	int ret = 0;
 
-	if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
-		ivpu_err(vdev, "Failed to reset the VPU\n");
+	if (!ivpu_hw_37xx_is_idle(vdev))
+		ivpu_warn(vdev, "VPU not idle during power down\n");
 
-	if (ivpu_pll_disable(vdev)) {
-		ivpu_err(vdev, "Failed to disable PLL\n");
+	if (ivpu_hw_37xx_reset(vdev)) {
+		ivpu_err(vdev, "Failed to reset VPU\n");
 		ret = -EIO;
 	}


@@ -2031,7 +2031,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
 	 * HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
 	 * evaluated to have functional panel brightness control.
 	 */
-	acpi_device_fix_up_power_extended(device);
+	acpi_device_fix_up_power_children(device);
 
 	pr_info("%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
 	       ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),


@@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
 }
 EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
 
+/**
+ * acpi_device_fix_up_power_children - Force a device's children into D0.
+ * @adev: Parent device object whose children's power state is to be fixed up.
+ *
+ * Call acpi_device_fix_up_power() for @adev's children so long as they
+ * are reported as present and enabled.
+ */
+void acpi_device_fix_up_power_children(struct acpi_device *adev)
+{
+	acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
+
 int acpi_device_update_power(struct acpi_device *device, int *state_p)
 {
 	int state;


@@ -592,7 +592,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 	while (1) {
 
 		if (cx->entry_method == ACPI_CSTATE_HALT)
-			safe_halt();
+			raw_safe_halt();
 		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
 			io_idle(cx->address);
 		} else


@@ -447,6 +447,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
 		},
 	},
+	{
+		/* Asus ExpertBook B1402CVA */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+		},
+	},
 	{
 		/* Asus ExpertBook B1502CBA */
 		.matches = {


@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
 	if (pnp_port_valid(idev, 1)) {
 		ctl_addr = devm_ioport_map(&idev->dev,
 					   pnp_port_start(idev, 1), 1);
+		if (!ctl_addr)
+			return -ENOMEM;
+
 		ap->ioaddr.altstatus_addr = ctl_addr;
 		ap->ioaddr.ctl_addr = ctl_addr;
 		ap->ops = &isapnp_port_ops;


@@ -67,6 +67,7 @@ struct nbd_sock {
 struct recv_thread_args {
 	struct work_struct work;
 	struct nbd_device *nbd;
+	struct nbd_sock *nsock;
 	int index;
 };
 
@@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
 	}
 }
 
+static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
+{
+	if (refcount_inc_not_zero(&nbd->config_refs)) {
+		/*
+		 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
+		 * and reading nbd->config is ordered. The pair is the barrier in
+		 * nbd_alloc_and_init_config(), avoid nbd->config_refs is set
+		 * before nbd->config.
+		 */
+		smp_mb__after_atomic();
+		return nbd->config;
+	}
+
+	return NULL;
+}
+
 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
 		return BLK_EH_DONE;
 	}
 
-	if (!refcount_inc_not_zero(&nbd->config_refs)) {
+	config = nbd_get_config_unlocked(nbd);
+	if (!config) {
 		cmd->status = BLK_STS_TIMEOUT;
 		__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
 		mutex_unlock(&cmd->lock);
 		goto done;
 	}
-	config = nbd->config;
 
 	if (config->num_connections > 1 ||
 	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
@@ -489,15 +506,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
 	return BLK_EH_DONE;
 }
 
-/*
- * Send or receive packet. Return a positive value on success and
- * negtive value on failue, and never return 0.
- */
-static int sock_xmit(struct nbd_device *nbd, int index, int send,
-		     struct iov_iter *iter, int msg_flags, int *sent)
+static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+		       struct iov_iter *iter, int msg_flags, int *sent)
 {
-	struct nbd_config *config = nbd->config;
-	struct socket *sock = config->socks[index]->sock;
 	int result;
 	struct msghdr msg;
 	unsigned int noreclaim_flag;
@@ -540,6 +551,19 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
 	return result;
 }
 
+/*
+ * Send or receive packet. Return a positive value on success and
+ * negtive value on failure, and never return 0.
+ */
+static int sock_xmit(struct nbd_device *nbd, int index, int send,
+		     struct iov_iter *iter, int msg_flags, int *sent)
+{
+	struct nbd_config *config = nbd->config;
+	struct socket *sock = config->socks[index]->sock;
+
+	return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
+}
+
 /*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
@@ -696,7 +720,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	return 0;
 }
 
-static int nbd_read_reply(struct nbd_device *nbd, int index,
+static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
 			  struct nbd_reply *reply)
 {
 	struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
 
 	reply->magic = 0;
 	iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
-	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
+	result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
 	if (result < 0) {
 		if (!nbd_disconnected(nbd->config))
 			dev_err(disk_to_dev(nbd->disk),
@@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
 	struct nbd_device *nbd = args->nbd;
 	struct nbd_config *config = nbd->config;
 	struct request_queue *q = nbd->disk->queue;
-	struct nbd_sock *nsock;
+	struct nbd_sock *nsock = args->nsock;
 	struct nbd_cmd *cmd;
 	struct request *rq;
 
 	while (1) {
 		struct nbd_reply reply;
 
-		if (nbd_read_reply(nbd, args->index, &reply))
+		if (nbd_read_reply(nbd, nsock->sock, &reply))
 			break;
 
 		/*
@@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
 		percpu_ref_put(&q->q_usage_counter);
 	}
 
-	nsock = config->socks[args->index];
 	mutex_lock(&nsock->tx_lock);
 	nbd_mark_nsock_dead(nbd, nsock, 1);
 	mutex_unlock(&nsock->tx_lock);
@@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 	struct nbd_sock *nsock;
 	int ret;
 
-	if (!refcount_inc_not_zero(&nbd->config_refs)) {
+	config = nbd_get_config_unlocked(nbd);
+	if (!config) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Socks array is empty\n");
 		return -EINVAL;
 	}
-	config = nbd->config;
 
 	if (index >= config->num_connections) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 		INIT_WORK(&args->work, recv_work);
 		args->index = i;
 		args->nbd = nbd;
+		args->nsock = nsock;
 		nsock->cookie++;
 		mutex_unlock(&nsock->tx_lock);
 		sockfd_put(old);
@@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		refcount_inc(&nbd->config_refs);
 		INIT_WORK(&args->work, recv_work);
 		args->nbd = nbd;
+		args->nsock = config->socks[i];
 		args->index = i;
 		queue_work(nbd->recv_workq, &args->work);
 	}
@@ -1530,17 +1555,20 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
 	return error;
 }
 
-static struct nbd_config *nbd_alloc_config(void)
+static int nbd_alloc_and_init_config(struct nbd_device *nbd)
 {
 	struct nbd_config *config;
 
+	if (WARN_ON(nbd->config))
+		return -EINVAL;
+
 	if (!try_module_get(THIS_MODULE))
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
 	if (!config) {
 		module_put(THIS_MODULE);
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}
 
 	atomic_set(&config->recv_threads, 0);
@@ -1548,12 +1576,24 @@
 	init_waitqueue_head(&config->conn_wait);
 	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
 	atomic_set(&config->live_connections, 0);
-	return config;
+
+	nbd->config = config;
+	/*
+	 * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
+	 * its pair is the barrier in nbd_get_config_unlocked().
+	 * So nbd_get_config_unlocked() won't see nbd->config as null after
+	 * refcount_inc_not_zero() succeed.
+	 */
+	smp_mb__before_atomic();
+	refcount_set(&nbd->config_refs, 1);
+
+	return 0;
 }
 
 static int nbd_open(struct gendisk *disk, blk_mode_t mode)
 {
 	struct nbd_device *nbd;
+	struct nbd_config *config;
 	int ret = 0;
 
 	mutex_lock(&nbd_index_mutex);
@@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
 		ret = -ENXIO;
 		goto out;
 	}
-	if (!refcount_inc_not_zero(&nbd->config_refs)) {
-		struct nbd_config *config;
-
+
+	config = nbd_get_config_unlocked(nbd);
+	if (!config) {
 		mutex_lock(&nbd->config_lock);
 		if (refcount_inc_not_zero(&nbd->config_refs)) {
 			mutex_unlock(&nbd->config_lock);
 			goto out;
 		}
-		config = nbd_alloc_config();
-		if (IS_ERR(config)) {
-			ret = PTR_ERR(config);
+		ret = nbd_alloc_and_init_config(nbd);
+		if (ret) {
 			mutex_unlock(&nbd->config_lock);
 			goto out;
 		}
-		nbd->config = config;
-		refcount_set(&nbd->config_refs, 1);
+
 		refcount_inc(&nbd->refs);
 		mutex_unlock(&nbd->config_lock);
 		if (max_part)
 			set_bit(GD_NEED_PART_SCAN, &disk->state);
-	} else if (nbd_disconnected(nbd->config)) {
+	} else if (nbd_disconnected(config)) {
 		if (max_part)
 			set_bit(GD_NEED_PART_SCAN, &disk->state);
 	}
@@ -1990,22 +2028,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 		pr_err("nbd%d already in use\n", index);
 		return -EBUSY;
 	}
-	if (WARN_ON(nbd->config)) {
-		mutex_unlock(&nbd->config_lock);
-		nbd_put(nbd);
-		return -EINVAL;
-	}
-	config = nbd_alloc_config();
-	if (IS_ERR(config)) {
+
+	ret = nbd_alloc_and_init_config(nbd);
+	if (ret) {
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
 		pr_err("couldn't allocate config\n");
-		return PTR_ERR(config);
+		return ret;
 	}
-	nbd->config = config;
-	refcount_set(&nbd->config_refs, 1);
-	set_bit(NBD_RT_BOUND, &config->runtime_flags);
 
+	config = nbd->config;
+	set_bit(NBD_RT_BOUND, &config->runtime_flags);
 	ret = nbd_genl_size_set(info, nbd);
 	if (ret)
 		goto out;
@@ -2208,7 +2241,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	}
 	mutex_unlock(&nbd_index_mutex);
 
-	if (!refcount_inc_not_zero(&nbd->config_refs)) {
+	config = nbd_get_config_unlocked(nbd);
+	if (!config) {
 		dev_err(nbd_to_dev(nbd),
 			"not configured, cannot reconfigure\n");
 		nbd_put(nbd);
@@ -2216,7 +2250,6 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	mutex_lock(&nbd->config_lock);
-	config = nbd->config;
 	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
 	    !nbd->pid) {
 		dev_err(nbd_to_dev(nbd),

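The ordering comments in the nbd hunks above describe a publish/consume pairing: nbd_alloc_and_init_config() fully initializes nbd->config before making the refcount non-zero, and nbd_get_config_unlocked() only dereferences nbd->config after refcount_inc_not_zero() succeeds. For illustration only, a minimal userspace C11 analogue of that pattern follows; the names here are hypothetical, and the kernel itself uses smp_mb__before_atomic()/smp_mb__after_atomic() around plain refcount operations rather than C11 release/acquire atomics.

#include <stdatomic.h>
#include <stdlib.h>

struct config { int nconn; };

static struct config *config_ptr;
static atomic_int config_refs;

/* Writer: fully initialize the object, then publish it by setting
 * the refcount to 1 with release semantics. */
static int alloc_and_init_config(void)
{
	struct config *c = calloc(1, sizeof(*c));

	if (!c)
		return -1;
	c->nconn = 1;
	config_ptr = c;
	/* Release: the stores above are visible before refs reads as 1. */
	atomic_store_explicit(&config_refs, 1, memory_order_release);
	return 0;
}

/* Reader: only a successful inc-not-zero permits reading config_ptr;
 * the acquire on success pairs with the writer's release store. */
static struct config *get_config_unlocked(void)
{
	int refs = atomic_load_explicit(&config_refs, memory_order_relaxed);

	while (refs > 0) {
		if (atomic_compare_exchange_weak_explicit(&config_refs,
				&refs, refs + 1,
				memory_order_acquire,
				memory_order_relaxed))
			return config_ptr;	/* ordered after the acquire */
	}
	return NULL;
}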

@@ -1464,19 +1464,13 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
 	return BLK_STS_OK;
 }
 
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
-				    sector_t nr_sectors, enum req_op op)
+static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+			    sector_t nr_sectors, enum req_op op)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	struct nullb *nullb = dev->nullb;
 	blk_status_t sts;
 
-	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
-		sts = null_handle_throttled(cmd);
-		if (sts != BLK_STS_OK)
-			return sts;
-	}
-
 	if (op == REQ_OP_FLUSH) {
 		cmd->error = errno_to_blk_status(null_handle_flush(nullb));
 		goto out;
@@ -1493,7 +1487,6 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
 
 out:
 	nullb_complete_cmd(cmd);
-	return BLK_STS_OK;
 }
 
 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
@@ -1724,8 +1717,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	cmd->fake_timeout = should_timeout_request(rq) ||
 			    blk_should_fake_timeout(rq->q);
 
-	blk_mq_start_request(rq);
-
 	if (should_requeue_request(rq)) {
 		/*
 		 * Alternate between hitting the core BUSY path, and the
@@ -1738,6 +1729,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return BLK_STS_OK;
 	}
 
+	if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
+		blk_status_t sts = null_handle_throttled(cmd);
+
+		if (sts != BLK_STS_OK)
+			return sts;
+	}
+
+	blk_mq_start_request(rq);
+
 	if (is_poll) {
 		spin_lock(&nq->poll_lock);
 		list_add_tail(&rq->queuelist, &nq->poll_list);
@@ -1747,7 +1747,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (cmd->fake_timeout)
 		return BLK_STS_OK;
 
-	return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+	null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+	return BLK_STS_OK;
 }
 
 static void null_queue_rqs(struct request **rqlist)


@@ -101,7 +101,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
 	 * overlap on physical address level.
 	 */
 	list_for_each_entry(entry, &accepting_list, list) {
-		if (entry->end < range.start)
+		if (entry->end <= range.start)
 			continue;
 		if (entry->start >= range.end)
 			continue;

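The one-character fix above encodes the usual overlap rule for half-open [start, end) ranges: two such ranges overlap only if each starts strictly before the other ends, so an entry whose end merely touches range.start must be skipped rather than treated as overlapping. A standalone sketch of that test, with hypothetical names and for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* Half-open range: [start, end), as used by accept_memory(). */
struct range { uint64_t start, end; };

/* Touching ranges (a.end == b.start) do not overlap. */
static bool ranges_overlap(struct range a, struct range b)
{
	return a.start < b.end && b.start < a.end;
}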

@@ -174,6 +174,17 @@ to_ast_sil164_connector(struct drm_connector *connector)
 	return container_of(connector, struct ast_sil164_connector, base);
 }
 
+struct ast_bmc_connector {
+	struct drm_connector base;
+	struct drm_connector *physical_connector;
+};
+
+static inline struct ast_bmc_connector *
+to_ast_bmc_connector(struct drm_connector *connector)
+{
+	return container_of(connector, struct ast_bmc_connector, base);
+}
+
 /*
  * Device
  */
@@ -218,7 +229,7 @@ struct ast_device {
 		} astdp;
 		struct {
 			struct drm_encoder encoder;
-			struct drm_connector connector;
+			struct ast_bmc_connector bmc_connector;
 		} bmc;
 	} output;


@ -1767,6 +1767,30 @@ static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
.destroy = drm_encoder_cleanup, .destroy = drm_encoder_cleanup,
}; };
static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
struct drm_connector *physical_connector = bmc_connector->physical_connector;
/*
* Most user-space compositors cannot handle more than one connected
* connector per CRTC. Hence, we only mark the BMC as connected if the
* physical connector is disconnected. If the physical connector's status
* is connected or unknown, the BMC remains disconnected. This has no
* effect on the output of the BMC.
*
* FIXME: Remove this logic once user-space compositors can handle more
* than one connector per CRTC. The BMC should always be connected.
*/
if (physical_connector && physical_connector->status == connector_status_disconnected)
return connector_status_connected;
return connector_status_disconnected;
}
static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector) static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
{ {
return drm_add_modes_noedid(connector, 4096, 4096); return drm_add_modes_noedid(connector, 4096, 4096);
@ -1774,6 +1798,7 @@ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = { static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
.get_modes = ast_bmc_connector_helper_get_modes, .get_modes = ast_bmc_connector_helper_get_modes,
.detect_ctx = ast_bmc_connector_helper_detect_ctx,
}; };
static const struct drm_connector_funcs ast_bmc_connector_funcs = { static const struct drm_connector_funcs ast_bmc_connector_funcs = {
@ -1784,12 +1809,33 @@ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
}; };
static int ast_bmc_output_init(struct ast_device *ast) static int ast_bmc_connector_init(struct drm_device *dev,
struct ast_bmc_connector *bmc_connector,
struct drm_connector *physical_connector)
{
struct drm_connector *connector = &bmc_connector->base;
int ret;
ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret)
return ret;
drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
bmc_connector->physical_connector = physical_connector;
return 0;
}
static int ast_bmc_output_init(struct ast_device *ast,
struct drm_connector *physical_connector)
{ {
struct drm_device *dev = &ast->base; struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc; struct drm_crtc *crtc = &ast->crtc;
struct drm_encoder *encoder = &ast->output.bmc.encoder; struct drm_encoder *encoder = &ast->output.bmc.encoder;
struct drm_connector *connector = &ast->output.bmc.connector; struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
struct drm_connector *connector = &bmc_connector->base;
int ret; int ret;
ret = drm_encoder_init(dev, encoder, ret = drm_encoder_init(dev, encoder,
@ -1799,13 +1845,10 @@ static int ast_bmc_output_init(struct ast_device *ast)
return ret; return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc); encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs, ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) if (ret)
return ret; return ret;
drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
ret = drm_connector_attach_encoder(connector, encoder); ret = drm_connector_attach_encoder(connector, encoder);
if (ret) if (ret)
return ret; return ret;
@ -1864,6 +1907,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_device *ast) int ast_mode_config_init(struct ast_device *ast)
{ {
struct drm_device *dev = &ast->base; struct drm_device *dev = &ast->base;
struct drm_connector *physical_connector = NULL;
int ret; int ret;
ret = drmm_mode_config_init(dev); ret = drmm_mode_config_init(dev);
@ -1904,23 +1948,27 @@ int ast_mode_config_init(struct ast_device *ast)
ret = ast_vga_output_init(ast); ret = ast_vga_output_init(ast);
if (ret) if (ret)
return ret; return ret;
physical_connector = &ast->output.vga.vga_connector.base;
} }
if (ast->tx_chip_types & AST_TX_SIL164_BIT) { if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast); ret = ast_sil164_output_init(ast);
if (ret) if (ret)
return ret; return ret;
physical_connector = &ast->output.sil164.sil164_connector.base;
} }
if (ast->tx_chip_types & AST_TX_DP501_BIT) { if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast); ret = ast_dp501_output_init(ast);
if (ret) if (ret)
return ret; return ret;
physical_connector = &ast->output.dp501.connector;
} }
if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ret = ast_astdp_output_init(ast); ret = ast_astdp_output_init(ast);
if (ret) if (ret)
return ret; return ret;
physical_connector = &ast->output.astdp.connector;
} }
ret = ast_bmc_output_init(ast); ret = ast_bmc_output_init(ast, physical_connector);
if (ret) if (ret)
return ret; return ret;
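The rewired ast_mode_config_init() above records the connector of each transmitter branch as it initializes, and the last one initialized becomes the physical connector handed to the BMC output. A minimal userspace sketch of that "remember the last successfully initialized output" pattern, with hypothetical names (output_init, struct connector) standing in for the driver types:

#include <stdio.h>

struct connector { const char *name; };

/* hypothetical stand-in for one transmitter branch's init */
static void output_init(int present, struct connector *c, const char *name,
			struct connector **physical)
{
	if (!present)
		return;
	c->name = name;
	*physical = c;          /* the last branch initialized wins */
}

int main(void)
{
	struct connector vga, astdp;
	struct connector *physical = NULL;

	output_init(1, &vga, "vga", &physical);
	output_init(1, &astdp, "astdp", &physical);
	printf("BMC mirrors: %s\n", physical ? physical->name : "none");
	return 0;
}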

View file

@ -1161,6 +1161,14 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_connector->port = port; intel_connector->port = port;
drm_dp_mst_get_port_malloc(port); drm_dp_mst_get_port_malloc(port);
/*
* TODO: set the AUX for the actual MST port decompressing the stream.
* At the moment the driver only supports enabling this globally in the
* first downstream MST branch, via intel_dp's (root port) AUX.
*/
intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
connector = &intel_connector->base; connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort); DRM_MODE_CONNECTOR_DisplayPort);
@ -1172,14 +1180,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
/*
* TODO: set the AUX for the actual MST port decompressing the stream.
* At the moment the driver only supports enabling this globally in the
* first downstream MST branch, via intel_dp's (root port) AUX.
*/
intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
struct drm_encoder *enc = struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base; &intel_dp->mst_encoders[pipe]->base.base;

View file

@ -982,8 +982,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
err: err:
i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret); i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
intel_gt_release_all(i915);
return ret; return ret;
} }
@ -1002,15 +1000,6 @@ int intel_gt_tiles_init(struct drm_i915_private *i915)
return 0; return 0;
} }
void intel_gt_release_all(struct drm_i915_private *i915)
{
struct intel_gt *gt;
unsigned int id;
for_each_gt(gt, i915, id)
i915->gt[id] = NULL;
}
void intel_gt_info_print(const struct intel_gt_info *info, void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p) struct drm_printer *p)
{ {

View file

@ -782,7 +782,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = i915_driver_mmio_probe(i915); ret = i915_driver_mmio_probe(i915);
if (ret < 0) if (ret < 0)
goto out_tiles_cleanup; goto out_runtime_pm_put;
ret = i915_driver_hw_probe(i915); ret = i915_driver_hw_probe(i915);
if (ret < 0) if (ret < 0)
@ -842,8 +842,6 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_ggtt_driver_late_release(i915); i915_ggtt_driver_late_release(i915);
out_cleanup_mmio: out_cleanup_mmio:
i915_driver_mmio_release(i915); i915_driver_mmio_release(i915);
out_tiles_cleanup:
intel_gt_release_all(i915);
out_runtime_pm_put: out_runtime_pm_put:
enable_rpm_wakeref_asserts(&i915->runtime_pm); enable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_late_release(i915); i915_driver_late_release(i915);

View file

@ -406,6 +406,7 @@ static const struct dpu_perf_cfg sc8280xp_perf_data = {
.min_llcc_ib = 0, .min_llcc_ib = 0,
.min_dram_ib = 800000, .min_dram_ib = 800000,
.danger_lut_tbl = {0xf, 0xffff, 0x0}, .danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
.qos_lut_tbl = { .qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc8180x_qos_linear), {.nentry = ARRAY_SIZE(sc8180x_qos_linear),
.entries = sc8180x_qos_linear .entries = sc8180x_qos_linear

View file

@ -844,8 +844,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
return 0; return 0;
fail: fail:
if (mdp5_kms) mdp5_destroy(mdp5_kms);
mdp5_destroy(mdp5_kms);
return ret; return ret;
} }

View file

@ -365,9 +365,11 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
/* reset video pattern flag on disconnect */ /* reset video pattern flag on disconnect */
if (!hpd) { if (!hpd) {
dp->panel->video_test = false; dp->panel->video_test = false;
drm_dp_set_subconnector_property(dp->dp_display.connector, if (!dp->dp_display.is_edp)
connector_status_disconnected, drm_dp_set_subconnector_property(dp->dp_display.connector,
dp->panel->dpcd, dp->panel->downstream_ports); connector_status_disconnected,
dp->panel->dpcd,
dp->panel->downstream_ports);
} }
dp->dp_display.is_connected = hpd; dp->dp_display.is_connected = hpd;
@ -396,8 +398,11 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp_link_process_request(dp->link); dp_link_process_request(dp->link);
drm_dp_set_subconnector_property(dp->dp_display.connector, connector_status_connected, if (!dp->dp_display.is_edp)
dp->panel->dpcd, dp->panel->downstream_ports); drm_dp_set_subconnector_property(dp->dp_display.connector,
connector_status_connected,
dp->panel->dpcd,
dp->panel->downstream_ports);
edid = dp->panel->edid; edid = dp->panel->edid;

View file

@ -345,6 +345,9 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct dr
if (IS_ERR(connector)) if (IS_ERR(connector))
return connector; return connector;
if (!dp_display->is_edp)
drm_connector_attach_dp_subconnector_property(connector);
drm_connector_attach_encoder(connector, encoder); drm_connector_attach_encoder(connector, encoder);
return connector; return connector;

View file

@ -918,7 +918,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) { if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) { if (phy->cphy_mode) {
vreg_ctrl_0 = 0x45; vreg_ctrl_0 = 0x45;
vreg_ctrl_1 = 0x45; vreg_ctrl_1 = 0x41;
glbl_rescode_top_ctrl = 0x00; glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x00; glbl_rescode_bot_ctrl = 0x00;
} else { } else {

View file

@ -288,8 +288,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret) if (ret)
goto err_msm_uninit; goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
if (priv->kms_init) { if (priv->kms_init) {
drm_kms_helper_poll_init(ddev); drm_kms_helper_poll_init(ddev);
msm_fbdev_setup(ddev); msm_fbdev_setup(ddev);

View file

@ -539,7 +539,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
struct nvkm_runl *runl; struct nvkm_runl *runl;
struct nvkm_engn *engn; struct nvkm_engn *engn;
u32 cgids = 2048; u32 cgids = 2048;
u32 chids = 2048 / CHID_PER_USERD; u32 chids = 2048;
int ret; int ret;
NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl; NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;

View file

@ -1709,6 +1709,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM, MIPI_DSI_MODE_LPM,
.init_cmds = auo_b101uan08_3_init_cmd, .init_cmds = auo_b101uan08_3_init_cmd,
.lp11_before_reset = true,
}; };
static const struct drm_display_mode boe_tv105wum_nw0_default_mode = { static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
@ -1766,11 +1767,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
}; };
static const struct drm_display_mode starry_himax83102_j02_default_mode = { static const struct drm_display_mode starry_himax83102_j02_default_mode = {
.clock = 161600, .clock = 162850,
.hdisplay = 1200, .hdisplay = 1200,
.hsync_start = 1200 + 40, .hsync_start = 1200 + 50,
.hsync_end = 1200 + 40 + 20, .hsync_end = 1200 + 50 + 20,
.htotal = 1200 + 40 + 20 + 40, .htotal = 1200 + 50 + 20 + 50,
.vdisplay = 1920, .vdisplay = 1920,
.vsync_start = 1920 + 116, .vsync_start = 1920 + 116,
.vsync_end = 1920 + 116 + 8, .vsync_end = 1920 + 116 + 8,

View file

@ -2379,13 +2379,13 @@ static const struct panel_desc innolux_g070y2_t02 = {
static const struct display_timing innolux_g101ice_l01_timing = { static const struct display_timing innolux_g101ice_l01_timing = {
.pixelclock = { 60400000, 71100000, 74700000 }, .pixelclock = { 60400000, 71100000, 74700000 },
.hactive = { 1280, 1280, 1280 }, .hactive = { 1280, 1280, 1280 },
.hfront_porch = { 41, 80, 100 }, .hfront_porch = { 30, 60, 70 },
.hback_porch = { 40, 79, 99 }, .hback_porch = { 30, 60, 70 },
.hsync_len = { 1, 1, 1 }, .hsync_len = { 22, 40, 60 },
.vactive = { 800, 800, 800 }, .vactive = { 800, 800, 800 },
.vfront_porch = { 5, 11, 14 }, .vfront_porch = { 3, 8, 14 },
.vback_porch = { 4, 11, 14 }, .vback_porch = { 3, 8, 14 },
.vsync_len = { 1, 1, 1 }, .vsync_len = { 4, 7, 12 },
.flags = DISPLAY_FLAGS_DE_HIGH, .flags = DISPLAY_FLAGS_DE_HIGH,
}; };
@ -2402,6 +2402,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200, .disable = 200,
}, },
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS, .connector_type = DRM_MODE_CONNECTOR_LVDS,
}; };

View file

@ -247,14 +247,22 @@ static inline void vop_cfg_done(struct vop *vop)
VOP_REG_SET(vop, common, cfg_done, 1); VOP_REG_SET(vop, common, cfg_done, 1);
} }
static bool has_rb_swapped(uint32_t format) static bool has_rb_swapped(uint32_t version, uint32_t format)
{ {
switch (format) { switch (format) {
case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888: case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_BGR565: case DRM_FORMAT_BGR565:
return true; return true;
/*
* full framework (IP version 3.x) only needs rb swapped for RGB888 and
* little framework (IP version 2.x) only needs rb swapped for BGR888;
* checking for 3.x means an unknown vop version also rb swaps only BGR888
*/
case DRM_FORMAT_RGB888:
return VOP_MAJOR(version) == 3;
case DRM_FORMAT_BGR888:
return VOP_MAJOR(version) != 3;
default: default:
return false; return false;
} }
@ -1030,7 +1038,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, dsp_info, dsp_info); VOP_WIN_SET(vop, win, dsp_info, dsp_info);
VOP_WIN_SET(vop, win, dsp_st, dsp_st); VOP_WIN_SET(vop, win, dsp_st, dsp_st);
rb_swap = has_rb_swapped(fb->format->format); rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
VOP_WIN_SET(vop, win, rb_swap, rb_swap); VOP_WIN_SET(vop, win, rb_swap, rb_swap);
/* /*
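The version-keyed has_rb_swapped() above encodes the table from the comment: IP 3.x swaps red/blue for RGB888, everything else (2.x and unknown) for BGR888. A standalone sketch of the decision, assuming VOP_MAJOR() takes the high byte of the version word:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed packing: VOP major version in the high byte of the version word */
#define VOP_MAJOR(version) ((version) >> 8)

enum fmt { RGB888, BGR888, XBGR8888 };

static bool has_rb_swapped(uint32_t version, enum fmt f)
{
	switch (f) {
	case XBGR8888:
		return true;                     /* swapped on every IP */
	case RGB888:
		return VOP_MAJOR(version) == 3;  /* full framework only */
	case BGR888:
		return VOP_MAJOR(version) != 3;  /* little framework + unknown */
	default:
		return false;
	}
}

int main(void)
{
	printf("3.x RGB888: %d\n", has_rb_swapped(0x0300, RGB888));
	printf("2.x BGR888: %d\n", has_rb_swapped(0x0200, BGR888));
	return 0;
}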

View file

@ -345,6 +345,8 @@ static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
{ "AONE" }, { "AONE" },
{ "GANSS" }, { "GANSS" },
{ "Hailuck" }, { "Hailuck" },
{ "Jamesdonkey" },
{ "A3R" },
}; };
static bool apple_is_non_apple_keyboard(struct hid_device *hdev) static bool apple_is_non_apple_keyboard(struct hid_device *hdev)

View file

@ -381,7 +381,7 @@ static int asus_raw_event(struct hid_device *hdev,
return 0; return 0;
} }
static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size) static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size)
{ {
unsigned char *dmabuf; unsigned char *dmabuf;
int ret; int ret;
@ -404,7 +404,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size
static int asus_kbd_init(struct hid_device *hdev) static int asus_kbd_init(struct hid_device *hdev)
{ {
u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
int ret; int ret;
@ -418,7 +418,7 @@ static int asus_kbd_init(struct hid_device *hdev)
static int asus_kbd_get_functions(struct hid_device *hdev, static int asus_kbd_get_functions(struct hid_device *hdev,
unsigned char *kbd_func) unsigned char *kbd_func)
{ {
u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }; const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
u8 *readbuf; u8 *readbuf;
int ret; int ret;
@ -449,7 +449,7 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
static int rog_nkey_led_init(struct hid_device *hdev) static int rog_nkey_led_init(struct hid_device *hdev)
{ {
u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 }; const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20, u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20,
0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1, u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
@ -1000,6 +1000,24 @@ static int asus_start_multitouch(struct hid_device *hdev)
return 0; return 0;
} }
static int __maybe_unused asus_resume(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
int ret = 0;
if (drvdata->kbd_backlight) {
const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4,
drvdata->kbd_backlight->cdev.brightness };
ret = asus_kbd_set_report(hdev, buf, sizeof(buf));
if (ret < 0) {
hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret);
goto asus_resume_err;
}
}
asus_resume_err:
return ret;
}
static int __maybe_unused asus_reset_resume(struct hid_device *hdev) static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
{ {
struct asus_drvdata *drvdata = hid_get_drvdata(hdev); struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
@ -1294,6 +1312,7 @@ static struct hid_driver asus_driver = {
.input_configured = asus_input_configured, .input_configured = asus_input_configured,
#ifdef CONFIG_PM #ifdef CONFIG_PM
.reset_resume = asus_reset_resume, .reset_resume = asus_reset_resume,
.resume = asus_resume,
#endif #endif
.event = asus_event, .event = asus_event,
.raw_event = asus_raw_event .raw_event = asus_raw_event

View file

@ -702,15 +702,22 @@ static void hid_close_report(struct hid_device *device)
* Free a device structure, all reports, and all fields. * Free a device structure, all reports, and all fields.
*/ */
static void hid_device_release(struct device *dev) void hiddev_free(struct kref *ref)
{ {
struct hid_device *hid = to_hid_device(dev); struct hid_device *hid = container_of(ref, struct hid_device, ref);
hid_close_report(hid); hid_close_report(hid);
kfree(hid->dev_rdesc); kfree(hid->dev_rdesc);
kfree(hid); kfree(hid);
} }
static void hid_device_release(struct device *dev)
{
struct hid_device *hid = to_hid_device(dev);
kref_put(&hid->ref, hiddev_free);
}
/* /*
* Fetch a report description item from the data stream. We support long * Fetch a report description item from the data stream. We support long
* items, though they are not used yet. * items, though they are not used yet.
@ -2846,6 +2853,7 @@ struct hid_device *hid_allocate_device(void)
spin_lock_init(&hdev->debug_list_lock); spin_lock_init(&hdev->debug_list_lock);
sema_init(&hdev->driver_input_lock, 1); sema_init(&hdev->driver_input_lock, 1);
mutex_init(&hdev->ll_open_lock); mutex_init(&hdev->ll_open_lock);
kref_init(&hdev->ref);
hid_bpf_device_init(hdev); hid_bpf_device_init(hdev);

View file

@ -1135,6 +1135,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out; goto out;
} }
list->hdev = (struct hid_device *) inode->i_private; list->hdev = (struct hid_device *) inode->i_private;
kref_get(&list->hdev->ref);
file->private_data = list; file->private_data = list;
mutex_init(&list->read_mutex); mutex_init(&list->read_mutex);
@ -1227,6 +1228,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
list_del(&list->node); list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
kfifo_free(&list->hid_debug_fifo); kfifo_free(&list->hid_debug_fifo);
kref_put(&list->hdev->ref, hiddev_free);
kfree(list); kfree(list);
return 0; return 0;

View file

@ -21,6 +21,10 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
* Glorious Model O and O- specify the const flag in the consumer input * Glorious Model O and O- specify the const flag in the consumer input
* report descriptor, which leads to inputs being ignored. Fix this * report descriptor, which leads to inputs being ignored. Fix this
* by patching the descriptor. * by patching the descriptor.
*
* Glorious Model I incorrectly specifies the Usage Minimum for its
* keyboard HID report, causing keycodes to be misinterpreted.
* Fix this by setting Usage Minimum to 0 in that report.
*/ */
static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc, static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize) unsigned int *rsize)
@ -32,6 +36,10 @@ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[85] = rdesc[113] = rdesc[141] = \ rdesc[85] = rdesc[113] = rdesc[141] = \
HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE; HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
} }
if (*rsize == 156 && rdesc[41] == 1) {
hid_info(hdev, "patching Glorious Model I keyboard report descriptor\n");
rdesc[41] = 0;
}
return rdesc; return rdesc;
} }
@ -44,6 +52,8 @@ static void glorious_update_name(struct hid_device *hdev)
model = "Model O"; break; model = "Model O"; break;
case USB_DEVICE_ID_GLORIOUS_MODEL_D: case USB_DEVICE_ID_GLORIOUS_MODEL_D:
model = "Model D"; break; model = "Model D"; break;
case USB_DEVICE_ID_GLORIOUS_MODEL_I:
model = "Model I"; break;
} }
snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model); snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
@ -66,10 +76,12 @@ static int glorious_probe(struct hid_device *hdev,
} }
static const struct hid_device_id glorious_devices[] = { static const struct hid_device_id glorious_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS, { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
USB_DEVICE_ID_GLORIOUS_MODEL_O) }, USB_DEVICE_ID_GLORIOUS_MODEL_O) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS, { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
USB_DEVICE_ID_GLORIOUS_MODEL_D) }, USB_DEVICE_ID_GLORIOUS_MODEL_D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW,
USB_DEVICE_ID_GLORIOUS_MODEL_I) },
{ } { }
}; };
MODULE_DEVICE_TABLE(hid, glorious_devices); MODULE_DEVICE_TABLE(hid, glorious_devices);
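Both fixups in glorious_report_fixup() follow one recipe: recognize the descriptor by its exact length, then patch the offending byte in place before the descriptor is parsed. The sketch below reuses the Model I values from the hunk (size 156, byte 41) on a plain buffer:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* patch a known-bad byte, but only when the blob matches exactly */
static void report_fixup(uint8_t *rdesc, size_t size)
{
	if (size == 156 && rdesc[41] == 1) {
		printf("patching keyboard report descriptor\n");
		rdesc[41] = 0;          /* Usage Minimum -> 0 */
	}
}

int main(void)
{
	uint8_t rdesc[156] = { 0 };

	rdesc[41] = 1;              /* the bogus Usage Minimum */
	report_fixup(rdesc, sizeof(rdesc));
	return 0;
}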

View file

@ -511,10 +511,6 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
#define USB_VENDOR_ID_GLORIOUS 0x258a
#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
#define I2C_VENDOR_ID_GOODIX 0x27c6 #define I2C_VENDOR_ID_GOODIX 0x27c6
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@ -745,6 +741,9 @@
#define USB_VENDOR_ID_LABTEC 0x1020 #define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
#define USB_VENDOR_ID_LAVIEW 0x22D4
#define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503
#define USB_VENDOR_ID_LCPOWER 0x1241 #define USB_VENDOR_ID_LCPOWER 0x1241
#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767 #define USB_DEVICE_ID_LCPOWER_LC1000 0xf767
@ -869,7 +868,6 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623 #define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
@ -1160,6 +1158,10 @@
#define USB_VENDOR_ID_SIGMATEL 0x066F #define USB_VENDOR_ID_SIGMATEL 0x066F
#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780 #define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
#define USB_VENDOR_ID_SINOWEALTH 0x258a
#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
#define USB_VENDOR_ID_SIS_TOUCH 0x0457 #define USB_VENDOR_ID_SIS_TOUCH 0x0457
#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200 #define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817 #define USB_DEVICE_ID_SIS817_TOUCH 0x0817

View file

@ -1695,12 +1695,11 @@ static int logi_dj_raw_event(struct hid_device *hdev,
} }
/* /*
* Mouse-only receivers send unnumbered mouse data. The 27 MHz * Mouse-only receivers send unnumbered mouse data. The 27 MHz
* receiver uses 6 byte packets, the nano receiver 8 bytes, * receiver uses 6 byte packets, the nano receiver 8 bytes.
* the lightspeed receiver (Pro X Superlight) 13 bytes.
*/ */
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE && if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
size <= 13){ size <= 8) {
u8 mouse_report[14]; u8 mouse_report[9];
/* Prepend report id */ /* Prepend report id */
mouse_report[0] = REPORT_TYPE_MOUSE; mouse_report[0] = REPORT_TYPE_MOUSE;
@ -1984,10 +1983,6 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1), USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp}, .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech lightspeed receiver (0xc547) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
.driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */ { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER), HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),

View file

@ -1142,6 +1142,8 @@ static int mcp2221_probe(struct hid_device *hdev,
if (ret) if (ret)
return ret; return ret;
hid_device_io_start(hdev);
/* Set I2C bus clock divisor */ /* Set I2C bus clock divisor */
if (i2c_clk_freq > 400) if (i2c_clk_freq > 400)
i2c_clk_freq = 400; i2c_clk_freq = 400;
@ -1157,12 +1159,12 @@ static int mcp2221_probe(struct hid_device *hdev,
snprintf(mcp->adapter.name, sizeof(mcp->adapter.name), snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
"MCP2221 usb-i2c bridge"); "MCP2221 usb-i2c bridge");
i2c_set_adapdata(&mcp->adapter, mcp);
ret = devm_i2c_add_adapter(&hdev->dev, &mcp->adapter); ret = devm_i2c_add_adapter(&hdev->dev, &mcp->adapter);
if (ret) { if (ret) {
hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret); hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret);
return ret; return ret;
} }
i2c_set_adapdata(&mcp->adapter, mcp);
#if IS_REACHABLE(CONFIG_GPIOLIB) #if IS_REACHABLE(CONFIG_GPIOLIB)
/* Setup GPIO chip */ /* Setup GPIO chip */

View file

@ -2046,6 +2046,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) }, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
/* HONOR GLO-GXXX panel */
{ .driver_data = MT_CLS_VTL,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
0x347d, 0x7853) },
/* Ilitek dual touch panel */ /* Ilitek dual touch panel */
{ .driver_data = MT_CLS_NSMU, { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK, MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,

View file

@ -33,6 +33,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },

View file

@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
} }
static DEVICE_ATTR_RO(max_brightness); static DEVICE_ATTR_RO(max_brightness);
static ssize_t color_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *color_text = "invalid";
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->color < LED_COLOR_ID_MAX)
color_text = led_colors[led_cdev->color];
return sysfs_emit(buf, "%s\n", color_text);
}
static DEVICE_ATTR_RO(color);
#ifdef CONFIG_LEDS_TRIGGERS #ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0); static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = { static struct bin_attribute *led_trigger_bin_attrs[] = {
@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
static struct attribute *led_class_attrs[] = { static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr, &dev_attr_brightness.attr,
&dev_attr_max_brightness.attr, &dev_attr_max_brightness.attr,
&dev_attr_color.attr,
NULL, NULL,
}; };

View file

@ -265,6 +265,7 @@ struct bcache_device {
#define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4 #define BCACHE_DEV_RATE_DW_RUNNING 4
int nr_stripes; int nr_stripes;
#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)
unsigned int stripe_size; unsigned int stripe_size;
atomic_t *stripe_sectors_dirty; atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes; unsigned long *full_dirty_stripes;

View file

@ -1000,6 +1000,9 @@ static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
* *
* The btree node will have either a read or a write lock held, depending on * The btree node will have either a read or a write lock held, depending on
* level and op->lock. * level and op->lock.
*
* Note: Only an error code or btree pointer will be returned, so it is
* unnecessary for callers to check for a NULL pointer.
*/ */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
struct bkey *k, int level, bool write, struct bkey *k, int level, bool write,
@ -1111,6 +1114,10 @@ static void btree_node_free(struct btree *b)
mutex_unlock(&b->c->bucket_lock); mutex_unlock(&b->c->bucket_lock);
} }
/*
* Only an error code or btree pointer will be returned, so it is unnecessary
* for callers to check for a NULL pointer.
*/
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
int level, bool wait, int level, bool wait,
struct btree *parent) struct btree *parent)
@ -1368,7 +1375,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
memset(new_nodes, 0, sizeof(new_nodes)); memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl); closure_init_stack(&cl);
while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b)) while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys; keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3; blocks = btree_default_blocks(b->c) * 2 / 3;
@ -1532,6 +1539,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return 0; return 0;
n = btree_node_alloc_replacement(replace, NULL); n = btree_node_alloc_replacement(replace, NULL);
if (IS_ERR(n))
return 0;
/* recheck reserve after allocating replacement node */ /* recheck reserve after allocating replacement node */
if (btree_check_reserve(b, NULL)) { if (btree_check_reserve(b, NULL)) {

View file

@ -905,6 +905,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
if (!d->stripe_size) if (!d->stripe_size)
d->stripe_size = 1 << 31; d->stripe_size = 1 << 31;
else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!n || n > max_stripes) { if (!n || n > max_stripes) {
@ -2016,7 +2018,7 @@ static int run_cache_set(struct cache_set *c)
c->root = bch_btree_node_get(c, NULL, k, c->root = bch_btree_node_get(c, NULL, k,
j->btree_level, j->btree_level,
true, NULL); true, NULL);
if (IS_ERR_OR_NULL(c->root)) if (IS_ERR(c->root))
goto err; goto err;
list_del_init(&c->root->list); list_del_init(&c->root->list);

View file

@ -1104,7 +1104,7 @@ SHOW(__bch_cache)
sum += INITIAL_PRIO - cached[i]; sum += INITIAL_PRIO - cached[i];
if (n) if (n)
do_div(sum, n); sum = div64_u64(sum, n);
for (i = 0; i < ARRAY_SIZE(q); i++) for (i = 0; i < ARRAY_SIZE(q); i++)
q[i] = INITIAL_PRIO - cached[n * (i + 1) / q[i] = INITIAL_PRIO - cached[n * (i + 1) /

View file

@ -913,7 +913,7 @@ static int bch_dirty_init_thread(void *arg)
int cur_idx, prev_idx, skip_nr; int cur_idx, prev_idx, skip_nr;
k = p = NULL; k = p = NULL;
cur_idx = prev_idx = 0; prev_idx = 0;
bch_btree_iter_init(&c->root->keys, &iter, NULL); bch_btree_iter_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
void bch_sectors_dirty_init(struct bcache_device *d) void bch_sectors_dirty_init(struct bcache_device *d)
{ {
int i; int i;
struct btree *b = NULL;
struct bkey *k = NULL; struct bkey *k = NULL;
struct btree_iter iter; struct btree_iter iter;
struct sectors_dirty_init op; struct sectors_dirty_init op;
struct cache_set *c = d->c; struct cache_set *c = d->c;
struct bch_dirty_init_state state; struct bch_dirty_init_state state;
retry_lock:
b = c->root;
rw_lock(0, b, b->level);
if (b != c->root) {
rw_unlock(0, b);
goto retry_lock;
}
/* Just count root keys if no leaf node */ /* Just count root keys if no leaf node */
rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) { if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1); bch_btree_op_init(&op.op, -1);
op.inode = d->id; op.inode = d->id;
op.count = 0; op.count = 0;
for_each_key_filter(&c->root->keys, for_each_key_filter(&c->root->keys,
k, &iter, bch_ptr_invalid) k, &iter, bch_ptr_invalid) {
if (KEY_INODE(k) != op.inode)
continue;
sectors_dirty_init_fn(&op.op, c->root, k); sectors_dirty_init_fn(&op.op, c->root, k);
}
rw_unlock(0, c->root); rw_unlock(0, b);
return; return;
} }
@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
if (atomic_read(&state.enough)) if (atomic_read(&state.enough))
break; break;
atomic_inc(&state.started);
state.infos[i].state = &state; state.infos[i].state = &state;
state.infos[i].thread = state.infos[i].thread =
kthread_run(bch_dirty_init_thread, &state.infos[i], kthread_run(bch_dirty_init_thread, &state.infos[i],
"bch_dirtcnt[%d]", i); "bch_dirtcnt[%d]", i);
if (IS_ERR(state.infos[i].thread)) { if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i); pr_err("fails to run thread bch_dirty_init[%d]\n", i);
atomic_dec(&state.started);
for (--i; i >= 0; i--) for (--i; i >= 0; i--)
kthread_stop(state.infos[i].thread); kthread_stop(state.infos[i].thread);
goto out; goto out;
} }
atomic_inc(&state.started);
} }
out: out:
/* Must wait for all threads to stop. */ /* Must wait for all threads to stop. */
wait_event(state.wait, atomic_read(&state.started) == 0); wait_event(state.wait, atomic_read(&state.started) == 0);
rw_unlock(0, c->root); rw_unlock(0, b);
} }
void bch_cached_dev_writeback_init(struct cached_dev *dc) void bch_cached_dev_writeback_init(struct cached_dev *dc)

View file

@ -8666,7 +8666,8 @@ static void md_end_clone_io(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio; struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev; struct mddev *mddev = md_io_clone->mddev;
orig_bio->bi_status = bio->bi_status; if (bio->bi_status && !orig_bio->bi_status)
orig_bio->bi_status = bio->bi_status;
if (md_io_clone->start_time) if (md_io_clone->start_time)
bio_end_io_acct(orig_bio, md_io_clone->start_time); bio_end_io_acct(orig_bio, md_io_clone->start_time);
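The md_end_clone_io() change makes the first failure sticky: a clone that completes fine (status 0) must not overwrite an error already recorded on the original bio, and later errors must not replace the first. The whole policy fits in one helper, assuming status 0 means success as with blk_status_t:

#include <stdio.h>

/* apply a clone's completion status to the original: first error wins */
static void merge_status(int *orig_status, int clone_status)
{
	if (clone_status && !*orig_status)
		*orig_status = clone_status;
}

int main(void)
{
	int orig = 0;

	merge_status(&orig, 10);   /* first failure is recorded */
	merge_status(&orig, 0);    /* a later success cannot clear it */
	merge_status(&orig, 5);    /* a later failure cannot replace it */
	printf("final status: %d\n", orig);   /* 10 */
	return 0;
}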

View file

@ -2,6 +2,7 @@
config VIDEO_MGB4 config VIDEO_MGB4
tristate "Digiteq Automotive MGB4 support" tristate "Digiteq Automotive MGB4 support"
depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO
depends on COMMON_CLK
select VIDEOBUF2_DMA_SG select VIDEOBUF2_DMA_SG
select IIO_BUFFER select IIO_BUFFER
select IIO_TRIGGERED_BUFFER select IIO_TRIGGERED_BUFFER

View file

@ -42,6 +42,10 @@
#define MGB4_USER_IRQS 16 #define MGB4_USER_IRQS 16
#define DIGITEQ_VID 0x1ed8
#define T100_DID 0x0101
#define T200_DID 0x0201
ATTRIBUTE_GROUPS(mgb4_pci); ATTRIBUTE_GROUPS(mgb4_pci);
static int flashid; static int flashid;
@ -151,7 +155,7 @@ static struct spi_master *get_spi_adap(struct platform_device *pdev)
return dev ? container_of(dev, struct spi_master, dev) : NULL; return dev ? container_of(dev, struct spi_master, dev) : NULL;
} }
static int init_spi(struct mgb4_dev *mgbdev) static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
{ {
struct resource spi_resources[] = { struct resource spi_resources[] = {
{ {
@ -213,8 +217,13 @@ static int init_spi(struct mgb4_dev *mgbdev)
snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name), snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name),
"mgb4-fw.%d", flashid); "mgb4-fw.%d", flashid);
mgbdev->partitions[0].name = mgbdev->fw_part_name; mgbdev->partitions[0].name = mgbdev->fw_part_name;
mgbdev->partitions[0].size = 0x400000; if (devid == T200_DID) {
mgbdev->partitions[0].offset = 0x400000; mgbdev->partitions[0].size = 0x950000;
mgbdev->partitions[0].offset = 0x1000000;
} else {
mgbdev->partitions[0].size = 0x400000;
mgbdev->partitions[0].offset = 0x400000;
}
mgbdev->partitions[0].mask_flags = 0; mgbdev->partitions[0].mask_flags = 0;
snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name), snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name),
@ -551,7 +560,7 @@ static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_video_regs; goto err_video_regs;
/* SPI FLASH */ /* SPI FLASH */
rv = init_spi(mgbdev); rv = init_spi(mgbdev, id->device);
if (rv < 0) if (rv < 0)
goto err_cmt_regs; goto err_cmt_regs;
@ -666,7 +675,8 @@ static void mgb4_remove(struct pci_dev *pdev)
} }
static const struct pci_device_id mgb4_pci_ids[] = { static const struct pci_device_id mgb4_pci_ids[] = {
{ PCI_DEVICE(0x1ed8, 0x0101), }, { PCI_DEVICE(DIGITEQ_VID, T100_DID), },
{ PCI_DEVICE(DIGITEQ_VID, T200_DID), },
{ 0, } { 0, }
}; };
MODULE_DEVICE_TABLE(pci, mgb4_pci_ids); MODULE_DEVICE_TABLE(pci, mgb4_pci_ids);

View file

@ -373,7 +373,7 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
(7 << VI6_DPR_SMPPT_TGW_SHIFT) | (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
(VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT)); (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0); vsp1_wpf_stop(pipe->output);
return ret; return ret;
} }

View file

@ -43,14 +43,6 @@ static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
data); data);
} }
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
static const struct v4l2_subdev_ops rpf_ops = {
.pad = &vsp1_rwpf_pad_ops,
};
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* VSP1 Entity Operations * VSP1 Entity Operations
*/ */
@ -411,7 +403,7 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
rpf->entity.index = index; rpf->entity.index = index;
sprintf(name, "rpf.%u", index); sprintf(name, "rpf.%u", index);
ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops, ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER); MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0) if (ret < 0)
return ERR_PTR(ret); return ERR_PTR(ret);

View file

@ -24,7 +24,7 @@ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
} }
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* V4L2 Subdevice Pad Operations * V4L2 Subdevice Operations
*/ */
static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev, static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
@ -243,7 +243,7 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
return ret; return ret;
} }
const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = { static const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.init_cfg = vsp1_entity_init_cfg, .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = vsp1_rwpf_enum_mbus_code, .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
.enum_frame_size = vsp1_rwpf_enum_frame_size, .enum_frame_size = vsp1_rwpf_enum_frame_size,
@ -253,6 +253,10 @@ const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.set_selection = vsp1_rwpf_set_selection, .set_selection = vsp1_rwpf_set_selection,
}; };
const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops = {
.pad = &vsp1_rwpf_pad_ops,
};
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* Controls * Controls
*/ */

View file

@ -79,9 +79,11 @@ static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index); struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index); struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
void vsp1_wpf_stop(struct vsp1_rwpf *wpf);
int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols); int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops; extern const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops;
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
struct v4l2_subdev_state *sd_state); struct v4l2_subdev_state *sd_state);

View file

@ -186,17 +186,13 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
} }
/* ----------------------------------------------------------------------------- /* -----------------------------------------------------------------------------
* V4L2 Subdevice Core Operations * VSP1 Entity Operations
*/ */
static int wpf_s_stream(struct v4l2_subdev *subdev, int enable) void vsp1_wpf_stop(struct vsp1_rwpf *wpf)
{ {
struct vsp1_rwpf *wpf = to_rwpf(subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1; struct vsp1_device *vsp1 = wpf->entity.vsp1;
if (enable)
return 0;
/* /*
* Write to registers directly when stopping the stream as there will be * Write to registers directly when stopping the stream as there will be
* no pipeline run to apply the display list. * no pipeline run to apply the display list.
@ -204,27 +200,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0); vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET + vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
VI6_WPF_SRCRPF, 0); VI6_WPF_SRCRPF, 0);
return 0;
} }
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
static const struct v4l2_subdev_video_ops wpf_video_ops = {
.s_stream = wpf_s_stream,
};
static const struct v4l2_subdev_ops wpf_ops = {
.video = &wpf_video_ops,
.pad = &vsp1_rwpf_pad_ops,
};
/* -----------------------------------------------------------------------------
* VSP1 Entity Operations
*/
static void vsp1_wpf_destroy(struct vsp1_entity *entity) static void vsp1_wpf_destroy(struct vsp1_entity *entity)
{ {
struct vsp1_rwpf *wpf = entity_to_rwpf(entity); struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
@ -583,7 +560,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
wpf->entity.index = index; wpf->entity.index = index;
sprintf(name, "wpf.%u", index); sprintf(name, "wpf.%u", index);
ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops, ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER); MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0) if (ret < 0)
return ERR_PTR(ret); return ERR_PTR(ret);

View file

@ -1482,6 +1482,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true); blk_mq_requeue_request(req, true);
else else
__blk_mq_end_request(req, BLK_STS_OK); __blk_mq_end_request(req, BLK_STS_OK);
} else if (mq->in_recovery) {
blk_mq_requeue_request(req, true);
} else { } else {
blk_mq_end_request(req, BLK_STS_OK); blk_mq_end_request(req, BLK_STS_OK);
} }

View file

@ -551,7 +551,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT; cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
mmc_wait_for_cmd(host, &cmd, 0); mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT; cmd.opcode = MMC_CMDQ_TASK_MGMT;
@ -559,10 +561,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT; cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
err = mmc_wait_for_cmd(host, &cmd, 0); err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
host->cqe_ops->cqe_recovery_finish(host); host->cqe_ops->cqe_recovery_finish(host);
if (err)
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
mmc_retune_release(host); mmc_retune_release(host);
return err; return err;

View file

@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_tasks_cleared(cq_host); ret = cqhci_tasks_cleared(cq_host);
if (!ret) if (!ret)
pr_debug("%s: cqhci: Failed to clear tasks\n", pr_warn("%s: cqhci: Failed to clear tasks\n",
mmc_hostname(mmc)); mmc_hostname(mmc));
return ret; return ret;
} }
@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_halted(cq_host); ret = cqhci_halted(cq_host);
if (!ret) if (!ret)
pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc)); pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
return ret; return ret;
} }
@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
/* /*
* After halting we expect to be able to use the command line. We interpret the * After halting we expect to be able to use the command line. We interpret the
* failure to halt to mean the data lines might still be in use (and the upper * failure to halt to mean the data lines might still be in use (and the upper
* layers will need to send a STOP command), so we set the timeout based on a * layers will need to send a STOP command), however failing to halt complicates
* generous command timeout. * the recovery, so set a timeout that would reasonably allow I/O to complete.
*/ */
#define CQHCI_START_HALT_TIMEOUT 5 #define CQHCI_START_HALT_TIMEOUT 500
static void cqhci_recovery_start(struct mmc_host *mmc) static void cqhci_recovery_start(struct mmc_host *mmc)
{ {
@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
ok = false;
/* /*
* The specification contradicts itself, by saying that tasks cannot be * The specification contradicts itself, by saying that tasks cannot be
* cleared if CQHCI does not halt, but if CQHCI does not halt, it should * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
* be disabled/re-enabled, but not to disable before clearing tasks. * be disabled/re-enabled, but not to disable before clearing tasks.
* Have a go anyway. * Have a go anyway.
*/ */
if (!ok) { if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc)); ok = false;
cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg &= ~CQHCI_ENABLE; /* Disable to make sure tasks really are cleared */
cqhci_writel(cq_host, cqcfg, CQHCI_CFG); cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg |= CQHCI_ENABLE; cqcfg &= ~CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG); cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
/* Be sure that there are no tasks */
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT); cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT)) cqcfg |= CQHCI_ENABLE;
ok = false; cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
WARN_ON(!ok);
} cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!ok)
cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
cqhci_recover_mrqs(cq_host); cqhci_recover_mrqs(cq_host);

View file

@ -1189,6 +1189,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG); sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
} }
static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
bool enable)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
if (enable)
value &= ~GLI_9763E_CFG_LPSN_DIS;
else
value |= GLI_9763E_CFG_LPSN_DIS;
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
static void sdhci_set_gl9763e_signaling(struct sdhci_host *host, static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
unsigned int timing) unsigned int timing)
{ {
@ -1297,6 +1323,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
if (ret) if (ret)
goto cleanup; goto cleanup;
/* Disable LPM negotiation to avoid entering L1 state. */
gl9763e_set_low_power_negotiation(slot, false);
return 0; return 0;
cleanup: cleanup:
@ -1340,31 +1369,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
} }
#ifdef CONFIG_PM #ifdef CONFIG_PM
static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
if (enable)
value &= ~GLI_9763E_CFG_LPSN_DIS;
else
value |= GLI_9763E_CFG_LPSN_DIS;
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip) static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{ {
struct sdhci_pci_slot *slot = chip->slots[0]; struct sdhci_pci_slot *slot = chip->slots[0];

View file

@ -416,12 +416,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
mmc_request_done(host->mmc, mrq); mmc_request_done(host->mmc, mrq);
} }
static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
switch (mode) {
case MMC_POWER_OFF:
mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
mmc_regulator_disable_vqmmc(mmc);
break;
case MMC_POWER_ON:
mmc_regulator_enable_vqmmc(mmc);
break;
case MMC_POWER_UP:
mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
break;
}
}
static struct sdhci_ops sdhci_sprd_ops = { static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl, .read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel, .write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew, .write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb, .write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock, .set_clock = sdhci_sprd_set_clock,
.set_power = sdhci_sprd_set_power,
.get_max_clock = sdhci_sprd_get_max_clock, .get_max_clock = sdhci_sprd_get_max_clock,
.get_min_clock = sdhci_sprd_get_min_clock, .get_min_clock = sdhci_sprd_get_min_clock,
.set_bus_width = sdhci_set_bus_width, .set_bus_width = sdhci_set_bus_width,
@ -823,6 +844,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50); SDHCI_SUPPORT_DDR50);
ret = mmc_regulator_get_supply(host->mmc);
if (ret)
goto pm_runtime_disable;
ret = sdhci_setup_host(host); ret = sdhci_setup_host(host);
if (ret) if (ret)
goto pm_runtime_disable; goto pm_runtime_disable;

View file

@ -577,6 +577,18 @@ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100; config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
} }
static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
/* Translate the default cmode */
mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
MAC_1000FD;
}
static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip) static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{ {
u16 reg, val; u16 reg, val;
@ -3880,7 +3892,8 @@ static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_chip *chip = ds->priv;
int err; int err;
if (chip->info->ops->pcs_ops->pcs_init) { if (chip->info->ops->pcs_ops &&
chip->info->ops->pcs_ops->pcs_init) {
err = chip->info->ops->pcs_ops->pcs_init(chip, port); err = chip->info->ops->pcs_ops->pcs_init(chip, port);
if (err) if (err)
return err; return err;
@ -3895,7 +3908,8 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
mv88e6xxx_teardown_devlink_regions_port(ds, port); mv88e6xxx_teardown_devlink_regions_port(ds, port);
if (chip->info->ops->pcs_ops->pcs_teardown) if (chip->info->ops->pcs_ops &&
chip->info->ops->pcs_ops->pcs_teardown)
chip->info->ops->pcs_ops->pcs_teardown(chip, port); chip->info->ops->pcs_ops->pcs_teardown(chip, port);
} }
@ -4340,7 +4354,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext, .stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge, .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps, .phylink_get_caps = mv88e6351_phylink_get_caps,
}; };
static const struct mv88e6xxx_ops mv88e6172_ops = { static const struct mv88e6xxx_ops mv88e6172_ops = {
@ -4440,7 +4454,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext, .stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge, .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps, .phylink_get_caps = mv88e6351_phylink_get_caps,
}; };
static const struct mv88e6xxx_ops mv88e6176_ops = { static const struct mv88e6xxx_ops mv88e6176_ops = {
@ -5069,7 +5083,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext, .stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge, .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.phylink_get_caps = mv88e6185_phylink_get_caps, .phylink_get_caps = mv88e6351_phylink_get_caps,
}; };
static const struct mv88e6xxx_ops mv88e6351_ops = { static const struct mv88e6xxx_ops mv88e6351_ops = {
@ -5117,7 +5131,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.stu_loadpurge = mv88e6352_g1_stu_loadpurge, .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops, .avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops, .ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6185_phylink_get_caps, .phylink_get_caps = mv88e6351_phylink_get_caps,
}; };
static const struct mv88e6xxx_ops mv88e6352_ops = { static const struct mv88e6xxx_ops mv88e6352_ops = {

View file

@@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
 	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
 
-	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
-
 	return skb;
 }
@@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa2_eth_drv_stats *percpu_extras;
 	struct device *dev = priv->net_dev->dev.parent;
+	bool recycle_rx_buf = false;
 	void *buf_data;
 	u32 xdp_act;
@@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			dma_unmap_page(dev, addr, priv->rx_buf_size,
 				       DMA_BIDIRECTIONAL);
 			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+		} else {
+			recycle_rx_buf = true;
 		}
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
@@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 		goto err_build_skb;
 
 	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+
+	if (recycle_rx_buf)
+		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
 	return;
 
 err_build_skb:
@@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
 	dma_addr_t addr;
 
 	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
-
-	/* If there's enough room to align the FD address, do it.
-	 * It will help hardware optimize accesses.
-	 */
 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
 				  DPAA2_ETH_TX_BUF_ALIGN);
 	if (aligned_start >= skb->head)
 		buffer_start = aligned_start;
+	else
+		return -ENOMEM;
 
 	/* Store a backpointer to the skb at the beginning of the buffer
 	 * (in the private data area) such that we can release it
@@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 	if (err)
 		goto err_dl_port_add;
 
+	net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
 	err = register_netdev(net_dev);
 	if (err < 0) {
 		dev_err(dev, "register_netdev() failed\n");
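The Tx path above relies on a round-down-to-alignment idiom: PTR_ALIGN() rounds up, so aligning (buffer_start - DPAA2_ETH_TX_BUF_ALIGN) upward yields an aligned address at or below buffer_start. With the probe hunk reserving DPAA2_ETH_TX_BUF_ALIGN extra bytes of needed_headroom, that address normally stays inside the skb head, and the new -ENOMEM branch only fires for buffers that did not honor needed_headroom. Below is a minimal userspace sketch of the idiom; the macros are stand-ins for the kernel helpers, not the kernel's own definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's ALIGN()/PTR_ALIGN() helpers. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a) ((char *)ALIGN_UP((uintptr_t)(p), (a)))
#define TX_BUF_ALIGN    64 /* stand-in for DPAA2_ETH_TX_BUF_ALIGN */

int main(void)
{
    char head[256];
    char *buffer_start = head + 100;
    /* Align (p - 64) upward: lands on a 64-byte boundary at or below p. */
    char *aligned_start = PTR_ALIGN(buffer_start - TX_BUF_ALIGN, TX_BUF_ALIGN);

    if (aligned_start >= head)
        buffer_start = aligned_start;
    else
        printf("not enough headroom\n"); /* the driver returns -ENOMEM here */

    printf("aligned offset into buffer: %td\n", buffer_start - head);
    return 0;
}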

View file

@@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
 
 static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
 {
-	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+	unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
 
 	/* If we don't have an skb (e.g. XDP buffer), we only need space for
 	 * the software annotation area

View file

@@ -569,6 +569,50 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
 		dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
 }
 
+/**
+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
+ * @lag: local lag struct
+ * @ndlist: pointer to netdev list to populate
+ */
+static void ice_lag_build_netdev_list(struct ice_lag *lag,
+				      struct ice_lag_netdev_list *ndlist)
+{
+	struct ice_lag_netdev_list *nl;
+	struct net_device *tmp_nd;
+
+	INIT_LIST_HEAD(&ndlist->node);
+	rcu_read_lock();
+	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
+		if (!nl)
+			break;
+
+		nl->netdev = tmp_nd;
+		list_add(&nl->node, &ndlist->node);
+	}
+	rcu_read_unlock();
+	lag->netdev_head = &ndlist->node;
+}
+
+/**
+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
+ * @lag: pointer to local lag struct
+ * @ndlist: pointer to lag struct netdev list
+ */
+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
+					struct ice_lag_netdev_list *ndlist)
+{
+	struct ice_lag_netdev_list *entry, *n;
+
+	rcu_read_lock();
+	list_for_each_entry_safe(entry, n, &ndlist->node, node) {
+		list_del(&entry->node);
+		kfree(entry);
+	}
+	rcu_read_unlock();
+	lag->netdev_head = NULL;
+}
+
 /**
  * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
  * @lag: primary interface LAG struct
@@ -597,7 +641,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
 void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
 {
 	struct ice_lag_netdev_list ndlist;
-	struct list_head *tmp, *n;
 	u8 pri_port, act_port;
 	struct ice_lag *lag;
 	struct ice_vsi *vsi;
@@ -621,38 +664,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
 	pri_port = pf->hw.port_info->lport;
 	act_port = lag->active_port;
 
-	if (lag->upper_netdev) {
-		struct ice_lag_netdev_list *nl;
-		struct net_device *tmp_nd;
-
-		INIT_LIST_HEAD(&ndlist.node);
-		rcu_read_lock();
-		for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-			nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
-			if (!nl)
-				break;
-
-			nl->netdev = tmp_nd;
-			list_add(&nl->node, &ndlist.node);
-		}
-		rcu_read_unlock();
-	}
-
-	lag->netdev_head = &ndlist.node;
+	if (lag->upper_netdev)
+		ice_lag_build_netdev_list(lag, &ndlist);
 
 	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
 	    lag->bonded && lag->primary && pri_port != act_port &&
 	    !list_empty(lag->netdev_head))
 		ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
 
-	list_for_each_safe(tmp, n, &ndlist.node) {
-		struct ice_lag_netdev_list *entry;
-
-		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
-		list_del(&entry->node);
-		kfree(entry);
-	}
-
-	lag->netdev_head = NULL;
+	ice_lag_destroy_netdev_list(lag, &ndlist);
 
 new_vf_unlock:
 	mutex_unlock(&pf->lag_mutex);
@@ -679,6 +699,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
 			ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
 }
 
+/**
+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
+ * @lag: local lag struct
+ * @src_prt: lport value for source port
+ * @dst_prt: lport value for destination port
+ *
+ * This function is used to move nodes during an out-of-netdev-event situation,
+ * primarily when the driver needs to reconfigure or recreate resources.
+ *
+ * Must be called while holding the lag_mutex to avoid lag events from
+ * processing while out-of-sync moves are happening. Also, paired moves,
+ * such as used in a reset flow, should both be called under the same mutex
+ * lock to avoid changes between start of reset and end of reset.
+ */
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
+{
+	struct ice_lag_netdev_list ndlist;
+
+	ice_lag_build_netdev_list(lag, &ndlist);
+	ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
+	ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
 #define ICE_LAG_SRIOV_CP_RECIPE	10
 #define ICE_LAG_SRIOV_TRAIN_PKT_LEN	16
@@ -2051,7 +2094,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
 {
 	struct ice_lag_netdev_list ndlist;
 	struct ice_lag *lag, *prim_lag;
-	struct list_head *tmp, *n;
 	u8 act_port, loc_port;
 
 	if (!pf->lag || !pf->lag->bonded)
@@ -2063,21 +2105,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
 	if (lag->primary) {
 		prim_lag = lag;
 	} else {
-		struct ice_lag_netdev_list *nl;
-		struct net_device *tmp_nd;
-
-		INIT_LIST_HEAD(&ndlist.node);
-		rcu_read_lock();
-		for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-			nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
-			if (!nl)
-				break;
-
-			nl->netdev = tmp_nd;
-			list_add(&nl->node, &ndlist.node);
-		}
-		rcu_read_unlock();
-		lag->netdev_head = &ndlist.node;
-
+		ice_lag_build_netdev_list(lag, &ndlist);
 		prim_lag = ice_lag_find_primary(lag);
 	}
 
@@ -2107,13 +2135,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
 		ice_clear_rdma_cap(pf);
 lag_rebuild_out:
-	list_for_each_safe(tmp, n, &ndlist.node) {
-		struct ice_lag_netdev_list *entry;
-
-		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
-		list_del(&entry->node);
-		kfree(entry);
-	}
+	ice_lag_destroy_netdev_list(lag, &ndlist);
 	mutex_unlock(&pf->lag_mutex);
 }
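The kernel-doc for ice_lag_move_vf_nodes_cfg() above pins down a locking contract: callers hold pf->lag_mutex across both halves of a paired move, so a bonding failover cannot slip in between parking the nodes on the primary port and restoring them to the active port. The ice_vf_lib.c and ice_virtchnl.c hunks that follow apply exactly this pattern. As a self-contained toy model of the contract (illustrative names only, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lag_mutex = PTHREAD_MUTEX_INITIALIZER;

enum { INVALID_PORT = 0xff };

static void move_nodes(int src, int dst)
{
    printf("move Tx scheduling nodes: port %d -> port %d\n", src, dst);
}

int main(void)
{
    int pri_prt = 0, act_prt = 1;

    pthread_mutex_lock(&lag_mutex);
    if (act_prt != pri_prt && act_prt != INVALID_PORT)
        move_nodes(act_prt, pri_prt);   /* park on the primary port */
    else
        act_prt = INVALID_PORT;         /* nothing to restore later */

    /* ... reset or reconfigure VF resources here ... */

    if (act_prt != INVALID_PORT)
        move_nodes(pri_prt, act_prt);   /* restore the active port */
    pthread_mutex_unlock(&lag_mutex);   /* both halves under one lock */
    return 0;
}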

View file

@@ -65,4 +65,5 @@ int ice_init_lag(struct ice_pf *pf);
 void ice_deinit_lag(struct ice_pf *pf);
 void ice_lag_rebuild(struct ice_pf *pf);
 bool ice_lag_is_switchdev_running(struct ice_pf *pf);
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
 #endif /* _ICE_LAG_H_ */

View file

@@ -828,12 +828,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
 int ice_reset_vf(struct ice_vf *vf, u32 flags)
 {
 	struct ice_pf *pf = vf->pf;
+	struct ice_lag *lag;
 	struct ice_vsi *vsi;
+	u8 act_prt, pri_prt;
 	struct device *dev;
 	int err = 0;
 	bool rsd;
 
 	dev = ice_pf_to_dev(pf);
+	act_prt = ICE_LAG_INVALID_PORT;
+	pri_prt = pf->hw.port_info->lport;
 
 	if (flags & ICE_VF_RESET_NOTIFY)
 		ice_notify_vf_reset(vf);
@@ -844,6 +848,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 		return 0;
 	}
 
+	lag = pf->lag;
+	mutex_lock(&pf->lag_mutex);
+	if (lag && lag->bonded && lag->primary) {
+		act_prt = lag->active_port;
+		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+		    lag->upper_netdev)
+			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+		else
+			act_prt = ICE_LAG_INVALID_PORT;
+	}
+
 	if (flags & ICE_VF_RESET_LOCK)
 		mutex_lock(&vf->cfg_lock);
 	else
@@ -936,6 +951,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 	if (flags & ICE_VF_RESET_LOCK)
 		mutex_unlock(&vf->cfg_lock);
 
+	if (lag && lag->bonded && lag->primary &&
+	    act_prt != ICE_LAG_INVALID_PORT)
+		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+	mutex_unlock(&pf->lag_mutex);
+
 	return err;
 }

View file

@@ -1603,9 +1603,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 	    (struct virtchnl_vsi_queue_config_info *)msg;
 	struct virtchnl_queue_pair_info *qpi;
 	struct ice_pf *pf = vf->pf;
+	struct ice_lag *lag;
 	struct ice_vsi *vsi;
+	u8 act_prt, pri_prt;
 	int i = -1, q_idx;
 
+	lag = pf->lag;
+	mutex_lock(&pf->lag_mutex);
+	act_prt = ICE_LAG_INVALID_PORT;
+	pri_prt = pf->hw.port_info->lport;
+	if (lag && lag->bonded && lag->primary) {
+		act_prt = lag->active_port;
+		if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+		    lag->upper_netdev)
+			ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+		else
+			act_prt = ICE_LAG_INVALID_PORT;
+	}
+
 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
 		goto error_param;
 
@@ -1729,6 +1744,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		}
 	}
 
+	if (lag && lag->bonded && lag->primary &&
+	    act_prt != ICE_LAG_INVALID_PORT)
+		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+	mutex_unlock(&pf->lag_mutex);
+
 	/* send the response to the VF */
 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
 				     VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@@ -1743,6 +1763,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 			vf->vf_id, i);
 	}
 
+	if (lag && lag->bonded && lag->primary &&
+	    act_prt != ICE_LAG_INVALID_PORT)
+		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+	mutex_unlock(&pf->lag_mutex);
+
 	ice_lag_move_new_vf_nodes(vf);
 
 	/* send the response to the VF */

View file

@@ -5505,6 +5505,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
 
 		ipolicer = &nix_hw->ipolicer[layer];
 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
+			if (idx == MAX_BANDPROF_PER_PFFUNC)
+				break;
 			prof_idx = req->prof_idx[layer][idx];
 			if (prof_idx >= ipolicer->band_prof.max ||
 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
@@ -5518,8 +5520,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
 			ipolicer->pfvf_map[prof_idx] = 0x00;
 			ipolicer->match_id[prof_idx] = 0;
 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
-			if (idx == MAX_BANDPROF_PER_PFFUNC)
-				break;
 		}
 	}
 	mutex_unlock(&rvu->rsrc_lock);
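The fix above moves the per-function cap check ahead of the array access: the old code read req->prof_idx[layer][idx] first and only broke out of the loop afterwards, so a mailbox request with prof_count larger than the cap could index one element past it. A self-contained model of the corrected loop shape, with a stand-in cap value:

#include <stdio.h>

#define MAX_BANDPROF_PER_PFFUNC 8 /* stand-in for the mailbox handler's cap */

int main(void)
{
    unsigned short prof_idx[MAX_BANDPROF_PER_PFFUNC] = { 0 };
    int prof_count = 12; /* caller-supplied via mbox, may exceed the cap */

    for (int idx = 0; idx < prof_count; idx++) {
        /* Enforce the cap before touching prof_idx[idx]; checking after
         * the access (as the old code did) reads out of bounds. */
        if (idx == MAX_BANDPROF_PER_PFFUNC)
            break;
        printf("free profile %u\n", prof_idx[idx]);
    }
    return 0;
}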

View file

@@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
 	aq->prof.pebs_mantissa = 0;
 	aq->prof_mask.pebs_mantissa = 0xFF;
 
+	aq->prof.hl_en = 0;
+	aq->prof_mask.hl_en = 1;
+
 	/* Fill AQ info */
 	aq->qidx = profile;
 	aq->ctype = NIX_AQ_CTYPE_BANDPROF;

View file

@@ -1070,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic);
 void otx2_shutdown_tc(struct otx2_nic *nic);
 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 		  void *type_data);
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
+
 /* CGX/RPM DMAC filters support */
 int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
 int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);

View file

@@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
 		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
 				TYPE_PFVF);
-		vfs -= 64;
+		if (intr)
+			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+		vfs = 64;
 	}
 
 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
@@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
 	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
 
-	trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+	if (intr)
+		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
 
 	return IRQ_HANDLED;
 }
@@ -1870,6 +1873,8 @@ int otx2_open(struct net_device *netdev)
 	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
 		otx2_dmacflt_reinstall_flows(pf);
 
+	otx2_tc_apply_ingress_police_rules(pf);
+
 	err = otx2_rxtx_enable(pf, true);
 	/* If a mbox communication error happens at this point then interface
 	 * will end up in a state such that it is in down state but hardware
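The interrupt-handler hunk fixes two things: the trace event now fires only when pending bits are actually set, and with more than 64 VFs the count handed to the lower interrupt word is pinned to 64 (VFs 0..63) instead of being decremented. A userspace model of the two-word scan follows; the register values and helper are illustrative stand-ins, not the driver's mailbox machinery:

#include <stdio.h>

static void queue_work(int first, int count, unsigned long bits)
{
    if (bits) /* only trace/queue when something is pending */
        printf("queue VFs %d..%d, pending bits %#lx\n",
               first, first + count - 1, bits);
}

int main(void)
{
    int vfs = 96;                 /* total VFs on this PF */
    unsigned long intr_hi = 0x5;  /* stand-in for RVU_PF_VFPF_MBOX_INTX(1) */
    unsigned long intr_lo = 0x3;  /* stand-in for RVU_PF_VFPF_MBOX_INTX(0) */

    if (vfs > 64) {
        queue_work(64, vfs - 64, intr_hi); /* upper word: VFs 64.. */
        vfs = 64; /* the fix: lower word always covers exactly 64 VFs */
    }
    queue_work(0, vfs, intr_lo);
    return 0;
}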

View file

@@ -47,6 +47,9 @@ struct otx2_tc_flow {
 	bool				is_act_police;
 	u32				prio;
 	struct npc_install_flow_req	req;
+	u64				rate;
+	u32				burst;
+	bool				is_pps;
 };
 
 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
@@ -284,6 +287,41 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
 	return err;
 }
 
+static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
+				     struct otx2_tc_flow *node)
+{
+	int rc;
+
+	mutex_lock(&nic->mbox.lock);
+
+	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+	if (rc) {
+		mutex_unlock(&nic->mbox.lock);
+		return rc;
+	}
+
+	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
+				     node->burst, node->rate, node->is_pps);
+	if (rc)
+		goto free_leaf;
+
+	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
+	if (rc)
+		goto free_leaf;
+
+	mutex_unlock(&nic->mbox.lock);
+
+	return 0;
+
+free_leaf:
+	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+		netdev_err(nic->netdev,
+			   "Unable to free leaf bandwidth profile(%d)\n",
+			   node->leaf_profile);
+	mutex_unlock(&nic->mbox.lock);
+	return rc;
+}
+
 static int otx2_tc_act_set_police(struct otx2_nic *nic,
 				  struct otx2_tc_flow *node,
 				  struct flow_cls_offload *f,
@@ -300,39 +338,20 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
 		return -EINVAL;
 	}
 
-	mutex_lock(&nic->mbox.lock);
-
-	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
-	if (rc) {
-		mutex_unlock(&nic->mbox.lock);
-		return rc;
-	}
-
-	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
-	if (rc)
-		goto free_leaf;
-
-	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
-	if (rc)
-		goto free_leaf;
-
-	mutex_unlock(&nic->mbox.lock);
-
 	req->match_id = mark & 0xFFFFULL;
 	req->index = rq_idx;
 	req->op = NIX_RX_ACTIONOP_UCAST;
-	set_bit(rq_idx, &nic->rq_bmap);
 	node->is_act_police = true;
 	node->rq = rq_idx;
+	node->burst = burst;
+	node->rate = rate;
+	node->is_pps = pps;
 
-	return 0;
+	rc = otx2_tc_act_set_hw_police(nic, node);
+	if (!rc)
+		set_bit(rq_idx, &nic->rq_bmap);
 
-free_leaf:
-	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
-		netdev_err(nic->netdev,
-			   "Unable to free leaf bandwidth profile(%d)\n",
-			   node->leaf_profile);
-	mutex_unlock(&nic->mbox.lock);
 	return rc;
 }
 
@@ -1058,6 +1077,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
 	}
 
 	if (flow_node->is_act_police) {
+		__clear_bit(flow_node->rq, &nic->rq_bmap);
+
+		if (nic->flags & OTX2_FLAG_INTF_DOWN)
+			goto free_mcam_flow;
+
 		mutex_lock(&nic->mbox.lock);
 
 		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
@@ -1073,11 +1097,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
 				   "Unable to free leaf bandwidth profile(%d)\n",
 				   flow_node->leaf_profile);
 
-		__clear_bit(flow_node->rq, &nic->rq_bmap);
-
 		mutex_unlock(&nic->mbox.lock);
 	}
 
+free_mcam_flow:
 	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
 	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
 	kfree_rcu(flow_node, rcu);
@@ -1097,6 +1120,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
 	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
 		return -ENOMEM;
 
+	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+		return -EINVAL;
+	}
+
 	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Free MCAM entry not available to add the flow");
@@ -1456,3 +1484,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
 	otx2_destroy_tc_flow_list(nic);
 }
 EXPORT_SYMBOL(otx2_shutdown_tc);
+
+static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
+					struct otx2_tc_flow *node)
+{
+	struct npc_install_flow_req *req;
+
+	if (otx2_tc_act_set_hw_police(nic, node))
+		return;
+
+	mutex_lock(&nic->mbox.lock);
+
+	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+	if (!req)
+		goto err;
+
+	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
+
+	if (otx2_sync_mbox_msg(&nic->mbox))
+		netdev_err(nic->netdev,
+			   "Failed to install MCAM flow entry for ingress rule");
+err:
+	mutex_unlock(&nic->mbox.lock);
+}
+
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
+{
+	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+	struct otx2_tc_flow *node;
+
+	/* If any ingress policer rules exist for the interface then
+	 * apply those rules. Ingress policer rules depend on bandwidth
+	 * profiles linked to the receive queues. Since no receive queues
+	 * exist when interface is down, ingress policer rules are stored
+	 * and configured in hardware after all receive queues are allocated
+	 * in otx2_open.
+	 */
+	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
+		if (node->is_act_police)
+			otx2_tc_config_ingress_rule(nic, node);
+	}
+}
+EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
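The comment in otx2_tc_apply_ingress_police_rules() explains the scheme: ingress policer rules need bandwidth profiles mapped to receive queues, which only exist once otx2_open() has run, so the patch caches burst/rate/is_pps on each flow node and replays the hardware state later. A toy model of this cache-and-replay pattern, with illustrative names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

struct tc_flow {
    bool is_act_police;
    unsigned long long rate;
    unsigned int burst;
    bool is_pps;
};

static void set_hw_police(const struct tc_flow *node)
{
    printf("program policer: burst=%u rate=%llu pps=%d\n",
           node->burst, node->rate, node->is_pps);
}

int main(void)
{
    /* Rules added while the interface was down: parameters only cached. */
    struct tc_flow rules[2] = {
        { .is_act_police = true, .rate = 1000000, .burst = 4096 },
        { .is_act_police = false },
    };

    /* Open-time replay: push every cached policer rule to "hardware". */
    for (int i = 0; i < 2; i++)
        if (rules[i].is_act_police)
            set_hw_police(&rules[i]);
    return 0;
}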

View file

@@ -575,6 +575,7 @@ struct rtl8169_tc_offsets {
 enum rtl_flag {
 	RTL_FLAG_TASK_ENABLED = 0,
 	RTL_FLAG_TASK_RESET_PENDING,
+	RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
 	RTL_FLAG_TASK_TX_TIMEOUT,
 	RTL_FLAG_MAX
 };
@@ -4496,6 +4497,8 @@ static void rtl_task(struct work_struct *work)
 reset:
 		rtl_reset_work(tp);
 		netif_wake_queue(tp->dev);
+	} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
+		rtl_reset_work(tp);
 	}
 out_unlock:
 	rtnl_unlock();
@@ -4529,7 +4532,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
 	} else {
 		/* In few cases rx is broken after link-down otherwise */
 		if (rtl_is_8125(tp))
-			rtl_reset_work(tp);
+			rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
 		pm_runtime_idle(d);
 	}
 
@@ -4605,7 +4608,7 @@ static int rtl8169_close(struct net_device *dev)
 	rtl8169_down(tp);
 	rtl8169_rx_clear(tp);
 
-	cancel_work_sync(&tp->wk.work);
+	cancel_work(&tp->wk.work);
 
 	free_irq(tp->irq, tp);
 
@@ -4839,6 +4842,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_get_noresume(&pdev->dev);
 
+	cancel_work_sync(&tp->wk.work);
+
 	unregister_netdev(tp->dev);
 
 	if (tp->dash_type != RTL_DASH_NONE)
Some files were not shown because too many files have changed in this diff