This is the 6.8.8 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmYtFgcACgkQONu9yGCS
 aT62gg//XSyI5LTWcbnmubKZhluEYscrhJh7QkA7x47kfG6F04q2PNmg5zgHP/xi
 vDMEVbTo+cs6C3DRA3+ybgIVesAFMfXKF3GzA0+DiO66bmL10CaQ0YwA9Rt+LlEN
 54w+hXQjSrh9cIhNjoObWf8pES59dazI1rfbnO+bC7LXMnh5+7haxJglsoi31Yo3
 SwutokioPPslVGNx6u1eheb9J30n6CyKsJR/RaNPDQ8nKBWqe0IxlQDFjqvI1xHH
 zvTHNKXm0+ME/NGZa7WhrTk81wsef0cTtPucvi4lcK7EV1+JD+4w8gM0rk4T+oGi
 tdG0kmmeeHd5ytjfS14DmOwvQIPk2dWA6fjk5+QnuM2mxSNrrN+z3i0PGKynAvQb
 vt031B9gK1e12TTcH4zyHpvXlOlvDqPe7mWOlM8aQS2I9hwjezKl8eN8LBDk7lRH
 KJAHT27oVhfZDOCwCgy0Bqs89Mh+P20buU6WZ8nG2tIuwyB2meWurB48yHkm7ble
 VoFCT5b107ntw9h2L7524+BCHSoUWZVevCV/7RY7Wo8xKBW2ptqPwSYpnRqRQ74U
 E7zBwcn+lfScF3z7ttnqQu+rtqsvbgLPhRM27US2BvTjVIXGPbKWrMHEATr/4xRN
 NR6FWO2xORib6PYNGa9okib1XzHznNM96krr+JQR/6y8kb80yb0=
 =doU9
 -----END PGP SIGNATURE-----

Merge v6.8.8

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 4d617ac607
Greg Kroah-Hartman 2024-04-27 17:13:05 +02:00
178 changed files with 1903 additions and 828 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 8
SUBLEVEL = 7
SUBLEVEL = 8
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -569,6 +569,11 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
adr_l x1, __hyp_text_end
adr_l x2, dcache_clean_poc
blr x2
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
pre_disable_mmu_workaround
msr sctlr_el2, x0
isb
0:
mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0


@ -219,9 +219,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
if (!can_set_direct_map())
return true;
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;


@ -79,6 +79,9 @@ do { \
#define __smp_mb__before_atomic() do { } while (0)
#define __smp_mb__after_atomic() do { } while (0)
/* Writing to CR3 provides a full memory barrier in switch_mm(). */
#define smp_mb__after_switch_mm() do { } while (0)
#include <asm-generic/barrier.h>
#endif /* _ASM_X86_BARRIER_H */


@ -854,6 +854,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
struct kvm_hypervisor_cpuid kvm_cpuid;
bool is_amd_compatible;
/*
* FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly


@ -1651,7 +1651,8 @@ static void __init bhi_select_mitigation(void)
return;
/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
spec_ctrl_disable_kernel_rrsba();
if (rrsba_disabled)
return;
@ -2803,11 +2804,13 @@ static const char *spectre_bhi_state(void)
{
if (!boot_cpu_has_bug(X86_BUG_BHI))
return "; BHI: Not affected";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
return "; BHI: BHI_DIS_S";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
return "; BHI: SW loop, KVM: SW loop";
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
rrsba_disabled)
return "; BHI: Retpoline";
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
return "; BHI: Vulnerable, KVM: SW loop";


@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
{ X86_FEATURE_VAES, X86_FEATURE_AVX },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },

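The cpuid_deps[] hunk above re-homes GFNI, VAES and VPCLMULQDQ so they depend on SSE2/AVX rather than AVX512VL; the kernel consumes this table by clearing a feature together with everything that transitively depends on it. A minimal userspace sketch of that mechanism, assuming made-up FEAT_* identifiers in place of the real X86_FEATURE_* bits:

#include <stdint.h>
#include <stdio.h>

enum { FEAT_XMM2, FEAT_AVX, FEAT_GFNI, FEAT_VAES, NFEAT };

struct dep { int feature, depends; };

static const struct dep deps[] = {
	{ FEAT_GFNI, FEAT_XMM2 },	/* GFNI now hangs off SSE2, not AVX512VL */
	{ FEAT_VAES, FEAT_AVX },
};

/* Clear 'feature' and, transitively, everything that depends on it. */
static void clear_feature(uint32_t *mask, int feature)
{
	*mask &= ~(1u << feature);
	for (size_t i = 0; i < sizeof(deps) / sizeof(deps[0]); i++)
		if (deps[i].depends == feature && (*mask & (1u << deps[i].feature)))
			clear_feature(mask, deps[i].feature);
}

int main(void)
{
	uint32_t mask = (1u << FEAT_XMM2) | (1u << FEAT_AVX) |
			(1u << FEAT_GFNI) | (1u << FEAT_VAES);
	clear_feature(&mask, FEAT_AVX);	/* VAES must fall with AVX */
	printf("mask after clearing AVX: %#x\n", mask);
	return 0;
}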

@ -366,6 +366,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_update_pv_runtime(vcpu);
vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);


@ -120,6 +120,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}
static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
return vcpu->arch.is_amd_compatible;
}
static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
return !guest_cpuid_is_amd_compatible(vcpu);
}
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

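The new guest_cpuid_is_amd_compatible() simply reads a bool that kvm_vcpu_after_set_cpuid() caches from guest_cpuid_is_amd_or_hygon(), so hot paths (the LAPIC, MMU and MSR hunks that follow) no longer re-walk the guest's CPUID entries. A hedged sketch of the caching pattern, with hypothetical types rather than the real KVM structures:

#include <stdbool.h>
#include <string.h>

struct vcpu {
	char vendor[13];	/* CPUID.0 vendor string, NUL-terminated */
	bool is_amd_compatible;	/* cached when CPUID is (re)configured */
};

/* Slow path: runs once whenever userspace updates the guest CPUID. */
static void vcpu_after_set_cpuid(struct vcpu *v)
{
	v->is_amd_compatible = !strcmp(v->vendor, "AuthenticAMD") ||
			       !strcmp(v->vendor, "HygonGenuine");
}

/* Fast path: cheap enough for interrupt-delivery and MMU code. */
static bool guest_is_amd_compatible(const struct vcpu *v)
{
	return v->is_amd_compatible;
}

int main(void)
{
	struct vcpu v = { .vendor = "HygonGenuine" };
	vcpu_after_set_cpuid(&v);
	return guest_is_amd_compatible(&v) ? 0 : 1;
}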

@ -2771,7 +2771,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
if (r && lvt_type == APIC_LVTPC)
if (r && lvt_type == APIC_LVTPC &&
guest_cpuid_is_intel_compatible(apic->vcpu))
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
return r;
}


@ -4922,7 +4922,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
context->cpu_role.base.level, is_efer_nx(context),
guest_can_use(vcpu, X86_FEATURE_GBPAGES),
is_cr4_pse(context),
guest_cpuid_is_amd_or_hygon(vcpu));
guest_cpuid_is_amd_compatible(vcpu));
}
static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
@ -7388,7 +7388,8 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* by the memslot, KVM can't use a hugepage due to the
* misaligned address regardless of memory attributes.
*/
if (gfn >= slot->base_gfn) {
if (gfn >= slot->base_gfn &&
gfn + nr_pages <= slot->base_gfn + slot->npages) {
if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
hugepage_clear_mixed(slot, gfn, level);
else


@ -1498,6 +1498,16 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
}
}
static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
{
/*
* All TDP MMU shadow pages share the same role as their root, aside
* from level, so it is valid to key off any shadow page to determine if
* write protection is needed for an entire tree.
*/
return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
}
/*
* Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
* AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
@ -1508,7 +1518,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end)
{
u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
struct tdp_iter iter;
bool spte_set = false;
@ -1523,7 +1534,7 @@ retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
KVM_MMU_WARN_ON(kvm_ad_enabled() &&
KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));
if (!(iter.old_spte & dbit))
@ -1570,8 +1581,8 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t gfn, unsigned long mask, bool wrprot)
{
u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
shadow_dirty_mask;
struct tdp_iter iter;
lockdep_assert_held_write(&kvm->mmu_lock);
@ -1583,7 +1594,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
if (!mask)
break;
KVM_MMU_WARN_ON(kvm_ad_enabled() &&
KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
spte_ad_need_write_protect(iter.old_spte));
if (iter.level > PG_LEVEL_4K ||


@ -7857,8 +7857,28 @@ static u64 vmx_get_perf_capabilities(void)
if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
perf_cap &= ~PERF_CAP_PEBS_BASELINE;
/*
* Disallow adaptive PEBS as it is functionally broken, can be
* used by the guest to read *host* LBRs, and can be used to
* bypass userspace event filters. To correctly and safely
* support adaptive PEBS, KVM needs to:
*
* 1. Account for the ADAPTIVE flag when (re)programming fixed
* counters.
*
* 2. Gain support from perf (or take direct control of counter
* programming) to support events without adaptive PEBS
* enabled for the hardware counter.
*
* 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
* adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
*
* 4. Document which PMU events are effectively exposed to the
* guest via adaptive PEBS, and make adaptive PEBS mutually
* exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
*/
perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}
return perf_cap;


@ -3422,7 +3422,7 @@ static bool is_mci_status_msr(u32 msr)
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
/* McStatusWrEn enabled? */
if (guest_cpuid_is_amd_or_hygon(vcpu))
if (guest_cpuid_is_amd_compatible(vcpu))
return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
return false;


@ -639,6 +639,14 @@ static void blkdev_flush_mapping(struct block_device *bdev)
bdev_write_inode(bdev);
}
static void blkdev_put_whole(struct block_device *bdev)
{
if (atomic_dec_and_test(&bdev->bd_openers))
blkdev_flush_mapping(bdev);
if (bdev->bd_disk->fops->release)
bdev->bd_disk->fops->release(bdev->bd_disk);
}
static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
@ -657,20 +665,21 @@ static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
if (!atomic_read(&bdev->bd_openers))
set_init_blocksize(bdev);
if (test_bit(GD_NEED_PART_SCAN, &disk->state))
bdev_disk_changed(disk, false);
atomic_inc(&bdev->bd_openers);
if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
/*
* Only return scanning errors if we are called from contexts
* that explicitly want them, e.g. the BLKRRPART ioctl.
*/
ret = bdev_disk_changed(disk, false);
if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
blkdev_put_whole(bdev);
return ret;
}
}
return 0;
}
static void blkdev_put_whole(struct block_device *bdev)
{
if (atomic_dec_and_test(&bdev->bd_openers))
blkdev_flush_mapping(bdev);
if (bdev->bd_disk->fops->release)
bdev->bd_disk->fops->release(bdev->bd_disk);
}
static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
struct gendisk *disk = part->bd_disk;

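blkdev_get_whole() now reports partition-scan failures only to callers that passed BLK_OPEN_STRICT_SCAN, which the BLKRRPART ioctl sets in the following hunk; ordinary opens keep ignoring scan errors as before. A small sketch of that opt-in error propagation, with invented names standing in for the block-layer ones:

#include <stdio.h>

#define OPEN_STRICT_SCAN (1u << 0)	/* stand-in for BLK_OPEN_STRICT_SCAN */

static int scan_partitions(void) { return -5; /* pretend -EIO */ }

static int open_whole(unsigned int mode)
{
	int ret = scan_partitions();

	/* Only callers that explicitly asked for strict scanning see it. */
	if (ret && (mode & OPEN_STRICT_SCAN))
		return ret;
	return 0;
}

int main(void)
{
	printf("lazy open:   %d\n", open_whole(0));
	printf("strict open: %d\n", open_whole(OPEN_STRICT_SCAN));
	return 0;
}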

@ -556,7 +556,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
return -EACCES;
if (bdev_is_partition(bdev))
return -EINVAL;
return disk_scan_partitions(bdev->bd_disk, mode);
return disk_scan_partitions(bdev->bd_disk,
mode | BLK_OPEN_STRICT_SCAN);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:


@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
while (tmpx < vc->vc_cols - 1) {
while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);

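The speakup fix bounds get_word()'s copy loop by the destination buffer as well as the screen width, closing a potential overflow when a word spans more columns than buf can hold. The same dual-bound idiom in isolation:

#include <stdio.h>

int main(void)
{
	const char src[] = "a fairly long source line";
	char buf[8];
	size_t cnt = 0, i = 0;

	/* Stop at end of source OR when only the NUL slot remains. */
	while (src[i] != '\0' && cnt < sizeof(buf) - 1)
		buf[cnt++] = src[i++];
	buf[cnt] = '\0';

	printf("%s\n", buf);	/* prints "a fairl" */
	return 0;
}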

@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
if (offset > buffer->data_size || read_size < sizeof(*hdr))
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;

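The binder hunk rejects object offsets that are not u32-aligned before the header is copied and interpreted. IS_ALIGNED() boils down to a power-of-two mask test; a standalone sketch of the same validation:

#include <stdint.h>
#include <stdio.h>

/* The test the kernel's IS_ALIGNED() performs for power-of-two 'a'. */
#define IS_ALIGNED(x, a) (((x) & ((uintptr_t)(a) - 1)) == 0)

static int get_object(size_t data_size, size_t offset,
		      size_t read_size, size_t hdr_size)
{
	if (offset > data_size || read_size < hdr_size ||
	    !IS_ALIGNED(offset, sizeof(uint32_t)))
		return 0;	/* reject before touching the buffer */
	return 1;
}

int main(void)
{
	printf("aligned:    %d\n", get_object(128, 8, 16, 8));
	printf("misaligned: %d\n", get_object(128, 6, 16, 8));
	return 0;
}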

@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
static void __cold _credit_init_bits(size_t bits)
{
static struct execute_work set_ready;
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
if (static_key_initialized)
execute_in_process_context(crng_set_ready, &set_ready);
if (static_key_initialized && system_unbound_wq)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@ -890,8 +890,8 @@ void __init random_init(void)
/*
* If we were initialized by the cpu or bootloader before jump labels
* are initialized, then we should enable the static branch here, where
* it's guaranteed that jump labels have been initialized.
* or workqueues are initialized, then we should enable the static
* branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);

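The random.c change swaps execute_in_process_context() for a statically initialized work item so the crng-ready notification can be queued from any context once workqueues are up, instead of sometimes running synchronously from hard IRQ. A minimal sketch of the kernel pattern (module context, not standalone userspace; names shortened from the real ones):

#include <linux/workqueue.h>

static void crng_set_ready(struct work_struct *work)
{
	/* runs later, in process context, on an unbound workqueue */
}

static DECLARE_WORK(set_ready, crng_set_ready);

static void credit_bits(void)
{
	/* Safe from IRQ context; workqueues must already exist. */
	if (system_unbound_wq)
		queue_work(system_unbound_wq, &set_ready);
}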

@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
/* List of registered clks that use runtime PM */
static HLIST_HEAD(clk_rpm_list);
static DEFINE_MUTEX(clk_rpm_list_lock);
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
/**
* clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
*
* Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
* that disabling unused clks avoids a deadlock where a device is runtime PM
* resuming/suspending and the runtime PM callback is trying to grab the
* prepare_lock for something like clk_prepare_enable() while
* clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
* PM resume/suspend the device as well.
*
* Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
* success. Otherwise the lock is released on failure.
*
* Return: 0 on success, negative errno otherwise.
*/
static int clk_pm_runtime_get_all(void)
{
int ret;
struct clk_core *core, *failed;
/*
* Grab the list lock to prevent any new clks from being registered
* or unregistered until clk_pm_runtime_put_all().
*/
mutex_lock(&clk_rpm_list_lock);
/*
* Runtime PM "get" all the devices that are needed for the clks
* currently registered. Do this without holding the prepare_lock, to
* avoid the deadlock.
*/
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
ret = clk_pm_runtime_get(core);
if (ret) {
failed = core;
pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
dev_name(failed->dev), failed->name);
goto err;
}
}
return 0;
err:
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
if (core == failed)
break;
clk_pm_runtime_put(core);
}
mutex_unlock(&clk_rpm_list_lock);
return ret;
}
/**
* clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
*
* Put the runtime PM references taken in clk_pm_runtime_get_all() and release
* the 'clk_rpm_list_lock'.
*/
static void clk_pm_runtime_put_all(void)
{
struct clk_core *core;
hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
clk_pm_runtime_put(core);
mutex_unlock(&clk_rpm_list_lock);
}
static void clk_pm_runtime_init(struct clk_core *core)
{
struct device *dev = core->dev;
if (dev && pm_runtime_enabled(dev)) {
core->rpm_enabled = true;
mutex_lock(&clk_rpm_list_lock);
hlist_add_head(&core->rpm_node, &clk_rpm_list);
mutex_unlock(&clk_rpm_list_lock);
}
}
/*** locking ***/
static void clk_prepare_lock(void)
{
@ -1362,9 +1450,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
if (clk_pm_runtime_get(core))
return;
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@ -1373,8 +1458,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@ -1390,9 +1473,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
if (clk_pm_runtime_get(core))
goto unprepare_out;
flags = clk_enable_lock();
if (core->enable_count)
@ -1417,8 +1497,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
clk_pm_runtime_put(core);
unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@ -1434,6 +1512,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
@ -1442,6 +1521,13 @@ static int __init clk_disable_unused(void)
pr_info("clk: Disabling unused clocks\n");
ret = clk_pm_runtime_get_all();
if (ret)
return ret;
/*
* Grab the prepare lock to keep the clk topology stable while iterating
* over clks.
*/
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@ -1458,6 +1544,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
clk_pm_runtime_put_all();
return 0;
}
late_initcall_sync(clk_disable_unused);
@ -3233,9 +3321,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@ -3245,11 +3331,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
int ret;
seq_puts(s, " enable prepare protect duty hardware connection\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
ret = clk_pm_runtime_get_all();
if (ret)
return ret;
clk_prepare_lock();
@ -3258,6 +3348,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
clk_pm_runtime_put_all();
return 0;
}
@ -3305,8 +3396,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
int ret;
ret = clk_pm_runtime_get_all();
if (ret)
return ret;
seq_putc(s, '{');
clk_prepare_lock();
for (; *lists; lists++) {
@ -3319,6 +3416,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@ -3962,8 +4060,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@ -4192,6 +4288,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
/* Free memory allocated for a struct clk_core */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
if (core->rpm_enabled) {
mutex_lock(&clk_rpm_list_lock);
hlist_del(&core->rpm_node);
mutex_unlock(&clk_rpm_list_lock);
}
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@ -4212,6 +4324,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
kref_init(&core->ref);
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@ -4224,9 +4338,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
if (dev && pm_runtime_enabled(dev))
core->rpm_enabled = true;
core->dev = dev;
clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@ -4266,12 +4379,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
clk_core_free_parent_map(core);
fail_parents:
fail_ops:
kfree_const(core->name);
fail_name:
kfree(core);
kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@ -4351,18 +4462,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
lockdep_assert_held(&prepare_lock);
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock

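clk_pm_runtime_get_all() above is an instance of acquire-all-or-roll-back: on failure it releases only the references taken before the failing entry, then drops the list lock. The shape is generic; a runnable userspace sketch over an array, with hypothetical acquire()/release():

#include <stdio.h>

#define N 4

static int acquire(int i) { return i == 2 ? -1 : 0; }	/* entry 2 fails */
static void release(int i) { printf("release %d\n", i); }

static int acquire_all(void)
{
	int i, ret = 0;

	for (i = 0; i < N; i++) {
		ret = acquire(i);
		if (ret)
			goto err;
	}
	return 0;

err:
	/* Unwind only what was actually acquired, in reverse order. */
	while (--i >= 0)
		release(i);
	return ret;
}

int main(void) { return acquire_all() ? 1 : 0; }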

@ -152,7 +152,7 @@ static const struct mtk_gate infra_clks[] = {
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
"csw_infra_f26m_sel", 8),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
"csw_infra_f26m_sel", 9),
"infra_pcie_peri_ck_26m_ck_p3", 9),
GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
"csw_infra_f26m_sel", 10),
/* INFRA1 */


@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
}
devm_pm_runtime_enable(&pdev->dev);
/*
* Do a pm_runtime_resume_and_get() to workaround a possible
* deadlock between clk_register() and the genpd framework.
*/
r = pm_runtime_resume_and_get(&pdev->dev);
if (r)
return r;
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
pm_runtime_put(&pdev->dev);
return r;
unregister_clks:
@ -604,6 +617,8 @@ free_data:
free_base:
if (mcd->shared_io && base)
iounmap(base);
pm_runtime_put(&pdev->dev);
return r;
}

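__mtk_clk_simple_probe() now brackets clk registration with a runtime-PM reference so the power domain cannot suspend midway and deadlock against clk_register(), as the in-diff comment explains. The bracket, reduced to a sketch (kernel context, hypothetical function name):

#include <linux/pm_runtime.h>

static int probe_body(struct device *dev)
{
	int r;

	r = pm_runtime_resume_and_get(dev);	/* usage count held on success */
	if (r)
		return r;

	/* ... register clocks while the domain is guaranteed powered ... */

	pm_runtime_put(dev);	/* dropped on success and error paths alike */
	return 0;
}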

@ -641,32 +641,21 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *ep_desc;
int i;
struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
int ret;
if (iface_desc->desc.bNumEndpoints != 2)
if (devpriv->model == VMK8061_MODEL)
ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
&ep_tx_desc, NULL, NULL);
else
ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
&ep_rx_desc, &ep_tx_desc);
if (ret)
return -ENODEV;
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
ep_desc = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_int_in(ep_desc) ||
usb_endpoint_is_bulk_in(ep_desc)) {
if (!devpriv->ep_rx)
devpriv->ep_rx = ep_desc;
continue;
}
if (usb_endpoint_is_int_out(ep_desc) ||
usb_endpoint_is_bulk_out(ep_desc)) {
if (!devpriv->ep_tx)
devpriv->ep_tx = ep_desc;
continue;
}
}
if (!devpriv->ep_rx || !devpriv->ep_tx)
return -ENODEV;
devpriv->ep_rx = ep_rx_desc;
devpriv->ep_tx = ep_tx_desc;
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;


@ -562,7 +562,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@ -573,9 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)


@ -1559,6 +1559,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;
if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;
/* make sure object fit at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;
/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;
return 0;
}
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@ -1585,21 +1616,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@ -1652,17 +1676,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@ -1676,7 +1692,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@ -1763,10 +1779,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
int r;
r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;
eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);

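amdgpu_vm_verify_parameters() centralizes what amdgpu_vm_bo_map(), _replace_map() and _clear_mappings() used to open-code, and replaces the 'saddr + size <= saddr' idiom with check_add_overflow(), the kernel wrapper around the compiler's __builtin_add_overflow(). A standalone sketch of the overflow-safe range check using the builtin directly:

#include <stdint.h>
#include <stdio.h>

static int verify_range(uint64_t saddr, uint64_t offset, uint64_t size,
			uint64_t bo_size)
{
	uint64_t tmp;

	if (size == 0)
		return -1;
	if (__builtin_add_overflow(saddr, size, &tmp) ||
	    __builtin_add_overflow(offset, size, &tmp))
		return -1;
	if (offset + size > bo_size)	/* safe: overflow excluded above */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", verify_range(0x1000, 0, 0x1000, 0x10000));	/* 0 */
	printf("%d\n", verify_range(UINT64_MAX - 4, 0, 64, 0x10000));	/* -1 */
	return 0;
}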

@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
return ERR_PTR(-EINVAL);
process = ERR_PTR(-EINVAL);
goto out;
}
/* A prior open of /dev/kfd could have already created the process. */


@ -2512,7 +2512,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@ -2521,12 +2522,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
if (new_cdclk_state->disable_pipes ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
if (new_cdclk_state->disable_pipes) {
cdclk_config = new_cdclk_state->actual;
pipe = INVALID_PIPE;
} else {
if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
cdclk_config = new_cdclk_state->actual;
pipe = new_cdclk_state->pipe;
} else {
cdclk_config = old_cdclk_state->actual;
pipe = INVALID_PIPE;
}
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
old_cdclk_state->actual.voltage_level);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@ -2544,7 +2558,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@ -2554,11 +2568,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_cdclk_pcode_post_notify(state);
if (!new_cdclk_state->disable_pipes &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
pipe = new_cdclk_state->pipe;
else
pipe = INVALID_PIPE;
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)


@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
bios->legacy.i2c_indices.crt, 1, 1);
bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
all_heads, 0);
all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
all_heads, 1);
all_heads, DCB_OUTPUT_B);
}
static int


@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
if (refcount_inc_not_zero(&iobj->maps))
if (refcount_inc_not_zero(&iobj->maps)) {
/* read barrier match the wmb on refcount set */
smp_rmb();
return iobj->map;
}
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
/* barrier to ensure the ptrs are written before refcount is set */
smp_wmb();
refcount_set(&iobj->maps, 1);
}

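The nv50_instobj hunks are the classic publish/subscribe pairing: write the payload (the ptrs and the map), smp_wmb(), then set the refcount; a reader that wins refcount_inc_not_zero() issues smp_rmb() before dereferencing, since the refcount operation alone does not order the payload reads. With C11 atomics the same guarantee is release/acquire, sketched below:

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	void *map;		/* payload published by the mapper */
	atomic_int maps;	/* 0 = not yet published */
};

static void publish(struct obj *o, void *map)
{
	o->map = map;
	/* release: payload is visible before maps becomes nonzero */
	atomic_store_explicit(&o->maps, 1, memory_order_release);
}

static void *try_acquire(struct obj *o)
{
	/* acquire: pairs with the release store in publish() */
	if (atomic_load_explicit(&o->maps, memory_order_acquire))
		return o->map;
	return NULL;
}

int main(void)
{
	struct obj o = { .map = NULL, .maps = 0 };
	publish(&o, &o);
	return try_acquire(&o) ? 0 : 1;
}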

@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
mipi_dsi_device_unregister(ctx->dsi);
drm_panel_remove(&ctx->panel);
}


@ -923,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
ATOM_CONNECTOR_INFO_I2C ci;
if (frev > 1)
ci = supported_devices->info_2d1.asConnInfo[i];
else
ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;


@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_uncached[order];
@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
if (use_dma_alloc || nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
/* Initialize only pool types which are actually used */
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_init(pt, pool, i, j);
}
}
}
EXPORT_SYMBOL(ttm_pool_init);
@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j < NR_PAGE_ORDERS; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_fini(pt);
}
}
/* We removed the pool types from the LRU, but we need to also make sure

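ttm_pool_init() and ttm_pool_fini() now round-trip each (caching, order) pair through ttm_pool_select_type() and only initialize or tear down the per-pool types the allocator would actually hand out; types that alias the global pools are skipped. The round-trip test, reduced to its shape with a hypothetical selector:

#include <stdio.h>

#define NTYPES 3

static int own[NTYPES];	/* per-pool state */
static int global_pool;	/* shared fallback */

/* Returns the state the allocator would use for type i. */
static int *select_type(int use_own, int i)
{
	return use_own ? &own[i] : &global_pool;
}

static void pool_init(int use_own)
{
	for (int i = 0; i < NTYPES; i++) {
		int *pt = select_type(use_own, i);

		if (pt != &own[i])
			continue;	/* global: not ours to initialize */
		*pt = 1;
		printf("init own[%d]\n", i);
	}
}

int main(void)
{
	pool_init(0);	/* prints nothing: everything aliases the global */
	pool_init(1);	/* initializes own[0..2] */
	return 0;
}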

@ -105,7 +105,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_BIN];
file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN];
file->jobs_sent[V3D_BIN]++;
v3d->queue[V3D_BIN].jobs_sent++;
@ -126,7 +125,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER];
file->jobs_sent[V3D_RENDER]++;
v3d->queue[V3D_RENDER].jobs_sent++;
@ -147,7 +145,6 @@ v3d_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_CSD];
file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD];
file->jobs_sent[V3D_CSD]++;
v3d->queue[V3D_CSD].jobs_sent++;
@ -195,7 +192,6 @@ v3d_hub_irq(int irq, void *arg)
struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
u64 runtime = local_clock() - file->start_ns[V3D_TFU];
file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU];
file->jobs_sent[V3D_TFU]++;
v3d->queue[V3D_TFU].jobs_sent++;


@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
struct vmw_bo_blit_line_data d;
struct vmw_bo_blit_line_data d = {0};
int ret = 0;
struct page **dst_pages = NULL;
struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
if (!src->ttm->pages && src->ttm->sg) {
src_pages = kvmalloc_array(src->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
if (!src_pages)
return -ENOMEM;
ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
src->ttm->num_pages);
if (ret)
goto out;
}
if (!dst->ttm->pages && dst->ttm->sg) {
dst_pages = kvmalloc_array(dst->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
if (!dst_pages) {
ret = -ENOMEM;
goto out;
}
ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
dst->ttm->num_pages);
if (ret)
goto out;
}
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@ -504,6 +529,10 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
if (src_pages)
kvfree(src_pages);
if (dst_pages)
kvfree(dst_pages);
return ret;
}


@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
.no_wait_gpu = false
.no_wait_gpu = false,
.resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
&vmw_bo->placement, 0, &ctx, NULL,
NULL, destroy);
&vmw_bo->placement, 0, &ctx,
params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;


@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
struct dma_resv *resv;
struct sg_table *sg;
};
/**


@ -1628,6 +1628,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
.gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,


@ -1131,6 +1131,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c


@ -149,6 +149,38 @@ out_no_bo:
return ret;
}
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *table)
{
int ret;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_gem_object *gem = NULL;
struct vmw_bo *vbo;
struct vmw_bo_params params = {
.domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
.resv = attach->dmabuf->resv,
.sg = table,
};
dma_resv_lock(params.resv, NULL);
ret = vmw_bo_create(dev_priv, &params, &vbo);
if (ret != 0)
goto out_no_bo;
vbo->tbo.base.funcs = &vmw_gem_object_funcs;
gem = &vbo->tbo.base;
out_no_bo:
dma_resv_unlock(params.resv);
return gem;
}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)


@ -932,6 +932,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@ -939,9 +940,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
/* We always want to have an active plane with an active CRTC */
if (has_primary != new_state->enable)
return -EINVAL;
/*
* This is fine in general, but broken userspace might expect
* some actual rendering so give a clue as why it's blank.
*/
if (new_state->enable && !has_primary)
drm_dbg_driver(&vmw->drm,
"CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&


@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {


@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
return ttm_prime_fd_to_handle(tfile, fd, handle);
if (ret)
ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
int ret;
if (handle > VMWGFX_NUM_MOB)
ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
else
ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
return ret;
}

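vmw_prime_fd_to_handle() now tries the legacy TTM prime path first and falls back to the generic GEM implementation only when that fails, while handle_to_fd picks a path by handle range. A sketch of the try-then-fall-back shape, with invented stand-ins for the two paths:

static int ttm_path(int fd, unsigned int *handle)
{
	(void)fd; (void)handle;
	return -22;	/* pretend -EINVAL: not a TTM-managed object */
}

static int gem_path(int fd, unsigned int *handle)
{
	*handle = 42;
	return 0;
}

static int fd_to_handle(int fd, unsigned int *handle)
{
	int ret = ttm_path(fd, handle);

	if (ret)	/* legacy path rejected it: try the generic one */
		ret = gem_path(fd, handle);
	return ret;
}

int main(void)
{
	unsigned int h;
	return fd_to_handle(3, &h);	/* 0: the GEM fallback succeeded */
}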

@ -220,13 +220,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(
&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
if (ret)
goto out_sg_alloc_fail;
if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
vsgt->sgt = vmw_tt->dma_ttm.sg;
} else {
vsgt->sgt = &vmw_tt->sgt;
ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
vsgt->pages, vsgt->num_pages, 0,
(unsigned long)vsgt->num_pages << PAGE_SHIFT,
dma_get_max_seg_size(dev_priv->drm.dev),
GFP_KERNEL);
if (ret)
goto out_sg_alloc_fail;
}
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@ -241,8 +246,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
sg_free_table(vmw_tt->vsgt.sgt);
vmw_tt->vsgt.sgt = NULL;
drm_warn(&dev_priv->drm, "VSG table map failed!");
sg_free_table(vsgt->sgt);
vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@ -388,15 +394,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
int ret;
bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
/* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (external && ttm->sg)
return drm_prime_sg_to_dma_addr_array(ttm->sg,
ttm->dma_address,
ttm->num_pages);
return ret;
return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@ -404,6 +412,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
if (external)
return;
vmw_ttm_unbind(bdev, ttm);
@ -422,6 +434,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@ -430,7 +443,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
if (external)
page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else


@ -31,7 +31,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
if (ret)
return ret;
goto err;
if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
/*
@ -42,12 +42,16 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
ttm_bo_unreserve(&bo->ttm);
return -EINVAL;
ret = -EINVAL;
goto err;
}
bo->flags |= XE_BO_SCANOUT_BIT;
}
ttm_bo_unreserve(&bo->ttm);
return 0;
err:
xe_bo_put(bo);
return ret;
}


@ -1026,23 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
enum ib_cm_state old_state)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
enum ib_cm_state old_state;
struct cm_work *work;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@ -1151,7 +1154,7 @@ retest:
msecs_to_jiffies(
CM_DESTROY_ID_WAIT_TIMEOUT));
if (!ret) /* timeout happened */
cm_destroy_id_wait_timeout(cm_id);
cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)


@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
!mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;


@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */


@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes;
mutex_lock(&icc_bw_lock);
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse;
}
mutex_unlock(&icc_bw_lock);
return path;
}
@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
mutex_lock(&icc_bw_lock);
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
kfree_const(path->name);


@ -116,15 +116,6 @@ static struct qcom_icc_node xm_sdc2 = {
.links = { X1E80100_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node ddr_perf_mode_master = {
.name = "ddr_perf_mode_master",
.id = X1E80100_MASTER_DDR_PERF_MODE,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { X1E80100_SLAVE_DDR_PERF_MODE },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = X1E80100_MASTER_QUP_CORE_0,
@ -832,14 +823,6 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.links = { X1E80100_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node ddr_perf_mode_slave = {
.name = "ddr_perf_mode_slave",
.id = X1E80100_SLAVE_DDR_PERF_MODE,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = X1E80100_SLAVE_QUP_CORE_0,
@ -1591,12 +1574,6 @@ static struct qcom_icc_bcm bcm_acv = {
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_acv_perf = {
.name = "ACV_PERF",
.num_nodes = 1,
.nodes = { &ddr_perf_mode_slave },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
@ -1863,18 +1840,15 @@ static const struct qcom_icc_desc x1e80100_aggre2_noc = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_acv_perf,
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
[SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,


@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running


@ -1002,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
if (PCI_PID(pcr) == PID_5261) {
if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;


@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},


@ -402,25 +402,40 @@ static int mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
mei_stop(mei_dev);
mei_disable_interrupts(mei_dev);
vsc_tp_free_irq(hw->tp);
return 0;
}
static int mei_vsc_resume(struct device *dev)
{
struct mei_device *mei_dev = dev_get_drvdata(dev);
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
int ret;
ret = vsc_tp_request_irq(hw->tp);
if (ret)
return ret;
ret = mei_restart(mei_dev);
if (ret)
return ret;
goto err_free;
/* start timer if stopped in suspend */
schedule_delayed_work(&mei_dev->timer_work, HZ);
return 0;
err_free:
vsc_tp_free_irq(hw->tp);
return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);


@ -94,6 +94,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
{}
};
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
atomic_inc(&tp->assert_cnt);
wake_up(&tp->xfer_wait);
return IRQ_WAKE_THREAD;
}
static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
if (tp->event_notify)
tp->event_notify(tp->event_notify_context);
return IRQ_HANDLED;
}
/* wakeup firmware and wait for response */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
@ -383,6 +404,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
/**
* vsc_tp_request_irq - request irq for vsc_tp device
* @tp: vsc_tp device handle
*/
int vsc_tp_request_irq(struct vsc_tp *tp)
{
struct spi_device *spi = tp->spi;
struct device *dev = &spi->dev;
int ret;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), tp);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
/**
* vsc_tp_free_irq - free irq for vsc_tp device
* @tp: vsc_tp device handle
*/
void vsc_tp_free_irq(struct vsc_tp *tp)
{
free_irq(tp->spi->irq, tp);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
/**
* vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
* @tp: vsc_tp device handle
@ -413,27 +465,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp)
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
atomic_inc(&tp->assert_cnt);
return IRQ_WAKE_THREAD;
}
static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
wake_up(&tp->xfer_wait);
if (tp->event_notify)
tp->event_notify(tp->event_notify_context);
return IRQ_HANDLED;
}
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
struct acpi_device **__adev = data;
@ -485,10 +516,9 @@ static int vsc_tp_probe(struct spi_device *spi)
tp->spi = spi;
irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
vsc_tp_thread_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), tp);
ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), tp);
if (ret)
return ret;
@ -522,6 +552,8 @@ static int vsc_tp_probe(struct spi_device *spi)
err_destroy_lock:
mutex_destroy(&tp->mutex);
free_irq(spi->irq, tp);
return ret;
}
@ -532,6 +564,8 @@ static void vsc_tp_remove(struct spi_device *spi)
platform_device_unregister(tp->pdev);
mutex_destroy(&tp->mutex);
free_irq(spi->irq, tp);
}
static const struct acpi_device_id vsc_tp_acpi_ids[] = {

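The vsc_tp rework drops devm_request_threaded_irq() for plain request_threaded_irq() plus an explicit free_irq(), precisely so mei_vsc_suspend()/mei_vsc_resume() can release and re-request the line; a devm-managed IRQ would only be freed at driver detach. The request/free pairing, sketched in kernel style with hypothetical wrapper names:

#include <linux/interrupt.h>

static irqreturn_t hard_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;	/* fast ack, defer work to the thread */
}

static irqreturn_t thread_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int tp_request_irq(int irq, void *tp)
{
	return request_threaded_irq(irq, hard_isr, thread_isr,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "vsc-tp", tp);
}

static void tp_free_irq(int irq, void *tp)
{
	free_irq(irq, tp);	/* same dev_id as at request time */
}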

@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
void *context);
int vsc_tp_request_irq(struct vsc_tp *tp);
void vsc_tp_free_irq(struct vsc_tp *tp);
void vsc_tp_intr_enable(struct vsc_tp *tp);
void vsc_tp_intr_disable(struct vsc_tp *tp);
void vsc_tp_intr_synchronize(struct vsc_tp *tp);


@ -1947,14 +1947,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
MIRROR_PORT(val);
return (id == ID_MT7531 || id == ID_MT7988) ?
MT7531_MIRROR_PORT_GET(val) :
MIRROR_PORT(val);
}
static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
MIRROR_PORT(val);
return (id == ID_MT7531 || id == ID_MT7988) ?
MT7531_MIRROR_PORT_SET(val) :
MIRROR_PORT(val);
}
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@ -2469,8 +2471,6 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
mt7530_pll_setup(priv);
/* Lower Tx driving for TRGMII path */
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@ -2488,6 +2488,9 @@ mt7530_setup(struct dsa_switch *ds)
priv->p6_interface = PHY_INTERFACE_MODE_NA;
if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
mt7530_pll_setup(priv);
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@ -2517,6 +2520,9 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
@ -2625,6 +2631,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@ -2703,18 +2712,25 @@ mt7531_setup(struct dsa_switch *ds)
priv->p5_interface = PHY_INTERFACE_MODE_NA;
priv->p6_interface = PHY_INTERFACE_MODE_NA;
/* Enable PHY core PLL, since phy_device has not yet been created
* provided for phy_[read,write]_mmd_indirect is called, we provide
* our own mt7531_ind_mmd_phy_[read,write] to complete this
* function.
/* Enable Energy-Efficient Ethernet (EEE) and PHY core PLL, since
* phy_device has not yet been created provided for
* phy_[read,write]_mmd_indirect is called, we provide our own
* mt7531_ind_mmd_phy_[read,write] to complete this function.
*/
val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
val |= MT7531_PHY_PLL_BYPASS_MODE;
val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
/* Disable EEE advertisement on the switch PHYs. */
for (i = MT753X_CTRL_PHY_ADDR;
i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
0);
}
mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */


@ -32,6 +32,10 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
/* Register for ARL global control */
#define MT753X_AGC 0xc
#define LOCAL_EN BIT(7)
/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
@ -630,6 +634,7 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)


@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
ice_rule_add_src_vsi_metadata(&list[i]);
i++;
}
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@ -772,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
@ -820,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
@ -1481,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {

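Editor's note: the ice hunks above add one lookup for egress filters in both the counting pass (ice_tc_count_lkups()) and the fill pass (ice_tc_fill_rules()); the two passes size and populate the same array, so they must branch on exactly the same conditions. A toy, compilable illustration of that invariant (plain user-space C with placeholder flag names, not the driver):

#include <assert.h>
#include <stdio.h>

#define FLTR_EGRESS	0x1
#define FLTR_TNL_ID	0x2

static int count_lkups(unsigned int flags)
{
	int cnt = 1;			/* direction metadata, always added */

	if (flags & FLTR_EGRESS)
		cnt++;			/* source-VSI metadata for egress */
	if (flags & FLTR_TNL_ID)
		cnt++;
	return cnt;
}

static int fill_lkups(unsigned int flags, const char **list)
{
	int i = 0;

	list[i++] = "direction";
	if (flags & FLTR_EGRESS)
		list[i++] = "src_vsi";	/* the entry the fix adds */
	if (flags & FLTR_TNL_ID)
		list[i++] = "tunnel_id";
	return i;
}

int main(void)
{
	const char *list[8];
	unsigned int flags = FLTR_EGRESS | FLTR_TNL_ID;
	int n = fill_lkups(flags, list);

	/* Counting an entry the fill pass never emits (or vice versa)
	 * corrupts the rule: the two passes must stay in lockstep. */
	assert(n == count_lkups(flags));
	for (int i = 0; i < n; i++)
		printf("lkup[%d] = %s\n", i, list[i]);
	return 0;
}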

@ -688,6 +688,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
u32 val;
flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@ -696,12 +697,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
flow_spec->ip_flag = IPV4_FLAG_MORE;
flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
flow_spec->next_header = IPPROTO_FRAGMENT;
flow_spec->next_header = val ?
IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {

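Editor's note: the otx2 fix above stops hardcoding the fragment key. The key now follows the flower key bit (val ? FLAG : 0) while the mask stays fixed, so a rule can match "is not a fragment" as well as "is a fragment". A standalone demo of the key/mask semantics, where a packet matches iff (pkt & mask) == key (the flag value below is illustrative):

#include <stdio.h>

#define IPV4_FLAG_MORE 0x20

static int tcam_match(unsigned char pkt_flags, unsigned char key,
		      unsigned char mask)
{
	return (pkt_flags & mask) == key;	/* classic TCAM semantics */
}

int main(void)
{
	unsigned char frag = IPV4_FLAG_MORE, plain = 0;

	/* want fragments: key = FLAG (both old and new code) */
	printf("frag rule:  frag=%d plain=%d\n",
	       tcam_match(frag, IPV4_FLAG_MORE, IPV4_FLAG_MORE),
	       tcam_match(plain, IPV4_FLAG_MORE, IPV4_FLAG_MORE));

	/* want non-fragments: key = 0 (what the fix now produces) */
	printf("plain rule: frag=%d plain=%d\n",
	       tcam_match(frag, 0, IPV4_FLAG_MORE),
	       tcam_match(plain, 0, IPV4_FLAG_MORE));
	return 0;
}

The old code always installed key = FLAG, so "match non-fragment" rules silently matched fragments instead.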

@ -1074,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (!mtk_wed_get_rx_capa(dev))
return;
@ -1093,7 +1093,6 @@ static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
mtk_wed_dma_disable(dev);
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@ -2605,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
if (!dev->running)
return;
mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}


@ -45,6 +45,10 @@ struct arfs_table {
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
enum {
MLX5E_ARFS_STATE_ENABLED,
};
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
@ -59,6 +63,7 @@ struct mlx5e_arfs_tables {
spinlock_t arfs_lock;
int last_filter_id;
struct workqueue_struct *wq;
unsigned long state;
};
struct arfs_tuple {
@ -169,6 +174,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
return err;
}
}
set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
return 0;
}
@ -454,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
int i;
int j;
clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
@ -626,17 +635,8 @@ static void arfs_handle_work(struct work_struct *work)
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
spin_lock_bh(&arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
spin_unlock_bh(&arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
goto out;
}
mutex_unlock(&priv->state_lock);
if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
return;
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
@ -752,6 +752,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
spin_unlock_bh(&arfs->arfs_lock);
return -EPERM;
}
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {

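Editor's note: the mlx5e aRFS hunks above replace the MLX5E_STATE_OPENED check under the heavyweight state_lock with a dedicated MLX5E_ARFS_STATE_ENABLED bit: set last in enable, cleared first in disable, and tested by both the steering path and the deferred worker. A reduced sketch of that shape, with placeholder names (not the mlx5 code):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

enum { MY_ARFS_ENABLED };	/* bit index; stands in for the mlx5 name */

struct my_arfs {
	spinlock_t lock;
	unsigned long state;
};

static void my_arfs_enable(struct my_arfs *arfs)
{
	set_bit(MY_ARFS_ENABLED, &arfs->state);	/* last step of enable */
}

static void my_arfs_disable(struct my_arfs *arfs)
{
	/* Gate first: rules torn down after this cannot race with new
	 * work, because every producer tests the bit below. */
	clear_bit(MY_ARFS_ENABLED, &arfs->state);
}

static int my_arfs_steer(struct my_arfs *arfs)
{
	spin_lock_bh(&arfs->lock);
	if (!test_bit(MY_ARFS_ENABLED, &arfs->state)) {
		spin_unlock_bh(&arfs->lock);
		return -EPERM;		/* device going down: refuse work */
	}
	/* ... find or add the rule under the same lock ... */
	spin_unlock_bh(&arfs->lock);
	return 0;
}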

@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
if (MLX5_ESWITCH_MANAGER(dev) &&
mlx5_esw_vport_match_metadata_supported(esw))
esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
reps_err:
mlx5_esw_vports_cleanup(esw);
dev->priv.eswitch = NULL;
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
esw->dev->priv.eswitch = NULL;
mlx5_esw_vports_cleanup(esw);
debugfs_remove_recursive(esw->debugfs_root);
devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,


@ -2476,6 +2476,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
if (MLX5_ESWITCH_MANAGER(esw->dev) &&
mlx5_esw_vport_match_metadata_supported(esw))
esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
err = devl_params_register(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));


@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
ldev->buckets = 1;
}
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);


@ -1699,12 +1699,15 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
err = mlx5_devlink_params_register(priv_to_devlink(dev));
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
goto query_hca_caps_err;
goto params_reg_err;
}
devl_unlock(devlink);
return 0;
params_reg_err:
devl_unregister(devlink);
devl_unlock(devlink);
query_hca_caps_err:
devl_unregister(devlink);
devl_unlock(devlink);


@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto peer_devlink_set_err;
}
devlink_register(devlink);
return 0;
peer_devlink_set_err:


@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
u16 l3_proto; /* protocol specified in the template */
};
/* SparX-5 VCAP fragment types:
* 0 = no fragment, 1 = initial fragment,
* 2 = suspicious fragment, 3 = valid follow-up fragment
*/
enum { /* key / mask */
FRAG_NOT = 0x03, /* 0 / 3 */
FRAG_SOME = 0x11, /* 1 / 1 */
FRAG_FIRST = 0x13, /* 1 / 3 */
FRAG_LATER = 0x33, /* 3 / 3 */
FRAG_INVAL = 0xff, /* invalid */
};
/* Flower fragment flag to VCAP fragment type mapping */
static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
{ FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
{ FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
/* 0/0 0/1 1/0 1/1 <-- first_frag */
};
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
flow_rule_match_control(st->frule, &mt);
if (mt.mask->flags) {
if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
value = 1; /* initial fragment */
mask = 0x3;
} else {
if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
value = 3; /* follow up fragment */
mask = 0x3;
} else {
value = 0; /* no fragment */
mask = 0x3;
}
}
} else {
if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
value = 3; /* follow up fragment */
mask = 0x3;
} else {
value = 0; /* no fragment */
mask = 0x3;
}
u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
/* Lookup verdict based on the 2 + 2 input bits */
u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
if (vdt == FRAG_INVAL) {
NL_SET_ERR_MSG_MOD(st->fco->common.extack,
"Match on invalid fragment flag combination");
return -EINVAL;
}
/* Extract VCAP fragment key and mask from verdict */
value = (vdt >> 4) & 0x3;
mask = vdt & 0x3;
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);

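Editor's note: the sparx5 rework above turns the nested fragment-flag if/else into a table lookup. Each flower flag contributes a two-bit index (key bit shifted left, OR'ed with the mask bit), and the table entry packs the VCAP fragment-type key in the high nibble and its mask in the low nibble, with FRAG_INVAL rejecting impossible combinations. A standalone, runnable demo of the same lookup:

#include <stdio.h>

enum { FRAG_NOT = 0x03, FRAG_SOME = 0x11, FRAG_FIRST = 0x13,
       FRAG_LATER = 0x33, FRAG_INVAL = 0xff };

static const unsigned char frag_map[4][4] = {	/* [is_frag][first_frag] */
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST },
	{ FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL },
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL },
	{ FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST },
};

int main(void)
{
	/* e.g. flower asks: is_fragment key=1/mask=1, first_frag key=1/mask=1 */
	unsigned int is_frag_idx = (1u << 1) | 1;	/* key << 1 | mask */
	unsigned int first_idx   = (1u << 1) | 1;
	unsigned char vdt = frag_map[is_frag_idx][first_idx];

	if (vdt == FRAG_INVAL) {
		puts("invalid flag combination - rejected with -EINVAL");
		return 1;
	}
	/* high nibble = VCAP key, low nibble = VCAP mask */
	printf("VCAP key=%d mask=%d\n", (vdt >> 4) & 3, vdt & 3);	/* 1 / 3 */
	return 0;
}

The table makes the sixteen combinations exhaustive and auditable, where the old branching silently accepted some contradictory key/mask pairs.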

@ -72,6 +72,7 @@ enum mac_version {
};
struct rtl8169_private;
struct r8169_led_classdev;
void r8169_apply_firmware(struct rtl8169_private *tp);
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
@ -83,4 +84,5 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len);
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
void rtl8168_init_leds(struct net_device *ndev);
struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
void r8169_remove_leds(struct r8169_led_classdev *leds);


@ -138,20 +138,31 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
/* ignore errors */
devm_led_classdev_register(&ndev->dev, led_cdev);
led_classdev_register(&ndev->dev, led_cdev);
}
void rtl8168_init_leds(struct net_device *ndev)
struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
{
/* bind resource mgmt to netdev */
struct device *dev = &ndev->dev;
struct r8169_led_classdev *leds;
int i;
leds = devm_kcalloc(dev, RTL8168_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
if (!leds)
return;
return NULL;
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
return leds;
}
void r8169_remove_leds(struct r8169_led_classdev *leds)
{
if (!leds)
return;
for (struct r8169_led_classdev *l = leds; l->ndev; l++)
led_classdev_unregister(&l->led);
kfree(leds);
}

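Editor's note: the r8169 LED hunks above drop devm registration (the LEDs must be unregistered before the netdev disappears, not after) and return a kcalloc'ed array with one extra zeroed entry so teardown can walk it without carrying a separate count. A plain C model of that sentinel-terminated array pattern (not the driver itself):

#include <stdio.h>
#include <stdlib.h>

struct led { void *ndev; int id; };

static struct led *init_leds(void *ndev, int n)
{
	struct led *leds = calloc(n + 1, sizeof(*leds));	/* +1: sentinel */

	if (!leds)
		return NULL;
	for (int i = 0; i < n; i++) {
		leds[i].ndev = ndev;
		leds[i].id = i;
	}
	return leds;			/* leds[n].ndev stays NULL */
}

static void remove_leds(struct led *leds)
{
	if (!leds)
		return;
	for (struct led *l = leds; l->ndev; l++)	/* stop at sentinel */
		printf("unregistering led %d\n", l->id);
	free(leds);
}

int main(void)
{
	int dummy;
	struct led *leds = init_leds(&dummy, 4);

	remove_leds(leds);
	return 0;
}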

@ -634,6 +634,8 @@ struct rtl8169_private {
const char *fw_name;
struct rtl_fw *rtl_fw;
struct r8169_led_classdev *leds;
u32 ocp_base;
};
@ -4930,6 +4932,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
if (IS_ENABLED(CONFIG_R8169_LEDS))
r8169_remove_leds(tp->leds);
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
@ -5391,7 +5396,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ENABLED(CONFIG_R8169_LEDS) &&
tp->mac_version > RTL_GIGA_MAC_VER_06 &&
tp->mac_version < RTL_GIGA_MAC_VER_61)
rtl8168_init_leds(dev);
tp->leds = rtl8168_init_leds(dev);
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);


@ -1060,8 +1060,10 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
struct ravb_rx_desc *gbeth_rx_ring;
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
union {
struct ravb_rx_desc *desc;
struct ravb_ex_rx_desc *ex_desc;
} rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;

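Editor's note: the ravb header change above folds the two per-SoC ring pointers (gbeth_rx_ring and rx_ring[]) into a single per-queue union; each code path consistently dereferences the member matching the hardware, so the struct stops carrying a dead pointer. A toy version (plain user-space C, simplified descriptor layouts):

#include <stdio.h>
#include <stdlib.h>

struct rx_desc    { unsigned int dptr; };			/* GbEth-style */
struct ex_rx_desc { unsigned int dptr; unsigned int ts; };	/* R-Car-style */

struct priv {
	union {
		struct rx_desc *desc;
		struct ex_rx_desc *ex_desc;
	} rx_ring[2];
};

int main(void)
{
	struct priv p;

	/* A GbEth-style queue uses .desc ... */
	p.rx_ring[0].desc = calloc(16, sizeof(*p.rx_ring[0].desc));
	/* ... an R-Car-style queue uses .ex_desc; never both at once. */
	p.rx_ring[1].ex_desc = calloc(16, sizeof(*p.rx_ring[1].ex_desc));

	printf("desc entry %zu bytes, ex_desc entry %zu bytes\n",
	       sizeof(*p.rx_ring[0].desc), sizeof(*p.rx_ring[1].ex_desc));
	free(p.rx_ring[0].desc);
	free(p.rx_ring[1].ex_desc);
	return 0;
}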

@ -250,11 +250,11 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
unsigned int ring_size;
unsigned int i;
if (!priv->gbeth_rx_ring)
if (!priv->rx_ring[q].desc)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
@ -264,9 +264,9 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
priv->rx_desc_dma[q]);
priv->gbeth_rx_ring = NULL;
priv->rx_ring[q].desc = NULL;
}
static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
@ -275,11 +275,11 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
unsigned int ring_size;
unsigned int i;
if (!priv->rx_ring[q])
if (!priv->rx_ring[q].ex_desc)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
@ -290,9 +290,9 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
priv->rx_desc_dma[q]);
priv->rx_ring[q] = NULL;
priv->rx_ring[q].ex_desc = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
@ -344,11 +344,11 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
unsigned int i;
rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
memset(priv->gbeth_rx_ring, 0, rx_ring_size);
memset(priv->rx_ring[q].desc, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->gbeth_rx_ring[i];
rx_desc = &priv->rx_ring[q].desc[i];
rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
GBETH_RX_BUFF_MAX,
@ -361,7 +361,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
rx_desc = &priv->gbeth_rx_ring[i];
rx_desc = &priv->rx_ring[q].desc[i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@ -374,11 +374,11 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
dma_addr_t dma_addr;
unsigned int i;
memset(priv->rx_ring[q], 0, rx_ring_size);
memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
rx_desc = &priv->rx_ring[q].ex_desc[i];
rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
RX_BUF_SZ,
@ -391,7 +391,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
rx_desc = &priv->rx_ring[q][i];
rx_desc = &priv->rx_ring[q].ex_desc[i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@ -446,10 +446,10 @@ static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->rx_desc_dma[q],
GFP_KERNEL);
return priv->gbeth_rx_ring;
priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->rx_desc_dma[q],
GFP_KERNEL);
return priv->rx_ring[q].desc;
}
static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
@ -459,10 +459,11 @@ static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->rx_desc_dma[q],
GFP_KERNEL);
return priv->rx_ring[q];
priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
ring_size,
&priv->rx_desc_dma[q],
GFP_KERNEL);
return priv->rx_ring[q].ex_desc;
}
/* Init skb and descriptor buffer for Ethernet AVB */
@ -780,12 +781,15 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
int limit;
int i;
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
desc = &priv->gbeth_rx_ring[entry];
for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
@ -849,15 +853,12 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
break;
}
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
desc = &priv->gbeth_rx_ring[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->gbeth_rx_ring[entry];
desc = &priv->rx_ring[q].desc[entry];
desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
if (!priv->rx_skb[q][entry]) {
@ -893,30 +894,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
unsigned int limit, i;
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
int limit;
int entry;
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].ex_desc[entry];
if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
break;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
desc = &priv->rx_ring[q][entry];
while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
if (--boguscnt < 0)
break;
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@ -962,18 +962,15 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
stats->rx_packets++;
rx_packets++;
stats->rx_bytes += pkt_len;
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
desc = &priv->rx_ring[q].ex_desc[entry];
desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
@ -998,9 +995,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
*quota -= limit - (++boguscnt);
return boguscnt <= 0;
stats->rx_packets += rx_packets;
*quota -= rx_packets;
return *quota == 0;
}
/* Packet receive function for Ethernet AVB */

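Editor's note: the ravb RX rework above also replaces the while-plus-boguscnt loop with a single bounded for-loop: the cursor advances in the loop header, the loop exits early when the NAPI budget is spent or an empty descriptor is reached, and the consumed budget is the number of packets actually delivered. A compilable user-space model of that control flow:

#include <stdio.h>

#define RING 8

int main(void)
{
	int ring[RING] = { 1, 1, 1, 0, 0, 0, 0, 0 };	/* 1 = frame ready */
	unsigned int cur = 0, dirty = 0;
	int quota = 2, rx_packets = 0;
	int limit = dirty + RING - cur;		/* at most one full ring */

	for (int i = 0; i < limit; i++, cur++) {
		int entry = cur % RING;

		if (rx_packets == quota || !ring[entry])
			break;			/* budget spent or ring empty */
		ring[entry] = 0;		/* "deliver" the frame */
		rx_packets++;
	}
	quota -= rx_packets;
	printf("delivered %d, quota left %d, cursor %u\n",
	       rx_packets, quota, cur);		/* prints: 2, 0, 2 */
	return 0;
}

Accounting delivered packets directly avoids the off-by-one-prone boguscnt arithmetic the old code used to reconstruct the same number.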

@ -550,6 +550,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;


@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
/* The loopback bit seems to be re-set when link change
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)


@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;


@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
dev_info(priv->device, "\tDWMAC100\n");
mac->pcsr = priv->ioaddr;
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;


@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
{
priv->phylink_config.mac_capabilities |= MAC_2500FD;
if (priv->plat->tx_queues_to_use > 1)
priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
else
priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@ -1378,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;


@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
{
priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
MAC_10000FD | MAC_25000FD |
MAC_40000FD | MAC_50000FD |
MAC_100000FD;
}
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@ -1540,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@ -1601,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
.phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@ -1661,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_1000FD | MAC_2500FD | MAC_5000FD |
MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@ -1698,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_1000FD | MAC_2500FD | MAC_5000FD |
MAC_10000FD | MAC_25000FD |
MAC_40000FD | MAC_50000FD |
MAC_100000FD;
mac->link.duplex = 0;
mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;


@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
static void stmmac_set_half_duplex(struct stmmac_priv *priv)
{
/* Half-Duplex can only work with single tx queue */
if (priv->plat->tx_queues_to_use > 1)
priv->phylink_config.mac_capabilities &=
~(MAC_10HD | MAC_100HD | MAC_1000HD);
else
priv->phylink_config.mac_capabilities |=
(MAC_10HD | MAC_100HD | MAC_1000HD);
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
xpcs_get_interfaces(priv->hw->xpcs,
priv->phylink_config.supported_interfaces);
priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10FD | MAC_100FD |
MAC_1000FD;
stmmac_set_half_duplex(priv);
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
priv->phylink_config.mac_capabilities = priv->hw->link.caps;
max_speed = priv->plat->max_speed;
if (max_speed)
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
@ -7286,6 +7271,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0, i;
int max_speed;
if (netif_running(dev))
stmmac_release(dev);
@ -7299,7 +7285,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
stmmac_set_half_duplex(priv);
stmmac_mac_phylink_get_caps(priv);
priv->phylink_config.mac_capabilities = priv->hw->link.caps;
max_speed = priv->plat->max_speed;
if (max_speed)
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
stmmac_napi_add(dev);
if (netif_running(dev))

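Editor's note: the stmmac series above moves the capability mask from phylink_config into mac->link.caps: each core's setup() publishes its full mask once, phylink_get_caps() masks the half-duplex bits off whenever more than one TX queue is in use (half duplex only works with a single queue), and stmmac copies hw->link.caps into phylink both at setup and on queue reconfiguration. A self-contained sketch of the capability computation (bit values re-declared locally for illustration, not the kernel's):

#include <stdio.h>

#define MAC_10HD	(1u << 0)
#define MAC_10FD	(1u << 1)
#define MAC_100HD	(1u << 2)
#define MAC_100FD	(1u << 3)
#define MAC_1000HD	(1u << 4)
#define MAC_1000FD	(1u << 5)

static unsigned int link_caps(unsigned int base, int tx_queues)
{
	if (tx_queues > 1)	/* half duplex needs a single TX queue */
		base &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		base |= MAC_10HD | MAC_100HD | MAC_1000HD;
	return base;
}

int main(void)
{
	unsigned int base = MAC_10FD | MAC_100FD | MAC_1000FD;

	printf("1 txq: %#x\n", link_caps(base, 1));	/* HD bits present */
	printf("4 txq: %#x\n", link_caps(base, 4));	/* HD bits masked */
	return 0;
}

Centralizing the mask in the hw struct is what lets stmmac_reinit_queues() recompute it consistently instead of patching phylink_config in two places.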

@ -2793,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret = 0, i;
@ -2805,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (ret)
return ret;
/* The DMA Channels are not guaranteed to be in a clean state.
* Reset and disable them to ensure that they are back to the
* clean state and ready to be used.
*/
for (i = 0; i < common->tx_ch_num; i++) {
k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
am65_cpsw_nuss_tx_cleanup);
k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
}
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
am65_cpsw_nuss_rx_cleanup, !!i);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;


@ -2132,14 +2132,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
if (net_ratelimit()) {
netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
}
WARN_ON_ONCE(1);
return -EINVAL;
}

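Editor's note: the tun fix above matters because a guest can feed malformed GSO metadata at will, so the error message plus 64-byte hex dump must be rate-limited or the path becomes a log-flooding vector. A sketch of the pattern (the helper name is a placeholder; net_ratelimit() returns nonzero while the global printk budget allows more output):

#include <linux/net.h>
#include <linux/netdevice.h>

static void report_bad_gso(struct net_device *dev, const void *hdr, int len)
{
	if (!net_ratelimit())
		return;			/* budget exhausted: stay silent */

	netdev_err(dev, "unexpected GSO metadata\n");
	print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE, 16, 1,
		       hdr, min(len, 64), true);
}

Switching from pr_err() to netdev_err() also names the offending interface, which the unprefixed message did not.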

@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
ax88179_reset(dev);
return 0;
}
@ -1695,7 +1697,6 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@ -1708,7 +1709,6 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,


@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
}
},
{
.ident = "Framework Laptop 13 (Phoenix)",
.driver_data = &quirk_spurious_8042,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
}
},
{}
};


@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
if (ret) {
spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
spin_lock_irq(cdev->ccwlock);
/* Wait until a final state is reached */
while (!dev_fsm_final_state(cdev)) {
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
spin_lock_irq(cdev->ccwlock);
}
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {

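Editor's note: the s390 ccw fix above replaces a single wait_event() with a loop that re-tests the condition under the lock: the device state can change again between the wakeup and the re-acquisition of the lock, so only a locked re-check guarantees the state inspected afterwards is the one the waiter saw. The generic shape of that pattern, with placeholder names:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_dev {
	spinlock_t lock;
	wait_queue_head_t wait_q;
	int state;			/* placeholder FSM state */
};

static bool state_is_final(struct my_dev *dev)
{
	return READ_ONCE(dev->state) != 0;	/* placeholder condition */
}

static void my_wait_for_final_state(struct my_dev *dev)
{
	spin_lock_irq(&dev->lock);
	while (!state_is_final(dev)) {
		spin_unlock_irq(&dev->lock);
		wait_event(dev->wait_q, state_is_final(dev));
		spin_lock_irq(&dev->lock);	/* re-test under the lock */
	}
	/* here the state is final and observed with the lock held */
	spin_unlock_irq(&dev->lock);
}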

@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
lgr_info_log();
}
static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
int dstat)
static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
if (dcc == 1)
return -EAGAIN;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
return;
return 0;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
return -EIO;
}
/* qdio interrupt handler */
@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
int cstat, dstat;
int cstat, dstat, rc, dcc;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
rc = 0;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
qdio_establish_handle_irq(irq_ptr, cstat, dstat);
rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
else if (dcc == 1)
rc = -EAGAIN;
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
if (rc == -EAGAIN) {
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
if (!rc)
return;
DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
wake_up(&cdev->private->wait_q);
}


@ -292,13 +292,16 @@ out:
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
dmb->cpu_addr, dmb->dma_addr);
dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
DMA_FROM_DEVICE);
folio_put(virt_to_folio(dmb->cpu_addr));
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
struct folio *folio;
unsigned long bit;
int rc;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
&dmb->dma_addr,
GFP_KERNEL | __GFP_NOWARN |
__GFP_NOMEMALLOC | __GFP_NORETRY);
if (!dmb->cpu_addr)
clear_bit(dmb->sba_idx, ism->sba_bitmap);
folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
__GFP_NORETRY, get_order(dmb->dmb_len));
return dmb->cpu_addr ? 0 : -ENOMEM;
if (!folio) {
rc = -ENOMEM;
goto out_bit;
}
dmb->cpu_addr = folio_address(folio);
dmb->dma_addr = dma_map_page(&ism->pdev->dev,
virt_to_page(dmb->cpu_addr), 0,
dmb->dmb_len, DMA_FROM_DEVICE);
if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
rc = -ENOMEM;
goto out_free;
}
return 0;
out_free:
kfree(dmb->cpu_addr);
out_bit:
clear_bit(dmb->sba_idx, ism->sba_bitmap);
return rc;
}
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,

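Editor's note: the s390 ism change above stops using dma_alloc_coherent() for receive buffers: the buffer now comes from the page allocator as a folio and is streaming-mapped with dma_map_page(), and teardown undoes the two steps in reverse order, unmap first, then drop the folio reference. A reduced sketch of the pairing with a placeholder struct (not the ism driver):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct my_dmb {
	void *cpu_addr;
	dma_addr_t dma_addr;
	size_t len;
};

static int my_dmb_alloc(struct device *dev, struct my_dmb *dmb)
{
	struct folio *folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN |
					  __GFP_NORETRY, get_order(dmb->len));

	if (!folio)
		return -ENOMEM;

	dmb->cpu_addr = folio_address(folio);
	dmb->dma_addr = dma_map_page(dev, folio_page(folio, 0), 0,
				     dmb->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dmb->dma_addr)) {
		folio_put(folio);	/* release the fresh folio */
		return -ENOMEM;
	}
	return 0;
}

static void my_dmb_free(struct device *dev, struct my_dmb *dmb)
{
	dma_unmap_page(dev, dmb->dma_addr, dmb->len, DMA_FROM_DEVICE);
	folio_put(virt_to_folio(dmb->cpu_addr));
}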

@ -543,10 +543,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->q->disk);
if (!blk_rq_is_passthrough(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
cmd->flags &= ~SCMD_INITIALIZED;
}
WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
!(cmd->flags & SCMD_INITIALIZED));
cmd->flags = 0;
/*
* Calling rcu_barrier() is not necessary here because the


@ -616,6 +616,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tze->trip_stats[trip_id].timestamp = now;
tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
tze->trip_stats[trip_id].count++;
tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
(temperature - tze->trip_stats[trip_id].avg) /
tze->trip_stats[trip_id].count;

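Editor's note: the thermal-debug fix above adds the missing count++ to the incremental mean. The update avg += (x - avg) / n is only correct when n already includes the new sample, which is exactly what the added increment provides. A standalone check:

#include <stdio.h>

int main(void)
{
	double avg = 0.0;
	int count = 0;
	const double samples[] = { 40, 42, 44, 46 };

	for (int i = 0; i < 4; i++) {
		count++;			/* the step the fix adds */
		avg += (samples[i] - avg) / count;
	}
	printf("avg = %g (expect 43)\n", avg);	/* (40+42+44+46)/4 = 43 */
	return 0;
}

Without the increment the divisor lags by one (and is zero for the first sample), so the reported average drifts away from the true mean.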

@ -423,6 +423,7 @@ err_free:
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
* @reset: Issue reset to the host router
*
* Starts the domain and adds it to the system. Hotplugging devices will
* work after this has been returned successfully. In order to remove
@ -431,7 +432,7 @@ err_free:
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_domain_add(struct tb *tb)
int tb_domain_add(struct tb *tb, bool reset)
{
int ret;
@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
/* Start the domain */
if (tb->cm_ops->start) {
ret = tb->cm_ops->start(tb);
ret = tb->cm_ops->start(tb, reset);
if (ret)
goto err_domain_del;
}

View File

@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
return 0;
}
static int icm_start(struct tb *tb)
static int icm_start(struct tb *tb, bool not_used)
{
struct icm *icm = tb_priv(tb);
int ret;


@ -6,6 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/delay.h>
#include "tb.h"
/**
@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
/**
* tb_lc_reset_port() - Trigger downstream port reset through LC
* @port: Port that is reset
*
* Triggers downstream port reset through link controller registers.
* Returns %0 in case of success negative errno otherwise. Only supports
* non-USB4 routers with link controller (that's Thunderbolt 2 and
* Thunderbolt 3).
*/
int tb_lc_reset_port(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
int cap, ret;
u32 mode;
if (sw->generation < 2)
return -EINVAL;
cap = find_port_lc_cap(port);
if (cap < 0)
return cap;
ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
if (ret)
return ret;
mode |= TB_LC_PORT_MODE_DPR;
ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
if (ret)
return ret;
fsleep(10000);
ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
if (ret)
return ret;
mode &= ~TB_LC_PORT_MODE_DPR;
return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
}
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);


@ -1221,7 +1221,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
str_enabled_disabled(port_ok));
}
static void nhi_reset(struct tb_nhi *nhi)
static bool nhi_reset(struct tb_nhi *nhi)
{
ktime_t timeout;
u32 val;
@ -1229,11 +1229,11 @@ static void nhi_reset(struct tb_nhi *nhi)
val = ioread32(nhi->iobase + REG_CAPS);
/* Reset only v2 and later routers */
if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
return;
return false;
if (!host_reset) {
dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
return;
return false;
}
iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
@ -1244,12 +1244,14 @@ static void nhi_reset(struct tb_nhi *nhi)
val = ioread32(nhi->iobase + REG_RESET);
if (!(val & REG_RESET_HRR)) {
dev_warn(&nhi->pdev->dev, "host router reset successful\n");
return;
return true;
}
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
return false;
}
static int nhi_init_msi(struct tb_nhi *nhi)
@ -1331,6 +1333,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
bool reset;
int res;
if (!nhi_imr_valid(pdev))
@ -1365,7 +1368,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
nhi_reset(nhi);
/*
* Only USB4 v2 hosts support host reset so if we already did
* that then don't do it again when the domain is initialized.
*/
reset = nhi_reset(nhi) ? false : host_reset;
res = nhi_init_msi(nhi);
if (res)
@ -1392,7 +1399,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
res = tb_domain_add(tb, reset);
if (res) {
/*
* At this point the RX/TX rings might already have been


@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
return -ETIMEDOUT;
}
/**
* tb_path_deactivate_hop() - Deactivate one path in path config space
* @port: Lane or protocol adapter
* @hop_index: HopID of the path to be cleared
*
* This deactivates or clears a single path config space entry at
* @hop_index. Returns %0 in success and negative errno otherwise.
*/
int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
{
return __tb_path_deactivate_hop(port, hop_index, true);
}
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;


@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
static int tb_port_reset(struct tb_port *port)
{
if (tb_switch_is_usb4(port->sw))
return port->cap_usb4 ? usb4_port_reset(port) : 0;
return tb_lc_reset_port(port);
}
/*
* tb_init_port() - initialize a port
*
@ -1534,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
regs->__unknown1, regs->__unknown4);
}
static int tb_switch_reset_host(struct tb_switch *sw)
{
if (sw->generation > 1) {
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
int i, ret;
/*
* For lane adapters we issue downstream port
* reset and clear up path config spaces.
*
* For protocol adapters we disable the path and
* clear path config space one by one (from 8 to
* Max Input HopID of the adapter).
*/
if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
ret = tb_port_reset(port);
if (ret)
return ret;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
tb_usb3_port_enable(port, false);
} else if (tb_port_is_dpin(port) ||
tb_port_is_dpout(port)) {
tb_dp_port_enable(port, false);
} else if (tb_port_is_pcie_down(port) ||
tb_port_is_pcie_up(port)) {
tb_pci_port_enable(port, false);
} else {
continue;
}
/* Cleanup path config space of protocol adapter */
for (i = TB_PATH_MIN_HOPID;
i <= port->config.max_in_hop_id; i++) {
ret = tb_path_deactivate_hop(port, i);
if (ret)
return ret;
}
}
} else {
struct tb_cfg_result res;
/* Thunderbolt 1 uses the "reset" config space packet */
res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
TB_CFG_SWITCH, 2, 2);
if (res.err)
return res.err;
res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
if (res.err > 0)
return -EIO;
else if (res.err < 0)
return res.err;
}
return 0;
}
static int tb_switch_reset_device(struct tb_switch *sw)
{
return tb_port_reset(tb_switch_downstream_port(sw));
}
static bool tb_switch_enumerated(struct tb_switch *sw)
{
u32 val;
int ret;
/*
* Read directly from the hardware because we use this also
* during system sleep where sw->config.enabled is already set
* by us.
*/
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
if (ret)
return false;
return !!(val & ROUTER_CS_3_V);
}
/**
* tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
* @sw: Switch to reset
* tb_switch_reset() - Perform reset to the router
* @sw: Router to reset
*
* Return: Returns 0 on success or an error code on failure.
* Issues reset to the router @sw. Can be used for any router. For host
* routers, resets all the downstream ports and cleans up path config
* spaces accordingly. For device routers issues downstream port reset
* through the parent router, so as side effect there will be unplug
* soon after this is finished.
*
* If the router is not enumerated does nothing.
*
* Returns %0 on success or negative errno in case of failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
struct tb_cfg_result res;
int ret;
if (sw->generation > 1)
/*
* We cannot access the port config spaces unless the router is
* already enumerated. If the router is not enumerated it is
* equal to being reset so we can skip that here.
*/
if (!tb_switch_enumerated(sw))
return 0;
tb_sw_dbg(sw, "resetting switch\n");
tb_sw_dbg(sw, "resetting\n");
res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
TB_CFG_SWITCH, 2, 2);
if (res.err)
return res.err;
res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
if (res.err > 0)
return -EIO;
return res.err;
if (tb_route(sw))
ret = tb_switch_reset_device(sw);
else
ret = tb_switch_reset_host(sw);
if (ret)
tb_sw_warn(sw, "failed to reset\n");
return ret;
}
/**
@ -3078,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
if (sw->is_unplugged)
return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
/*
* Unconfigure downstream port so that wake-on-connect can be
* configured after router unplug. No need to unconfigure upstream port
* since its router is unplugged.
*/
up = tb_upstream_port(sw);
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
if (sw->is_unplugged)
return;
up = tb_upstream_port(sw);
if (tb_switch_is_usb4(up->sw))
usb4_port_unconfigure(up);
else
tb_lc_unconfigure_port(up);
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@ -3339,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
int tb_switch_resume(struct tb_switch *sw)
static void tb_switch_check_wakes(struct tb_switch *sw)
{
if (device_may_wakeup(&sw->dev)) {
if (tb_switch_is_usb4(sw))
usb4_switch_check_wakes(sw);
}
}
/**
* tb_switch_resume() - Resume a switch after sleep
* @sw: Switch to resume
* @runtime: Is this resume from runtime suspend or system sleep
*
* Resumes and re-enumerates router (and all its children), if still plugged
* after suspend. Don't enumerate device router whose UID was changed during
* suspend. If this is resume from system sleep, notifies PM core about the
* wakes occurred during suspend. Disables all wakes, except USB4 wake of
* upstream port for USB4 routers that shall be always enabled.
*/
int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@ -3388,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
if (!runtime)
tb_switch_check_wakes(sw);
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@ -3417,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
if (port->remote && tb_switch_resume(port->remote->sw)) {
if (port->remote &&
tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);


@ -1717,6 +1717,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
}
/* Needs to be on different routers */
if (in->sw == port->sw) {
tb_port_dbg(port, "skipping DP OUT on same router\n");
continue;
}
tb_port_dbg(port, "DP OUT available\n");
/*
@ -2628,7 +2634,7 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
return 0;
}
static int tb_start(struct tb *tb)
static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
int ret;
@ -2669,12 +2675,24 @@ static int tb_start(struct tb *tb)
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb);
/* Add DP resources from the DP tunnels created by the boot firmware */
tb_discover_dp_resources(tb);
/*
* Boot firmware might have created tunnels of its own. Since we
* cannot be sure they are usable for us, tear them down and
* reset the ports to handle it as new hotplug for USB4 v1
* routers (for USB4 v2 and beyond we already do host reset).
*/
if (reset && usb4_switch_version(tb->root_switch) == 1) {
tb_switch_reset(tb->root_switch);
} else {
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb);
/* Add DP resources from the DP tunnels created by the boot firmware */
tb_discover_dp_resources(tb);
}
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@ -2745,10 +2763,14 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb->root_switch);
/*
* For non-USB4 hosts (Apple systems) remove any PCIe devices
* the firmware might have setup.
*/
if (!tb_switch_is_usb4(tb->root_switch))
tb_switch_reset(tb->root_switch);
tb_switch_resume(tb->root_switch);
tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@ -2874,7 +2896,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
tb_switch_resume(tb->root_switch);
tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)


@ -487,7 +487,7 @@ struct tb_path {
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
int (*start)(struct tb *tb);
int (*start)(struct tb *tb, bool reset);
void (*stop)(struct tb *tb);
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
@ -750,7 +750,7 @@ int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
int tb_domain_add(struct tb *tb);
int tb_domain_add(struct tb *tb, bool reset);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
@ -817,7 +817,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@ -1154,6 +1154,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
@ -1173,6 +1174,7 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_reset_port(struct tb_port *port);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
@ -1276,6 +1278,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
return usb4_switch_version(sw) > 0;
}
void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
@ -1305,6 +1308,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);


@ -194,6 +194,8 @@ struct tb_regs_switch_header {
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
#define ROUTER_CS_3 0x03
#define ROUTER_CS_3_V BIT(31)
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
@ -389,6 +391,7 @@ struct tb_regs_port_header {
#define PORT_CS_18_CSA BIT(22)
#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
#define PORT_CS_19_DPR BIT(0)
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
@ -584,6 +587,9 @@ struct tb_regs_hop {
#define TB_LC_POWER 0x740
/* Link controller registers */
#define TB_LC_PORT_MODE 0x26
#define TB_LC_PORT_MODE_DPR BIT(0)
#define TB_LC_CS_42 0x2a
#define TB_LC_CS_42_USB_PLUGGED BIT(31)


@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
static void usb4_switch_check_wakes(struct tb_switch *sw)
/**
* usb4_switch_check_wakes() - Check for wakes and notify PM core about them
* @sw: Router whose wakes to check
*
* Checks wakes occurred during suspend and notify the PM core about them.
*/
void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
bool wakeup = false;
u32 val;
if (!device_may_wakeup(&sw->dev))
return;
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
usb4_switch_check_wakes(sw);
if (!tb_route(sw))
return 0;
@ -1113,6 +1114,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
/**
* usb4_port_reset() - Issue downstream port reset
* @port: USB4 port to reset
*
* Issues downstream port reset to @port.
*/
int usb4_port_reset(struct tb_port *port)
{
int ret;
u32 val;
if (!port->cap_usb4)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
val |= PORT_CS_19_DPR;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
fsleep(10000);
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
val &= ~PORT_CS_19_DPR;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
}
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;


@ -357,9 +357,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
if (rate > 0 && p->uartclk != rate) {
clk_disable_unprepare(d->clk);
if (rate > 0) {
/*
* Note that any clock-notifer worker will block in
* serial8250_update_uartclk() until we are done.
@ -367,8 +367,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
clk_prepare_enable(d->clk);
}
clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}

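Editor's note: the dw8250 reorder above narrows the clock gating. clk_round_rate() is a pure query and no longer runs with the clock disabled, and the disable/enable pair now brackets only the actual clk_set_rate() call, so the enable count stays balanced on every path. Distilled into a sketch with placeholder names:

#include <linux/clk.h>

static void set_baud_clock(struct clk *clk, unsigned long newrate,
			   unsigned long *uartclk)
{
	long rate = clk_round_rate(clk, newrate);	/* pure query */

	if (rate > 0) {
		clk_disable_unprepare(clk);	/* gate only around the change */
		if (!clk_set_rate(clk, newrate))
			*uartclk = rate;
		clk_prepare_enable(clk);	/* balanced on the same path */
	}
}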

@ -1086,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
u32 istat;
u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
u32 stat = mxs_read(s, REG_STAT);
uart_port_lock(&s->port);
stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@ -1126,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
uart_port_unlock(&s->port);
return IRQ_HANDLED;
}
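Editor's note: the mxs-auart fix above moves the REG_STAT read inside the port lock and holds the lock across the whole handler, so the status read, interrupt read and acknowledge cannot interleave with another CPU reconfiguring the port. A skeleton of that shape; the register offsets and MMIO access below are hypothetical stand-ins for the driver's helpers:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial_core.h>

#define MY_REG_STAT	0x00	/* hypothetical offsets */
#define MY_REG_INTR	0x04

static irqreturn_t my_uart_irq(int irq, void *context)
{
	struct uart_port *port = context;
	u32 stat, istat;

	uart_port_lock(port);
	stat = readl(port->membase + MY_REG_STAT);	/* read state... */
	istat = readl(port->membase + MY_REG_INTR);
	writel(istat, port->membase + MY_REG_INTR);	/* ...ack atomically */
	uart_port_unlock(port);

	(void)stat;		/* the real handler acts on stat here */
	return IRQ_HANDLED;
}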

Some files were not shown because too many files have changed in this diff.