Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar, 2017-08-29 15:09:03 +02:00
commit e0563e0495
116 changed files with 819 additions and 389 deletions


@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 # *DOCUMENTATION*


@@ -75,13 +75,20 @@ void arc_init_IRQ(void)
     * Set a default priority for all available interrupts to prevent
     * switching of register banks if Fast IRQ and multiple register banks
     * are supported by CPU.
-    * Also disable all IRQ lines so faulty external hardware won't
+    * Also disable private-per-core IRQ lines so faulty external HW won't
     * trigger interrupt that kernel is not ready to handle.
     */
    for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
        write_aux_reg(AUX_IRQ_SELECT, i);
        write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
-       write_aux_reg(AUX_IRQ_ENABLE, 0);
+
+       /*
+        * Only mask cpu private IRQs here.
+        * "common" interrupts are masked at IDU, otherwise it would
+        * need to be unmasked at each cpu, with IPIs
+        */
+       if (i < FIRST_EXT_IRQ)
+           write_aux_reg(AUX_IRQ_ENABLE, 0);
    }

    /* setup status32, don't enable intr yet as kernel doesn't want */


@@ -27,7 +27,7 @@
  */
 void arc_init_IRQ(void)
 {
-   int level_mask = 0, i;
+   unsigned int level_mask = 0, i;

    /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
    level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;


@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6455=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -25,7 +24,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set


@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6457=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -26,7 +25,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set


@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6472=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set


@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6474=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set


@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6678=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set


@@ -208,14 +208,14 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
    pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
    if (!pic) {
-       pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
+       pr_err("%pOF: Could not alloc PIC structure.\n", np);
        return NULL;
    }

    pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
                         &megamod_domain_ops, pic);
    if (!pic->irqhost) {
-       pr_err("%s: Could not alloc host.\n", np->full_name);
+       pr_err("%pOF: Could not alloc host.\n", np);
        goto error_free;
    }
@@ -225,7 +225,7 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
    pic->regs = of_iomap(np, 0);
    if (!pic->regs) {
-       pr_err("%s: Could not map registers.\n", np->full_name);
+       pr_err("%pOF: Could not map registers.\n", np);
        goto error_free;
    }
@@ -253,8 +253,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
        irq_data = irq_get_irq_data(irq);
        if (!irq_data) {
-           pr_err("%s: combiner-%d no irq_data for virq %d!\n",
-                  np->full_name, i, irq);
+           pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
+                  np, i, irq);
            continue;
        }
@@ -265,16 +265,16 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
         * of the core priority interrupts (4 - 15).
         */
        if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
-           pr_err("%s: combiner-%d core irq %ld out of range!\n",
-                  np->full_name, i, hwirq);
+           pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
+                  np, i, hwirq);
            continue;
        }

        /* record the mapping */
        mapping[hwirq - 4] = i;

-       pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
-            np->full_name, i, hwirq);
+       pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
+            np, i, hwirq);

        cascade_data[i].pic = pic;
        cascade_data[i].index = i;
@@ -290,8 +290,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
    /* Finally, set up the MUX registers */
    for (i = 0; i < NR_MUX_OUTPUTS; i++) {
        if (mapping[i] != IRQ_UNMAPPED) {
-           pr_debug("%s: setting mux %d to priority %d\n",
-                np->full_name, mapping[i], i + 4);
+           pr_debug("%pOF: setting mux %d to priority %d\n",
+                np, mapping[i], i + 4);
            set_megamod_mux(pic, mapping[i], i);
        }
    }


@@ -436,8 +436,8 @@ void __init c64x_setup_clocks(void)
    err = of_property_read_u32(node, "clock-frequency", &val);
    if (err || val == 0) {
-       pr_err("%s: no clock-frequency found! Using %dMHz\n",
-              node->full_name, (int)val / 1000000);
+       pr_err("%pOF: no clock-frequency found! Using %dMHz\n",
+              node, (int)val / 1000000);
        val = 25000000;
    }
    clkin1.rate = val;


@@ -204,14 +204,14 @@ void __init timer64_init(void)
    timer = of_iomap(np, 0);
    if (!timer) {
-       pr_debug("%s: Cannot map timer registers.\n", np->full_name);
+       pr_debug("%pOF: Cannot map timer registers.\n", np);
        goto out;
    }
-   pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
+   pr_debug("%pOF: Timer registers=%p.\n", np, timer);

    cd->irq = irq_of_parse_and_map(np, 0);
    if (cd->irq == NO_IRQ) {
-       pr_debug("%s: Cannot find interrupt.\n", np->full_name);
+       pr_debug("%pOF: Cannot find interrupt.\n", np);
        iounmap(timer);
        goto out;
    }
@@ -229,7 +229,7 @@ void __init timer64_init(void)
        dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
    }

-   pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
+   pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq);

    clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);


@@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
    /* Mark this context has been used on the new CPU */
    if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
+       /*
+        * This full barrier orders the store to the cpumask above vs
+        * a subsequent operation which allows this CPU to begin loading
+        * translations for next.
+        *
+        * When using the radix MMU that operation is the load of the
+        * MMU context id, which is then moved to SPRN_PID.
+        *
+        * For the hash MMU it is either the first load from slb_cache
+        * in switch_slb(), and/or the store of paca->mm_ctx_id in
+        * copy_mm_to_paca().
+        *
+        * On the read side the barrier is in pte_xchg(), which orders
+        * the store to the PTE vs the load of mm_cpumask.
+        */
+       smp_mb();
+
        new_on_cpu = true;
    }


@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
    unsigned long *p = (unsigned long *)ptep;
    __be64 prev;

+   /* See comment in switch_mm_irqs_off() */
    prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
                         (__force unsigned long)pte_raw(new));


@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 {
    unsigned long *p = (unsigned long *)ptep;

+   /* See comment in switch_mm_irqs_off() */
    return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
 }
 #endif


@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args) struct kvm_create_spapr_tce_64 *args)
{ {
struct kvmppc_spapr_tce_table *stt = NULL; struct kvmppc_spapr_tce_table *stt = NULL;
struct kvmppc_spapr_tce_table *siter;
unsigned long npages, size; unsigned long npages, size;
int ret = -ENOMEM; int ret = -ENOMEM;
int i; int i;
int fd = -1;
if (!args->size) if (!args->size)
return -EINVAL; return -EINVAL;
/* Check this LIOBN hasn't been previously allocated */
list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt->liobn == args->liobn)
return -EBUSY;
}
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
npages = kvmppc_tce_pages(size); npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
if (ret) { if (ret)
stt = NULL; return ret;
goto fail;
}
ret = -ENOMEM; ret = -ENOMEM;
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL); GFP_KERNEL);
if (!stt) if (!stt)
goto fail; goto fail_acct;
stt->liobn = args->liobn; stt->liobn = args->liobn;
stt->page_shift = args->page_shift; stt->page_shift = args->page_shift;
@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
goto fail; goto fail;
} }
kvm_get_kvm(kvm); ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
stt, O_RDWR | O_CLOEXEC);
if (ret < 0)
goto fail;
mutex_lock(&kvm->lock); mutex_lock(&kvm->lock);
list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
/* Check this LIOBN hasn't been previously allocated */
ret = 0;
list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
if (siter->liobn == args->liobn) {
ret = -EBUSY;
break;
}
}
if (!ret) {
list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
kvm_get_kvm(kvm);
}
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, if (!ret)
stt, O_RDWR | O_CLOEXEC); return fd;
fail: put_unused_fd(fd);
if (stt) {
for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
kfree(stt); fail:
} for (i = 0; i < npages; i++)
if (stt->pages[i])
__free_page(stt->pages[i]);
kfree(stt);
fail_acct:
kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
return ret; return ret;
} }


@@ -1291,6 +1291,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
    /* Hypervisor doorbell - exit only if host IPI flag set */
    cmpwi   r12, BOOK3S_INTERRUPT_H_DOORBELL
    bne 3f
+BEGIN_FTR_SECTION
+   PPC_MSGSYNC
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
    lbz r0, HSTATE_HOST_IPI(r13)
    cmpwi   r0, 0
    beq 4f


@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
u8 cppr; u8 cppr;
u16 ack; u16 ack;
/* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */ /*
* Ensure any previous store to CPPR is ordered vs.
* the subsequent loads from PIPR or ACK.
*/
eieio();
/*
* DD1 bug workaround: If PIPR is less favored than CPPR
* ignore the interrupt or we might incorrectly lose an IPB
* bit.
*/
if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
if (pipr >= xc->hw_cppr)
return;
}
/* Perform the acknowledge OS to register cycle. */ /* Perform the acknowledge OS to register cycle. */
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG)); ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@ -235,6 +250,11 @@ static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
/* /*
* If we found an interrupt, adjust what the guest CPPR should * If we found an interrupt, adjust what the guest CPPR should
* be as if we had just fetched that interrupt from HW. * be as if we had just fetched that interrupt from HW.
*
* Note: This can only make xc->cppr smaller as the previous
* loop will only exit with hirq != 0 if prio is lower than
* the current xc->cppr. Thus we don't need to re-check xc->mfrr
* for pending IPIs.
*/ */
if (hirq) if (hirq)
xc->cppr = prio; xc->cppr = prio;
@ -380,6 +400,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
old_cppr = xc->cppr; old_cppr = xc->cppr;
xc->cppr = cppr; xc->cppr = cppr;
/*
* Order the above update of xc->cppr with the subsequent
* read of xc->mfrr inside push_pending_to_hw()
*/
smp_mb();
/* /*
* We are masking less, we need to look for pending things * We are masking less, we need to look for pending things
* to deliver and set VP pending bits accordingly to trigger * to deliver and set VP pending bits accordingly to trigger
@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
* used to signal MFRR changes is EOId when fetched from * used to signal MFRR changes is EOId when fetched from
* the queue. * the queue.
*/ */
if (irq == XICS_IPI || irq == 0) if (irq == XICS_IPI || irq == 0) {
/*
* This barrier orders the setting of xc->cppr vs.
* subsquent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
goto bail; goto bail;
}
/* Find interrupt source */ /* Find interrupt source */
sb = kvmppc_xive_find_source(xive, irq, &src); sb = kvmppc_xive_find_source(xive, irq, &src);
if (!sb) { if (!sb) {
pr_devel(" source not found !\n"); pr_devel(" source not found !\n");
rc = H_PARAMETER; rc = H_PARAMETER;
/* Same as above */
smp_mb();
goto bail; goto bail;
} }
state = &sb->irq_state[src]; state = &sb->irq_state[src];
kvmppc_xive_select_irq(state, &hw_num, &xd); kvmppc_xive_select_irq(state, &hw_num, &xd);
state->in_eoi = true; state->in_eoi = true;
mb();
/*
* This barrier orders both setting of in_eoi above vs,
* subsequent test of guest_priority, and the setting
* of xc->cppr vs. subsquent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
again: again:
if (state->guest_priority == MASKED) { if (state->guest_priority == MASKED) {
@ -461,6 +503,14 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
} }
/*
* This barrier orders the above guest_priority check
* and spin_lock/unlock with clearing in_eoi below.
*
* It also has to be a full mb() as it must ensure
* the MMIOs done in source_eoi() are completed before
* state->in_eoi is visible.
*/
mb(); mb();
state->in_eoi = false; state->in_eoi = false;
bail: bail:
@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
/* Locklessly write over MFRR */ /* Locklessly write over MFRR */
xc->mfrr = mfrr; xc->mfrr = mfrr;
/*
* The load of xc->cppr below and the subsequent MMIO store
* to the IPI must happen after the above mfrr update is
* globally visible so that:
*
* - Synchronize with another CPU doing an H_EOI or a H_CPPR
* updating xc->cppr then reading xc->mfrr.
*
* - The target of the IPI sees the xc->mfrr update
*/
mb();
/* Shoot the IPI if most favored than target cppr */ /* Shoot the IPI if most favored than target cppr */
if (mfrr < xc->cppr) if (mfrr < xc->cppr)
__x_writeq(0, __x_trig_page(&xc->vp_ipi_data)); __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));


@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
        "srl %[cc],28\n"
        : [cc] "=d" (cc)
        : [code] "d" (code), [addr] "a" (addr)
-       : "memory", "cc");
+       : "3", "memory", "cc");
    return cc;
 }
@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
    VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
    trace_kvm_s390_handle_sthyi(vcpu, code, addr);

-   if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+   if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

    if (code & 0xffff) {
@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
        goto out;
    }

+   if (addr & ~PAGE_MASK)
+       return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
    /*
     * If the page has not yet been faulted in, we want to do that
     * now and not after all the expensive calculations.


@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
    return 0;
 }

-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
    if (use_xsave()) {
-       copy_kernel_to_xregs(&fpstate->xsave, -1);
+       copy_kernel_to_xregs(&fpstate->xsave, mask);
    } else {
        if (use_fxsr())
            copy_kernel_to_fxregs(&fpstate->fxsave);
@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
            : : [addr] "m" (fpstate));
    }

-   __copy_kernel_to_fpregs(fpstate);
+   __copy_kernel_to_fpregs(fpstate, -1);
 }

 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);


@@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
    unsigned long cr4;
    unsigned long cr4_guest_owned_bits;
    unsigned long cr8;
+   u32 pkru;
    u32 hflags;
    u64 efer;
    u64 apic_base;


@@ -140,9 +140,7 @@ static inline int init_new_context(struct task_struct *tsk,
        mm->context.execute_only_pkey = -1;
    }
    #endif
-   init_new_context_ldt(tsk, mm);
-   return 0;
+   return init_new_context_ldt(tsk, mm);
 }

 static inline void destroy_context(struct mm_struct *mm)
 {


@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
            entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
            cpuid_mask(&entry->ecx, CPUID_7_ECX);
            /* PKU is not yet implemented for shadow paging. */
-           if (!tdp_enabled)
+           if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
                entry->ecx &= ~F(PKU);
            entry->edx &= kvm_cpuid_7_0_edx_x86_features;
            entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);


@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
        | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }

-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
-{
-   return kvm_x86_ops->get_pkru(vcpu);
-}
-
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
 {
    vcpu->arch.hflags |= HF_GUEST_MASK;


@@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        * index of the protection domain, so pte_pkey * 2 is
        * is the index of the first bit for the domain.
        */
-       pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
+       pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

        /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
        offset = (pfec & ~1) +


@@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
    to_svm(vcpu)->vmcb->save.rflags = rflags;
 }

-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
-{
-   return 0;
-}
-
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
    switch (reg) {
@@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
    .get_rflags = svm_get_rflags,
    .set_rflags = svm_set_rflags,

-   .get_pkru = svm_get_pkru,
-
    .tlb_flush = svm_flush_tlb,

    .run = svm_vcpu_run,


@ -636,8 +636,6 @@ struct vcpu_vmx {
u64 current_tsc_ratio; u64 current_tsc_ratio;
bool guest_pkru_valid;
u32 guest_pkru;
u32 host_pkru; u32 host_pkru;
/* /*
@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_vmx(vcpu)->emulation_required = emulation_required(vcpu); to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
} }
static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->guest_pkru;
}
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{ {
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0); vmx_set_interrupt_shadow(vcpu, 0);
if (vmx->guest_pkru_valid) if (static_cpu_has(X86_FEATURE_PKU) &&
__write_pkru(vmx->guest_pkru); kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vcpu->arch.pkru);
atomic_switch_perf_msrs(vmx); atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr(); debugctlmsr = get_debugctlmsr();
@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* back on host, so it is safe to read guest PKRU from current * back on host, so it is safe to read guest PKRU from current
* XSAVE. * XSAVE.
*/ */
if (boot_cpu_has(X86_FEATURE_OSPKE)) { if (static_cpu_has(X86_FEATURE_PKU) &&
vmx->guest_pkru = __read_pkru(); kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
if (vmx->guest_pkru != vmx->host_pkru) { vcpu->arch.pkru = __read_pkru();
vmx->guest_pkru_valid = true; if (vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vmx->host_pkru); __write_pkru(vmx->host_pkru);
} else
vmx->guest_pkru_valid = false;
} }
/* /*
@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.get_rflags = vmx_get_rflags, .get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags, .set_rflags = vmx_set_rflags,
.get_pkru = vmx_get_pkru,
.tlb_flush = vmx_flush_tlb, .tlb_flush = vmx_flush_tlb,
.run = vmx_vcpu_run, .run = vmx_vcpu_run,


@ -3245,7 +3245,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
u32 size, offset, ecx, edx; u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index, cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx); &size, &offset, &ecx, &edx);
memcpy(dest + offset, src, size); if (feature == XFEATURE_MASK_PKRU)
memcpy(dest + offset, &vcpu->arch.pkru,
sizeof(vcpu->arch.pkru));
else
memcpy(dest + offset, src, size);
} }
valid -= feature; valid -= feature;
@ -3283,7 +3288,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
u32 size, offset, ecx, edx; u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index, cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx); &size, &offset, &ecx, &edx);
memcpy(dest, src + offset, size); if (feature == XFEATURE_MASK_PKRU)
memcpy(&vcpu->arch.pkru, src + offset,
sizeof(vcpu->arch.pkru));
else
memcpy(dest, src + offset, size);
} }
valid -= feature; valid -= feature;
@ -7633,7 +7642,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
*/ */
vcpu->guest_fpu_loaded = 1; vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin(); __kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); /* PKRU is separately restored in kvm_x86_ops->run. */
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
~XFEATURE_MASK_PKRU);
trace_kvm_fpu(1); trace_kvm_fpu(1);
} }


@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
    QUEUE_FLAG_NAME(STATS),
    QUEUE_FLAG_NAME(POLL_STATS),
    QUEUE_FLAG_NAME(REGISTERED),
+   QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
+   QUEUE_FLAG_NAME(QUIESCED),
 };
 #undef QUEUE_FLAG_NAME
@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
    CMD_FLAG_NAME(RAHEAD),
    CMD_FLAG_NAME(BACKGROUND),
    CMD_FLAG_NAME(NOUNMAP),
+   CMD_FLAG_NAME(NOWAIT),
 };
 #undef CMD_FLAG_NAME


@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
} \ } \
} while (0) } while (0)
static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
/* assume it's one sector */
if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
return 512;
return bio->bi_iter.bi_size;
}
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{ {
INIT_LIST_HEAD(&qn->node); INIT_LIST_HEAD(&qn->node);
@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
bool rw = bio_data_dir(bio); bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp; u64 bytes_allowed, extra_bytes, tmp;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio);
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
do_div(tmp, HZ); do_div(tmp, HZ);
bytes_allowed = tmp; bytes_allowed = tmp;
if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
if (wait) if (wait)
*wait = 0; *wait = 0;
return true; return true;
} }
/* Calc approx time to dispatch */ /* Calc approx time to dispatch */
extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw)); jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
if (!jiffy_wait) if (!jiffy_wait)
@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{ {
bool rw = bio_data_dir(bio); bool rw = bio_data_dir(bio);
unsigned int bio_size = throtl_bio_data_size(bio);
/* Charge the bio to the group */ /* Charge the bio to the group */
tg->bytes_disp[rw] += bio->bi_iter.bi_size; tg->bytes_disp[rw] += bio_size;
tg->io_disp[rw]++; tg->io_disp[rw]++;
tg->last_bytes_disp[rw] += bio->bi_iter.bi_size; tg->last_bytes_disp[rw] += bio_size;
tg->last_io_disp[rw]++; tg->last_io_disp[rw]++;
/* /*


@ -29,26 +29,25 @@
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
/** /**
* bsg_destroy_job - routine to teardown/delete a bsg job * bsg_teardown_job - routine to teardown a bsg job
* @job: bsg_job that is to be torn down * @job: bsg_job that is to be torn down
*/ */
static void bsg_destroy_job(struct kref *kref) static void bsg_teardown_job(struct kref *kref)
{ {
struct bsg_job *job = container_of(kref, struct bsg_job, kref); struct bsg_job *job = container_of(kref, struct bsg_job, kref);
struct request *rq = job->req; struct request *rq = job->req;
blk_end_request_all(rq, BLK_STS_OK);
put_device(job->dev); /* release reference for the request */ put_device(job->dev); /* release reference for the request */
kfree(job->request_payload.sg_list); kfree(job->request_payload.sg_list);
kfree(job->reply_payload.sg_list); kfree(job->reply_payload.sg_list);
kfree(job);
blk_end_request_all(rq, BLK_STS_OK);
} }
void bsg_job_put(struct bsg_job *job) void bsg_job_put(struct bsg_job *job)
{ {
kref_put(&job->kref, bsg_destroy_job); kref_put(&job->kref, bsg_teardown_job);
} }
EXPORT_SYMBOL_GPL(bsg_job_put); EXPORT_SYMBOL_GPL(bsg_job_put);
@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
*/ */
static void bsg_softirq_done(struct request *rq) static void bsg_softirq_done(struct request *rq)
{ {
struct bsg_job *job = rq->special; struct bsg_job *job = blk_mq_rq_to_pdu(rq);
bsg_job_put(job); bsg_job_put(job);
} }
@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
} }
/** /**
* bsg_create_job - create the bsg_job structure for the bsg request * bsg_prepare_job - create the bsg_job structure for the bsg request
* @dev: device that is being sent the bsg request * @dev: device that is being sent the bsg request
* @req: BSG request that needs a job structure * @req: BSG request that needs a job structure
*/ */
static int bsg_create_job(struct device *dev, struct request *req) static int bsg_prepare_job(struct device *dev, struct request *req)
{ {
struct request *rsp = req->next_rq; struct request *rsp = req->next_rq;
struct request_queue *q = req->q;
struct scsi_request *rq = scsi_req(req); struct scsi_request *rq = scsi_req(req);
struct bsg_job *job; struct bsg_job *job = blk_mq_rq_to_pdu(req);
int ret; int ret;
BUG_ON(req->special);
job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
if (!job)
return -ENOMEM;
req->special = job;
job->req = req;
if (q->bsg_job_size)
job->dd_data = (void *)&job[1];
job->request = rq->cmd; job->request = rq->cmd;
job->request_len = rq->cmd_len; job->request_len = rq->cmd_len;
job->reply = rq->sense;
job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
* allocated */
if (req->bio) { if (req->bio) {
ret = bsg_map_buffer(&job->request_payload, req); ret = bsg_map_buffer(&job->request_payload, req);
if (ret) if (ret)
@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
{ {
struct device *dev = q->queuedata; struct device *dev = q->queuedata;
struct request *req; struct request *req;
struct bsg_job *job;
int ret; int ret;
if (!get_device(dev)) if (!get_device(dev))
@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
break; break;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
ret = bsg_create_job(dev, req); ret = bsg_prepare_job(dev, req);
if (ret) { if (ret) {
scsi_req(req)->result = ret; scsi_req(req)->result = ret;
blk_end_request_all(req, BLK_STS_OK); blk_end_request_all(req, BLK_STS_OK);
@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
continue; continue;
} }
job = req->special; ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
ret = q->bsg_job_fn(job);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
if (ret) if (ret)
break; break;
@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
} }
static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct scsi_request *sreq = &job->sreq;
memset(job, 0, sizeof(*job));
scsi_req_init(sreq);
sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
sreq->sense = kzalloc(sreq->sense_len, gfp);
if (!sreq->sense)
return -ENOMEM;
job->req = req;
job->reply = sreq->sense;
job->reply_len = sreq->sense_len;
job->dd_data = job + 1;
return 0;
}
static void bsg_exit_rq(struct request_queue *q, struct request *req)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct scsi_request *sreq = &job->sreq;
kfree(sreq->sense);
}
/** /**
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to * @dev: device to attach bsg device to
@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
q = blk_alloc_queue(GFP_KERNEL); q = blk_alloc_queue(GFP_KERNEL);
if (!q) if (!q)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
q->cmd_size = sizeof(struct scsi_request); q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
q->init_rq_fn = bsg_init_rq;
q->exit_rq_fn = bsg_exit_rq;
q->request_fn = bsg_request_fn; q->request_fn = bsg_request_fn;
ret = blk_init_allocated_queue(q); ret = blk_init_allocated_queue(q);
@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
goto out_cleanup_queue; goto out_cleanup_queue;
q->queuedata = dev; q->queuedata = dev;
q->bsg_job_size = dd_job_size;
q->bsg_job_fn = job_fn; q->bsg_job_fn = job_fn;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);


@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
    const char *failure_string;
    struct binder_buffer *buffer;

-   if (proc->tsk != current)
+   if (proc->tsk != current->group_leader)
        return -EINVAL;

    if ((vma->vm_end - vma->vm_start) > SZ_4M)


@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
} }
static int static int
figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit, figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
loff_t logical_blocksize)
{ {
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size; sector_t x = (sector_t)size;
@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
lo->lo_offset = offset; lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit) if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit; lo->lo_sizelimit = sizelimit;
if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
lo->lo_logical_blocksize = logical_blocksize;
blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
blk_queue_logical_block_size(lo->lo_queue,
lo->lo_logical_blocksize);
}
set_capacity(lo->lo_disk, x); set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */ /* let user-space know about the new size */
@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file; struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host; struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue; struct request_queue *q = lo->lo_queue;
int lo_bits = 9;
/* /*
* We use punch hole to reclaim the free space used by the * We use punch hole to reclaim the free space used by the
@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize; q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0; q->limits.discard_alignment = 0;
if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
lo_bits = blksize_bits(lo->lo_logical_blocksize);
blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits); blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits); blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
} }
@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->use_dio = false; lo->use_dio = false;
lo->lo_blocksize = lo_blocksize; lo->lo_blocksize = lo_blocksize;
lo->lo_logical_blocksize = 512;
lo->lo_device = bdev; lo->lo_device = bdev;
lo->lo_flags = lo_flags; lo->lo_flags = lo_flags;
lo->lo_backing_file = file; lo->lo_backing_file = file;
@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err; int err;
struct loop_func_table *xfer; struct loop_func_table *xfer;
kuid_t uid = current_uid(); kuid_t uid = current_uid();
int lo_flags = lo->lo_flags;
if (lo->lo_encrypt_key_size && if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) && !uid_eq(lo->lo_key_owner, uid) &&
@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (err) if (err)
goto exit; goto exit;
if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
lo->lo_logical_blocksize = 512;
lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
if (LO_INFO_BLOCKSIZE(info) != 512 &&
LO_INFO_BLOCKSIZE(info) != 1024 &&
LO_INFO_BLOCKSIZE(info) != 2048 &&
LO_INFO_BLOCKSIZE(info) != 4096)
return -EINVAL;
if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
return -EINVAL;
}
if (lo->lo_offset != info->lo_offset || if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit || lo->lo_sizelimit != info->lo_sizelimit) {
lo->lo_flags != lo_flags || if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
LO_INFO_BLOCKSIZE(info))) {
err = -EFBIG; err = -EFBIG;
goto exit; goto exit;
} }
@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound)) if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO; return -ENXIO;
return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit, return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
lo->lo_logical_blocksize);
} }
static int loop_set_dio(struct loop_device *lo, unsigned long arg) static int loop_set_dio(struct loop_device *lo, unsigned long arg)


@@ -49,7 +49,6 @@ struct loop_device {
    struct file *   lo_backing_file;
    struct block_device *lo_device;
    unsigned    lo_blocksize;
-   unsigned    lo_logical_blocksize;
    void        *key_data;
    gfp_t       old_gfp_mask;


@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
    struct request_queue *q = vblk->disk->queue;
    char cap_str_2[10], cap_str_10[10];
    char *envp[] = { "RESIZE=1", NULL };
+   unsigned long long nblocks;
    u64 capacity;

    /* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
        capacity = (sector_t)-1;
    }

-   string_get_size(capacity, queue_logical_block_size(q),
+   nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+   string_get_size(nblocks, queue_logical_block_size(q),
            STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
-   string_get_size(capacity, queue_logical_block_size(q),
+   string_get_size(nblocks, queue_logical_block_size(q),
            STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

    dev_notice(&vdev->dev,
           "new size: %llu %d-byte logical blocks (%s/%s)\n",
-          (unsigned long long)capacity,
+          nblocks,
           queue_logical_block_size(q),
-          cap_str_10, cap_str_2);
+          cap_str_10,
+          cap_str_2);

    set_capacity(vblk->disk, capacity);
    revalidate_disk(vblk->disk);


@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
        tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);

        tdc->irq = of_irq_get(pdev->dev.of_node, i);
-       if (tdc->irq < 0) {
-           ret = tdc->irq;
+       if (tdc->irq <= 0) {
+           ret = tdc->irq ?: -ENXIO;
            goto irq_dispose;
        }


@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check) if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state); ret = config->funcs->atomic_check(state->dev, state);
if (ret)
return ret;
if (!state->allow_modeset) { if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) { for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) { if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
} }
} }
return ret; return 0;
} }
EXPORT_SYMBOL(drm_atomic_check_only); EXPORT_SYMBOL(drm_atomic_check_only);
@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state; struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx; struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane; struct drm_plane *plane;
struct drm_out_fence_state *fence_state = NULL; struct drm_out_fence_state *fence_state;
unsigned plane_mask; unsigned plane_mask;
int ret = 0; int ret = 0;
unsigned int i, j, num_fences = 0; unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */ /* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@ -2211,6 +2214,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
plane_mask = 0; plane_mask = 0;
copied_objs = 0; copied_objs = 0;
copied_props = 0; copied_props = 0;
fence_state = NULL;
num_fences = 0;
for (i = 0; i < arg->count_objs; i++) { for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props; uint32_t obj_id, count_props;


@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
    struct drm_gem_object *obj = ptr;
    struct drm_device *dev = obj->dev;

-   if (dev->driver->gem_close_object)
-       dev->driver->gem_close_object(obj, file_priv);
-
    if (drm_core_check_feature(dev, DRIVER_PRIME))
        drm_gem_remove_prime_handles(obj, file_priv);
    drm_vma_node_revoke(&obj->vma_node, file_priv);

+   if (dev->driver->gem_close_object)
+       dev->driver->gem_close_object(obj, file_priv);
+
    drm_gem_object_handle_put_unlocked(obj);

    return 0;


@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,

    crtc = drm_crtc_find(dev, plane_req->crtc_id);
    if (!crtc) {
+       drm_framebuffer_put(fb);
        DRM_DEBUG_KMS("Unknown crtc ID %d\n",
                  plane_req->crtc_id);
        return -ENOENT;


@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 unmap_src:
    i915_gem_object_unpin_map(obj);
 put_obj:
-   i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+   i915_gem_object_put(obj);
    return ret;
 }
} }


@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel, ddc_pin; uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field, /* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port and abort if more * so look for all the possible values for each port.
* than one is found. */ */
int dvo_ports[][3] = { int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
}; };
/* Find the child device to use, abort if more than one found. */ /*
* Find the first child device to reference the port, report if more
* than one found.
*/
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i; it = dev_priv->vbt.child_dev + i;
@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (it->common.dvo_port == dvo_ports[port][j]) { if (it->common.dvo_port == dvo_ports[port][j]) {
if (child) { if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n", DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port)); port_name(port));
return; } else {
child = it;
} }
child = it;
} }
} }
} }


@@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
    struct intel_encoder *encoder = connector->encoder;
    struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
    struct mipi_dsi_device *dsi_device;
-   u8 data;
+   u8 data = 0;
    enum port port;

    /* FIXME: Need to take care of 16 bit brightness level */


@@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
    if (!gpio_desc) {
        gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
-                        "panel", gpio_index,
+                        NULL, gpio_index,
                         value ? GPIOD_OUT_LOW :
                         GPIOD_OUT_HIGH);


@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret; return ret;
} }
static u8 gtiir[] = {
[RCS] = 0,
[BCS] = 0,
[VCS] = 1,
[VCS2] = 1,
[VECS] = 3,
};
static int gen8_init_common_ring(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine)
{ {
struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_private *dev_priv = engine->i915;
@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
/* After a GPU reset, we may have requests to replay */ GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
/*
* Clear any pending interrupt state.
*
* We do it twice out of paranoia that some of the IIR are double
* buffered, and if we only reset it once there may still be
* an interrupt pending.
*/
I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
/* After a GPU reset, we may have requests to replay */
submit = false; submit = false;
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
if (!port_isset(&port[n])) if (!port_isset(&port[n]))


@@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
    struct drm_device *dev = intel_dig_port->base.base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);

-   if (!IS_GEN9(dev_priv)) {
-       DRM_ERROR("LSPCON is supported on GEN9 only\n");
+   if (!HAS_LSPCON(dev_priv)) {
+       DRM_ERROR("LSPCON is not supported on this platform\n");
        return false;
    }


@@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        return;
    }

+   ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
    switch (ipu_plane->dp_flow) {
    case IPU_DP_FLOW_SYNC_BG:
-       ipu_dp_setup_channel(ipu_plane->dp,
-                   IPUV3_COLORSPACE_RGB,
-                   IPUV3_COLORSPACE_RGB);
+       ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
        ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
        break;
    case IPU_DP_FLOW_SYNC_FG:
-       ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
        ipu_dp_setup_channel(ipu_plane->dp, ics,
                    IPUV3_COLORSPACE_UNKNOWN);
        /* Enable local alpha on partial plane */


@@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
 static int rockchip_drm_sys_suspend(struct device *dev)
 {
    struct drm_device *drm = dev_get_drvdata(dev);
-   struct rockchip_drm_private *priv = drm->dev_private;
+   struct rockchip_drm_private *priv;
+
+   if (!drm)
+       return 0;

    drm_kms_helper_poll_disable(drm);
    rockchip_drm_fb_suspend(drm);

+   priv = drm->dev_private;
    priv->state = drm_atomic_helper_suspend(drm);
    if (IS_ERR(priv->state)) {
        rockchip_drm_fb_resume(drm);
@@ -293,8 +297,12 @@ static int rockchip_drm_sys_resume(struct device *dev)
 static int rockchip_drm_sys_resume(struct device *dev)
 {
    struct drm_device *drm = dev_get_drvdata(dev);
-   struct rockchip_drm_private *priv = drm->dev_private;
+   struct rockchip_drm_private *priv;
+
+   if (!drm)
+       return 0;

+   priv = drm->dev_private;
    drm_atomic_helper_resume(drm, priv->state);
    rockchip_drm_fb_resume(drm);
    drm_kms_helper_poll_enable(drm);


@@ -25,12 +25,20 @@
 #include "sun4i_framebuffer.h"
 #include "sun4i_tcon.h"

+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+   struct sun4i_drv *drv = dev->dev_private;
+
+   drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
 DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);

 static struct drm_driver sun4i_drv_driver = {
    .driver_features    = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,

    /* Generic Operations */
+   .lastclose      = sun4i_drv_lastclose,
    .fops           = &sun4i_drv_fops,
    .name           = "sun4i-drm",
    .desc           = "Allwinner sun4i Display Engine",


@@ -1,6 +1,7 @@
 config IMX_IPUV3_CORE
    tristate "IPUv3 core support"
    depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+   depends on DRM || !DRM # if DRM=m, this can't be 'y'
    select GENERIC_IRQ_CHIP
    help
      Choose this if you have a i.MX5/6 system and want to use the Image


@@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
    }

    /* We are in an invalid state; reset bus to a known state. */
-   if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
+   if (!bus->msgs) {
        dev_err(bus->dev, "bus in unknown state");
        bus->cmd_err = -EIO;
-       aspeed_i2c_do_stop(bus);
+       if (bus->master_state != ASPEED_I2C_MASTER_STOP)
+           aspeed_i2c_do_stop(bus);
        goto out_no_complete;
    }
    msg = &bus->msgs[bus->msgs_index];


@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED | DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
DW_IC_CON_SPEED_FAST;
dev->mode = DW_IC_SLAVE; dev->mode = DW_IC_SLAVE;
@ -430,7 +429,7 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif #endif
#ifdef CONFIG_PM #ifdef CONFIG_PM
static int dw_i2c_plat_suspend(struct device *dev) static int dw_i2c_plat_runtime_suspend(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@ -452,11 +451,21 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_suspend(struct device *dev)
{
pm_runtime_resume(dev);
return dw_i2c_plat_runtime_suspend(dev);
}
#endif
static const struct dev_pm_ops dw_i2c_dev_pm_ops = { static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare, .prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete, .complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
dw_i2c_plat_resume,
NULL)
}; };
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)


@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
return -EBUSY; return -EBUSY;
if (slave->flags & I2C_CLIENT_TEN) if (slave->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT; return -EAFNOSUPPORT;
pm_runtime_get_sync(dev->dev);
/* /*
* Set slave address in the IC_SAR register, * Set slave address in the IC_SAR register,
* the address to which the DW_apb_i2c responds. * the address to which the DW_apb_i2c responds.
@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
dev->disable_int(dev); dev->disable_int(dev);
dev->disable(dev); dev->disable(dev);
dev->slave = NULL; dev->slave = NULL;
pm_runtime_put(dev->dev);
return 0; return 0;
} }
@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
slave_activity = ((dw_readl(dev, DW_IC_STATUS) & slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY)) if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
return 0; return 0;
dev_dbg(dev->dev, dev_dbg(dev->dev,
@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = i2c_add_numbered_adapter(adap); ret = i2c_add_numbered_adapter(adap);
if (ret) if (ret)
dev_err(dev->dev, "failure adding adapter: %d\n", ret); dev_err(dev->dev, "failure adding adapter: %d\n", ret);
pm_runtime_put_noidle(dev->dev);
return ret; return ret;
} }


@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
iounmap(pd->reg); iounmap(pd->reg);
err_res: err_res:
release_resource(pd->ioarea); release_mem_region(pd->ioarea->start, size);
kfree(pd->ioarea);
err: err:
kfree(pd); kfree(pd);
@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
i2c_del_adapter(&pd->adap); i2c_del_adapter(&pd->adap);
iounmap(pd->reg); iounmap(pd->reg);
release_resource(pd->ioarea); release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
kfree(pd->ioarea);
kfree(pd); kfree(pd);
return 0; return 0;


@ -353,8 +353,8 @@ static int i2c_device_probe(struct device *dev)
} }
/* /*
* An I2C ID table is not mandatory, if and only if, a suitable Device * An I2C ID table is not mandatory, if and only if, a suitable OF
* Tree match table entry is supplied for the probing device. * or ACPI ID table is supplied for the probing device.
*/ */
if (!driver->id_table && if (!driver->id_table &&
!i2c_acpi_match_device(dev->driver->acpi_match_table, client) && !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&


@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data)
{ {
struct iio_dev *indio_dev = data; struct iio_dev *indio_dev = data;
struct ina2xx_chip_info *chip = iio_priv(indio_dev); struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip); int sampling_us = SAMPLING_PERIOD(chip);
int buffer_us; int buffer_us;
/* /*


@ -64,7 +64,7 @@
#define STM32H7_CKMODE_MASK GENMASK(17, 16) #define STM32H7_CKMODE_MASK GENMASK(17, 16)
/* STM32 H7 maximum analog clock rate (from datasheet) */ /* STM32 H7 maximum analog clock rate (from datasheet) */
#define STM32H7_ADC_MAX_CLK_RATE 72000000 #define STM32H7_ADC_MAX_CLK_RATE 36000000
/** /**
* stm32_adc_common_regs - stm32 common registers, compatible dependent data * stm32_adc_common_regs - stm32 common registers, compatible dependent data
@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
return -EINVAL; return -EINVAL;
} }
priv->common.rate = rate; priv->common.rate = rate / stm32f4_pclk_div[i];
val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR); val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
val &= ~STM32F4_ADC_ADCPRE_MASK; val &= ~STM32F4_ADC_ADCPRE_MASK;
val |= i << STM32F4_ADC_ADCPRE_SHIFT; val |= i << STM32F4_ADC_ADCPRE_SHIFT;
writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR); writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n", dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
rate / (stm32f4_pclk_div[i] * 1000)); priv->common.rate / 1000);
return 0; return 0;
} }
@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
out: out:
/* rate used later by each ADC instance to control BOOST mode */ /* rate used later by each ADC instance to control BOOST mode */
priv->common.rate = rate; priv->common.rate = rate / div;
/* Set common clock mode and prescaler */ /* Set common clock mode and prescaler */
val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR); val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);
@ -260,7 +260,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR); writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n", dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
ckmode ? "bus" : "adc", div, rate / (div * 1000)); ckmode ? "bus" : "adc", div, priv->common.rate / 1000);
return 0; return 0;
} }


@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0; s32 poll_value = 0;
if (state) { if (state) {
if (!atomic_read(&st->user_requested_state))
return 0;
if (sensor_hub_device_open(st->hsdev)) if (sensor_hub_device_open(st->hsdev))
return -EIO; return -EIO;
@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
&report_val); &report_val);
} }
pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
st->pdev->name, state_val, report_val);
sensor_hub_get_feature(st->hsdev, st->power_state.report_id, sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index, st->power_state.index,
sizeof(state_val), &state_val); sizeof(state_val), &state_val);
@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_get_sync(&st->pdev->dev); ret = pm_runtime_get_sync(&st->pdev->dev);
else { else {
pm_runtime_mark_last_busy(&st->pdev->dev); pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev); ret = pm_runtime_put_autosuspend(&st->pdev->dev);
} }
if (ret < 0) { if (ret < 0) {
@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
/* Default to 3 seconds, but can be changed from sysfs */ /* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000); 3000);
pm_runtime_use_autosuspend(&attrb->pdev->dev);
return ret; return ret;
error_unreg_trigger: error_unreg_trigger:
iio_trigger_unregister(trig); iio_trigger_unregister(trig);


@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_val = IIO_RAD_TO_DEGREE(22500), .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
.gyro_max_scale = 450, .gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500), .accel_max_val = IIO_M_S_2_TO_G(12500),
.accel_max_scale = 5, .accel_max_scale = 10,
}, },
[ADIS16485] = { [ADIS16485] = {
.channels = adis16485_channels, .channels = adis16485_channels,


@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.drdy_irq = { .drdy_irq = {
.addr = 0x62, .addr = 0x62,
.mask_int1 = 0x01, .mask_int1 = 0x01,
.addr_ihl = 0x63, .addr_stat_drdy = 0x67,
.mask_ihl = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
}, },
.multi_read_bit = false, .multi_read_bit = false,
.bootime = 2, .bootime = 2,


@ -282,6 +282,11 @@ static int bmp280_read_temp(struct bmp280_data *data,
} }
adc_temp = be32_to_cpu(tmp) >> 12; adc_temp = be32_to_cpu(tmp) >> 12;
if (adc_temp == BMP280_TEMP_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading temperature skipped\n");
return -EIO;
}
comp_temp = bmp280_compensate_temp(data, adc_temp); comp_temp = bmp280_compensate_temp(data, adc_temp);
/* /*
@ -317,6 +322,11 @@ static int bmp280_read_press(struct bmp280_data *data,
} }
adc_press = be32_to_cpu(tmp) >> 12; adc_press = be32_to_cpu(tmp) >> 12;
if (adc_press == BMP280_PRESS_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading pressure skipped\n");
return -EIO;
}
comp_press = bmp280_compensate_press(data, adc_press); comp_press = bmp280_compensate_press(data, adc_press);
*val = comp_press; *val = comp_press;
@ -345,6 +355,11 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
} }
adc_humidity = be16_to_cpu(tmp); adc_humidity = be16_to_cpu(tmp);
if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading humidity skipped\n");
return -EIO;
}
comp_humidity = bmp280_compensate_humidity(data, adc_humidity); comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
*val = comp_humidity; *val = comp_humidity;
@ -597,14 +612,20 @@ static const struct bmp280_chip_info bmp280_chip_info = {
static int bme280_chip_config(struct bmp280_data *data) static int bme280_chip_config(struct bmp280_data *data)
{ {
int ret = bmp280_chip_config(data); int ret;
u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1); u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
/*
* Oversampling of humidity must be set before oversampling of
* temperature/pressure is set to become effective.
*/
ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
BMP280_OSRS_HUMIDITY_MASK, osrs);
if (ret < 0) if (ret < 0)
return ret; return ret;
return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY, return bmp280_chip_config(data);
BMP280_OSRS_HUMIDITY_MASK, osrs);
} }
static const struct bmp280_chip_info bme280_chip_info = { static const struct bmp280_chip_info bme280_chip_info = {


@ -96,6 +96,11 @@
#define BME280_CHIP_ID 0x60 #define BME280_CHIP_ID 0x60
#define BMP280_SOFT_RESET_VAL 0xB6 #define BMP280_SOFT_RESET_VAL 0xB6
/* BMP280 register skipped special values */
#define BMP280_TEMP_SKIPPED 0x80000
#define BMP280_PRESS_SKIPPED 0x80000
#define BMP280_HUMIDITY_SKIPPED 0x8000
/* Regmap configurations */ /* Regmap configurations */
extern const struct regmap_config bmp180_regmap_config; extern const struct regmap_config bmp180_regmap_config;
extern const struct regmap_config bmp280_regmap_config; extern const struct regmap_config bmp280_regmap_config;


@ -366,34 +366,32 @@ static int stm32_counter_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask) int *val, int *val2, long mask)
{ {
struct stm32_timer_trigger *priv = iio_priv(indio_dev); struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 dat;
switch (mask) { switch (mask) {
case IIO_CHAN_INFO_RAW: case IIO_CHAN_INFO_RAW:
{ regmap_read(priv->regmap, TIM_CNT, &dat);
u32 cnt; *val = dat;
regmap_read(priv->regmap, TIM_CNT, &cnt);
*val = cnt;
return IIO_VAL_INT; return IIO_VAL_INT;
}
case IIO_CHAN_INFO_SCALE:
{
u32 smcr;
regmap_read(priv->regmap, TIM_SMCR, &smcr); case IIO_CHAN_INFO_ENABLE:
smcr &= TIM_SMCR_SMS; regmap_read(priv->regmap, TIM_CR1, &dat);
*val = (dat & TIM_CR1_CEN) ? 1 : 0;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
regmap_read(priv->regmap, TIM_SMCR, &dat);
dat &= TIM_SMCR_SMS;
*val = 1; *val = 1;
*val2 = 0; *val2 = 0;
/* in quadrature case scale = 0.25 */ /* in quadrature case scale = 0.25 */
if (smcr == 3) if (dat == 3)
*val2 = 2; *val2 = 2;
return IIO_VAL_FRACTIONAL_LOG2; return IIO_VAL_FRACTIONAL_LOG2;
} }
}
return -EINVAL; return -EINVAL;
} }
@ -403,15 +401,31 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask) int val, int val2, long mask)
{ {
struct stm32_timer_trigger *priv = iio_priv(indio_dev); struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 dat;
switch (mask) { switch (mask) {
case IIO_CHAN_INFO_RAW: case IIO_CHAN_INFO_RAW:
regmap_write(priv->regmap, TIM_CNT, val); return regmap_write(priv->regmap, TIM_CNT, val);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE: case IIO_CHAN_INFO_SCALE:
/* fixed scale */ /* fixed scale */
return -EINVAL; return -EINVAL;
case IIO_CHAN_INFO_ENABLE:
if (val) {
regmap_read(priv->regmap, TIM_CR1, &dat);
if (!(dat & TIM_CR1_CEN))
clk_enable(priv->clk);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
} else {
regmap_read(priv->regmap, TIM_CR1, &dat);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
0);
if (dat & TIM_CR1_CEN)
clk_disable(priv->clk);
}
return 0;
} }
return -EINVAL; return -EINVAL;
@ -471,7 +485,7 @@ static int stm32_get_trigger_mode(struct iio_dev *indio_dev,
regmap_read(priv->regmap, TIM_SMCR, &smcr); regmap_read(priv->regmap, TIM_SMCR, &smcr);
return smcr == TIM_SMCR_SMS ? 0 : -EINVAL; return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL;
} }
static const struct iio_enum stm32_trigger_mode_enum = { static const struct iio_enum stm32_trigger_mode_enum = {
@ -507,9 +521,19 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
{ {
struct stm32_timer_trigger *priv = iio_priv(indio_dev); struct stm32_timer_trigger *priv = iio_priv(indio_dev);
int sms = stm32_enable_mode2sms(mode); int sms = stm32_enable_mode2sms(mode);
u32 val;
if (sms < 0) if (sms < 0)
return sms; return sms;
/*
* Triggered mode sets CEN bit automatically by hardware. So, first
* enable counter clock, so it can use it. Keeps it in sync with CEN.
*/
if (sms == 6) {
regmap_read(priv->regmap, TIM_CR1, &val);
if (!(val & TIM_CR1_CEN))
clk_enable(priv->clk);
}
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
@ -571,11 +595,14 @@ static int stm32_get_quadrature_mode(struct iio_dev *indio_dev,
{ {
struct stm32_timer_trigger *priv = iio_priv(indio_dev); struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 smcr; u32 smcr;
int mode;
regmap_read(priv->regmap, TIM_SMCR, &smcr); regmap_read(priv->regmap, TIM_SMCR, &smcr);
smcr &= TIM_SMCR_SMS; mode = (smcr & TIM_SMCR_SMS) - 1;
if ((mode < 0) || (mode > ARRAY_SIZE(stm32_quadrature_modes)))
return -EINVAL;
return smcr - 1; return mode;
} }
static const struct iio_enum stm32_quadrature_mode_enum = { static const struct iio_enum stm32_quadrature_mode_enum = {
@ -592,13 +619,20 @@ static const char *const stm32_count_direction_states[] = {
static int stm32_set_count_direction(struct iio_dev *indio_dev, static int stm32_set_count_direction(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, const struct iio_chan_spec *chan,
unsigned int mode) unsigned int dir)
{ {
struct stm32_timer_trigger *priv = iio_priv(indio_dev); struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 val;
int mode;
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode); /* In encoder mode, direction is RO (given by TI1/TI2 signals) */
regmap_read(priv->regmap, TIM_SMCR, &val);
mode = (val & TIM_SMCR_SMS) - 1;
if ((mode >= 0) && (mode < ARRAY_SIZE(stm32_quadrature_modes)))
return -EBUSY;
return 0; return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR,
dir ? TIM_CR1_DIR : 0);
} }
static int stm32_get_count_direction(struct iio_dev *indio_dev, static int stm32_get_count_direction(struct iio_dev *indio_dev,
@ -609,7 +643,7 @@ static int stm32_get_count_direction(struct iio_dev *indio_dev,
regmap_read(priv->regmap, TIM_CR1, &cr1); regmap_read(priv->regmap, TIM_CR1, &cr1);
return (cr1 & TIM_CR1_DIR); return ((cr1 & TIM_CR1_DIR) ? 1 : 0);
} }
static const struct iio_enum stm32_count_direction_enum = { static const struct iio_enum stm32_count_direction_enum = {
@ -672,7 +706,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
static const struct iio_chan_spec stm32_trigger_channel = { static const struct iio_chan_spec stm32_trigger_channel = {
.type = IIO_COUNT, .type = IIO_COUNT,
.channel = 0, .channel = 0,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_ENABLE) |
BIT(IIO_CHAN_INFO_SCALE),
.ext_info = stm32_trigger_count_info, .ext_info = stm32_trigger_count_info,
.indexed = 1 .indexed = 1
}; };


@ -331,7 +331,7 @@ static int soc_button_probe(struct platform_device *pdev)
error = gpiod_count(dev, NULL); error = gpiod_count(dev, NULL);
if (error < 0) { if (error < 0) {
dev_dbg(dev, "no GPIO attached, ignoring...\n"); dev_dbg(dev, "no GPIO attached, ignoring...\n");
return error; return -ENODEV;
} }
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);


@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_TWO: case SS4_PACKET_ID_TWO:
if (priv->flags & ALPS_BUTTONPAD) { if (priv->flags & ALPS_BUTTONPAD) {
f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
} else {
f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
}
f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0); f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1); f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
} else { } else {
f->mt[0].x = SS4_STD_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
} else {
f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
}
f->mt[0].y = SS4_STD_MF_Y_V2(p, 0); f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
f->mt[1].y = SS4_STD_MF_Y_V2(p, 1); f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
} }
f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0; f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_MULTI: case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) { if (priv->flags & ALPS_BUTTONPAD) {
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
} else {
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
}
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX_BL; no_data_x = SS4_MFPACKET_NO_AX_BL;
no_data_y = SS4_MFPACKET_NO_AY_BL; no_data_y = SS4_MFPACKET_NO_AY_BL;
} else { } else {
f->mt[2].x = SS4_STD_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
} else {
f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
}
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX; no_data_x = SS4_MFPACKET_NO_AX;
no_data_y = SS4_MFPACKET_NO_AY; no_data_y = SS4_MFPACKET_NO_AY;
@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
memset(otp, 0, sizeof(otp)); memset(otp, 0, sizeof(otp));
if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) || if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0])) alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
return -1; return -1;
alps_update_device_area_ss4_v2(otp, priv); alps_update_device_area_ss4_v2(otp, priv);


@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
((_b[1 + _i * 3] << 5) & 0x1F00) \ ((_b[1 + _i * 3] << 5) & 0x1F00) \
) )
#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
((_b[1 + (_i) * 3] << 4) & 0x0F80) \
)
#define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \ #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
((_b[2 + (_i) * 3] << 5) & 0x01E0) | \ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
((_b[2 + (_i) * 3] << 4) & 0x0E00) \ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \
@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
((_b[0 + (_i) * 3] >> 3) & 0x0010) \ ((_b[0 + (_i) * 3] >> 3) & 0x0010) \
) )
#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
((_b[0 + (_i) * 3] >> 4) & 0x0008) \
)
#define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \ #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
((_b[0 + (_i) * 3] >> 3) & 0x0008) \ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \
) )


@ -1247,6 +1247,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 }, { "ELAN0000", 0 },
{ "ELAN0100", 0 }, { "ELAN0100", 0 },
{ "ELAN0600", 0 }, { "ELAN0600", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 }, { "ELAN0605", 0 },
{ "ELAN0608", 0 }, { "ELAN0608", 0 },
{ "ELAN0605", 0 }, { "ELAN0605", 0 },


@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1; return -1;
if (param[0] != TP_MAGIC_IDENT) /* add new TP ID. */
if (!(param[0] & TP_MAGIC_IDENT))
return -1; return -1;
if (firmware_id) if (firmware_id)


@ -21,8 +21,9 @@
#define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */ #define TP_READ_ID 0xE1 /* Sent for device identification */
#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ #define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
/* by the firmware ID */ /* by the firmware ID */
/* Firmware ID includes 0x1, 0x2, 0x3 */
/* /*
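The change above turns the ID check from an exact match against 0x01 into a bitmask test against 0x03, so trackpoints reporting firmware IDs 0x2 and 0x3 are accepted as well. A minimal userspace sketch (plain C, not kernel code; the two constants are copied from the hunks above) of what each check accepts:

	#include <stdio.h>

	#define TP_MAGIC_IDENT_OLD	0x01	/* old: exact match required */
	#define TP_MAGIC_IDENT_NEW	0x03	/* new: mask covering IDs 0x1, 0x2, 0x3 */

	int main(void)
	{
		int id;

		/* Print the verdict of the old and new checks for a few IDs. */
		for (id = 0; id <= 4; id++)
			printf("id 0x%x: old=%s new=%s\n", id,
			       id != TP_MAGIC_IDENT_OLD ? "reject" : "accept",
			       !(id & TP_MAGIC_IDENT_NEW) ? "reject" : "accept");
		return 0;
	}

Only ID 0x1 passes the old test, while 0x1, 0x2 and 0x3 pass the new mask test (0x0 and 0x4 are still rejected), matching the added "Firmware ID includes 0x1, 0x2, 0x3" comment.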


@ -574,7 +574,9 @@ struct amd_iommu {
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{ {
return container_of(dev, struct amd_iommu, iommu.dev); struct iommu_device *iommu = dev_to_iommu_device(dev);
return container_of(iommu, struct amd_iommu, iommu);
} }
#define ACPIHID_UID_LEN 256 #define ACPIHID_UID_LEN 256


@ -4736,7 +4736,9 @@ static void intel_disable_iommus(void)
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{ {
return container_of(dev, struct intel_iommu, iommu.dev); struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
return container_of(iommu_dev, struct intel_iommu, iommu);
} }
static ssize_t intel_iommu_show_version(struct device *dev, static ssize_t intel_iommu_show_version(struct device *dev,


@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
va_list vargs; va_list vargs;
int ret; int ret;
device_initialize(&iommu->dev); iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
if (!iommu->dev)
return -ENOMEM;
iommu->dev.class = &iommu_class; device_initialize(iommu->dev);
iommu->dev.parent = parent;
iommu->dev.groups = groups; iommu->dev->class = &iommu_class;
iommu->dev->parent = parent;
iommu->dev->groups = groups;
va_start(vargs, fmt); va_start(vargs, fmt);
ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs); ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
va_end(vargs); va_end(vargs);
if (ret) if (ret)
goto error; goto error;
ret = device_add(&iommu->dev); ret = device_add(iommu->dev);
if (ret) if (ret)
goto error; goto error;
dev_set_drvdata(iommu->dev, iommu);
return 0; return 0;
error: error:
put_device(&iommu->dev); put_device(iommu->dev);
return ret; return ret;
} }
void iommu_device_sysfs_remove(struct iommu_device *iommu) void iommu_device_sysfs_remove(struct iommu_device *iommu)
{ {
device_unregister(&iommu->dev); dev_set_drvdata(iommu->dev, NULL);
device_unregister(iommu->dev);
iommu->dev = NULL;
} }
/* /*
* IOMMU drivers can indicate a device is managed by a given IOMMU using * IOMMU drivers can indicate a device is managed by a given IOMMU using
@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
if (!iommu || IS_ERR(iommu)) if (!iommu || IS_ERR(iommu))
return -ENODEV; return -ENODEV;
ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices", ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
&link->kobj, dev_name(link)); &link->kobj, dev_name(link));
if (ret) if (ret)
return ret; return ret;
ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu"); ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
if (ret) if (ret)
sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
dev_name(link)); dev_name(link));
return ret; return ret;
@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
return; return;
sysfs_remove_link(&link->kobj, "iommu"); sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
} }


@ -1371,12 +1371,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
R1_CC_ERROR | /* Card controller error */ \ R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */ R1_ERROR) /* General/unknown error */
static bool mmc_blk_has_cmd_err(struct mmc_command *cmd) static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{ {
if (!cmd->error && cmd->resp[0] & CMD_ERRORS) u32 val;
cmd->error = -EIO;
return cmd->error; /*
* Per the SD specification (physical layer version 4.10)[1],
* section 4.3.3, it explicitly states that "When the last
* block of user area is read using CMD18, the host should
* ignore OUT_OF_RANGE error that may occur even the sequence
* is correct". And JESD84-B51 for eMMC also has a similar
* statement in section 6.8.3.
*
* Multiple block read/write could be done by either predefined
* method, namely CMD23, or open-ending mode. For open-ending mode,
* we should ignore the OUT_OF_RANGE error as it's normal behaviour.
*
* However the spec[1] doesn't tell us whether we should also
* ignore that for predefined method. But per the spec[1], section
* 4.15 Set Block Count Command, it says "If illegal block count
* is set, out of range error will be indicated during read/write
* operation (For example, data transfer is stopped at user area
* boundary)." In another word, we could expect a out of range error
* in the response for the following CMD18/25. And if argument of
* CMD23 + the argument of CMD18/25 exceed the max number of blocks,
* we could also expect to get a -ETIMEDOUT or any error number from
* the host drivers due to missing data response(for write)/data(for
* read), as the cards will stop the data transfer by itself per the
* spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
*/
if (!brq->stop.error) {
bool oor_with_open_end;
/* If there is no error yet, check R1 response */
val = brq->stop.resp[0] & CMD_ERRORS;
oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
if (val && !oor_with_open_end)
brq->stop.error = -EIO;
}
} }
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
@ -1400,8 +1434,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
* stop.error indicates a problem with the stop command. Data * stop.error indicates a problem with the stop command. Data
* may have been transferred, or may still be transferring. * may have been transferred, or may still be transferring.
*/ */
if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
brq->data.error) { mmc_blk_eval_resp_error(brq);
if (brq->sbc.error || brq->cmd.error ||
brq->stop.error || brq->data.error) {
switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
case ERR_RETRY: case ERR_RETRY:
return MMC_BLK_RETRY; return MMC_BLK_RETRY;


@ -1364,7 +1364,18 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
ret = atmel_smc_cs_conf_set_timing(smcconf, ret = atmel_smc_cs_conf_set_timing(smcconf,
ATMEL_HSMC_TIMINGS_TADL_SHIFT, ATMEL_HSMC_TIMINGS_TADL_SHIFT,
ncycles); ncycles);
if (ret) /*
* Version 4 of the ONFI spec mandates that tADL be at least 400
* nanoseconds, but, depending on the master clock rate, 400 ns may not
* fit in the tADL field of the SMC reg. We need to relax the check and
* accept the -ERANGE return code.
*
* Note that previous versions of the ONFI spec had a lower tADL_min
* (100 or 200 ns). It's not clear why this timing constraint got
* increased but it seems most NANDs are fine with values lower than
* 400ns, so we should be safe.
*/
if (ret && ret != -ERANGE)
return ret; return ret;
ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps); ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);


@ -2373,6 +2373,7 @@ static int __init ns_init_module(void)
return 0; return 0;
err_exit: err_exit:
nandsim_debugfs_remove(nand);
free_nandsim(nand); free_nandsim(nand);
nand_release(nsmtd); nand_release(nsmtd);
for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)


@ -924,10 +924,8 @@ static void ntb_transport_link_work(struct work_struct *work)
ntb_free_mw(nt, i); ntb_free_mw(nt, i);
/* if there's an actual failure, we should just bail */ /* if there's an actual failure, we should just bail */
if (rc < 0) { if (rc < 0)
ntb_link_disable(ndev);
return; return;
}
out: out:
if (ntb_link_is_up(ndev, NULL, NULL) == 1) if (ntb_link_is_up(ndev, NULL, NULL) == 1)
@ -1059,7 +1057,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
int node; int node;
int rc, i; int rc, i;
mw_count = ntb_mw_count(ndev, PIDX); mw_count = ntb_peer_mw_count(ndev);
if (!ndev->ops->mw_set_trans) { if (!ndev->ops->mw_set_trans) {
dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");


@ -959,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
tc->ntb = ntb; tc->ntb = ntb;
init_waitqueue_head(&tc->link_wq); init_waitqueue_head(&tc->link_wq);
tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS); tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS);
for (i = 0; i < tc->mw_count; i++) { for (i = 0; i < tc->mw_count; i++) {
rc = tool_init_mw(tc, i); rc = tool_init_mw(tc, i);
if (rc) if (rc)


@ -538,12 +538,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
struct msi_desc *entry; struct msi_desc *entry;
u16 control; u16 control;
if (affd) { if (affd)
masks = irq_create_affinity_masks(nvec, affd); masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
nvec);
}
/* MSI Entry Initialization */ /* MSI Entry Initialization */
entry = alloc_msi_entry(&dev->dev, nvec, masks); entry = alloc_msi_entry(&dev->dev, nvec, masks);
@ -679,12 +676,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msi_desc *entry; struct msi_desc *entry;
int ret, i; int ret, i;
if (affd) { if (affd)
masks = irq_create_affinity_masks(nvec, affd); masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
nvec);
}
for (i = 0, curmsk = masks; i < nvec; i++) { for (i = 0, curmsk = masks; i < nvec; i++) {
entry = alloc_msi_entry(&dev->dev, 1, curmsk); entry = alloc_msi_entry(&dev->dev, 1, curmsk);


@ -16,9 +16,9 @@
static bool __must_check fsl_mc_is_allocatable(const char *obj_type) static bool __must_check fsl_mc_is_allocatable(const char *obj_type)
{ {
return strcmp(obj_type, "dpbp") || return strcmp(obj_type, "dpbp") == 0 ||
strcmp(obj_type, "dpmcp") || strcmp(obj_type, "dpmcp") == 0 ||
strcmp(obj_type, "dpcon"); strcmp(obj_type, "dpcon") == 0;
} }
/** /**
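The == 0 comparisons are the whole point of this hunk: strcmp() returns 0 on a match, so the old expression evaluated to true for almost every object type. A minimal userspace sketch (plain C, not the kernel code; helper names shortened) of the difference:

	#include <stdio.h>
	#include <string.h>

	/* old check: any mismatch made the || chain true */
	static int allocatable_old(const char *t)
	{
		return strcmp(t, "dpbp") || strcmp(t, "dpmcp") || strcmp(t, "dpcon");
	}

	/* fixed check: true only for the three allocatable object types */
	static int allocatable_new(const char *t)
	{
		return strcmp(t, "dpbp") == 0 ||
		       strcmp(t, "dpmcp") == 0 ||
		       strcmp(t, "dpcon") == 0;
	}

	int main(void)
	{
		/* prints "dpni: old=1 new=0" and "dpbp: old=1 new=1" */
		printf("dpni: old=%d new=%d\n", allocatable_old("dpni"), allocatable_new("dpni"));
		printf("dpbp: old=%d new=%d\n", allocatable_old("dpbp"), allocatable_new("dpbp"));
		return 0;
	}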


@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */ {} /* Terminating entry */
}; };


@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
{ {
struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev); const char *name = dev_name(&vp_dev->vdev.dev);
unsigned flags = PCI_IRQ_MSIX;
unsigned i, v; unsigned i, v;
int err = -ENOMEM; int err = -ENOMEM;
@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL)) GFP_KERNEL))
goto error; goto error;
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
}
err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
nvectors, PCI_IRQ_MSIX | nvectors, flags, desc);
(desc ? PCI_IRQ_AFFINITY : 0),
desc);
if (err < 0) if (err < 0)
goto error; goto error;
vp_dev->msix_enabled = 1; vp_dev->msix_enabled = 1;


@ -194,15 +194,20 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, bool prefix)
} }
/* /*
* Don't allow path components longer than the server max.
* Don't allow the separator character in a path component. * Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix. * The VFS will not allow "/", but "\" is allowed by posix.
*/ */
static int static int
check_name(struct dentry *direntry) check_name(struct dentry *direntry, struct cifs_tcon *tcon)
{ {
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i; int i;
if (unlikely(direntry->d_name.len >
tcon->fsAttrInfo.MaxPathNameComponentLength))
return -ENAMETOOLONG;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
for (i = 0; i < direntry->d_name.len; i++) { for (i = 0; i < direntry->d_name.len; i++) {
if (direntry->d_name.name[i] == '\\') { if (direntry->d_name.name[i] == '\\') {
@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
return finish_no_open(file, res); return finish_no_open(file, res);
} }
rc = check_name(direntry);
if (rc)
return rc;
xid = get_xid(); xid = get_xid();
cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
} }
tcon = tlink_tcon(tlink); tcon = tlink_tcon(tlink);
rc = check_name(direntry, tcon);
if (rc)
goto out_free_xid;
server = tcon->ses->server; server = tcon->ses->server;
if (server->ops->new_lease_key) if (server->ops->new_lease_key)
@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
} }
pTcon = tlink_tcon(tlink); pTcon = tlink_tcon(tlink);
rc = check_name(direntry); rc = check_name(direntry, pTcon);
if (rc) if (rc)
goto lookup_out; goto lookup_out;


@ -3219,8 +3219,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); kst->f_bfree = kst->f_bavail =
kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
return; return;
} }


@ -1383,6 +1383,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
/*
* Make sure that the faulting address's PMD offset (color) matches
* the PMD offset from the start of the file. This is necessary so
* that a PMD range in the page table overlaps exactly with a PMD
* range in the radix tree.
*/
if ((vmf->pgoff & PG_PMD_COLOUR) !=
((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
goto fallback;
/* Fall back to PTEs if we're going to COW */ /* Fall back to PTEs if we're going to COW */
if (write && !(vma->vm_flags & VM_SHARED)) if (write && !(vma->vm_flags & VM_SHARED))
goto fallback; goto fallback;


@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
argp->p = page_address(argp->pagelist[0]); argp->p = page_address(argp->pagelist[0]);
argp->pagelist++; argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) { if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2); argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
argp->pagelen = 0; argp->pagelen = 0;
} else { } else {
argp->end = argp->p + (PAGE_SIZE>>2); argp->end = argp->p + (PAGE_SIZE>>2);
@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
argp->pagelen -= pages * PAGE_SIZE; argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE; len -= pages * PAGE_SIZE;
argp->p = (__be32 *)page_address(argp->pagelist[0]); next_decode_page(argp);
argp->pagelist++;
argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
} }
argp->p += XDR_QUADLEN(len); argp->p += XDR_QUADLEN(len);
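The first hunk's switch from argp->pagelen>>2 to XDR_QUADLEN(argp->pagelen) matters when the remaining page length is not a multiple of four: XDR_QUADLEN rounds a byte count up to whole 4-byte XDR words, whereas >>2 truncates and can leave the final partial word beyond argp->end. A small standalone illustration (the macro body is the one from include/linux/sunrpc/xdr.h):

	#include <stdio.h>

	#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* round up to 4-byte XDR words */

	int main(void)
	{
		unsigned int pagelen = 5;	/* e.g. a short tail in the last page */

		printf("truncating:  %u words\n", pagelen >> 2);		/* 1: last byte lost   */
		printf("rounding up: %u words\n", XDR_QUADLEN(pagelen));	/* 2: covers all bytes */
		return 0;
	}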


@ -1164,11 +1164,7 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
if (ufdset) { if (ufdset) {
return compat_get_bitmap(fdset, ufdset, nr); return compat_get_bitmap(fdset, ufdset, nr);
} else { } else {
/* Tricky, must clear full unsigned long in the zero_fd_set(nr, fdset);
* kernel fdset at the end, ALIGN makes sure that
* actually happens.
*/
memset(fdset, 0, ALIGN(nr, BITS_PER_LONG));
return 0; return 0;
} }
} }


@ -48,7 +48,11 @@
#define parent_node(node) ((void)(node),0) #define parent_node(node) ((void)(node),0)
#endif #endif
#ifndef cpumask_of_node #ifndef cpumask_of_node
#define cpumask_of_node(node) ((void)node, cpu_online_mask) #ifdef CONFIG_NEED_MULTIPLE_NODES
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#endif
#endif #endif
#ifndef pcibus_to_node #ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1) #define pcibus_to_node(bus) ((void)(bus), -1)


@ -568,7 +568,6 @@ struct request_queue {
#if defined(CONFIG_BLK_DEV_BSG) #if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn; bsg_job_fn *bsg_job_fn;
int bsg_job_size;
struct bsg_class_device bsg_dev; struct bsg_class_device bsg_dev;
#endif #endif


@ -24,6 +24,7 @@
#define _BLK_BSG_ #define _BLK_BSG_
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <scsi/scsi_request.h>
struct request; struct request;
struct device; struct device;
@ -37,6 +38,7 @@ struct bsg_buffer {
}; };
struct bsg_job { struct bsg_job {
struct scsi_request sreq;
struct device *dev; struct device *dev;
struct request *req; struct request *req;


@ -907,9 +907,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes /* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */ limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32 #if BITS_PER_LONG==32
#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) #define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64 #elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) #define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif #endif
#define FL_POSIX 1 #define FL_POSIX 1
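Worked out for the 32-bit case (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12 and BITS_PER_LONG = 32): the old macro gave (4096 << 31) - 1 = 2^43 - 1, just under 8 TiB, while the page cache addresses a file in PAGE_SIZE units through an unsigned long index, which allows roughly 2^32 pages of 4 KiB, about 16 TiB. The new ULONG_MAX << PAGE_SHIFT = 2^44 - 4096 matches that real indexing limit, and on 64-bit LLONG_MAX is simply the largest positive loff_t.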


@ -535,7 +535,7 @@ struct iio_buffer_setup_ops {
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp * @scan_timestamp: [INTERN] set if any buffers have requested timestamp
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes) * @trig: [INTERN] current device trigger (buffer modes)
* @trig_readonly [INTERN] mark the current trigger immutable * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received * @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received * @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table * @channels: [DRIVER] channel specification structure table


@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev,
/** /**
* iio_trigger_set_immutable() - set an immutable trigger on destination * iio_trigger_set_immutable() - set an immutable trigger on destination
* *
* @indio_dev - IIO device structure containing the device * @indio_dev: IIO device structure containing the device
* @trig - trigger to assign to device * @trig: trigger to assign to device
* *
**/ **/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig); int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);


@ -240,7 +240,7 @@ struct iommu_device {
struct list_head list; struct list_head list;
const struct iommu_ops *ops; const struct iommu_ops *ops;
struct fwnode_handle *fwnode; struct fwnode_handle *fwnode;
struct device dev; struct device *dev;
}; };
int iommu_device_register(struct iommu_device *iommu); int iommu_device_register(struct iommu_device *iommu);
@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
iommu->fwnode = fwnode; iommu->fwnode = fwnode;
} }
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
return (struct iommu_device *)dev_get_drvdata(dev);
}
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
{ {
} }
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
return NULL;
}
static inline void iommu_device_unregister(struct iommu_device *iommu) static inline void iommu_device_unregister(struct iommu_device *iommu)
{ {
} }


@ -1201,7 +1201,7 @@ extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record, extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs, int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx, struct hlist_head *head, int rctx,
struct task_struct *task); struct task_struct *task, struct perf_event *event);
extern void perf_bp_event(struct perf_event *event, void *data); extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags #ifndef perf_misc_flags


@ -508,9 +508,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
static inline void static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head, u64 count, struct pt_regs *regs, void *head,
struct task_struct *task) struct task_struct *task, struct perf_event *event)
{ {
perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
} }
#endif #endif


@ -22,7 +22,6 @@ enum {
LO_FLAGS_AUTOCLEAR = 4, LO_FLAGS_AUTOCLEAR = 4,
LO_FLAGS_PARTSCAN = 8, LO_FLAGS_PARTSCAN = 8,
LO_FLAGS_DIRECT_IO = 16, LO_FLAGS_DIRECT_IO = 16,
LO_FLAGS_BLOCKSIZE = 32,
}; };
#include <asm/posix_types.h> /* for __kernel_old_dev_t */ #include <asm/posix_types.h> /* for __kernel_old_dev_t */
@ -60,8 +59,6 @@ struct loop_info64 {
__u64 lo_init[2]; __u64 lo_init[2];
}; };
#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
/* /*
* Loop filter types * Loop filter types
*/ */


@ -7920,16 +7920,15 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
} }
} }
perf_tp_event(call->event.type, count, raw_data, size, regs, head, perf_tp_event(call->event.type, count, raw_data, size, regs, head,
rctx, task); rctx, task, NULL);
} }
EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx, struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task) struct task_struct *task, struct perf_event *event)
{ {
struct perf_sample_data data; struct perf_sample_data data;
struct perf_event *event;
struct perf_raw_record raw = { struct perf_raw_record raw = {
.frag = { .frag = {
@ -7943,9 +7942,15 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
perf_trace_buf_update(record, event_type); perf_trace_buf_update(record, event_type);
hlist_for_each_entry_rcu(event, head, hlist_entry) { /* Use the given event instead of the hlist */
if (event) {
if (perf_tp_event_match(event, &data, regs)) if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs); perf_swevent_event(event, count, &data, regs);
} else {
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
} }
/* /*
@ -9625,6 +9630,8 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (ret) if (ret)
return -EFAULT; return -EFAULT;
attr->size = size;
if (attr->__reserved_1) if (attr->__reserved_1)
return -EINVAL; return -EINVAL;


@ -806,6 +806,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_init_cpumask(mm); mm_init_cpumask(mm);
mm_init_aio(mm); mm_init_aio(mm);
mm_init_owner(mm, p); mm_init_owner(mm, p);
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_mm_init(mm); mmu_notifier_mm_init(mm);
init_tlb_flush_pending(mm); init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS


@ -70,9 +70,10 @@ static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
list_for_each_entry_safe(curr, next, &wq_head->head, entry) { list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
unsigned flags = curr->flags; unsigned flags = curr->flags;
int ret = curr->func(curr, mode, wake_flags, key);
if (curr->func(curr, mode, wake_flags, key) && if (ret < 0)
(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break;
if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break; break;
} }
} }


@ -203,6 +203,7 @@ struct timer_base {
bool migration_enabled; bool migration_enabled;
bool nohz_active; bool nohz_active;
bool is_idle; bool is_idle;
bool must_forward_clk;
DECLARE_BITMAP(pending_map, WHEEL_SIZE); DECLARE_BITMAP(pending_map, WHEEL_SIZE);
struct hlist_head vectors[WHEEL_SIZE]; struct hlist_head vectors[WHEEL_SIZE];
} ____cacheline_aligned; } ____cacheline_aligned;
@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base) static inline void forward_timer_base(struct timer_base *base)
{ {
unsigned long jnow = READ_ONCE(jiffies); unsigned long jnow;
/* /*
* We only forward the base when it's idle and we have a delta between * We only forward the base when we are idle or have just come out of
* base clock and jiffies. * idle (must_forward_clk logic), and have a delta between base clock
* and jiffies. In the common case, run_timers will take care of it.
*/ */
if (!base->is_idle || (long) (jnow - base->clk) < 2) if (likely(!base->must_forward_clk))
return;
jnow = READ_ONCE(jiffies);
base->must_forward_clk = base->is_idle;
if ((long)(jnow - base->clk) < 2)
return; return;
/* /*
@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
* same array bucket then just return: * same array bucket then just return:
*/ */
if (timer_pending(timer)) { if (timer_pending(timer)) {
/*
* The downside of this optimization is that it can result in
* larger granularity than you would get from adding a new
* timer with this expiry.
*/
if (timer->expires == expires) if (timer->expires == expires)
return 1; return 1;
@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
* dequeue/enqueue dance. * dequeue/enqueue dance.
*/ */
base = lock_timer_base(timer, &flags); base = lock_timer_base(timer, &flags);
forward_timer_base(base);
clk = base->clk; clk = base->clk;
idx = calc_wheel_index(expires, clk); idx = calc_wheel_index(expires, clk);
@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
} }
} else { } else {
base = lock_timer_base(timer, &flags); base = lock_timer_base(timer, &flags);
forward_timer_base(base);
} }
ret = detach_if_pending(timer, base, false); ret = detach_if_pending(timer, base, false);
@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
raw_spin_lock(&base->lock); raw_spin_lock(&base->lock);
WRITE_ONCE(timer->flags, WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu); (timer->flags & ~TIMER_BASEMASK) | base->cpu);
forward_timer_base(base);
} }
} }
/* Try to forward a stale timer base clock */
forward_timer_base(base);
timer->expires = expires; timer->expires = expires;
/* /*
* If 'idx' was calculated above and the base time did not advance * If 'idx' was calculated above and the base time did not advance
@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
WRITE_ONCE(timer->flags, WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | cpu); (timer->flags & ~TIMER_BASEMASK) | cpu);
} }
forward_timer_base(base);
debug_activate(timer, timer->expires); debug_activate(timer, timer->expires);
internal_add_timer(base, timer); internal_add_timer(base, timer);
@ -1497,10 +1510,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (!is_max_delta) if (!is_max_delta)
expires = basem + (u64)(nextevt - basej) * TICK_NSEC; expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/* /*
* If we expect to sleep more than a tick, mark the base idle: * If we expect to sleep more than a tick, mark the base idle.
* Also the tick is stopped so any added timer must forward
* the base clk itself to keep granularity small. This idle
* logic is only maintained for the BASE_STD base, deferrable
* timers may still see large granularity skew (by design).
*/ */
if ((expires - basem) > TICK_NSEC) if ((expires - basem) > TICK_NSEC) {
base->must_forward_clk = true;
base->is_idle = true; base->is_idle = true;
}
} }
raw_spin_unlock(&base->lock); raw_spin_unlock(&base->lock);
@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{ {
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
/*
* must_forward_clk must be cleared before running timers so that any
* timer functions that call mod_timer will not try to forward the
* base. Idle tracking / clock forwarding logic is only used with
* BASE_STD timers.
*
* The deferrable base does not do idle tracking at all, so we do
* not forward it. This can result in very large variations in
* granularity for deferrable timers, but they can be deferred for
* long periods due to idle.
*/
base->must_forward_clk = false;
__run_timers(base); __run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));


@ -306,6 +306,7 @@ static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *pt_regs) struct ftrace_ops *ops, struct pt_regs *pt_regs)
{ {
struct perf_event *event;
struct ftrace_entry *entry; struct ftrace_entry *entry;
struct hlist_head *head; struct hlist_head *head;
struct pt_regs regs; struct pt_regs regs;
@ -329,8 +330,9 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip; entry->ip = ip;
entry->parent_ip = parent_ip; entry->parent_ip = parent_ip;
event = container_of(ops, struct perf_event, ftrace_ops);
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN, perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
1, &regs, head, NULL); 1, &regs, head, NULL, event);
#undef ENTRY_SIZE #undef ENTRY_SIZE
} }


@ -1200,7 +1200,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
memset(&entry[1], 0, dsize); memset(&entry[1], 0, dsize);
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
head, NULL); head, NULL, NULL);
} }
NOKPROBE_SYMBOL(kprobe_perf_func); NOKPROBE_SYMBOL(kprobe_perf_func);
@ -1236,7 +1236,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry->ret_ip = (unsigned long)ri->ret_addr; entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
head, NULL); head, NULL, NULL);
} }
NOKPROBE_SYMBOL(kretprobe_perf_func); NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif /* CONFIG_PERF_EVENTS */ #endif /* CONFIG_PERF_EVENTS */

Some files were not shown because too many files have changed in this diff