Merge tag 'usb-serial-4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/johan/usb-serial into usb-next

Johan writes:

USB-serial fixes for v4.4-rc8

Here's another device id for cp210x.

Signed-off-by: Johan Hovold <johan@kernel.org>
commit 48346892ff
Greg Kroah-Hartman, 2016-01-03 15:19:12 -08:00
92 changed files with 980 additions and 431 deletions

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Blurry Fish Butt
# *DOCUMENTATION*

View File

@ -81,7 +81,7 @@ endif
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
# Finally dump everything into kernel build system
KBUILD_CFLAGS += $(cflags-y)

View File

@ -62,9 +62,7 @@ extern int ioc_exists;
#define ARC_REG_IC_IVIC 0x10
#define ARC_REG_IC_CTRL 0x11
#define ARC_REG_IC_IVIL 0x19
#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
#define ARC_REG_IC_PTAG 0x1E
#endif
#define ARC_REG_IC_PTAG_HI 0x1F
/* Bit val in IC_CTRL */

View File

@ -293,13 +293,13 @@ static void init_unwind_hdr(struct unwind_table *table,
const u32 *cie = cie_for_fde(fde, table);
signed ptrType;
if (cie == &not_fde) /* only process FDE here */
if (cie == &not_fde)
continue;
if (cie == NULL || cie == &bad_cie)
continue; /* say FDE->CIE.version != 1 */
goto ret_err;
ptrType = fde_pointer_type(cie);
if (ptrType < 0)
continue;
goto ret_err;
ptr = (const u8 *)(fde + 2);
if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@ -315,14 +315,14 @@ static void init_unwind_hdr(struct unwind_table *table,
}
if (tableSize || !n)
return;
goto ret_err;
hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ 2 * n * sizeof(unsigned long);
header = alloc(hdrSize);
if (!header)
return;
goto ret_err;
header->version = 1;
header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
@ -343,10 +343,6 @@ static void init_unwind_hdr(struct unwind_table *table,
if (fde[1] == 0xffffffff)
continue; /* this is a CIE */
if (*(u8 *)(cie + 2) != 1)
continue; /* FDE->CIE.version not supported */
ptr = (const u8 *)(fde + 2);
header->table[n].start = read_pointer(&ptr,
(const u8 *)(fde + 1) +
@ -365,6 +361,10 @@ static void init_unwind_hdr(struct unwind_table *table,
table->hdrsz = hdrSize;
smp_wmb();
table->header = (const void *)header;
return;
ret_err:
panic("Attention !!! Dwarf FDE parsing errors\n");
}
#ifdef CONFIG_MODULES
@ -523,8 +523,7 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
|| (*cie & (sizeof(*cie) - 1))
|| (cie[1] != 0xffffffff)
|| ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */
|| (cie[1] != 0xffffffff))
return NULL; /* this is not a (valid) CIE */
return cie;
}
@ -605,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
const u8 *ptr = (const u8 *)(cie + 2);
unsigned version = *ptr;
if (version != 1)
return -1; /* unsupported */
if (*++ptr) {
const char *aug;
const u8 *end = (const u8 *)(cie + 1) + *cie;
@ -1019,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
ptr = (const u8 *)(cie + 2);
end = (const u8 *)(cie + 1) + *cie;
frame->call_frame = 1;
if ((state.version = *ptr) != 1)
cie = NULL; /* unsupported version */
else if (*++ptr) {
if (*++ptr) {
/* check if augmentation size is first (thus present) */
if (*ptr == 'z') {
while (++ptr < end && *ptr) {

View File

@ -111,7 +111,7 @@ void __kunmap_atomic(void *kv)
}
EXPORT_SYMBOL(__kunmap_atomic);
noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
pgd_t *pgd_k;
pud_t *pud_k;
@ -127,7 +127,7 @@ noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
return pte_k;
}
void kmap_init(void)
void __init kmap_init(void)
{
/* Due to recursive include hell, we can't do this in processor.h */
BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));

View File

@ -154,7 +154,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
status = "okay";
};

View File

@ -94,7 +94,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
status = "okay";
};

View File

@ -154,7 +154,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
status = "okay";
};

View File

@ -155,7 +155,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
status = "okay";
};

View File

@ -145,7 +145,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
status = "okay";
};

View File

@ -113,14 +113,14 @@
&clks {
assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>,
<&clks IMX6QDL_PLL4_BYPASS>,
<&clks IMX6QDL_CLK_PLL4_POST_DIV>,
<&clks IMX6QDL_CLK_LDB_DI0_SEL>,
<&clks IMX6QDL_CLK_LDB_DI1_SEL>;
<&clks IMX6QDL_CLK_LDB_DI1_SEL>,
<&clks IMX6QDL_CLK_PLL4_POST_DIV>;
assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>,
<&clks IMX6QDL_PLL4_BYPASS_SRC>,
<&clks IMX6QDL_CLK_PLL3_USB_OTG>,
<&clks IMX6QDL_CLK_PLL3_USB_OTG>;
assigned-clock-rates = <0>, <0>, <24576000>;
assigned-clock-rates = <0>, <0>, <0>, <0>, <24576000>;
};
&ecspi1 {

View File

@ -189,3 +189,7 @@
};
};
&uart3 {
interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core OMAP4_UART3_RX>;
};

View File

@ -83,6 +83,7 @@
reg = <0x5d>;
interrupt-parent = <&pio>;
interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */
touchscreen-swapped-x-y;
};
};

View File

@ -399,7 +399,7 @@
/* CPU DFLL clock */
clock@0,70110000 {
status = "okay";
status = "disabled";
vdd-cpu-supply = <&vdd_cpu>;
nvidia,i2c-fs-rate = <400000>;
};

View File

@ -65,6 +65,8 @@ config SOC_AM43XX
select MACH_OMAP_GENERIC
select MIGHT_HAVE_CACHE_L2X0
select HAVE_ARM_SCU
select GENERIC_CLOCKEVENTS_BROADCAST
select HAVE_ARM_TWD
config SOC_DRA7XX
bool "TI DRA7XX"

View File

@ -320,6 +320,12 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
return r;
}
#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
void tick_broadcast(const struct cpumask *mask)
{
}
#endif
static void __init omap2_gp_clockevent_init(int gptimer_id,
const char *fck_source,
const char *property)

View File

@ -599,7 +599,7 @@ extern void __put_user_unknown(void);
* On error, the variable @x is set to zero.
*/
#define __get_user_unaligned(x,ptr) \
__get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
/*
* Yuck. We need two variants, one for 64bit operation and one
@ -620,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
do { \
switch (size) { \
case 1: __get_data_asm(val, "lb", ptr); break; \
case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
default: __get_user_unaligned_unknown(); break; \
} \
@ -1122,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
if (eva_kernel_access()) { \
__cu_len = __invoke_copy_from_kernel(__cu_to, \
__cu_from, \
__cu_len); \
} else { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
} \
__cu_len; \
})
@ -1229,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
"move\t$6, %2\n\t"
__MODULE_JAL(__bzero)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
"move\t$6, %2\n\t"
__MODULE_JAL(__bzero_kernel)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
} else {
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
"move\t$6, %2\n\t"
__MODULE_JAL(__bzero)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
}
return res;
}
@ -1384,7 +1402,7 @@ static inline long strlen_user(const char __user *s)
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_kernel_asm)
__MODULE_JAL(__strlen_user_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s)

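The hunk above repeatedly branches on eva_kernel_access() to pick either a dedicated kernel-address helper or the original, faulting user path. Below is a reduced userspace sketch of that dispatch; every helper is a stub standing in for the MIPS assembly routines, and only the branching shape comes from the diff.

#include <stdio.h>
#include <string.h>

static int eva_kernel_access(void) { return 0; }        /* stub: user context */

/* Stubs standing in for __invoke_copy_from_{kernel,user}(); return bytes left. */
static size_t copy_from_kernel(void *to, const void *from, size_t n)
{ memcpy(to, from, n); return 0; }
static size_t copy_from_user_path(void *to, const void *from, size_t n)
{ memcpy(to, from, n); return 0; }      /* the real one may fault partway */

static size_t do_copy_from(void *to, const void *from, size_t n)
{
        if (eva_kernel_access())
                return copy_from_kernel(to, from, n);
        /* might_fault() belongs here, on the user path only */
        return copy_from_user_path(to, from, n);
}

int main(void)
{
        char src[] = "abc", dst[4];

        printf("bytes left: %zu\n", do_copy_from(dst, src, sizeof(src)));
        return 0;
}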
View File

@ -257,7 +257,6 @@ LEAF(mips_cps_core_init)
has_mt t0, 3f
.set push
.set mips64r2
.set mt
/* Only allow 1 TC per VPE to execute... */
@ -376,7 +375,6 @@ LEAF(mips_cps_boot_vpes)
nop
.set push
.set mips64r2
.set mt
1: /* Enter VPE configuration state */

View File

@ -17,6 +17,7 @@
#include <asm/fpu.h>
#include <asm/msa.h>
extern void *__bzero_kernel(void *__s, size_t __count);
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_kernel_nocheck_asm(char *__to,
const char *__from, long __len);
@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
EXPORT_SYMBOL(__copy_in_user_eva);
EXPORT_SYMBOL(__copy_to_user_eva);
EXPORT_SYMBOL(__copy_user_inatomic_eva);
EXPORT_SYMBOL(__bzero_kernel);
#endif
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);

View File

@ -283,6 +283,8 @@ LEAF(memset)
1:
#ifndef CONFIG_EVA
FEXPORT(__bzero)
#else
FEXPORT(__bzero_kernel)
#endif
__BUILD_BZERO LEGACY_MODE

View File

@ -221,7 +221,6 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int rt288x_pci_probe(struct platform_device *pdev)
{
void __iomem *io_map_base;
int i;
rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE);

View File

@ -39,7 +39,6 @@ extern void msp_serial_setup(void);
void msp7120_reset(void)
{
void *start, *end, *iptr;
register int i;
/* Disable all interrupts */
local_irq_disable();

View File

@ -26,7 +26,7 @@ static inline void kb_wait(void)
/* XXX This ends up at the ARC firmware prompt ... */
void sni_machine_restart(char *command)
{
int i, j;
int i;
/* This does a normal reset via the keyboard controller, like a PC.
We can do that easier ... */

View File

@ -26,8 +26,8 @@ aflags-vdso := $(ccflags-vdso) \
# the comments on that file.
#
ifndef CONFIG_CPU_MIPSR6
ifeq ($(call ld-ifversion, -gt, 22400000, y),)
$(warning MIPS VDSO requires binutils > 2.24)
ifeq ($(call ld-ifversion, -lt, 22500000, y),)
$(warning MIPS VDSO requires binutils >= 2.25)
obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
ccflags-vdso += -DDISABLE_MIPS_VDSO
endif

View File

@ -435,6 +435,55 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
regs->gr[28]);
}
/*
* Check how the syscall number gets loaded into %r20 within
* the delay branch in userspace and adjust as needed.
*/
static void check_syscallno_in_delay_branch(struct pt_regs *regs)
{
u32 opcode, source_reg;
u32 __user *uaddr;
int err;
/* Usually we don't have to restore %r20 (the system call number)
* because it gets loaded in the delay slot of the branch external
* instruction via the ldi instruction.
* In some cases a register-to-register copy instruction might have
* been used instead, in which case we need to copy the syscall
* number into the source register before returning to userspace.
*/
/* A syscall is just a branch, so all we have to do is fiddle the
* return pointer so that the ble instruction gets executed again.
*/
regs->gr[31] -= 8; /* delayed branching */
/* Get assembler opcode of code in delay branch */
uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
err = get_user(opcode, uaddr);
if (err)
return;
/* Check if delay branch uses "ldi int,%r20" */
if ((opcode & 0xffff0000) == 0x34140000)
return; /* everything ok, just return */
/* Check if delay branch uses "nop" */
if (opcode == INSN_NOP)
return;
/* Check if delay branch uses "copy %rX,%r20" */
if ((opcode & 0xffe0ffff) == 0x08000254) {
source_reg = (opcode >> 16) & 31;
regs->gr[source_reg] = regs->gr[20];
return;
}
pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
current->comm, task_pid_nr(current), opcode);
}
static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
@ -457,10 +506,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
}
/* fallthrough */
case -ERESTARTNOINTR:
/* A syscall is just a branch, so all
* we have to do is fiddle the return pointer.
*/
regs->gr[31] -= 8; /* delayed branching */
check_syscallno_in_delay_branch(regs);
break;
}
}
@ -510,15 +556,9 @@ insert_restart_trampoline(struct pt_regs *regs)
}
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR: {
/* Hooray for delayed branching. We don't
* have to restore %r20 (the system call
* number) because it gets loaded in the delay
* slot of the branch external instruction.
*/
regs->gr[31] -= 8;
case -ERESTARTNOINTR:
check_syscallno_in_delay_branch(regs);
return;
}
default:
break;
}
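The delay-branch check above boils down to two mask/match tests. Here they are in a standalone form for reference; the constants (0xffff0000 / 0x34140000 for "ldi imm,%r20", 0xffe0ffff / 0x08000254 for "copy %rX,%r20") are taken from the hunk, while the program and the sample encodings are purely illustrative.

#include <stdio.h>
#include <stdint.h>

/* Opcode patterns as used by check_syscallno_in_delay_branch(). */
static const char *classify(uint32_t opcode)
{
        if ((opcode & 0xffff0000) == 0x34140000)
                return "ldi imm,%r20 (syscall number reloaded by the branch)";
        if ((opcode & 0xffe0ffff) == 0x08000254)
                return "copy %rX,%r20 (source register must be refilled)";
        return "unexpected opcode";
}

int main(void)
{
        uint32_t op = 0x341400c0;       /* hypothetical "ldi 0xc0,%r20" */

        printf("%s\n", classify(op));
        /* The copy case also recovers the source register number: */
        printf("rX = %u\n", (0x08130254 >> 16) & 31);   /* hypothetical copy %r19,%r20 */
        return 0;
}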

View File

@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
/*
* Check for illegal transactional state bit combination
* and if we find it, force the TS field to a safe state.
*/
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
msr &= ~MSR_TS_MASK;
vcpu->arch.shregs.msr = msr;
kvmppc_end_cede(vcpu);
}
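A hedged sketch of the TS-field check: both TS bits set is the illegal transactional-state combination the comment refers to, and the fix simply clears the field. The MSR_TS_MASK position below (bits 33 and 34) is an assumption for illustration, not taken from this diff.

#include <stdio.h>
#include <stdint.h>

#define MSR_TS_MASK     (3ULL << 33)    /* assumed TS field position */

static uint64_t sanitize_msr(uint64_t msr)
{
        /* TS = 0b11 is architecturally illegal; force a safe state. */
        if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                msr &= ~MSR_TS_MASK;
        return msr;
}

int main(void)
{
        uint64_t msr = 0x600001234ULL;  /* hypothetical MSR image, TS = 0b11 */

        printf("%#llx -> %#llx\n",
               (unsigned long long)msr,
               (unsigned long long)sanitize_msr(msr));
        return 0;
}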

View File

@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
}
if (separator)
ptr += sprintf(ptr, "%c", separator);
/*
* Use four '%' characters below because of the
* following two conversions:
*
* 1) sprintf: %%%%r -> %%r
* 2) printk : %%r -> %r
*/
if (operand->flags & OPERAND_GPR)
ptr += sprintf(ptr, "%%r%i", value);
ptr += sprintf(ptr, "%%%%r%i", value);
else if (operand->flags & OPERAND_FPR)
ptr += sprintf(ptr, "%%f%i", value);
ptr += sprintf(ptr, "%%%%f%i", value);
else if (operand->flags & OPERAND_AR)
ptr += sprintf(ptr, "%%a%i", value);
ptr += sprintf(ptr, "%%%%a%i", value);
else if (operand->flags & OPERAND_CR)
ptr += sprintf(ptr, "%%c%i", value);
ptr += sprintf(ptr, "%%%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
ptr += sprintf(ptr, "%%%%v%i", value);
else if (operand->flags & OPERAND_PCREL)
ptr += sprintf(ptr, "%lx", (signed int) value
+ addr);
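The four-'%' comment above can be reproduced in plain C: the first formatting pass halves each "%%", and a second pass (printk in the kernel, plain printf here) halves them again.

#include <stdio.h>

int main(void)
{
        char buf[16];

        sprintf(buf, "%%%%r%i", 1);     /* pass 1: buf now holds "%%r1" */
        printf(buf);                    /* pass 2: prints "%r1" */
        putchar('\n');                  /* (runtime format string on purpose,
                                           to mimic printk's second pass) */
        return 0;
}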

View File

@ -95,6 +95,7 @@
* really available. So we simply advertise only "crypto" support.
*/
#define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */
#define HWCAP_SPARC_ADI 0x08000000 /* ADI available */
#define CORE_DUMP_USE_REGSET

View File

@ -417,8 +417,9 @@
#define __NR_bpf 349
#define __NR_execveat 350
#define __NR_membarrier 351
#define __NR_userfaultfd 352
#define NR_syscalls 352
#define NR_syscalls 353
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001

View File

@ -946,6 +946,12 @@ ENTRY(__retl_one)
mov 1, %o0
ENDPROC(__retl_one)
ENTRY(__retl_one_fp)
VISExitHalf
retl
mov 1, %o0
ENDPROC(__retl_one_fp)
ENTRY(__ret_one_asi)
wr %g0, ASI_AIUS, %asi
ret
@ -958,6 +964,13 @@ ENTRY(__retl_one_asi)
mov 1, %o0
ENDPROC(__retl_one_asi)
ENTRY(__retl_one_asi_fp)
wr %g0, ASI_AIUS, %asi
VISExitHalf
retl
mov 1, %o0
ENDPROC(__retl_one_asi_fp)
ENTRY(__retl_o1)
retl
mov %o1, %o0

View File

@ -1828,11 +1828,18 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
u64 saved_fault_address = current_thread_info()->fault_address;
u8 saved_fault_code = get_thread_fault_code();
mm_segment_t old_fs;
perf_callchain_store(entry, regs->tpc);
if (!current->mm)
return;
old_fs = get_fs();
set_fs(USER_DS);
flushw_user();
pagefault_disable();
@ -1843,4 +1850,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
perf_callchain_user_64(entry, regs);
pagefault_enable();
set_fs(old_fs);
set_thread_fault_code(saved_fault_code);
current_thread_info()->fault_address = saved_fault_address;
}

View File

@ -73,7 +73,13 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
andn %l1, %l4, %l1
srl %l4, 20, %l4
ba,pt %xcc, rtrap_no_irq_enable
wrpr %l4, %pil
nop
/* Do not actually set the %pil here. We will do that
* below after we clear PSTATE_IE in the %pstate register.
* If we re-enable interrupts here, we can recurse down
* the hardirq stack potentially endlessly, causing a
* stack overflow.
*/
.align 64
.globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall

View File

@ -380,7 +380,8 @@ static const char *hwcaps[] = {
*/
"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
"ima", "cspare", "pause", "cbcond",
"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
"adp",
};
static const char *crypto_hwcaps[] = {
@ -396,7 +397,7 @@ void cpucap_info(struct seq_file *m)
seq_puts(m, "cpucaps\t\t: ");
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
if (caps & bit) {
if (hwcaps[i] && (caps & bit)) {
seq_printf(m, "%s%s",
printed ? "," : "", hwcaps[i]);
printed++;
@ -450,7 +451,7 @@ static void __init report_hwcaps(unsigned long caps)
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
if (caps & bit)
if (hwcaps[i] && (caps & bit))
report_one_hwcap(&printed, hwcaps[i]);
}
if (caps & HWCAP_SPARC_CRYPTO)
@ -485,7 +486,7 @@ static unsigned long __init mdesc_cpu_hwcap_list(void)
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
if (!strcmp(prop, hwcaps[i])) {
if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
caps |= bit;
break;
}
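The guarded walks above in miniature: the NULL placeholder keeps hwcaps[] indices aligned with the hardware bit numbers, so every loop must test the entry before dereferencing it. The array below is abbreviated for illustration.

#include <stdio.h>

static const char *hwcaps[] = {
        "mul32", "div32", NULL /* reserved */, "adp",
};

int main(void)
{
        unsigned long caps = 0xdUL;     /* bits 0, 2, 3; bit 2 is unnamed */
        unsigned int i;
        int printed = 0;

        for (i = 0; i < sizeof(hwcaps) / sizeof(hwcaps[0]); i++) {
                unsigned long bit = 1UL << i;

                if (hwcaps[i] && (caps & bit))
                        printf("%s%s", printed++ ? "," : "", hwcaps[i]);
        }
        putchar('\n');                  /* prints "mul32,adp" */
        return 0;
}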

View File

@ -87,4 +87,4 @@ sys_call_table:
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd

View File

@ -88,7 +88,7 @@ sys_call_table32:
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd
#endif /* CONFIG_COMPAT */
@ -168,4 +168,4 @@ sys_call_table:
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_LD_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_asi_fp;\
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_ST_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_asi_fp;\
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif

View File

@ -34,10 +34,16 @@
#ifndef EX_LD
#define EX_LD(x) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x) x
#endif
#ifndef EX_ST
#define EX_ST(x) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
@ -134,40 +140,40 @@
fsrc2 %x6, %f12; \
fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \
EX_LD(LOAD(ldd, base + 0x00, %x0))
EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
#define FREG_LOAD_2(base, x0, x1) \
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
EX_LD(LOAD(ldd, base + 0x08, %x1));
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
#define FREG_LOAD_3(base, x0, x1, x2) \
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
EX_LD(LOAD(ldd, base + 0x10, %x2));
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
EX_LD(LOAD(ldd, base + 0x18, %x3));
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
EX_LD(LOAD(ldd, base + 0x18, %x3)); \
EX_LD(LOAD(ldd, base + 0x20, %x4));
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
EX_LD(LOAD(ldd, base + 0x18, %x3)); \
EX_LD(LOAD(ldd, base + 0x20, %x4)); \
EX_LD(LOAD(ldd, base + 0x28, %x5));
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
EX_LD(LOAD(ldd, base + 0x18, %x3)); \
EX_LD(LOAD(ldd, base + 0x20, %x4)); \
EX_LD(LOAD(ldd, base + 0x28, %x5)); \
EX_LD(LOAD(ldd, base + 0x30, %x6));
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
.register %g2,#scratch
.register %g3,#scratch
@ -275,11 +281,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
/* fall through for 0 < low bits < 8 */
110: sub %o4, 64, %g2
EX_LD(LOAD_BLK(%g2, %f0))
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
EX_LD_FP(LOAD_BLK(%g2, %f0))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -290,10 +296,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
120: sub %o4, 56, %g2
FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -304,10 +310,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
130: sub %o4, 48, %g2
FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -318,10 +324,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
140: sub %o4, 40, %g2
FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_5(f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -332,10 +338,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
150: sub %o4, 32, %g2
FREG_LOAD_4(%g2, f0, f2, f4, f6)
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_4(f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -346,10 +352,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
160: sub %o4, 24, %g2
FREG_LOAD_3(%g2, f0, f2, f4)
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_3(f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -360,10 +366,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
170: sub %o4, 16, %g2
FREG_LOAD_2(%g2, f0, f2)
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_2(f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -374,10 +380,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
180: sub %o4, 8, %g2
FREG_LOAD_1(%g2, f0)
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
EX_LD(LOAD_BLK(%o4, %f16))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f16))
FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
FREG_MOVE_1(f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
@ -387,10 +393,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
nop
190:
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
subcc %g1, 64, %g1
EX_LD(LOAD_BLK(%o4, %f0))
EX_ST(STORE_BLK(%f0, %o4 + %g3))
EX_LD_FP(LOAD_BLK(%o4, %f0))
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_LD_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_asi_fp;\
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_ST_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_asi_fp;\
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif

View File

@ -48,10 +48,16 @@
#ifndef EX_LD
#define EX_LD(x) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x) x
#endif
#ifndef EX_ST
#define EX_ST(x) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
@ -210,17 +216,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %o4, %o2
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
1: EX_LD(LOAD(ldd, %g1 + 0x08, %f2))
EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
subcc %o4, 0x40, %o4
EX_LD(LOAD(ldd, %g1 + 0x10, %f4))
EX_LD(LOAD(ldd, %g1 + 0x18, %f6))
EX_LD(LOAD(ldd, %g1 + 0x20, %f8))
EX_LD(LOAD(ldd, %g1 + 0x28, %f10))
EX_LD(LOAD(ldd, %g1 + 0x30, %f12))
EX_LD(LOAD(ldd, %g1 + 0x38, %f14))
EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
faligndata %f0, %f2, %f16
EX_LD(LOAD(ldd, %g1 + 0x40, %f0))
EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
faligndata %f2, %f4, %f18
add %g1, 0x40, %g1
faligndata %f4, %f6, %f20
@ -229,14 +235,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
EX_ST(STORE(std, %f16, %o0 + 0x00))
EX_ST(STORE(std, %f18, %o0 + 0x08))
EX_ST(STORE(std, %f20, %o0 + 0x10))
EX_ST(STORE(std, %f22, %o0 + 0x18))
EX_ST(STORE(std, %f24, %o0 + 0x20))
EX_ST(STORE(std, %f26, %o0 + 0x28))
EX_ST(STORE(std, %f28, %o0 + 0x30))
EX_ST(STORE(std, %f30, %o0 + 0x38))
EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_LD_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_fp;\
.text; \
.align 4;
#define FUNC_NAME ___copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_ST_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_fp;\
.text; \
.align 4;
#define FUNC_NAME ___copy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS

View File

@ -25,10 +25,16 @@
#ifndef EX_LD
#define EX_LD(x) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x) x
#endif
#ifndef EX_ST
#define EX_ST(x) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
@ -73,8 +79,8 @@
faligndata %f8, %f9, %f62;
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
EX_LD(LOAD_BLK(%src, %fdest)); \
EX_ST(STORE_BLK(%fsrc, %dest)); \
EX_LD_FP(LOAD_BLK(%src, %fdest)); \
EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
add %src, 0x40, %src; \
subcc %len, 0x40, %len; \
be,pn %xcc, jmptgt; \
@ -89,12 +95,12 @@
#define DO_SYNC membar #Sync;
#define STORE_SYNC(dest, fsrc) \
EX_ST(STORE_BLK(%fsrc, %dest)); \
EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
add %dest, 0x40, %dest; \
DO_SYNC
#define STORE_JUMP(dest, fsrc, target) \
EX_ST(STORE_BLK(%fsrc, %dest)); \
EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target; \
nop;
@ -103,7 +109,7 @@
subcc %left, 8, %left;\
bl,pn %xcc, 95f; \
faligndata %f0, %f1, %f48; \
EX_ST(STORE(std, %f48, %dest)); \
EX_ST_FP(STORE(std, %f48, %dest)); \
add %dest, 8, %dest;
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
@ -160,8 +166,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@ -172,20 +178,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
EX_LD(LOAD(ldd, %o1, %f4))
1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
EX_LD_FP(LOAD(ldd, %o1, %f4))
1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
EX_ST(STORE(std, %f0, %o0))
EX_ST_FP(STORE(std, %f0, %o0))
be,pn %icc, 3f
add %o0, 0x8, %o0
EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f0
EX_ST(STORE(std, %f0, %o0))
EX_ST_FP(STORE(std, %f0, %o0))
bne,pt %icc, 1b
add %o0, 0x8, %o0
@ -208,13 +214,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %g1, %GLOBAL_SPARE, %g1
subcc %o2, %g3, %o2
EX_LD(LOAD_BLK(%o1, %f0))
EX_LD_FP(LOAD_BLK(%o1, %f0))
add %o1, 0x40, %o1
add %g1, %g3, %g1
EX_LD(LOAD_BLK(%o1, %f16))
EX_LD_FP(LOAD_BLK(%o1, %f16))
add %o1, 0x40, %o1
sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
EX_LD(LOAD_BLK(%o1, %f32))
EX_LD_FP(LOAD_BLK(%o1, %f32))
add %o1, 0x40, %o1
/* There are 8 instances of the unrolled loop,
@ -426,28 +432,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
62: FINISH_VISCHUNK(o0, f44, f46, g3)
63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
93: EX_LD(LOAD(ldd, %o1, %f2))
93: EX_LD_FP(LOAD(ldd, %o1, %f2))
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
EX_ST(STORE(std, %f8, %o0))
EX_ST_FP(STORE(std, %f8, %o0))
bl,pn %xcc, 95f
add %o0, 8, %o0
EX_LD(LOAD(ldd, %o1, %f0))
EX_LD_FP(LOAD(ldd, %o1, %f0))
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
EX_ST(STORE(std, %f8, %o0))
EX_ST_FP(STORE(std, %f8, %o0))
bge,pt %xcc, 93b
add %o0, 8, %o0
95: brz,pt %o2, 2f
mov %g1, %o1
1: EX_LD(LOAD(ldub, %o1, %o3))
1: EX_LD_FP(LOAD(ldub, %o1, %o3))
add %o1, 1, %o1
subcc %o2, 1, %o2
EX_ST(STORE(stb, %o3, %o0))
EX_ST_FP(STORE(stb, %o3, %o0))
bne,pt %xcc, 1b
add %o0, 1, %o0

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_LD_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_fp;\
.text; \
.align 4;
#define FUNC_NAME U3copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#define EX_RETVAL(x) 0

View File

@ -11,6 +11,14 @@
.text; \
.align 4;
#define EX_ST_FP(x) \
98: x; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_one_fp;\
.text; \
.align 4;
#define FUNC_NAME U3copy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS

View File

@ -24,10 +24,16 @@
#ifndef EX_LD
#define EX_LD(x) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x) x
#endif
#ifndef EX_ST
#define EX_ST(x) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
@ -120,8 +126,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
@ -132,20 +138,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
EX_LD(LOAD(ldd, %o1, %f4))
1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
EX_LD_FP(LOAD(ldd, %o1, %f4))
1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
EX_ST(STORE(std, %f0, %o0))
EX_ST_FP(STORE(std, %f0, %o0))
be,pn %icc, 3f
add %o0, 0x8, %o0
EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
EX_ST(STORE(std, %f2, %o0))
EX_ST_FP(STORE(std, %f2, %o0))
bne,pt %icc, 1b
add %o0, 0x8, %o0
@ -155,25 +161,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
LOAD(prefetch, %o1 + 0x080, #one_read)
LOAD(prefetch, %o1 + 0x0c0, #one_read)
LOAD(prefetch, %o1 + 0x100, #one_read)
EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
LOAD(prefetch, %o1 + 0x140, #one_read)
EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
LOAD(prefetch, %o1 + 0x180, #one_read)
EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16
EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f2, %f4, %f18
EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f4, %f6, %f20
EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f6, %f8, %f22
EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
faligndata %f8, %f10, %f24
EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f10, %f12, %f26
EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
@ -184,26 +190,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
.align 64
1:
EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
faligndata %f12, %f14, %f28
EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
faligndata %f14, %f0, %f30
EX_ST(STORE_BLK(%f16, %o0))
EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
EX_ST_FP(STORE_BLK(%f16, %o0))
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f0, %f2, %f16
add %o0, 0x40, %o0
EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f2, %f4, %f18
EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f4, %f6, %f20
EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22
EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f8, %f10, %f24
EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26
bg,pt %XCC, 1b
@ -211,29 +217,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
/* Finally we copy the last full 64-byte block. */
2:
EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
faligndata %f12, %f14, %f28
EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
faligndata %f14, %f0, %f30
EX_ST(STORE_BLK(%f16, %o0))
EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
EX_ST_FP(STORE_BLK(%f16, %o0))
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f0, %f2, %f16
EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f2, %f4, %f18
EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f4, %f6, %f20
EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
faligndata %f6, %f8, %f22
EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
EX_ST(STORE_BLK(%f16, %o0))
EX_ST_FP(STORE_BLK(%f16, %o0))
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
@ -253,20 +259,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %o2, %g2, %o2
be,a,pt %XCC, 1f
EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))
1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
EX_ST(STORE(std, %f8, %o0))
EX_ST_FP(STORE(std, %f8, %o0))
be,pn %XCC, 2f
add %o0, 0x8, %o0
EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
EX_ST(STORE(std, %f8, %o0))
EX_ST_FP(STORE(std, %f8, %o0))
bne,pn %XCC, 1b
add %o0, 0x8, %o0

View File

@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->edx & bit(X86_FEATURE_MTRR));
}
static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

View File

@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
static u8 mtrr_disabled_type(void)
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
/*
* Intel SDM 11.11.2.2: all MTRRs are disabled when
* IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
* memory type is applied to all of physical memory.
*
* However, virtual machines can be run with CPUID such that
* there are no MTRRs. In that case, the firmware will never
* enable MTRRs and it is obviously undesirable to run the
guest entirely with UC memory, so we use WB instead.
*/
return MTRR_TYPE_UNCACHABLE;
if (guest_cpuid_has_mtrr(vcpu))
return MTRR_TYPE_UNCACHABLE;
else
return MTRR_TYPE_WRBACK;
}
/*
@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
for (seg = 0; seg < seg_num; seg++) {
mtrr_seg = &fixed_seg_table[seg];
if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
return seg;
}
@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
*start = range->base & PAGE_MASK;
mask = range->mask & PAGE_MASK;
mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
/* This cannot overflow because writing to the reserved bits of
* variable MTRRs causes a #GP.
@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (var_mtrr_range_is_valid(cur))
list_del(&mtrr_state->var_ranges[index].node);
/* Extend the mask with all 1 bits to the left, since those
* bits must implicitly be 0. The bits are then cleared
* when reading them.
*/
if (!is_mtrr_mask)
cur->base = data;
else
cur->mask = data;
cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
/* add it to the list if it's enabled. */
if (var_mtrr_range_is_valid(cur)) {
@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
else
*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
}
return 0;
@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
}
if (iter.mtrr_disabled)
return mtrr_disabled_type();
return mtrr_disabled_type(vcpu);
/* not contained in any MTRRs. */
if (type == -1)

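A reduced sketch of the fallback policy above: UC when the guest's CPUID exposes MTRRs (per the SDM), WB when it does not and the firmware can never enable them. The guest_has_mtrr flag stands in for guest_cpuid_has_mtrr(); the type values are the standard x86 memory-type encodings (UC = 0, WB = 6).

#include <stdio.h>

#define MTRR_TYPE_UNCACHABLE    0       /* standard x86 encoding */
#define MTRR_TYPE_WRBACK        6

static int mtrr_disabled_type(int guest_has_mtrr)
{
        /* SDM: all-UC when MTRRs are disabled; but a guest whose CPUID
         * hides MTRRs will never enable them, so WB is the usable default. */
        return guest_has_mtrr ? MTRR_TYPE_UNCACHABLE : MTRR_TYPE_WRBACK;
}

int main(void)
{
        printf("with MTRR: %d, without: %d\n",
               mtrr_disabled_type(1), mtrr_disabled_type(0));
        return 0;
}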
View File

@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;
trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
if (npt_enabled)
@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_handle_nmi(&svm->vcpu);

View File

@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.ia32_xss;
break;
case MSR_TSC_AUX:
if (!guest_cpuid_has_rdtscp(vcpu))
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
return 1;
/* Otherwise falls through */
default:
@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break;
case MSR_TSC_AUX:
if (!guest_cpuid_has_rdtscp(vcpu))
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
return 1;
/* Check reserved bit, higher 32 bits should be zero */
if ((data >> 32) != 0)
@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
/*
* Flush logged GPAs PML buffer, this will make dirty_bitmap more
* updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->launched = 1;
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
/*
* the KVM_REQ_EVENT optimization bit is only on for one entry, and if

View File

@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int i;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
for (i = 0; i < 3; i++)
kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int start = 0;
int i;
u32 prev_legacy, cur_legacy;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
sizeof(kvm->arch.vpit->pit_state.channels));
kvm->arch.vpit->pit_state.flags = ps->flags;
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
for (i = 0; i < 3; i++)
kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
@ -6515,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
trace_kvm_entry(vcpu->vcpu_id);
wait_lapic_expire(vcpu);
__kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
@ -6527,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}
trace_kvm_entry(vcpu->vcpu_id);
wait_lapic_expire(vcpu);
kvm_x86_ops->run(vcpu);
/*

View File

@ -470,7 +470,7 @@ long sys_sigreturn(void)
struct sigcontext __user *sc = &frame->sc;
int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], frame->extramask, sig_size))
goto segfault;

View File

@ -1689,8 +1689,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
struct request *req;
unsigned int request_count = 0;
blk_queue_split(q, &bio, q->bio_split);
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@ -1698,6 +1696,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
*/
blk_queue_bounce(q, &bio);
blk_queue_split(q, &bio, q->bio_split);
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio->bi_error = -EIO;
bio_endio(bio);

View File

@ -200,7 +200,8 @@ static int acpi_pss_perf_init(struct acpi_processor *pr,
goto err_remove_sysfs_thermal;
}
sysfs_remove_link(&pr->cdev->device.kobj, "device");
return 0;
err_remove_sysfs_thermal:
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:

View File

@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;
if (cmd->rq)
q = cmd->rq->q;
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
@ -232,9 +235,6 @@ static void end_cmd(struct nullb_cmd *cmd)
goto free_cmd;
}
if (cmd->rq)
q = cmd->rq->q;
/* Restart queue if needed, as we are freeing a tag */
if (q && !q->mq_ops && blk_queue_stopped(q)) {
unsigned long flags;

View File

@ -342,13 +342,13 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
ret = _sunxi_rsb_run_xfer(rsb);
if (ret)
goto out;
goto unlock;
*buf = readl(rsb->regs + RSB_DATA);
unlock:
mutex_unlock(&rsb->lock);
out:
return ret;
}
@ -527,9 +527,9 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
*/
static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
{ 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
{ 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
{ 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
{ 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */
{ 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
};
static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)

View File

@ -31,7 +31,7 @@ static struct scpi_ops *scpi_ops;
static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
{
u8 domain = topology_physical_package_id(cpu_dev->id);
int domain = topology_physical_package_id(cpu_dev->id);
if (domain < 0)
return ERR_PTR(-EINVAL);

View File

@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser {
struct ww_acquire_ctx ticket;
/* user fence */
struct amdgpu_user_fence uf;
struct amdgpu_user_fence uf;
struct amdgpu_bo_list_entry uf_entry;
};
struct amdgpu_job {

View File

@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *fence_data)
{
struct drm_gem_object *gobj;
uint32_t handle;
handle = fence_data->handle;
gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
fence_data->handle);
if (gobj == NULL)
return -EINVAL;
p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf.offset = fence_data->offset;
if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
return -EINVAL;
}
p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
case AMDGPU_CHUNK_ID_FENCE:
size = sizeof(struct drm_amdgpu_cs_chunk_fence);
if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
uint32_t handle;
struct drm_gem_object *gobj;
struct drm_amdgpu_cs_chunk_fence *fence_data;
fence_data = (void *)p->chunks[i].kdata;
handle = fence_data->handle;
gobj = drm_gem_object_lookup(p->adev->ddev,
p->filp, handle);
if (gobj == NULL) {
ret = -EINVAL;
goto free_partial_kdata;
}
p->uf.bo = gem_to_amdgpu_bo(gobj);
amdgpu_bo_ref(p->uf.bo);
drm_gem_object_unreference_unlocked(gobj);
p->uf.offset = fence_data->offset;
} else {
if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
ret = -EINVAL;
goto free_partial_kdata;
}
ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
&p->validated);
if (p->uf.bo)
list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
for (i = 0; i < parser->num_ibs; i++)
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
amdgpu_bo_unref(&parser->uf.bo);
amdgpu_bo_unref(&parser->uf.bo);
amdgpu_bo_unref(&parser->uf_entry.robj);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,

View File

@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (!state->enable)
return 0;
if (exynos_crtc->ops->atomic_check)
return exynos_crtc->ops->atomic_check(exynos_crtc, state);

View File

@ -2193,8 +2193,17 @@ struct drm_i915_gem_request {
struct drm_i915_private *i915;
struct intel_engine_cs *ring;
/** GEM sequence number associated with this request. */
uint32_t seqno;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
* this request.
*/
u32 previous_seqno;
/** GEM sequence number associated with this request,
* when the HWS breadcrumb is equal or greater than this the GPU
* has finished processing this request.
*/
u32 seqno;
/** Position in the ringbuffer of the start of the request */
u32 head;
@ -2839,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
/*
* BEWARE: Do not use the function below unless you can _absolutely_
@ -2910,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->previous_seqno);
}
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno;
BUG_ON(req == NULL);
seqno = req->ring->get_seqno(req->ring, lazy_coherency);
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno);
}
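The new i915_gem_request_started() relies on the same wraparound-safe comparison as i915_seqno_passed() just above; the signed cast of the u32 difference is what keeps it correct across seqno wrap. A standalone check:

#include <stdio.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;     /* as in i915_seqno_passed() */
}

int main(void)
{
        /* 5 is "after" 0xfffffffb once the 32-bit counter wraps. */
        printf("%d %d\n",
               seqno_passed(5u, 0xfffffffbu),   /* 1 */
               seqno_passed(0xfffffffbu, 5u));  /* 0 */
        return 0;
}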

View File

@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
static int __i915_spin_request(struct drm_i915_gem_request *req)
static unsigned long local_clock_us(unsigned *cpu)
{
unsigned long t;
/* Cheaply and approximately convert from nanoseconds to microseconds.
* The result and subsequent calculations are also defined in the same
* approximate microseconds units. The principal source of timing
* error here is from the simple truncation.
*
* Note that local_clock() is only defined wrt to the current CPU;
* the comparisons are no longer valid if we switch CPUs. Instead of
* blocking preemption for the entire busywait, we can detect the CPU
* switch and use that as indicator of system load and a reason to
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
t = local_clock() >> 10;
put_cpu();
return t;
}
static bool busywait_stop(unsigned long timeout, unsigned cpu)
{
unsigned this_cpu;
if (time_after(local_clock_us(&this_cpu), timeout))
return true;
return this_cpu != cpu;
}
static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
unsigned long timeout;
unsigned cpu;
if (i915_gem_request_get_ring(req)->irq_refcount)
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
* rate. By busywaiting on the request completion for a short while we
* can service the high frequency waits as quick as possible. However,
* if it is a slow request, we want to sleep as quickly as possible.
* The tradeoff between waiting and sleeping is roughly the time it
* takes to sleep on a request, on the order of a microsecond.
*/
if (req->ring->irq_refcount)
return -EBUSY;
timeout = jiffies + 1;
/* Only spin if we know the GPU is processing this request */
if (!i915_gem_request_started(req, true))
return -EAGAIN;
timeout = local_clock_us(&cpu) + 5;
while (!need_resched()) {
if (i915_gem_request_completed(req, true))
return 0;
if (time_after_eq(jiffies, timeout))
if (signal_pending_state(state, current))
break;
if (busywait_stop(timeout, cpu))
break;
cpu_relax_lowlatency();
}
if (i915_gem_request_completed(req, false))
return 0;
@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
@ -1229,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
before = ktime_get_raw_ns();
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req);
ret = __i915_spin_request(req, state);
if (ret == 0)
goto out;
@ -1241,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
struct timer_list timer;
prepare_to_wait(&ring->irq_queue, &wait,
interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
prepare_to_wait(&ring->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
@ -1260,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
break;
}
if (interruptible && signal_pending(current)) {
if (signal_pending_state(state, current)) {
ret = -ERESTARTSYS;
break;
}
@ -2554,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->batch_obj = obj;
request->emitted_jiffies = jiffies;
request->previous_seqno = ring->last_submitted_seqno;
ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
@ -4080,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
return false;
}
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
bool mappable, fenceable;
u32 fence_size, fence_alignment;
fence_size = i915_gem_get_gtt_size(obj->base.dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
obj->base.size,
obj->tiling_mode,
true);
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
to_i915(obj->base.dev)->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
}
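
A self-contained sketch of the fenceable/mappable test the new helper centralizes; the values are illustrative, and the alignment is assumed to be a nonzero power of two.

#include <stdbool.h>
#include <stdint.h>

/* Power-of-two alignment check: start % alignment == 0 via a mask. */
static bool is_aligned(uint64_t start, uint64_t alignment)
{
    return (start & (alignment - 1)) == 0;
}

/* Mirrors the driver's test: the node has exactly the fence size, a
 * fence-aligned start, and ends below the mappable aperture limit. */
static bool map_and_fenceable(uint64_t start, uint64_t node_size,
                              uint64_t fence_size, uint64_t fence_alignment,
                              uint64_t mappable_end)
{
    bool fenceable = node_size == fence_size &&
                     is_aligned(start, fence_alignment);
    bool mappable = start + fence_size <= mappable_end;

    return fenceable && mappable;
}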
static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@ -4147,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
(bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
u32 fence_size, fence_alignment;
fence_size = i915_gem_get_gtt_size(obj->base.dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
obj->base.size,
obj->tiling_mode,
true);
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
__i915_vma_set_map_and_fenceable(vma);
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
}

View File

@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
return ret;
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
}

View File

@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
}

View File

@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary(struct drm_crtc *crtc);
typedef struct {
int min, max;
@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
struct drm_crtc_state *crtc_state = intel_crtc->base.state;
struct intel_plane *intel_plane = to_intel_plane(primary);
struct drm_framebuffer *fb;
if (!plane_config->fb)
@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
}
}
/*
* We've failed to reconstruct the BIOS FB. Current display state
* indicates that the primary plane is visible, but has a NULL FB,
* which will lead to problems later if we don't fix it up. The
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
to_intel_plane_state(plane_state)->visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
intel_pre_disable_primary(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
return;
valid_fb:
@ -9910,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
return true;
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
if (base) {
if (on) {
unsigned int width = intel_crtc->base.cursor->state->crtc_w;
unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
@ -9972,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
}
}
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t cntl;
uint32_t cntl = 0;
cntl = 0;
if (base) {
if (on) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
@ -10032,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
int y = cursor_state->crtc_y;
u32 base = 0, pos = 0;
if (on)
base = intel_crtc->cursor_addr;
base = intel_crtc->cursor_addr;
if (x >= intel_crtc->config->pipe_src_w)
base = 0;
on = false;
if (y >= intel_crtc->config->pipe_src_h)
base = 0;
on = false;
if (x < 0) {
if (x + cursor_state->crtc_w <= 0)
base = 0;
on = false;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
@ -10052,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (y < 0) {
if (y + cursor_state->crtc_h <= 0)
base = 0;
on = false;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
}
pos |= y << CURSOR_Y_SHIFT;
if (base == 0 && intel_crtc->cursor_base == 0)
return;
I915_WRITE(CURPOS(pipe), pos);
/* ILK+ do this automagically */
@ -10072,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
i845_update_cursor(crtc, base, on);
else
i9xx_update_cursor(crtc, base);
i9xx_update_cursor(crtc, base, on);
}
static bool cursor_size_ok(struct drm_device *dev,
@ -13718,6 +13728,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = to_intel_plane(plane)->pipe;
unsigned stride;
int ret;
@ -13751,6 +13762,22 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -EINVAL;
}
/*
* There's something wrong with the cursor on CHV pipe C.
* If it straddles the left edge of the screen then
* moving it away from the edge or disabling it often
* results in a pipe underrun, and often that can lead to a
* dead pipe (constant underrun reported, and it scans
* out just a solid color). To recover from that, the
* display power well must be turned off and on again.
* Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
state->visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
return 0;
}
@ -13774,9 +13801,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
if (intel_crtc->cursor_bo == obj)
goto update;
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
@ -13785,9 +13809,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
addr = obj->phys_handle->busaddr;
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
update:
if (crtc->state->active)
intel_crtc_update_cursor(crtc, state->visible);
}

View File

@ -550,7 +550,6 @@ struct intel_crtc {
int adjusted_x;
int adjusted_y;
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
uint32_t cursor_cntl;
uint32_t cursor_size;

View File

@ -1374,17 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool live_status = false;
unsigned int retry = 3;
unsigned int try;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
while (!live_status && --retry) {
for (try = 0; !live_status && try < 4; try++) {
if (try)
msleep(10);
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
mdelay(10);
}
if (!live_status)

View File

@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
fan->type = NVBIOS_THERM_FAN_UNK;
}
fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
fan->min_duty = nvbios_rd08(bios, data + 0x02);
fan->max_duty = nvbios_rd08(bios, data + 0x03);

View File

@ -4326,8 +4326,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
}
mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@ -4340,8 +4339,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
return -EINVAL;
err = mddev_lock(mddev);
if (!err) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
err = mddev->pers->start_reshape(mddev);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
err = -EBUSY;
else {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
err = mddev->pers->start_reshape(mddev);
}
mddev_unlock(mddev);
}
if (err)

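The second hunk closes the race the first hunk only narrows: the MD_RECOVERY_RUNNING test done before mddev_lock() can go stale, so it is repeated under the lock before start_reshape(). The same check-then-act shape in a generic pthreads sketch (names are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool recovery_running;

static int start_reshape(void) { return 0; }   /* illustrative stand-in */

static int try_start_reshape(void)
{
    int err;

    if (recovery_running)          /* cheap early-out, may go stale... */
        return -EBUSY;

    pthread_mutex_lock(&lock);
    if (recovery_running)          /* ...so re-check under the lock */
        err = -EBUSY;
    else
        err = start_reshape();
    pthread_mutex_unlock(&lock);
    return err;
}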
View File

@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/slab.h>

View File

@ -236,7 +236,7 @@ int ubi_debugfs_init(void)
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
err);

View File

@ -1299,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",

View File

@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
return 0;
}
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
* do_sync_erase - run the erase worker synchronously.
* @ubi: UBI device description object
@ -615,20 +616,16 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int vol_id, int lnum, int torture)
{
struct ubi_work *wl_wrk;
struct ubi_work wl_wrk;
dbg_wl("sync erase of PEB %i", e->pnum);
wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wl_wrk)
return -ENOMEM;
wl_wrk.e = e;
wl_wrk.vol_id = vol_id;
wl_wrk.lnum = lnum;
wl_wrk.torture = torture;
wl_wrk->e = e;
wl_wrk->vol_id = vol_id;
wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
return erase_worker(ubi, wl_wrk, 0);
return __erase_worker(ubi, &wl_wrk);
}
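
The shape of this change, reduced to a generic sketch: a worker that used to free a heap-allocated descriptor is split so the synchronous caller can pass one on the stack and skip the allocation (and its -ENOMEM failure path). Types and names here are illustrative, not UBI's.

#include <stdio.h>

struct work {
    int pnum;    /* illustrative payload */
};

/* Core worker: does the job but never frees its argument. */
static int do_work(struct work *w)
{
    printf("erase PEB %d\n", w->pnum);
    return 0;
}

/* Synchronous path: an on-stack descriptor, no allocation that
 * could itself fail before any work is done. */
static int do_work_sync(int pnum)
{
    struct work w = { .pnum = pnum };

    return do_work(&w);
}

int main(void)
{
    return do_work_sync(42);
}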
/**
@ -1014,7 +1011,7 @@ out_unlock:
}
/**
* erase_worker - physical eraseblock erase worker function.
* __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
* @shutdown: non-zero if the worker has to free memory and exit
@ -1025,8 +1022,7 @@ out_unlock:
* needed. Returns zero in case of success and a negative error code in case of
* failure.
*/
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int shutdown)
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
struct ubi_wl_entry *e = wl_wrk->e;
int pnum = e->pnum;
@ -1034,21 +1030,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int lnum = wl_wrk->lnum;
int err, available_consumed = 0;
if (shutdown) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
kfree(wl_wrk);
wl_entry_destroy(ubi, e);
return 0;
}
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
/* Fine, we've erased it successfully */
kfree(wl_wrk);
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
ubi->free_count++;
@ -1066,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@ -1075,6 +1060,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
wl_entry_destroy(ubi, e);
err = err1;
goto out_ro;
}
@ -1150,6 +1136,25 @@ out_ro:
return err;
}
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int shutdown)
{
int ret;
if (shutdown) {
struct ubi_wl_entry *e = wl_wrk->e;
dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
kfree(wl_wrk);
wl_entry_destroy(ubi, e);
return 0;
}
ret = __erase_worker(ubi, wl_wrk);
kfree(wl_wrk);
return ret;
}
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object

View File

@ -2540,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns)
{
bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
if (kill)
if (kill) {
blk_set_queue_dying(ns->queue);
/*
* The controller was shutdown first if we got here through
* device removal. The shutdown may requeue outstanding
* requests. These need to be aborted immediately so
* del_gendisk doesn't block indefinitely for their completion.
*/
blk_mq_abort_requeue_list(ns->queue);
}
if (ns->disk->flags & GENHD_FL_UP)
del_gendisk(ns->disk);
if (kill || !blk_queue_dying(ns->queue)) {
@ -2977,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev)
{
struct nvme_ns *ns, *next;
if (nvme_io_incapable(dev)) {
/*
* If the device is not capable of IO (surprise hot-removal,
* for example), we need to quiesce prior to deleting the
* namespaces. This will end outstanding requests and prevent
* attempts to sync dirty data.
*/
nvme_dev_shutdown(dev);
}
list_for_each_entry_safe(ns, next, &dev->namespaces, list)
nvme_ns_remove(ns);
}

View File

@ -599,8 +599,10 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
status = ap_sm_recv(ap_dev);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (ap_dev->queue_count > 0)
if (ap_dev->queue_count > 0) {
ap_dev->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
}
ap_dev->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:

View File

@ -984,32 +984,9 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
return vq;
}
static void virtio_ccw_int_handler(struct ccw_device *cdev,
unsigned long intparm,
struct irb *irb)
static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
__u32 activity)
{
__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
int i;
struct virtqueue *vq;
if (!vcdev)
return;
/* Check if it's a notification from the host. */
if ((intparm == 0) &&
(scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
/* OK */
}
if (irb_is_error(irb)) {
/* Command reject? */
if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[0] & SNS0_CMD_REJECT))
vcdev->err = -EOPNOTSUPP;
else
/* Map everything else to -EIO. */
vcdev->err = -EIO;
}
if (vcdev->curr_io & activity) {
switch (activity) {
case VIRTIO_CCW_DOING_READ_FEAT:
@ -1029,12 +1006,47 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
break;
default:
/* don't know what to do... */
dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
activity);
dev_warn(&vcdev->cdev->dev,
"Suspicious activity '%08x'\n", activity);
WARN_ON(1);
break;
}
}
}
static void virtio_ccw_int_handler(struct ccw_device *cdev,
unsigned long intparm,
struct irb *irb)
{
__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
int i;
struct virtqueue *vq;
if (!vcdev)
return;
if (IS_ERR(irb)) {
vcdev->err = PTR_ERR(irb);
virtio_ccw_check_activity(vcdev, activity);
/* Don't poke around indicators, something's wrong. */
return;
}
/* Check if it's a notification from the host. */
if ((intparm == 0) &&
(scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
/* OK */
}
if (irb_is_error(irb)) {
/* Command reject? */
if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
(irb->ecw[0] & SNS0_CMD_REJECT))
vcdev->err = -EOPNOTSUPP;
else
/* Map everything else to -EIO. */
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
for_each_set_bit(i, &vcdev->indicators,
sizeof(vcdev->indicators) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */

View File

@ -148,8 +148,10 @@ static int receive_chars_read(struct uart_port *port)
uart_handle_dcd_change(port, 1);
}
for (i = 0; i < bytes_read; i++)
uart_handle_sysrq_char(port, con_read_page[i]);
if (port->sysrq != 0 && *con_read_page) {
for (i = 0; i < bytes_read; i++)
uart_handle_sysrq_char(port, con_read_page[i]);
}
if (port->state == NULL)
continue;
@ -168,17 +170,17 @@ struct sunhv_ops {
int (*receive_chars)(struct uart_port *port);
};
static struct sunhv_ops bychar_ops = {
static const struct sunhv_ops bychar_ops = {
.transmit_chars = transmit_chars_putchar,
.receive_chars = receive_chars_getchar,
};
static struct sunhv_ops bywrite_ops = {
static const struct sunhv_ops bywrite_ops = {
.transmit_chars = transmit_chars_write,
.receive_chars = receive_chars_read,
};
static struct sunhv_ops *sunhv_ops = &bychar_ops;
static const struct sunhv_ops *sunhv_ops = &bychar_ops;
static struct tty_port *receive_chars(struct uart_port *port)
{

View File

@ -161,6 +161,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */

View File

@ -616,6 +616,7 @@ nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
mutex_lock(&ls->ls_mutex);
nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
mutex_unlock(&ls->ls_mutex);
}
static int
@ -659,7 +660,6 @@ nfsd4_cb_layout_release(struct nfsd4_callback *cb)
trace_layout_recall_release(&ls->ls_stid.sc_stateid);
mutex_unlock(&ls->ls_mutex);
nfsd4_return_all_layouts(ls, &reaplist);
nfsd4_free_layouts(&reaplist);
nfs4_put_stid(&ls->ls_stid);

View File

@ -48,12 +48,17 @@
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
static char gpfx; /* prefix for global symbol name (sometimes '_') */
static struct stat sb; /* Remember .st_size, etc. */
static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */
static const char *altmcount; /* alternate mcount symbol name */
static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */
static void *file_map; /* pointer of the mapped file */
static void *file_end; /* pointer to the end of the mapped file */
static int file_updated; /* set when the file has been changed */
static void *file_ptr; /* current file pointer location */
static void *file_append; /* added to the end of the file */
static size_t file_append_size; /* how much is added to end of file */
/* setjmp() return values */
enum {
@ -67,10 +72,14 @@ static void
cleanup(void)
{
if (!mmap_failed)
munmap(ehdr_curr, sb.st_size);
munmap(file_map, sb.st_size);
else
free(ehdr_curr);
close(fd_map);
free(file_map);
file_map = NULL;
free(file_append);
file_append = NULL;
file_append_size = 0;
file_updated = 0;
}
static void __attribute__((noreturn))
@ -92,12 +101,22 @@ succeed_file(void)
static off_t
ulseek(int const fd, off_t const offset, int const whence)
{
off_t const w = lseek(fd, offset, whence);
if (w == (off_t)-1) {
perror("lseek");
switch (whence) {
case SEEK_SET:
file_ptr = file_map + offset;
break;
case SEEK_CUR:
file_ptr += offset;
break;
case SEEK_END:
file_ptr = file_map + (sb.st_size - offset);
break;
}
if (file_ptr < file_map) {
fprintf(stderr, "lseek: seek before file\n");
fail_file();
}
return w;
return file_ptr - file_map;
}
static size_t
@ -114,12 +133,38 @@ uread(int const fd, void *const buf, size_t const count)
static size_t
uwrite(int const fd, void const *const buf, size_t const count)
{
size_t const n = write(fd, buf, count);
if (n != count) {
perror("write");
fail_file();
size_t cnt = count;
off_t idx = 0;
file_updated = 1;
if (file_ptr + count >= file_end) {
off_t aoffset = (file_ptr + count) - file_end;
if (aoffset > file_append_size) {
file_append = realloc(file_append, aoffset);
file_append_size = aoffset;
}
if (!file_append) {
perror("write");
fail_file();
}
if (file_ptr < file_end) {
cnt = file_end - file_ptr;
} else {
cnt = 0;
idx = aoffset - count;
}
}
return n;
if (cnt)
memcpy(file_ptr, buf, cnt);
if (cnt < count)
memcpy(file_append + idx, buf + cnt, count - cnt);
file_ptr += count;
return count;
}
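
Condensed, the new uwrite() scheme is: writes that land inside the mapped image are plain memcpy()s, writes past the end grow a separate append buffer, and nothing touches the disk until write_file(). A sketch under the assumption that the globals mirror the ones above:

#include <stdlib.h>
#include <string.h>

static char *file_ptr, *file_end;    /* cursor into, and end of, the image */
static char *file_append;            /* growth past the end of the image */
static size_t file_append_size;

static size_t buffered_write(const void *buf, size_t count)
{
    size_t in_place = count;
    size_t off = 0;

    if (file_ptr + count > file_end) {
        size_t end_off = (file_ptr + count) - file_end;

        if (end_off > file_append_size) {
            file_append = realloc(file_append, end_off);
            if (!file_append)
                return 0;              /* caller treats a short write as failure */
            file_append_size = end_off;
        }
        if (file_ptr < file_end) {
            in_place = file_end - file_ptr;    /* split across the boundary */
        } else {
            in_place = 0;
            off = file_ptr - file_end;         /* entirely in the append buffer */
        }
    }
    if (in_place)
        memcpy(file_ptr, buf, in_place);
    if (in_place < count)
        memcpy(file_append + off, (const char *)buf + in_place, count - in_place);
    file_ptr += count;
    return count;
}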
static void *
@ -192,9 +237,7 @@ static int make_nop_arm64(void *map, size_t const offset)
*/
static void *mmap_file(char const *fname)
{
void *addr;
fd_map = open(fname, O_RDWR);
fd_map = open(fname, O_RDONLY);
if (fd_map < 0 || fstat(fd_map, &sb) < 0) {
perror(fname);
fail_file();
@ -203,15 +246,58 @@ static void *mmap_file(char const *fname)
fprintf(stderr, "not a regular file: %s\n", fname);
fail_file();
}
addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
fd_map, 0);
file_map = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE,
fd_map, 0);
mmap_failed = 0;
if (addr == MAP_FAILED) {
if (file_map == MAP_FAILED) {
mmap_failed = 1;
addr = umalloc(sb.st_size);
uread(fd_map, addr, sb.st_size);
file_map = umalloc(sb.st_size);
uread(fd_map, file_map, sb.st_size);
}
close(fd_map);
file_end = file_map + sb.st_size;
return file_map;
}
static void write_file(const char *fname)
{
char tmp_file[strlen(fname) + 4];
size_t n;
if (!file_updated)
return;
sprintf(tmp_file, "%s.rc", fname);
/*
* After reading the entire file into memory, write the updated
* image to a temporary file and rename it over the original, to
* prevent weird side effects of modifying an object file in place.
*/
fd_map = open(tmp_file, O_WRONLY | O_TRUNC | O_CREAT, sb.st_mode);
if (fd_map < 0) {
perror(fname);
fail_file();
}
n = write(fd_map, file_map, sb.st_size);
if (n != sb.st_size) {
perror("write");
fail_file();
}
if (file_append_size) {
n = write(fd_map, file_append, file_append_size);
if (n != file_append_size) {
perror("write");
fail_file();
}
}
close(fd_map);
if (rename(tmp_file, fname) < 0) {
perror(fname);
fail_file();
}
return addr;
}
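
write_file() relies on rename() replacing its target atomically within a filesystem, so a crash mid-update leaves either the old object file or the new one. A minimal standalone version of that pattern (the ".tmp" suffix and mode are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int replace_file(const char *fname, const void *buf, size_t len)
{
    char tmp[4096];
    int fd;

    snprintf(tmp, sizeof(tmp), "%s.tmp", fname);
    fd = open(tmp, O_WRONLY | O_TRUNC | O_CREAT, 0644);
    if (fd < 0)
        return -1;
    /* Write the whole image, then atomically swap it into place. */
    if (write(fd, buf, len) != (ssize_t)len || close(fd) < 0) {
        unlink(tmp);
        return -1;
    }
    return rename(tmp, fname);
}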
/* w8rev, w8nat, ...: Handle endianness. */
@ -318,7 +404,6 @@ do_file(char const *const fname)
Elf32_Ehdr *const ehdr = mmap_file(fname);
unsigned int reltype = 0;
ehdr_curr = ehdr;
w = w4nat;
w2 = w2nat;
w8 = w8nat;
@ -441,6 +526,7 @@ do_file(char const *const fname)
}
} /* end switch */
write_file(fname);
cleanup();
}
@ -493,11 +579,14 @@ main(int argc, char *argv[])
case SJ_SETJMP: /* normal sequence */
/* Avoid problems if early cleanup() */
fd_map = -1;
ehdr_curr = NULL;
mmap_failed = 1;
file_map = NULL;
file_ptr = NULL;
file_updated = 0;
do_file(file);
break;
case SJ_FAIL: /* error in do_file or below */
fprintf(stderr, "%s: failed\n", file);
++n_error;
break;
case SJ_SUCCEED: /* premature success */

View File

@ -954,6 +954,36 @@ static int azx_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
#ifdef CONFIG_PM_SLEEP
/* put codec down to D3 at hibernation for Intel SKL+;
* otherwise BIOS may still access the codec and screw up the driver
*/
#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
static int azx_freeze_noirq(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
if (IS_SKL_PLUS(pci))
pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int azx_thaw_noirq(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
if (IS_SKL_PLUS(pci))
pci_set_power_state(pci, PCI_D0);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int azx_runtime_suspend(struct device *dev)
{
@ -1063,6 +1093,10 @@ static int azx_runtime_idle(struct device *dev)
static const struct dev_pm_ops azx_pm = {
SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
#ifdef CONFIG_PM_SLEEP
.freeze_noirq = azx_freeze_noirq,
.thaw_noirq = azx_thaw_noirq,
#endif
SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
};

View File

@ -1775,6 +1775,7 @@ enum {
ALC889_FIXUP_MBA11_VREF,
ALC889_FIXUP_MBA21_VREF,
ALC889_FIXUP_MP11_VREF,
ALC889_FIXUP_MP41_VREF,
ALC882_FIXUP_INV_DMIC,
ALC882_FIXUP_NO_PRIMARY_HP,
ALC887_FIXUP_ASUS_BASS,
@ -1863,7 +1864,7 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
static hda_nid_t nids[2] = { 0x14, 0x15 };
static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 };
int i;
if (action != HDA_FIXUP_ACT_INIT)
@ -2153,6 +2154,12 @@ static const struct hda_fixup alc882_fixups[] = {
.chained = true,
.chain_id = ALC885_FIXUP_MACPRO_GPIO,
},
[ALC889_FIXUP_MP41_VREF] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc889_fixup_mbp_vref,
.chained = true,
.chain_id = ALC885_FIXUP_MACPRO_GPIO,
},
[ALC882_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic,
@ -2235,7 +2242,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF),
SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),

View File

@ -85,7 +85,15 @@ static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
static const int deemph_settings[] = { 0, 32000, 44100, 48000 };
static const struct {
int rate;
unsigned int val;
} deemph_settings[] = {
{ 0, ES8328_DACCONTROL6_DEEMPH_OFF },
{ 32000, ES8328_DACCONTROL6_DEEMPH_32k },
{ 44100, ES8328_DACCONTROL6_DEEMPH_44_1k },
{ 48000, ES8328_DACCONTROL6_DEEMPH_48k },
};
static int es8328_set_deemph(struct snd_soc_codec *codec)
{
@ -97,21 +105,22 @@ static int es8328_set_deemph(struct snd_soc_codec *codec)
* rate.
*/
if (es8328->deemph) {
best = 1;
for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
if (abs(deemph_settings[i] - es8328->playback_fs) <
abs(deemph_settings[best] - es8328->playback_fs))
best = 0;
for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) {
if (abs(deemph_settings[i].rate - es8328->playback_fs) <
abs(deemph_settings[best].rate - es8328->playback_fs))
best = i;
}
val = best << 1;
val = deemph_settings[best].val;
} else {
val = 0;
val = ES8328_DACCONTROL6_DEEMPH_OFF;
}
dev_dbg(codec->dev, "Set deemphasis %d\n", val);
return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val);
return snd_soc_update_bits(codec, ES8328_DACCONTROL6,
ES8328_DACCONTROL6_DEEMPH_MASK, val);
}
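
Keeping the register value next to its rate, as this fix does, means the nearest-rate search can never drift out of sync with the register encoding. A standalone sketch with illustrative register values:

#include <stdio.h>
#include <stdlib.h>

static const struct {
    int rate;
    unsigned int val;
} deemph_table[] = {
    { 0,     0u << 6 },   /* off */
    { 32000, 1u << 6 },
    { 44100, 2u << 6 },
    { 48000, 3u << 6 },
};

/* Pick the entry whose rate is closest to the playback rate. */
static unsigned int deemph_val_for(int fs)
{
    size_t i, best = 0;

    for (i = 1; i < sizeof(deemph_table) / sizeof(deemph_table[0]); i++)
        if (abs(deemph_table[i].rate - fs) < abs(deemph_table[best].rate - fs))
            best = i;
    return deemph_table[best].val;
}

int main(void)
{
    printf("%#x\n", deemph_val_for(44100));   /* -> 0x80 */
    return 0;
}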
static int es8328_get_deemph(struct snd_kcontrol *kcontrol,

View File

@ -153,6 +153,7 @@ int es8328_probe(struct device *dev, struct regmap *regmap);
#define ES8328_DACCONTROL6_CLICKFREE (1 << 3)
#define ES8328_DACCONTROL6_DAC_INVR (1 << 4)
#define ES8328_DACCONTROL6_DAC_INVL (1 << 5)
#define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6)
#define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6)
#define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6)
#define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6)

View File

@ -189,6 +189,7 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
msleep(400);
break;
case SND_SOC_DAPM_PRE_PMD:

View File

@ -574,6 +574,7 @@ static const struct regmap_config wm8974_regmap = {
.max_register = WM8974_MONOMIX,
.reg_defaults = wm8974_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults),
.cache_type = REGCACHE_FLAT,
};
static int wm8974_probe(struct snd_soc_codec *codec)

View File

@ -223,8 +223,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
/* wait for XDATA to be cleared */
cnt = 0;
while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) &
~XRDATA) && (cnt < 100000))
while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) &&
(cnt < 100000))
cnt++;
/* Release TX state machine */

View File

@ -505,6 +505,24 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
regmap_update_bits(sai->regmap, FSL_SAI_RCSR,
FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
/*
* In SAI master mode, after several open/close cycles the
* frame clock can disappear and never recovers. Add a
* software reset to work around this. This is a hardware
* bug, and will be fixed in the next SAI version.
*/
if (!sai->is_slave_mode) {
/* Software Reset for both Tx and Rx */
regmap_write(sai->regmap,
FSL_SAI_TCSR, FSL_SAI_CSR_SR);
regmap_write(sai->regmap,
FSL_SAI_RCSR, FSL_SAI_CSR_SR);
/* Clear SR bit to finish the reset */
regmap_write(sai->regmap, FSL_SAI_TCSR, 0);
regmap_write(sai->regmap, FSL_SAI_RCSR, 0);
}
}
break;
default:

View File

@ -152,8 +152,10 @@ static int rk_spdif_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR,
SPDIF_DMACR_TDE_ENABLE,
SPDIF_DMACR_TDE_ENABLE);
SPDIF_DMACR_TDE_ENABLE |
SPDIF_DMACR_TDL_MASK,
SPDIF_DMACR_TDE_ENABLE |
SPDIF_DMACR_TDL(16));
if (ret != 0)
return ret;
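
Why the wider mask matters: a regmap_update_bits()-style write only changes bits covered by the mask, so TDL(16) would be dropped unless SPDIF_DMACR_TDL_MASK is part of the mask argument as well as the value. The semantics in one line, with illustrative bit positions:

#include <stdio.h>

/* Read-modify-write: only bits inside 'mask' may change. */
static unsigned int update_bits(unsigned int old, unsigned int mask,
                                unsigned int val)
{
    return (old & ~mask) | (val & mask);
}

int main(void)
{
    unsigned int reg = 0x1f;           /* TDL field currently all-ones */
    unsigned int enable = 1u << 5;     /* illustrative enable bit */

    /* Mask covers only the enable bit: the new TDL of 16 is lost. */
    printf("%#x\n", update_bits(reg, enable, enable | 16));        /* 0x3f */
    /* Mask covers enable + TDL: both take effect. */
    printf("%#x\n", update_bits(reg, enable | 0x1f, enable | 16)); /* 0x30 */
    return 0;
}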

View File

@ -42,7 +42,7 @@
#define SPDIF_DMACR_TDL_SHIFT 0
#define SPDIF_DMACR_TDL(x) ((x) << SPDIF_DMACR_TDL_SHIFT)
#define SPDIF_DMACR_TDL_MASK (0x1f << SDPIF_DMACR_TDL_SHIFT)
#define SPDIF_DMACR_TDL_MASK (0x1f << SPDIF_DMACR_TDL_SHIFT)
/*
* XFER

View File

@ -1114,7 +1114,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
return true;
}
return dist_active_irq(vcpu);
return vgic_irq_is_active(vcpu, map->virt_irq);
}
/*