LoongArch fixes for v5.19-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmK23qMWHGNoZW5odWFj
 YWlAa2VybmVsLm9yZwAKCRAChivD8uImepraEACAthwUc86HzuL4zCkuezHSnH03
 fwTxxf9oR74W175INsEEP1vZ5DPb5nRD017DsouoABd/bKexOJvaOoKz0PBu1diM
 wQCNullLR6RZ3QpGj7T2teUrb3MFvS0ZwyQ+6H/MKoe0DRv3QgD7sanDbti32V1V
 SH9plOMe7Tu5OYBUMPmhE9E3jYEXYoJyP02J3HpaqonN0MOqPxXtWT/uhbV5esEO
 VMWiZoTn5y+pZkmnvVJwfPz0vBUdHZVva2jwo3l1urX4AvAigv8oZNzbVyB1TMn+
 /wTVL3oB4exNUfT+WPPQAl6mtmnXxpQRYPb5zXknNyxlsQbCX9mfcEZwdZj8AUw4
 Up8iCb49oKMsRQQg2XLtgTiJU8Owcaqt+kfB77JZX+WkzNOE/CzJis0dLu0sRCpM
 757taCwRv4d+ufAW2KBcMGIoF7lK69suHYyosR8lE0ykjfr9XpCNartM2HLolQJw
 uTaYKRxrt4u0CzVrxPz8cOXRAqJ449Px0YUJlDZgV5wsuReEdpzn6gc27w1aEk29
 ORdA00veugJtJx/0NbHRQ9+oKgZ6Jmlfr+PgEaT4Ak5TMaRvR8UuRMObDxy23+8N
 CjKLZEmqz2DHMNjUZArD7oFOdRIodO2eNP2CIxlKkJzsjEgrnURqatUF0FXEUsNf
 diqcd+BW2hM1a9ARwQ==
 =3Gtz
 -----END PGP SIGNATURE-----

Merge tag 'loongarch-fixes-5.19-3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Some bug fixes and a trivial cleanup"

* tag 'loongarch-fixes-5.19-3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: Make compute_return_era() return void
  LoongArch: Fix wrong fpu version
  LoongArch: Fix EENTRY/MERRENTRY setting in setup_tlb_handler()
  LoongArch: Fix sleeping in atomic context in setup_tlb_handler()
  LoongArch: Fix the _stext symbol address
  LoongArch: Fix the !THP build

Linus Torvalds 2022-06-25 09:24:59 -07:00
commit cb84318baa
7 changed files with 13 additions and 15 deletions

@@ -12,10 +12,9 @@ static inline unsigned long exception_era(struct pt_regs *regs)
 	return regs->csr_era;
 }
 
-static inline int compute_return_era(struct pt_regs *regs)
+static inline void compute_return_era(struct pt_regs *regs)
 {
 	regs->csr_era += 4;
-	return 0;
 }
 
 #endif /* _ASM_BRANCH_H */

@@ -426,6 +426,11 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 
 #define kern_addr_valid(addr) (1)
 
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 /* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
@@ -497,11 +502,6 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 	return pmd;
 }
 
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
-	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
-}
-
 static inline struct page *pmd_page(pmd_t pmd)
 {
 	if (pmd_trans_huge(pmd))

@@ -263,7 +263,7 @@ void cpu_probe(void)
 
 	c->cputype = CPU_UNKNOWN;
 	c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
-	c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) >> 3) & 0x3;
+	c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) & CPUCFG2_FPVERS) >> 3;
 
 	c->fpu_csr0 = FPU_CSR_RN;
 	c->fpu_mask = FPU_CSR_RSVD;
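
The removed expression above shifts first and then masks with 0x3, so it can never keep more than two bits of the version field. A minimal standalone sketch of the difference, assuming CPUCFG2_FPVERS is the three-bit FP_ver field at bits 3..5; the mask value and sample register below are illustrative, not kernel code:

#include <stdio.h>

#define CPUCFG2_FPVERS (0x7u << 3)      /* assumed: 3-bit FP_ver field at bits 3..5 */

int main(void)
{
        unsigned int cfg2 = 1u << 5;    /* sample CPUCFG2 value: FP_ver = 4 */

        unsigned int before = (cfg2 >> 3) & 0x3;              /* keeps only 2 bits -> 0 */
        unsigned int after  = (cfg2 & CPUCFG2_FPVERS) >> 3;   /* keeps full field  -> 4 */

        printf("before=%u after=%u\n", before, after);
        return 0;
}

Under that assumption, any version value of 4 or above is truncated by the old extraction, which is the "wrong fpu version" the commit title refers to.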

@@ -14,8 +14,6 @@
 
 	__REF
 
-SYM_ENTRY(_stext, SYM_L_GLOBAL, SYM_A_NONE)
-
 SYM_CODE_START(kernel_entry)		# kernel entry point
 
 	/* Config direct window and set PG */

@@ -475,8 +475,7 @@ asmlinkage void noinstr do_ri(struct pt_regs *regs)
 
 	die_if_kernel("Reserved instruction in kernel code", regs);
 
-	if (unlikely(compute_return_era(regs) < 0))
-		goto out;
+	compute_return_era(regs);
 
 	if (unlikely(get_user(opcode, era) < 0)) {
 		status = SIGSEGV;

@@ -37,6 +37,7 @@ SECTIONS
 	HEAD_TEXT_SECTION
 
 	. = ALIGN(PECOFF_SEGMENT_ALIGN);
+	_stext = .;
 	.text : {
 		TEXT_TEXT
 		SCHED_TEXT

@@ -281,15 +281,16 @@ void setup_tlb_handler(int cpu)
 		if (pcpu_handlers[cpu])
 			return;
 
-		page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, get_order(vec_sz));
+		page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
 		if (!page)
 			return;
 
 		addr = page_address(page);
-		pcpu_handlers[cpu] = virt_to_phys(addr);
+		pcpu_handlers[cpu] = (unsigned long)addr;
 		memcpy((void *)addr, (void *)eentry, vec_sz);
 		local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
-		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_TLBRENTRY);
 		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
+		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
+		csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
 	}
 #endif
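
For context: per the "sleeping in atomic context" fix, this path must not sleep, so the GFP_KERNEL allocation (which may sleep) becomes GFP_ATOMIC. pcpu_handlers[cpu] now records the copy's virtual address rather than its physical address, and the CSR writes point EENTRY and MERRENTRY at the base of the copied region while TLBRENTRY gets an offset of 80 vector slots into it, presumably where the TLB refill handler sits within the copied vector area. A small standalone sketch of that address arithmetic; the VECSIZE value and the sample base address are illustrative placeholders, not the kernel's definitions:

#include <stdio.h>

#define VECSIZE 0x200ULL        /* illustrative vector-slot size, not the kernel's definition */

int main(void)
{
        /* stand-in for pcpu_handlers[cpu]: the virtual base of the per-CPU copy */
        unsigned long long base = 0x9000000001000000ULL;

        unsigned long long eentry    = base;                   /* -> LOONGARCH_CSR_EENTRY    */
        unsigned long long merrentry = base;                   /* -> LOONGARCH_CSR_MERRENTRY */
        unsigned long long tlbrentry = base + 80 * VECSIZE;    /* -> LOONGARCH_CSR_TLBRENTRY */

        printf("eentry=%#llx merrentry=%#llx tlbrentry=%#llx\n", eentry, merrentry, tlbrentry);
        return 0;
}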