ARC updates for v6.6

-Wmissing-prototypes warning fixes
 
  - Missing compiler barrier in relaxed atomics
 
  - some uaccess simplification, declutter
 
  - Removal of massive global struct cpuinfo_arc from bootlog code
 
  - __switch_to consolidation (removal of inline asm variant)
 
  - use GP to cache task pointer (vs. r25)
 
  - Misc rework of entry code
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEOXpuCuR6hedrdLCJadfx3eKKwl4FAmT2RVMACgkQadfx3eKK
 wl7tqg//V/KIqB3fkqWlXjGGlqsanm/ImcDKJ9D8SBdWSa2qpWVjAKO9gbn1dHrw
 OsTtVHOjSiCIFhn3i/zv+ldPk48hQmEcdVvkMA86E4zhVeLXk561LIQa/ihm9Ic9
 yLgulyqcYLZkboTTzITLYQtb5vqLqNq3NQhX+t4MriJErDP25jrDCyIU7f2/qrMa
 6qrCSU6wCQYT7tL7MNtkCDgH92NVgiBwFW/Grc0Avc6limk3IfspoSDS3bzK271K
 CtYjCIRBAVJ5W8MZ3bV7Jb9UezrCXsWz0X5Zq7z2Je7o85rHE6Bk+B/cxgVd4qFZ
 Eu88f+1V4dwnDWpxL40bW+OpGZUCPJBj2ayDxt2hQGhrX7+jeEXiRHOFxOc3quuw
 OLBMoazsf3sGWhg2w1ojfwIIqt8hbg8iRzDqOFzugdyp1zJPTST5Av3oVvXg4+PZ
 mCj89hcjAfbQ0oRC/yoB0ECNeb1kcjHXBE/EhRVbWbnvT4XsTVitUXNd8isLGZLW
 kQ9RA7u1RB9Ra25bwpXjf/U+F4uLaw9wH9gOBY2uWrGUDUrju7UZwoIE3tFUh2hr
 qhLwsZmw7gmixO2lInkfUk8rTUL6lMPhzNVmv5yy3SNAqBVhdghrPWVP+ys9Q0c+
 MV8g73w7CJC0l35CHMDhQ87hvJHTKqd1yunfQuPy5NZ/Q5GzI6s=
 =+uaN
 -----END PGP SIGNATURE-----

Merge tag 'arc-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC updates from Vineet Gupta:

 - fixes for -Wmissing-prototypes warnings

 - missing compiler barrier in relaxed atomics (see the sketch after
   this list)

 - some uaccess simplification, declutter

 - removal of massive global struct cpuinfo_arc from bootlog code

 - __switch_to consolidation (removal of inline asm variant)

 - use GP to cache task pointer (vs. r25)

 - misc rework of entry code
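
For the "missing compiler barrier" item above, a minimal sketch (invented
names, not the kernel's actual macro) of why even _relaxed LLSC atomics
need the "memory" clobber: llock/scond only take a register-indirect
address, so the pointer is passed via "r" rather than "m", and without
the clobber GCC cannot see that the asm reads and writes *p -- it may
keep the counter cached in a register or move surrounding accesses
across the sequence:

    static inline void atomic_add_relaxed_sketch(int i, int *p)
    {
            int val;

            __asm__ __volatile__(
            "1:     llock  %[val], [%[ctr]]      \n"
            "       add    %[val], %[val], %[i]  \n"
            "       scond  %[val], [%[ctr]]      \n"
            "       bnz    1b                    \n"
            : [val] "=&r" (val)     /* early clobber: no reg reuse */
            : [ctr] "r" (p),        /* llock needs reg-indirect, not "m" */
              [i]   "ir" (i)
            : "cc", "memory");      /* the fix: compiler barrier */
    }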

* tag 'arc-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (24 commits)
  ARC: boot log: fix warning
  arc: Explicitly include correct DT includes
  ARC: pt_regs: create separate type for ecr
  ARCv2: entry: rearrange pt_regs slightly
  ARC: entry: replace 8 byte ADD.ne with 4 byte ADD2.ne
  ARC: entry: replace 8 byte OR with 4 byte BSET
  ARC: entry: Add more common chores to EXCEPTION_PROLOGUE
  ARC: entry: EV_MachineCheck don't re-read ECR
  ARC: entry: ARCompact EV_ProtV to use r10 directly
  ARC: entry: rework (non-functional)
  ARC: __switch_to: move ksp to thread_info from thread_struct
  ARC: __switch_to: asm with dwarf ops (vs. inline asm)
  ARC: kernel stack: INIT_THREAD need not setup @init_stack in @ksp
  ARC: entry: use gp to cache task pointer (vs. r25)
  ARC: boot log: eliminate struct cpuinfo_arc #4: boot log per ISA
  ARC: boot log: eliminate struct cpuinfo_arc #3: don't export
  ARC: boot log: eliminate struct cpuinfo_arc #2: cache
  ARC: boot log: eliminate struct cpuinfo_arc #1: mm
  ARCv2: memset: don't prefetch for len == 0 which happens a lot
  ARC: uaccess: elide unaligned handling if hardware supports
  ...
Linus Torvalds 2023-09-04 15:38:24 -07:00
commit 3f86ed6ec0
44 changed files with 744 additions and 1053 deletions

arch/arc/Kconfig

@ -27,6 +27,8 @@ config ARC
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_IOREMAP
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
@ -491,11 +493,11 @@ config ARC_KVADDR_SIZE
kernel-user gutter)
config ARC_CURR_IN_REG
bool "Dedicate Register r25 for current_task pointer"
bool "cache current task pointer in gp"
default y
help
This reserved Register R25 to point to Current Task in
kernel mode. This saves memory access for each such access
This reserves gp register to point to Current Task in
kernel mode eliding memory access for each access
config ARC_EMUL_UNALIGNED

arch/arc/Makefile

@ -28,14 +28,14 @@ cflags-y += $(tune-mcpu-def-y)
endif
endif
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
# any kernel headers, and missing the r25 global register
# any kernel headers, and missing the global register
# Can't do unconditionally because of recursive include issues
# due to <linux/thread_info.h>
LINUXINCLUDE += -include $(srctree)/arch/arc/include/asm/current.h
cflags-y += -ffixed-gp
endif
cflags-y += -fsection-anchors
@ -67,7 +67,7 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
# small data is default for elf32 tool-chain. If not usable, disable it
# This also allows repurposing GP as scratch reg to gcc reg allocator
disable_small_data := y
cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(disable_small_data) += -mno-sdata
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB

arch/arc/include/asm/arcregs.h

@ -23,7 +23,7 @@
#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
#define ARC_REG_MUL_BCR 0x7b
#define ARC_REG_MPY_BCR 0x7b
#define ARC_REG_SWAP_BCR 0x7c
#define ARC_REG_NORM_BCR 0x7d
#define ARC_REG_MIXMAX_BCR 0x7e
@ -177,7 +177,7 @@ struct bcr_isa_arcv2 {
#endif
};
struct bcr_uarch_build_arcv2 {
struct bcr_uarch_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, prod:8, maj:8, min:8;
#else
@ -185,6 +185,59 @@ struct bcr_uarch_build_arcv2 {
#endif
};
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
ways:4, ver:8;
#endif
};
struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
/* DTLB ITLB JES JE JA */
unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
};
struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
};
struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:24, way:2, lsz:2, sz:4;
#else
unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
};
struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
};
struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
};
struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
@ -302,48 +355,6 @@ struct bcr_generic {
#endif
};
/*
*******************************************************************
* Generic structures to hold build configuration used at runtime
*/
struct cpuinfo_arc_mmu {
unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
};
struct cpuinfo_arc_cache {
unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4;
};
struct cpuinfo_arc_bpu {
unsigned int ver, full, num_cache, num_pred, ret_stk;
};
struct cpuinfo_arc_ccm {
unsigned int base_addr, sz;
};
struct cpuinfo_arc {
struct cpuinfo_arc_cache icache, dcache, slc;
struct cpuinfo_arc_mmu mmu;
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa_arcv2 isa;
const char *release, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct bcr_mpy extn_mpy;
};
extern struct cpuinfo_arc cpuinfo_arc700[];
static inline int is_isa_arcv2(void)
{
return IS_ENABLED(CONFIG_ISA_ARCV2);

arch/arc/include/asm/atomic-llsc.h

@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
: [val] "=&r" (val) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
\
return val; \
}
@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
: "cc", "memory"); \
\
return orig; \
}

arch/arc/include/asm/atomic64-arcv2.h

@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); \
: "cc", "memory"); \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: [val] "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); /* memory clobber comes from smp_mb() */ \
: "cc", "memory"); \
\
return val; \
}
@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(orig), "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
: "cc"); /* memory clobber comes from smp_mb() */ \
: "cc", "memory"); \
\
return orig; \
}

arch/arc/include/asm/current.h

@ -13,7 +13,7 @@
#ifdef CONFIG_ARC_CURR_IN_REG
register struct task_struct *curr_arc asm("r25");
register struct task_struct *curr_arc asm("gp");
#define current (curr_arc)
#else
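
As context for the r25 -> gp switch, a short hedged sketch (accessor
name hypothetical) of what a global register variable buys: every
kernel-mode use of "current" becomes a plain register move instead of a
memory load. The catch, per the Makefile comment above, is that any
translation unit compiled without this declaration (or without
-ffixed-gp) may freely clobber gp:

    register struct task_struct *curr_arc asm("gp");

    static inline struct task_struct *get_current_sketch(void)
    {
            return curr_arc;    /* compiles to "mov r0, gp" -- no load */
    }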

arch/arc/include/asm/dwarf.h

@ -10,23 +10,31 @@
#ifdef ARC_DW2_UNWIND_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_REGISTER .cfi_register
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_UNDEFINED .cfi_undefined
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_UNDEFINED .cfi_undefined
#else
#define CFI_IGNORE #
#define CFI_STARTPROC CFI_IGNORE
#define CFI_ENDPROC CFI_IGNORE
#define CFI_DEF_CFA CFI_IGNORE
#define CFI_REGISTER CFI_IGNORE
#define CFI_REL_OFFSET CFI_IGNORE
#define CFI_UNDEFINED CFI_IGNORE
#define CFI_STARTPROC CFI_IGNORE
#define CFI_ENDPROC CFI_IGNORE
#define CFI_DEF_CFA CFI_IGNORE
#define CFI_DEF_CFA_OFFSET CFI_IGNORE
#define CFI_DEF_CFA_REGISTER CFI_IGNORE
#define CFI_OFFSET CFI_IGNORE
#define CFI_REL_OFFSET CFI_IGNORE
#define CFI_REGISTER CFI_IGNORE
#define CFI_RESTORE CFI_IGNORE
#define CFI_UNDEFINED CFI_IGNORE
#endif /* !ARC_DW2_UNWIND_AS_CFI */

arch/arc/include/asm/entry-arcv2.h

@ -18,7 +18,6 @@
* | orig_r0 |
* | event/ECR |
* | bta |
* | user_r25 |
* | gp |
* | fp |
* | sp |
@ -49,14 +48,18 @@
/*------------------------------------------------------------------------*/
.macro INTERRUPT_PROLOGUE
; (A) Before jumping to Interrupt Vector, hardware micro-ops did following:
; Before jumping to Interrupt Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0)
; 3. Auto save: (mandatory) Push PC and STAT32 on stack
;    hardware does this even if CONFIG_ARC_IRQ_NO_AUTOSAVE
; 4. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
; 4a. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
;
; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair
; Now
; 4b. If Auto-save (optional) not enabled in hw, manually save them
; 5. Manually save: r12,r30, sp,fp,gp, ACCL pair
;
; At the end, SP points to pt_regs
#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
; carve pt_regs on stack (case #3), PC/STAT32 already on stack
@ -72,15 +75,16 @@
.endm
/*------------------------------------------------------------------------*/
.macro EXCEPTION_PROLOGUE
.macro EXCEPTION_PROLOGUE_KEEP_AE
; (A) Before jumping to Exception Vector, hardware micro-ops did following:
; Before jumping to Exception Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0)
;
; (B) Manually save the complete reg file below
; Now manually save rest of reg file
; At the end, SP points to pt_regs
sub sp, sp, SZ_PT_REGS ; carve pt_regs
sub sp, sp, SZ_PT_REGS ; carve space for pt_regs
; _HARD saves r10 clobbered by _SOFT as scratch hence comes first
@ -100,6 +104,16 @@
; OUTPUT: r10 has ECR expected by EV_Trap
.endm
.macro EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN ; clobbers r9
.endm
/*------------------------------------------------------------------------
* This macro saves the registers manually which would normally be autosaved
* by hardware on taken interrupts. It is used by
@ -135,10 +149,10 @@
*/
.macro __SAVE_REGFILE_SOFT
ST2 gp, fp, PT_r26 ; gp (r26), fp (r27)
st r12, [sp, PT_sp + 4]
st r30, [sp, PT_sp + 8]
st fp, [sp, PT_fp] ; r27
st r30, [sp, PT_r30]
st r12, [sp, PT_r12]
st r26, [sp, PT_r26] ; gp
; Saving pt_regs->sp correctly requires some extra work due to the way
; Auto stack switch works
@ -153,30 +167,30 @@
; ISA requires ADD.nz to have same dest and src reg operands
mov.nz r10, sp
add.nz r10, r10, SZ_PT_REGS ; K mode SP
add2.nz r10, r10, SZ_PT_REGS/4 ; K mode SP
st r10, [sp, PT_sp] ; SP (pt_regs->sp)
#ifdef CONFIG_ARC_CURR_IN_REG
st r25, [sp, PT_user_r25]
GET_CURR_TASK_ON_CPU r25
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
ST2 r58, r59, PT_r58
#endif
/* clobbers r10, r11 registers pair */
DSP_SAVE_REGFILE_IRQ
#ifdef CONFIG_ARC_CURR_IN_REG
GET_CURR_TASK_ON_CPU gp
#endif
.endm
/*------------------------------------------------------------------------*/
.macro __RESTORE_REGFILE_SOFT
LD2 gp, fp, PT_r26 ; gp (r26), fp (r27)
ld r12, [sp, PT_r12]
ld fp, [sp, PT_fp]
ld r30, [sp, PT_r30]
ld r12, [sp, PT_r12]
ld r26, [sp, PT_r26]
; Restore SP (into AUX_USER_SP) only if returning to U mode
; - for K mode, it will be implicitly restored as stack is unwound
@ -188,10 +202,6 @@
sr r10, [AUX_USER_SP]
1:
#ifdef CONFIG_ARC_CURR_IN_REG
ld r25, [sp, PT_user_r25]
#endif
/* clobbers r10, r11 registers pair */
DSP_RESTORE_REGFILE_IRQ
@ -249,7 +259,7 @@
btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP
ld r10, [sp, PT_event + 4]
ld r10, [sp, PT_bta]
sr r10, [erbta]
LD2 r10, r11, PT_ret
@ -264,8 +274,8 @@
.macro FAKE_RET_FROM_EXCPN
lr r9, [status32]
bic r9, r9, STATUS_AE_MASK
or r9, r9, STATUS_IE_MASK
bclr r9, r9, STATUS_AE_BIT
bset r9, r9, STATUS_IE_BIT
kflag r9
.endm

arch/arc/include/asm/entry-compact.h

@ -140,7 +140,7 @@
*
* After this it is safe to call the "C" handlers
*-------------------------------------------------------------*/
.macro EXCEPTION_PROLOGUE
.macro EXCEPTION_PROLOGUE_KEEP_AE
/* Need at least 1 reg to code the early exception prologue */
PROLOG_FREEUP_REG r9, @ex_saved_reg1
@ -151,14 +151,6 @@
/* ARC700 doesn't provide auto-stack switching */
SWITCH_TO_KERNEL_STK
#ifdef CONFIG_ARC_CURR_IN_REG
/* Treat r25 as scratch reg (save on stack) and load with "current" */
PUSH r25
GET_CURR_TASK_ON_CPU r25
#else
sub sp, sp, 4
#endif
st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */
sub sp, sp, 4 /* skip pt_regs->sp, already saved above */
@ -178,7 +170,23 @@
PUSHAX erbta
lr r10, [ecr]
st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */
st r10, [sp, PT_event]
#ifdef CONFIG_ARC_CURR_IN_REG
/* gp already saved on stack: now load with "current" */
GET_CURR_TASK_ON_CPU gp
#endif
; OUTPUT: r10 has ECR expected by EV_Trap
.endm
.macro EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN ; clobbers r9
.endm
/*--------------------------------------------------------------
@ -208,11 +216,8 @@
POP gp
RESTORE_R12_TO_R0
#ifdef CONFIG_ARC_CURR_IN_REG
ld r25, [sp, 12]
#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
/* orig_r0, ECR skipped automatically */
.endm
/* Dummy ECR values for Interrupts */
@ -229,13 +234,6 @@
SWITCH_TO_KERNEL_STK
#ifdef CONFIG_ARC_CURR_IN_REG
/* Treat r25 as scratch reg (save on stack) and load with "current" */
PUSH r25
GET_CURR_TASK_ON_CPU r25
#else
sub sp, sp, 4
#endif
PUSH 0x003\LVL\()abcd /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
@ -255,6 +253,10 @@
PUSHAX lp_start
PUSHAX bta_l\LVL\()
#ifdef CONFIG_ARC_CURR_IN_REG
/* gp already saved on stack: now load with "current" */
GET_CURR_TASK_ON_CPU gp
#endif
.endm
/*--------------------------------------------------------------
@ -282,11 +284,7 @@
POP gp
RESTORE_R12_TO_R0
#ifdef CONFIG_ARC_CURR_IN_REG
ld r25, [sp, 12]
#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
ld sp, [sp] /* restore original sp; orig_r0, ECR skipped implicitly */
.endm
/* Get thread_info of "current" tsk */

arch/arc/include/asm/entry.h

@ -13,6 +13,8 @@
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/mmu.h>
#ifdef __ASSEMBLY__
#ifdef CONFIG_ISA_ARCOMPACT
#include <asm/entry-compact.h> /* ISA specific bits */
#else
@ -89,7 +91,7 @@
* Helpers to save/restore callee-saved regs:
* used by several macros below
*-------------------------------------------------------------*/
.macro SAVE_R13_TO_R24
.macro SAVE_R13_TO_R25
PUSH r13
PUSH r14
PUSH r15
@ -102,9 +104,11 @@
PUSH r22
PUSH r23
PUSH r24
PUSH r25
.endm
.macro RESTORE_R24_TO_R13
.macro RESTORE_R25_TO_R13
POP r25
POP r24
POP r23
POP r22
@ -119,81 +123,31 @@
POP r13
.endm
/*--------------------------------------------------------------
* Collect User Mode callee regs as struct callee_regs - needed by
* fork/do_signal/unaligned-access-emulation.
* (By default only scratch regs are saved on entry to kernel)
*
* Special handling for r25 if used for caching Task Pointer.
* It would have been saved in task->thread.user_r25 already, but to keep
* the interface same it is copied into regular r25 placeholder in
* struct callee_regs.
*-------------------------------------------------------------*/
/*
* save user mode callee regs as struct callee_regs
* - needed by fork/do_signal/unaligned-access-emulation.
*/
.macro SAVE_CALLEE_SAVED_USER
mov r12, sp ; save SP as ref to pt_regs
SAVE_R13_TO_R24
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it with rest of callee_regs
ld r12, [r12, PT_user_r25]
PUSH r12
#else
PUSH r25
#endif
SAVE_R13_TO_R25
.endm
/*--------------------------------------------------------------
* Save kernel Mode callee regs at the time of Context Switch.
*
* Special handling for r25 if used for caching Task Pointer.
* Kernel simply skips saving it since it will be loaded with
* incoming task pointer anyways
*-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL
SAVE_R13_TO_R24
#ifdef CONFIG_ARC_CURR_IN_REG
sub sp, sp, 4
#else
PUSH r25
#endif
.endm
/*--------------------------------------------------------------
* Opposite of SAVE_CALLEE_SAVED_KERNEL
*-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL
#ifdef CONFIG_ARC_CURR_IN_REG
add sp, sp, 4 /* skip usual r25 placeholder */
#else
POP r25
#endif
RESTORE_R24_TO_R13
.endm
/*--------------------------------------------------------------
* Opposite of SAVE_CALLEE_SAVED_USER
*
* ptrace tracer or unaligned-access fixup might have changed a user mode
* callee reg which is saved back to usual r25 storage location
*-------------------------------------------------------------*/
/*
* restore user mode callee regs as struct callee_regs
* - could have been changed by ptrace tracer or unaligned-access fixup
*/
.macro RESTORE_CALLEE_SAVED_USER
RESTORE_R25_TO_R13
.endm
#ifdef CONFIG_ARC_CURR_IN_REG
POP r12
#else
POP r25
#endif
RESTORE_R24_TO_R13
/*
* save/restore kernel mode callee regs at the time of context switch
*/
.macro SAVE_CALLEE_SAVED_KERNEL
SAVE_R13_TO_R25
.endm
; SP is back to start of pt_regs
#ifdef CONFIG_ARC_CURR_IN_REG
st r12, [sp, PT_user_r25]
#endif
.macro RESTORE_CALLEE_SAVED_KERNEL
RESTORE_R25_TO_R13
.endm
/*--------------------------------------------------------------
@ -229,10 +183,10 @@
#ifdef CONFIG_SMP
/*-------------------------------------------------
/*
* Retrieve the current running task on this CPU
* 1. Determine curr CPU id.
* 2. Use it to index into _current_task[ ]
* - loads it from backing _current_task[] (and can't use the
* caching reg for current task)
*/
.macro GET_CURR_TASK_ON_CPU reg
GET_CPU_ID \reg
@ -254,7 +208,7 @@
add2 \tmp, @_current_task, \tmp
st \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
mov r25, \tsk
mov gp, \tsk
#endif
.endm
@ -269,21 +223,20 @@
.macro SET_CURR_TASK_ON_CPU tsk, tmp
st \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
mov r25, \tsk
mov gp, \tsk
#endif
.endm
#endif /* SMP / UNI */
/* ------------------------------------------------------------------
/*
* Get the ptr to some field of Current Task at @off in task struct
* -Uses r25 for Current task ptr if that is enabled
* - Uses current task cached in reg if enabled
*/
#ifdef CONFIG_ARC_CURR_IN_REG
.macro GET_CURR_TASK_FIELD_PTR off, reg
add \reg, r25, \off
add \reg, gp, \off
.endm
#else
@ -295,4 +248,23 @@
#endif /* CONFIG_ARC_CURR_IN_REG */
#else /* !__ASSEMBLY__ */
extern void do_signal(struct pt_regs *);
extern void do_notify_resume(struct pt_regs *);
extern int do_privilege_fault(unsigned long, struct pt_regs *);
extern int do_extension_fault(unsigned long, struct pt_regs *);
extern int insterror_is_error(unsigned long, struct pt_regs *);
extern int do_memory_error(unsigned long, struct pt_regs *);
extern int trap_is_brkpt(unsigned long, struct pt_regs *);
extern int do_misaligned_error(unsigned long, struct pt_regs *);
extern int do_trap5_error(unsigned long, struct pt_regs *);
extern int do_misaligned_access(unsigned long, struct pt_regs *, struct callee_regs *);
extern void do_machine_check_fault(unsigned long, struct pt_regs *);
extern void do_non_swi_trap(unsigned long, struct pt_regs *);
extern void do_insterror_or_kprobe(unsigned long, struct pt_regs *);
extern void do_page_fault(unsigned long, struct pt_regs *);
#endif
#endif /* __ASM_ARC_ENTRY_H */

arch/arc/include/asm/irq.h

@ -25,5 +25,6 @@
#include <asm-generic/irq.h>
extern void arc_init_IRQ(void);
extern void arch_do_IRQ(unsigned int, struct pt_regs *);
#endif

arch/arc/include/asm/mmu.h

@ -14,6 +14,8 @@ typedef struct {
unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *);
#endif
#include <asm/mmu-arcv2.h>

arch/arc/include/asm/processor.h

@ -22,7 +22,6 @@
* struct thread_info
*/
struct thread_struct {
unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
@ -33,9 +32,7 @@ struct thread_struct {
#endif
};
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
}
#define INIT_THREAD { }
/* Forward declaration, a strange C thing */
struct task_struct;
@ -56,7 +53,7 @@ struct task_struct;
* Where about of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)
#define TSK_K_ESP(tsk) (task_thread_info(tsk)->ksp)
#define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \
sizeof(struct callee_regs) + off)))

arch/arc/include/asm/ptrace.h

@ -12,6 +12,17 @@
#ifndef __ASSEMBLY__
typedef union {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned long state:8, vec:8, cause:8, param:8;
#else
unsigned long param:8, cause:8, vec:8, state:8;
#endif
};
unsigned long full;
} ecr_reg;
/* THE pt_regs: Defines how regs are saved during entry into kernel */
#ifdef CONFIG_ISA_ARCOMPACT
@ -40,23 +51,10 @@ struct pt_regs {
* Last word used by Linux for extra state mgmt (syscall-restart)
* For interrupts, use artificial ECR values to note current prio-level
*/
union {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned long state:8, ecr_vec:8,
ecr_cause:8, ecr_param:8;
#else
unsigned long ecr_param:8, ecr_cause:8,
ecr_vec:8, state:8;
#endif
};
unsigned long event;
};
unsigned long user_r25;
ecr_reg ecr;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, user_r25)
#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
#else
@ -64,28 +62,14 @@ struct pt_regs {
unsigned long orig_r0;
union {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned long state:8, ecr_vec:8,
ecr_cause:8, ecr_param:8;
#else
unsigned long ecr_param:8, ecr_cause:8,
ecr_vec:8, state:8;
#endif
};
unsigned long event;
};
ecr_reg ecr; /* Exception Cause Reg */
unsigned long bta; /* bta_l1, bta_l2, erbta */
unsigned long bta; /* erbta */
unsigned long user_r25;
unsigned long r26; /* gp */
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
unsigned long r12, r30;
unsigned long r30;
unsigned long r12;
unsigned long r26; /* gp */
#ifdef CONFIG_ARC_HAS_ACCL_REGS
unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
@ -94,6 +78,8 @@ struct pt_regs {
unsigned long DSP_CTRL;
#endif
unsigned long sp; /* user/kernel sp depending on entry */
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
@ -134,13 +120,13 @@ struct callee_regs {
/* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
#define in_syscall(regs) ((regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param)
#define in_brkpt_trap(regs) ((regs->ecr.vec == ECR_V_TRAP) && regs->ecr.param)
#define STATE_SCALL_RESTARTED 0x01
#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
#define syscall_wont_restart(regs) (regs->ecr.state |= STATE_SCALL_RESTARTED)
#define syscall_restartable(regs) !(regs->ecr.state & STATE_SCALL_RESTARTED)
#define current_pt_regs() \
({ \
@ -181,6 +167,9 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
extern int syscall_trace_entry(struct pt_regs *);
extern void syscall_trace_exit(struct pt_regs *);
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PTRACE_H */
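
A small hedged sketch of using the new ecr_reg type (function name
hypothetical; the 0x23/0x04 encoding is the ARCompact ProtV unaligned
case documented in the entry-compact.S hunk further below): handlers can
now test named fields instead of open-coding regs->ecr_vec and friends,
and regs->ecr.full replaces the old aggregate regs->event word:

    static void ecr_sketch(struct pt_regs *regs)
    {
            ecr_reg ecr = regs->ecr;

            /* ProtV unaligned data access encodes as 0x00_23_04_00 */
            if (ecr.vec == 0x23 && ecr.cause == 0x04)
                    pr_info("misaligned access, raw ECR: %lx\n", ecr.full);
    }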

arch/arc/include/asm/setup.h

@ -35,11 +35,11 @@ long __init arc_get_mem_sz(void);
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
extern void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_mmu_bcr(void);
extern int arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
extern int arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void __init handle_uboot_args(void);
#endif /* __ASMARC_SETUP_H */

arch/arc/include/asm/smp.h

@ -29,6 +29,8 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void __init smp_init_cpus(void);
extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
extern void arc_platform_smp_wait_to_boot(int);
extern void start_kernel_secondary(void);
/*
* API expected BY platform smp code (FROM arch smp code)

arch/arc/include/asm/thread_info.h

@ -37,16 +37,16 @@
*/
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct task_struct *task; /* main task structure */
__u32 cpu; /* current CPU */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */
};
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
* initialize thread_info for any @tsk
* - this is not related to init_task per se
*/
#define INIT_THREAD_INFO(tsk) \
{ \

arch/arc/include/asm/uaccess.h

@ -146,8 +146,9 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
if (n == 0)
return 0;
/* unaligned */
if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
/* fallback for unaligned access when hardware doesn't support */
if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
(((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@ -373,8 +374,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
if (n == 0)
return 0;
/* unaligned */
if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
/* fallback for unaligned access when hardware doesn't support */
if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
(((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@ -584,7 +586,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return res;
}
static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
long res = n;
unsigned char *d_char = to;
@ -626,17 +628,10 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
return res;
}
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
#define __clear_user(d, n) __arc_clear_user(d, n)
#else
extern unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n);
#define __clear_user(d, n) arc_clear_user_noinline(d, n)
#endif
#define __clear_user __clear_user
#include <asm-generic/uaccess.h>
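
The IS_ENABLED() guard added in the two hunks above is the usual
compile-time elision pattern; a sketch (both helpers hypothetical) of
how it works: IS_ENABLED() folds to a constant 0/1, so on cores with
hardware unaligned access the whole byte-copy branch is dead code and
disappears, while still being parsed and type-checked (unlike #ifdef):

    static inline long copy_sketch(void *to, const void *from, unsigned long n)
    {
            if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
                (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)))
                    return bytewise_copy(to, from, n);  /* hypothetical */

            return wordwise_copy(to, from, n);          /* hypothetical */
    }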

arch/arc/kernel/Makefile

@ -5,6 +5,8 @@
obj-y := head.o arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o
obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
obj-y += ctx_sw_asm.o
obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o
obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o
@ -24,11 +26,4 @@ ifdef CONFIG_ISA_ARCOMPACT
CFLAGS_fpu.o += -mdpfp
endif
ifdef CONFIG_ARC_DW2_UNWIND
CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
obj-y += ctx_sw.o
else
obj-y += ctx_sw_asm.o
endif
extra-y := vmlinux.lds

arch/arc/kernel/asm-offsets.c

@ -20,13 +20,13 @@ int main(void)
BLANK();
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
DEFINE(THREAD_FAULT_ADDR,
offsetof(struct thread_struct, fault_address));
BLANK();
DEFINE(THREAD_INFO_KSP, offsetof(struct thread_info, ksp));
DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
DEFINE(THREAD_INFO_PREEMPT_COUNT,
offsetof(struct thread_info, preempt_count));
@ -46,7 +46,8 @@ int main(void)
BLANK();
DEFINE(PT_status32, offsetof(struct pt_regs, status32));
DEFINE(PT_event, offsetof(struct pt_regs, event));
DEFINE(PT_event, offsetof(struct pt_regs, ecr));
DEFINE(PT_bta, offsetof(struct pt_regs, bta));
DEFINE(PT_sp, offsetof(struct pt_regs, sp));
DEFINE(PT_r0, offsetof(struct pt_regs, r0));
DEFINE(PT_r1, offsetof(struct pt_regs, r1));
@ -61,13 +62,9 @@ int main(void)
DEFINE(PT_r26, offsetof(struct pt_regs, r26));
DEFINE(PT_ret, offsetof(struct pt_regs, ret));
DEFINE(PT_blink, offsetof(struct pt_regs, blink));
OFFSET(PT_fp, pt_regs, fp);
DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end));
DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count));
DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
#ifdef CONFIG_ISA_ARCV2
OFFSET(PT_r12, pt_regs, r12);
OFFSET(PT_r30, pt_regs, r30);
@ -80,5 +77,8 @@ int main(void)
OFFSET(PT_DSP_CTRL, pt_regs, DSP_CTRL);
#endif
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
return 0;
}
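
For readers new to asm-offsets: this file is compiled to assembly but
never linked; DEFINE()/OFFSET() (from include/linux/kbuild.h) plant
marker strings in the output which kbuild turns into #defines in
generated/asm-offsets.h -- that is how .S files such as ctx_sw_asm.S
see C struct layouts like THREAD_INFO_KSP:

    /* from include/linux/kbuild.h; the emitted asm contains lines like
     *   ->THREAD_INFO_KSP <offset> offsetof(struct thread_info, ksp)
     * which a sed pass rewrites as: #define THREAD_INFO_KSP <offset> */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))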

arch/arc/kernel/ctx_sw.c (deleted)

@ -1,112 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Vineetg: Aug 2009
* -"C" version of lowest level context switch asm macro called by schedular
* gcc doesn't generate the dward CFI info for hand written asm, hence can't
* backtrace out of it (e.g. tasks sleeping in kernel).
* So we cheat a bit by writing almost similar code in inline-asm.
* -This is a hacky way of doing things, but there is no other simple way.
* I don't want/intend to extend unwinding code to understand raw asm
*/
#include <asm/asm-offsets.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
struct task_struct *__sched
__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
{
unsigned int tmp;
unsigned int prev = (unsigned int)prev_task;
unsigned int next = (unsigned int)next_task;
__asm__ __volatile__(
/* FP/BLINK save generated by gcc (standard function prologue) */
"st.a r13, [sp, -4] \n\t"
"st.a r14, [sp, -4] \n\t"
"st.a r15, [sp, -4] \n\t"
"st.a r16, [sp, -4] \n\t"
"st.a r17, [sp, -4] \n\t"
"st.a r18, [sp, -4] \n\t"
"st.a r19, [sp, -4] \n\t"
"st.a r20, [sp, -4] \n\t"
"st.a r21, [sp, -4] \n\t"
"st.a r22, [sp, -4] \n\t"
"st.a r23, [sp, -4] \n\t"
"st.a r24, [sp, -4] \n\t"
#ifndef CONFIG_ARC_CURR_IN_REG
"st.a r25, [sp, -4] \n\t"
#else
"sub sp, sp, 4 \n\t" /* usual r25 placeholder */
#endif
/* set ksp of outgoing task in tsk->thread.ksp */
#if KSP_WORD_OFF <= 255
"st.as sp, [%3, %1] \n\t"
#else
/*
* Workaround for NR_CPUS=4k
* %1 is bigger than 255 (S9 offset for st.as)
*/
"add2 r24, %3, %1 \n\t"
"st sp, [r24] \n\t"
#endif
/*
* setup _current_task with incoming tsk.
* optionally, set r25 to that as well
* For SMP extra work to get to &_current_task[cpu]
* (open coded SET_CURR_TASK_ON_CPU)
*/
#ifndef CONFIG_SMP
"st %2, [@_current_task] \n\t"
#else
"lr r24, [identity] \n\t"
"lsr r24, r24, 8 \n\t"
"bmsk r24, r24, 7 \n\t"
"add2 r24, @_current_task, r24 \n\t"
"st %2, [r24] \n\t"
#endif
#ifdef CONFIG_ARC_CURR_IN_REG
"mov r25, %2 \n\t"
#endif
/* get ksp of incoming task from tsk->thread.ksp */
"ld.as sp, [%2, %1] \n\t"
/* start loading it's CALLEE reg file */
#ifndef CONFIG_ARC_CURR_IN_REG
"ld.ab r25, [sp, 4] \n\t"
#else
"add sp, sp, 4 \n\t"
#endif
"ld.ab r24, [sp, 4] \n\t"
"ld.ab r23, [sp, 4] \n\t"
"ld.ab r22, [sp, 4] \n\t"
"ld.ab r21, [sp, 4] \n\t"
"ld.ab r20, [sp, 4] \n\t"
"ld.ab r19, [sp, 4] \n\t"
"ld.ab r18, [sp, 4] \n\t"
"ld.ab r17, [sp, 4] \n\t"
"ld.ab r16, [sp, 4] \n\t"
"ld.ab r15, [sp, 4] \n\t"
"ld.ab r14, [sp, 4] \n\t"
"ld.ab r13, [sp, 4] \n\t"
/* last (ret value) = prev : although for ARC it's a mov r0, r0 */
"mov %0, %3 \n\t"
/* FP/BLINK restore generated by gcc (standard func epilogue) */
: "=r"(tmp)
: "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
: "blink"
);
return (struct task_struct *)tmp;
}

arch/arc/kernel/ctx_sw_asm.S

@ -11,50 +11,54 @@
#include <asm/entry.h> /* For the SAVE_* macros */
#include <asm/asm-offsets.h>
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
;################### Low Level Context Switch ##########################
; IN
; - r0: prev task (also current)
; - r1: next task
; OUT
; - r0: prev task (so r0 not touched)
.section .sched.text,"ax",@progbits
.align 4
.global __switch_to
.type __switch_to, @function
__switch_to:
CFI_STARTPROC
ENTRY_CFI(__switch_to)
/* Save regs on kernel mode stack of task */
st.a blink, [sp, -4]
st.a fp, [sp, -4]
/* save kernel stack frame regs of @prev task */
push blink
CFI_DEF_CFA_OFFSET 4
CFI_OFFSET r31, -4
push fp
CFI_DEF_CFA_OFFSET 8
CFI_OFFSET r27, -8
mov fp, sp
CFI_DEF_CFA_REGISTER r27
/* kernel mode callee regs of @prev */
SAVE_CALLEE_SAVED_KERNEL
/* Save the now KSP in task->thread.ksp */
#if KSP_WORD_OFF <= 255
st.as sp, [r0, KSP_WORD_OFF]
#else
/* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */
add2 r24, r0, KSP_WORD_OFF
st sp, [r24]
#endif
/*
* Return last task in r0 (return reg)
* On ARC, Return reg = First Arg reg = r0.
* Since we already have last task in r0,
* don't need to do anything special to return it
*/
/*
* switch to new task, contained in r1
* Temp reg r3 is required to get the ptr to store val
* save final SP to @prev->thread_info.ksp
* @prev is "current" so thread_info derived from SP
*/
SET_CURR_TASK_ON_CPU r1, r3
GET_CURR_THR_INFO_FROM_SP r10
st sp, [r10, THREAD_INFO_KSP]
/* reload SP with kernel mode stack pointer in task->thread.ksp */
ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
/* update @next in _current_task[] and GP register caching it */
SET_CURR_TASK_ON_CPU r1, r10
/* restore the registers */
/* load SP from @next->thread_info.ksp */
ld r10, [r1, TASK_THREAD_INFO]
ld sp, [r10, THREAD_INFO_KSP]
/* restore callee regs, stack frame regs of @next */
RESTORE_CALLEE_SAVED_KERNEL
ld.ab fp, [sp, 4]
ld.ab blink, [sp, 4]
j [blink]
pop fp
CFI_RESTORE r27
CFI_DEF_CFA r28, 4
pop blink
CFI_RESTORE r31
CFI_DEF_CFA_OFFSET 0
j [blink]
END_CFI(__switch_to)

arch/arc/kernel/devtree.c

@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/mach_desc.h>
#include <asm/serial.h>
#ifdef CONFIG_SERIAL_EARLYCON

arch/arc/kernel/entry-arcv2.S

@ -125,11 +125,6 @@ ENTRY(mem_service)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_memory_error
b ret_from_exception
END(mem_service)
@ -138,11 +133,6 @@ ENTRY(EV_Misaligned)
EXCEPTION_PROLOGUE
lr r0, [efa] ; Faulting Data address
mov r1, sp
FAKE_RET_FROM_EXCPN
SAVE_CALLEE_SAVED_USER
mov r2, sp ; callee_regs
@ -163,11 +153,6 @@ ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
lr r0, [efa] ; Faulting Data address
mov r1, sp ; pt_regs
FAKE_RET_FROM_EXCPN
mov blink, ret_from_exception
b do_page_fault

arch/arc/kernel/entry-compact.S

@ -254,18 +254,7 @@ END(handle_interrupt_level1)
ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
mov r2, r10 ; ECR set into r10 already
lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above)
; Exception auto-disables further Intr/exceptions.
; Re-enable them by pretending to return from exception
; (so rest of handler executes in pure K mode)
FAKE_RET_FROM_EXCPN
mov r1, sp ; Handle to pt_regs
EXCEPTION_PROLOGUE ; ECR returned in r10
;------ (5) Type of Protection Violation? ----------
;
@ -273,8 +262,7 @@ ENTRY(EV_TLBProtV)
; -Access Violation : 00_23_(00|01|02|03)_00
; x r w r+w
; -Unaligned Access : 00_23_04_00
;
bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
bbit1 r10, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
;========= (6a) Access Violation Processing ========
bl do_page_fault
@ -303,9 +291,6 @@ END(EV_TLBProtV)
ENTRY(call_do_page_fault)
EXCEPTION_PROLOGUE
lr r0, [efa] ; Faulting Data address
mov r1, sp
FAKE_RET_FROM_EXCPN
mov blink, ret_from_exception
b do_page_fault

arch/arc/kernel/entry.S

@ -80,11 +80,6 @@ ENTRY(instr_service)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_insterror_or_kprobe
b ret_from_exception
END(instr_service)
@ -95,16 +90,15 @@ END(instr_service)
ENTRY(EV_MachineCheck)
EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE ; ECR returned in r10
lr r2, [ecr]
lr r0, [efa]
mov r1, sp
; MC exceptions disable MMU
ARC_MMU_REENABLE r3
lsr r3, r2, 8
lsr r3, r10, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
@ -129,11 +123,6 @@ ENTRY(EV_PrivilegeV)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_privilege_fault
b ret_from_exception
END(EV_PrivilegeV)
@ -145,11 +134,6 @@ ENTRY(EV_Extension)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN
bl do_extension_fault
b ret_from_exception
END(EV_Extension)
@ -160,20 +144,19 @@ END(EV_Extension)
; syscall Tracing
; ---------------------------------------------
tracesys:
; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed
; safekeep EFA (r12) if syscall tracer wanted PC
; for traps, ERET is pre-commit so points to next-PC
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_entry
; PRE syscall trace hook
mov r0, sp ; pt_regs
bl @syscall_trace_enter
; Tracing code now returns the syscall num (orig or modif)
mov r8, r0
; Do the Sys Call as we normally would.
; Validate the Sys Call number
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@ -190,37 +173,36 @@ tracesys:
ld r6, [sp, PT_r6]
ld r7, [sp, PT_r7]
ld.as r9, [sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
jl [r9]
tracesys_exit:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
st r0, [sp, PT_r0]
;POST Sys Call Ptrace Hook
; POST syscall trace hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_exit
b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
; we'd done before calling post hook above
; don't call ret_from_system_call as it saves r0, already done above
b ret_from_exception
; ---------------------------------------------
; Breakpoint TRAP
; ---------------------------------------------
trap_with_param:
mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
mov r1, sp
mov r1, sp ; pt_regs
; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12
; save callee regs in case tracer/gdb wants to peek
SAVE_CALLEE_SAVED_USER
; save location of saved Callee Regs @ thread_struct->pc
; safekeep ref to callee regs
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
; Call the trap handler
; call the non syscall trap handler
bl do_non_swi_trap
; unwind stack to discard Callee saved Regs
; unwind stack to discard callee regs
DISCARD_CALLEE_SAVED_USER
b ret_from_exception
@ -232,37 +214,33 @@ trap_with_param:
ENTRY(EV_Trap)
EXCEPTION_PROLOGUE
EXCEPTION_PROLOGUE_KEEP_AE
lr r12, [efa]
FAKE_RET_FROM_EXCPN
;============ TRAP 1 :breakpoints
; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
;============ TRAP N : breakpoints, kprobes etc
bmsk.f 0, r10, 7
bnz trap_with_param
;============ TRAP (no param): syscall top level
;============ TRAP 0 (no param): syscall
; If syscall tracing ongoing, invoke pre-post-hooks
; syscall tracing ongoing, invoke pre-post-hooks around syscall
GET_CURR_THR_INFO_FLAGS r10
and.f 0, r10, _TIF_SYSCALL_WORK
bnz tracesys ; this never comes back
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
; Offset into the syscall_table and call handler
ld.as r9,[sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
jl [r9]
.Lret_from_system_call:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
; fall through to ret_from_exception
@ -318,7 +296,7 @@ resume_user_mode_begin:
; tracer might call PEEKUSR(CALLEE reg)
;
; NOTE: SP will grow up by size of CALLEE Reg-File
SAVE_CALLEE_SAVED_USER ; clobbers r12
SAVE_CALLEE_SAVED_USER
; save location of saved Callee Regs @ thread_struct->callee
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10

arch/arc/kernel/intc-arcv2.c

@ -108,7 +108,7 @@ static void arcv2_irq_unmask(struct irq_data *data)
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
void arcv2_irq_enable(struct irq_data *data)
static void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
write_aux_reg(AUX_IRQ_SELECT, data->hwirq);

arch/arc/kernel/kgdb.c

@ -175,7 +175,7 @@ void kgdb_trap(struct pt_regs *regs)
* with trap_s 4 (compiled) breakpoints, continuation needs to
* start after the breakpoint.
*/
if (regs->ecr_param == 3)
if (regs->ecr.param == 3)
instruction_pointer(regs) -= BREAK_INSTR_SIZE;
kgdb_handle_exception(1, SIGTRAP, 0, regs);

arch/arc/kernel/mcip.c

@ -165,8 +165,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.idu, "IDU "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}
struct plat_smp_ops plat_smp_ops = {

arch/arc/kernel/process.c

@ -141,7 +141,7 @@ asmlinkage void ret_from_fork(void);
* | unused |
* | |
* ------------------
* | r25 | <==== top of Stack (thread.ksp)
* | r25 | <==== top of Stack (thread_info.ksp)
* ~ ~
* | --to-- | (CALLEE Regs of kernel mode)
* | r13 |
@ -162,7 +162,6 @@ asmlinkage void ret_from_fork(void);
* | SP |
* | orig_r0 |
* | event/ECR |
* | user_r25 |
* ------------------ <===== END of PAGE
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
@ -182,14 +181,14 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
c_callee = ((struct callee_regs *)childksp) - 1;
/*
* __switch_to() uses thread.ksp to start unwinding stack
* __switch_to() uses thread_info.ksp to start unwinding stack
* For kernel threads we don't need to create callee regs, the
* stack layout nevertheless needs to remain the same.
* Also, since __switch_to anyways unwinds callee regs, we use
* this to populate kernel thread entry-pt/args into callee regs,
* so that ret_from_kernel_thread() becomes simpler.
*/
p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
task_thread_info(p)->ksp = (unsigned long)c_callee; /* THREAD_INFO_KSP */
/* __switch_to expects FP(0), BLINK(return addr) at top */
childksp[0] = 0; /* fp */
@ -243,16 +242,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
*/
c_callee->r25 = task_thread_info(p)->thr_ptr;
#ifdef CONFIG_ARC_CURR_IN_REG
/*
* setup usermode thread pointer #2:
* however for this special use of r25 in kernel, __switch_to() sets
* r25 for kernel needs and only in the final return path is usermode
* r25 setup, from pt_regs->user_r25. So set that up as well
*/
c_regs->user_r25 = c_callee->r25;
#endif
return 0;
}

arch/arc/kernel/ptrace.c

@ -46,8 +46,7 @@ static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(orig_r0),
REG_OFFSET_NAME(event),
REG_OFFSET_NAME(user_r25),
REG_OFFSET_NAME(ecr),
REG_OFFSET_END,
};
@ -55,9 +54,8 @@ static const struct pt_regs_offset regoffset_table[] = {
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(orig_r0),
REG_OFFSET_NAME(event),
REG_OFFSET_NAME(ecr),
REG_OFFSET_NAME(bta),
REG_OFFSET_NAME(user_r25),
REG_OFFSET_NAME(r26),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(sp),
@ -341,7 +339,7 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
asmlinkage int syscall_trace_entry(struct pt_regs *regs)
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (ptrace_report_syscall_entry(regs))

arch/arc/kernel/setup.c

@ -29,6 +29,7 @@
#include <asm/mach_desc.h>
#include <asm/smp.h>
#include <asm/dsp-impl.h>
#include <soc/arc/mcip.h>
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
@ -43,19 +44,22 @@ const struct machine_desc *machine_desc;
struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
struct cpuinfo_arc {
int arcver;
unsigned int t0:1, t1:1;
struct {
unsigned long base;
unsigned int sz;
} iccm, dccm;
};
static const struct id_to_str arc_legacy_rel[] = {
#ifdef CONFIG_ISA_ARCV2
static const struct id_to_str arc_hs_rel[] = {
/* ID.ARCVER, Release */
#ifdef CONFIG_ISA_ARCOMPACT
{ 0x34, "R4.10"},
{ 0x35, "R4.11"},
#else
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
#endif
{ 0x00, NULL }
};
static const struct id_to_str arc_hs_ver54_rel[] = {
@ -66,324 +70,297 @@ static const struct id_to_str arc_hs_ver54_rel[] = {
{ 3, "R4.00a"},
{ 0xFF, NULL }
};
#endif
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
static int
arcompact_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
if (is_isa_arcompact()) {
struct bcr_iccm_arcompact iccm;
struct bcr_dccm_arcompact dccm;
int n = 0;
#ifdef CONFIG_ISA_ARCOMPACT
char *cpu_nm, *isa_nm = "ARCompact";
struct bcr_fp_arcompact fpu_sp, fpu_dp;
int atomic = 0, be, present;
int bpu_full, bpu_cache, bpu_pred;
struct bcr_bpu_arcompact bpu;
struct bcr_iccm_arcompact iccm;
struct bcr_dccm_arcompact dccm;
struct bcr_generic isa;
READ_BCR(ARC_REG_ICCM_BUILD, iccm);
if (iccm.ver) {
cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
cpu->iccm.base_addr = iccm.base << 16;
}
READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
READ_BCR(ARC_REG_DCCM_BUILD, dccm);
if (dccm.ver) {
unsigned long base;
cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
cpu->dccm.base_addr = base & ~0xF;
}
} else {
struct bcr_iccm_arcv2 iccm;
struct bcr_dccm_arcv2 dccm;
unsigned long region;
READ_BCR(ARC_REG_ICCM_BUILD, iccm);
if (iccm.ver) {
cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
if (iccm.sz00 == 0xF && iccm.sz01 > 0)
cpu->iccm.sz <<= iccm.sz01;
region = read_aux_reg(ARC_REG_AUX_ICCM);
cpu->iccm.base_addr = region & 0xF0000000;
}
READ_BCR(ARC_REG_DCCM_BUILD, dccm);
if (dccm.ver) {
cpu->dccm.sz = 256 << dccm.sz0;
if (dccm.sz0 == 0xF && dccm.sz1 > 0)
cpu->dccm.sz <<= dccm.sz1;
region = read_aux_reg(ARC_REG_AUX_DCCM);
cpu->dccm.base_addr = region & 0xF0000000;
}
if (!isa.ver) /* ISA BCR absent, use Kconfig info */
atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
else {
/* ARC700_BUILD only has 2 bits of isa info */
atomic = isa.info & 1;
}
be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
if (info->arcver < 0x34)
cpu_nm = "ARC750";
else
cpu_nm = "ARC770";
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s (%s ISA) %s%s%s\n",
c, cpu_nm, isa_nm,
IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL1(be, "[Big-Endian]"));
READ_BCR(ARC_REG_FP_BCR, fpu_sp);
READ_BCR(ARC_REG_DPFP_BCR, fpu_dp);
if (fpu_sp.ver | fpu_dp.ver)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
IS_AVAIL1(fpu_sp.ver, "SP "),
IS_AVAIL1(fpu_dp.ver, "DP "));
READ_BCR(ARC_REG_BPU_BCR, bpu);
bpu_full = bpu.fam ? 1 : 0;
bpu_cache = 256 << (bpu.ent - 1);
bpu_pred = 256 << (bpu.ent - 1);
n += scnprintf(buf + n, len - n,
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
IS_AVAIL1(bpu_full, "full"),
IS_AVAIL1(!bpu_full, "partial"),
bpu_cache, bpu_pred);
READ_BCR(ARC_REG_ICCM_BUILD, iccm);
if (iccm.ver) {
info->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
info->iccm.base = iccm.base << 16;
}
READ_BCR(ARC_REG_DCCM_BUILD, dccm);
if (dccm.ver) {
unsigned long base;
info->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
info->dccm.base = base & ~0xF;
}
/* ARCompact ISA specific sanity checks */
present = fpu_dp.ver; /* SP has no arch visible regs */
CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
#endif
return n;
}
static void decode_arc_core(struct cpuinfo_arc *cpu)
static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
struct bcr_uarch_build_arcv2 uarch;
const struct id_to_str *tbl;
if (cpu->core.family < 0x54) { /* includes arc700 */
for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
if (cpu->core.family == tbl->id) {
cpu->release = tbl->str;
break;
}
}
if (is_isa_arcompact())
cpu->name = "ARC700";
else if (tbl->str)
cpu->name = "HS38";
else
cpu->name = cpu->release = "Unknown";
return;
}
int n = 0;
#ifdef CONFIG_ISA_ARCV2
const char *release, *cpu_nm, *isa_nm = "ARCv2";
int dual_issue = 0, dual_enb = 0, mpy_opt, present;
int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
char mpy_nm[16], lpb_nm[32];
struct bcr_isa_arcv2 isa;
struct bcr_mpy mpy;
struct bcr_fp_arcv2 fpu;
struct bcr_bpu_arcv2 bpu;
struct bcr_lpb lpb;
struct bcr_iccm_arcv2 iccm;
struct bcr_dccm_arcv2 dccm;
struct bcr_erp erp;
/*
* Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
* ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
* releases only update it.
*/
READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
if (uarch.prod == 4) {
cpu->name = "HS48";
cpu->extn.dual = 1;
cpu_nm = "HS38";
if (info->arcver > 0x50 && info->arcver <= 0x53) {
release = arc_hs_rel[info->arcver - 0x51].str;
} else {
cpu->name = "HS38";
}
const struct id_to_str *tbl;
struct bcr_uarch_build uarch;
for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
if (uarch.maj == tbl->id) {
cpu->release = tbl->str;
break;
READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
if (uarch.maj == tbl->id) {
release = tbl->str;
break;
}
}
}
}
static void read_arc_build_cfg_regs(void)
{
struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
struct bcr_isa_arcv2 isa;
struct bcr_actionpoint ap;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
decode_arc_core(cpu);
READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0;
cpu->extn.timer1 = timer.t1;
cpu->extn.rtc = timer.rtc;
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
read_decode_ccm_bcr(cpu);
read_decode_mmu_bcr();
read_decode_cache_bcr();
if (is_isa_arcompact()) {
struct bcr_fp_arcompact sp, dp;
struct bcr_bpu_arcompact bpu;
READ_BCR(ARC_REG_FP_BCR, sp);
READ_BCR(ARC_REG_DPFP_BCR, dp);
cpu->extn.fpu_sp = sp.ver ? 1 : 0;
cpu->extn.fpu_dp = dp.ver ? 1 : 0;
READ_BCR(ARC_REG_BPU_BCR, bpu);
cpu->bpu.ver = bpu.ver;
cpu->bpu.full = bpu.fam ? 1 : 0;
if (bpu.ent) {
cpu->bpu.num_cache = 256 << (bpu.ent - 1);
cpu->bpu.num_pred = 256 << (bpu.ent - 1);
}
} else {
struct bcr_fp_arcv2 spdp;
struct bcr_bpu_arcv2 bpu;
READ_BCR(ARC_REG_FP_V2_BCR, spdp);
cpu->extn.fpu_sp = spdp.sp ? 1 : 0;
cpu->extn.fpu_dp = spdp.dp ? 1 : 0;
READ_BCR(ARC_REG_BPU_BCR, bpu);
cpu->bpu.ver = bpu.ver;
cpu->bpu.full = bpu.ft;
cpu->bpu.num_cache = 256 << bpu.bce;
cpu->bpu.num_pred = 2048 << bpu.pte;
cpu->bpu.ret_stk = 4 << bpu.rse;
/* if dual issue hardware, is it enabled ? */
if (cpu->extn.dual) {
if (uarch.prod == 4) {
unsigned int exec_ctrl;
cpu_nm = "HS48";
dual_issue = 1;
/* if dual issue hardware, is it enabled ? */
READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
cpu->extn.dual_enb = !(exec_ctrl & 1);
dual_enb = !(exec_ctrl & 1);
}
}
READ_BCR(ARC_REG_AP_BCR, ap);
if (ap.ver) {
cpu->extn.ap_num = 2 << ap.num;
cpu->extn.ap_full = !ap.min;
}
READ_BCR(ARC_REG_SMART_BCR, bcr);
cpu->extn.smart = bcr.ver ? 1 : 0;
READ_BCR(ARC_REG_RTT_BCR, bcr);
cpu->extn.rtt = bcr.ver ? 1 : 0;
READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
/* some hacks for lack of feature BCR info in old ARC700 cores */
if (is_isa_arcompact()) {
if (!isa.ver) /* ISA BCR absent, use Kconfig info */
cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
else {
/* ARC700_BUILD only has 2 bits of isa info */
struct bcr_generic bcr = *(struct bcr_generic *)&isa;
cpu->isa.atomic = bcr.info & 1;
}
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
c, cpu_nm, release, isa_nm,
IS_AVAIL1(isa.be, "[Big-Endian]"),
IS_AVAIL3(dual_issue, dual_enb, " Dual-Issue "));
cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
READ_BCR(ARC_REG_MPY_BCR, mpy);
mpy_opt = 2; /* stock MPY/MPYH */
if (mpy.dsp) /* OPT 7-9 */
mpy_opt = mpy.dsp + 6;
/* there's no direct way to distinguish 750 vs. 770 */
if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
cpu->name = "ARC750";
} else {
cpu->isa = isa;
scnprintf(mpy_nm, 16, "mpy[opt %d] ", mpy_opt);
READ_BCR(ARC_REG_FP_V2_BCR, fpu);
n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s%s%s\n",
IS_AVAIL2(isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL2(isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
IS_AVAIL1(mpy.ver, mpy_nm),
IS_AVAIL1(isa.div_rem, "div_rem "),
IS_AVAIL1((fpu.sp | fpu.dp), " FPU:"),
IS_AVAIL1(fpu.sp, " sp"),
IS_AVAIL1(fpu.dp, " dp"));
READ_BCR(ARC_REG_BPU_BCR, bpu);
bpu_full = bpu.ft;
bpu_cache = 256 << bpu.bce;
bpu_pred = 2048 << bpu.pte;
bpu_ret_stk = 4 << bpu.rse;
READ_BCR(ARC_REG_LPB_BUILD, lpb);
if (lpb.ver) {
unsigned int ctl;
ctl = read_aux_reg(ARC_REG_LPB_CTRL);
scnprintf(lpb_nm, sizeof(lpb_nm), " Loop Buffer:%d %s",
lpb.entries, IS_DISABLED_RUN(!ctl));
}
}
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
char mpy_opt[16];
int n = 0;
FIX_PTR(cpu);
n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d%s\n",
IS_AVAIL1(bpu_full, "full"),
IS_AVAIL1(!bpu_full, "partial"),
bpu_cache, bpu_pred, bpu_ret_stk,
lpb_nm);
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
cpu_id, cpu->name, cpu->release,
is_isa_arcompact() ? "ARCompact" : "ARCv2",
IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
if (cpu->extn_mpy.ver) {
if (is_isa_arcompact()) {
scnprintf(mpy_opt, 16, "mpy");
} else {
int opt = 2; /* stock MPY/MPYH */
if (cpu->extn_mpy.dsp) /* OPT 7-9 */
opt = cpu->extn_mpy.dsp + 6;
scnprintf(mpy_opt, 16, "mpy[opt %d] ", opt);
}
READ_BCR(ARC_REG_ICCM_BUILD, iccm);
if (iccm.ver) {
unsigned long base;
info->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
if (iccm.sz00 == 0xF && iccm.sz01 > 0)
info->iccm.sz <<= iccm.sz01;
base = read_aux_reg(ARC_REG_AUX_ICCM);
info->iccm.base = base & 0xF0000000;
}
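/*
 * Worked example of the ICCM size decode above (values illustrative):
 * sz00 = 1 gives 256 << 1 = 512B, the minimum; the saturated encoding
 * sz00 = 0xF with sz01 = 1 gives (256 << 0xF) << 1 = 16M, the maximum.
 */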
n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
IS_AVAIL1(cpu->isa.div_rem, "div_rem "));
if (cpu->bpu.ver) {
n += scnprintf(buf + n, len - n,
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"),
cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
if (is_isa_arcv2()) {
struct bcr_lpb lpb;
READ_BCR(ARC_REG_LPB_BUILD, lpb);
if (lpb.ver) {
unsigned int ctl;
ctl = read_aux_reg(ARC_REG_LPB_CTRL);
n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
lpb.entries,
IS_DISABLED_RUN(!ctl));
}
}
n += scnprintf(buf + n, len - n, "\n");
READ_BCR(ARC_REG_DCCM_BUILD, dccm);
if (dccm.ver) {
unsigned long base;
info->dccm.sz = 256 << dccm.sz0;
if (dccm.sz0 == 0xF && dccm.sz1 > 0)
info->dccm.sz <<= dccm.sz1;
base = read_aux_reg(ARC_REG_AUX_DCCM);
info->dccm.base = base & 0xF0000000;
}
return buf;
}
static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
FIX_PTR(cpu);
n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
IS_AVAIL1(cpu->extn.smart, "smaRT "),
IS_AVAIL1(cpu->extn.rtt, "RTT "));
if (cpu->extn.ap_num) {
n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
cpu->extn.ap_num,
cpu->extn.ap_full ? "full":"min");
}
n += scnprintf(buf + n, len - n, "\n");
}
if (cpu->dccm.sz || cpu->iccm.sz)
n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
if (is_isa_arcv2()) {
/* Error Protection: ECC/Parity */
struct bcr_erp erp;
READ_BCR(ARC_REG_ERP_BUILD, erp);
if (erp.ver) {
struct ctl_erp ctl;
READ_BCR(ARC_REG_ERP_CTRL, ctl);
/* inverted bits: 0 means enabled */
n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
/* Error Protection: ECC/Parity */
READ_BCR(ARC_REG_ERP_BUILD, erp);
if (erp.ver) {
struct ctl_erp ctl;
READ_BCR(ARC_REG_ERP_CTRL, ctl);
/* inverted bits: 0 means enabled */
n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
}
}
/* ARCv2 ISA specific sanity checks */
present = fpu.sp | fpu.dp | mpy.dsp; /* DSP and/or FPU */
CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
dsp_config_check();
#endif
return n;
}
static char *arc_cpu_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
struct bcr_identity ident;
struct bcr_timer timer;
struct bcr_generic bcr;
struct mcip_bcr mp;
struct bcr_actionpoint ap;
unsigned long vec_base;
int ap_num, ap_full, smart, rtt, n;
memset(info, 0, sizeof(struct cpuinfo_arc));
READ_BCR(AUX_IDENTITY, ident);
info->arcver = ident.family;
n = scnprintf(buf, len,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
ident.family, ident.cpu_id, ident.chip_id);
if (is_isa_arcompact()) {
n += arcompact_mumbojumbo(c, info, buf + n, len - n);
} else if (is_isa_arcv2()) {
n += arcv2_mumbojumbo(c, info, buf + n, len - n);
}
n += arc_mmu_mumbojumbo(c, buf + n, len - n);
n += arc_cache_mumbojumbo(c, buf + n, len - n);
READ_BCR(ARC_REG_TIMERS_BCR, timer);
info->t0 = timer.t0;
info->t1 = timer.t1;
READ_BCR(ARC_REG_MCIP_BCR, mp);
vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
n += scnprintf(buf + n, len - n,
"Timers\t\t: %s%s%s%s%s%s\nVector Table\t: %#lx\n",
IS_AVAIL1(timer.t0, "Timer0 "),
IS_AVAIL1(timer.t1, "Timer1 "),
IS_AVAIL2(timer.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
IS_AVAIL2(mp.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
vec_base);
READ_BCR(ARC_REG_AP_BCR, ap);
if (ap.ver) {
ap_num = 2 << ap.num;
ap_full = !ap.min;
}
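/* Illustrative decode: ap.num = 0/1/2 -> 2/4/8 actionpoints; ap.min
 * set selects the cut-down ("min") actionpoint configuration. */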
READ_BCR(ARC_REG_SMART_BCR, bcr);
smart = bcr.ver ? 1 : 0;
READ_BCR(ARC_REG_RTT_BCR, bcr);
rtt = bcr.ver ? 1 : 0;
if (ap.ver | smart | rtt) {
n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
IS_AVAIL1(smart, "smaRT "),
IS_AVAIL1(rtt, "RTT "));
if (ap.ver) {
n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
ap_num,
ap_full ? "full":"min");
}
n += scnprintf(buf + n, len - n, "\n");
}
if (info->dccm.sz || info->iccm.sz)
n += scnprintf(buf + n, len - n,
"Extn [CCM]\t: DCCM @ %lx, %d KB / ICCM: @ %lx, %d KB\n",
info->dccm.base, TO_KB(info->dccm.sz),
info->iccm.base, TO_KB(info->iccm.sz));
return buf;
}
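/*
 * The IS_AVAIL* helpers used by these printers are defined in a header
 * outside this diff; a sketch of their assumed semantics follows (the
 * SK_* names are placeholders, not the kernel's):
 */
#define SK_AVAIL1(v, s)		((v) ? s : "")
#define SK_USED_CFG(cfg)	(IS_ENABLED(cfg) ? "" : "(not used) ")
#define SK_AVAIL2(v, s, cfg)	SK_AVAIL1(v, s), SK_AVAIL1(v, SK_USED_CFG(cfg))
#define SK_AVAIL3(v, v2, s)	SK_AVAIL1(v, s), SK_AVAIL1(v, (v2) ? "" : "(disabled) ")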
@@ -401,15 +378,15 @@ void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
panic("Disable %s, hardware NOT present\n", opt_name);
}
static void arc_chk_core_config(void)
/*
* ISA agnostic sanity checks
*/
static void arc_chk_core_config(struct cpuinfo_arc *info)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
int present = 0;
if (!cpu->extn.timer0)
if (!info->t0)
panic("Timer0 is not present!\n");
if (!cpu->extn.timer1)
if (!info->t1)
panic("Timer1 is not present!\n");
#ifdef CONFIG_ARC_HAS_DCCM
@@ -417,35 +394,17 @@ static void arc_chk_core_config(void)
* DCCM can be arbitrarily placed in hardware.
* Make sure its placement/size matches what Linux is built with
*/
if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
if ((unsigned int)__arc_dccm_base != info->dccm.base)
panic("Linux built with incorrect DCCM Base address\n");
if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
if (CONFIG_ARC_DCCM_SZ * SZ_1K != info->dccm.sz)
panic("Linux built with incorrect DCCM Size\n");
#endif
#ifdef CONFIG_ARC_HAS_ICCM
if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
if (CONFIG_ARC_ICCM_SZ * SZ_1K != info->iccm.sz)
panic("Linux built with incorrect ICCM Size\n");
#endif
/*
* FP hardware/software config sanity
* -If hardware present, kernel needs to save/restore FPU state
* -If not, it will crash trying to save/restore the non-existent regs
*/
if (is_isa_arcompact()) {
/* only DPDP checked since SP has no arch visible regs */
present = cpu->extn.fpu_dp;
CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
} else {
/* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
dsp_config_check();
}
}
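chk_opt_weak() above only panics when the hardware is absent; CHK_OPT_STRICT (used in the FP/DSP checks) presumably enforces agreement in both directions. A sketch of the assumed semantics:

static void chk_opt_strict_sketch(char *opt_name, bool hw_exists, bool opt_ena)
{
	if (hw_exists && !opt_ena)
		panic("Enable %s, hardware present\n", opt_name);
	if (!hw_exists && opt_ena)
		panic("Disable %s, hardware NOT present\n", opt_name);
}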
/*
@@ -456,21 +415,19 @@ static void arc_chk_core_config(void)
void setup_processor(void)
{
struct cpuinfo_arc info;
int c = smp_processor_id();
char str[512];
int cpu_id = smp_processor_id();
read_arc_build_cfg_regs();
pr_info("%s", arc_cpu_mumbojumbo(c, &info, str, sizeof(str)));
pr_info("%s", arc_platform_smp_cpuinfo());
arc_chk_core_config(&info);
arc_init_IRQ();
pr_info("%s", arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
arc_mmu_init();
arc_cache_init();
pr_info("%s", arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
pr_info("%s", arc_platform_smp_cpuinfo());
arc_chk_core_config();
}
static inline bool uboot_arg_invalid(unsigned long addr)
@@ -617,6 +574,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
char *str;
int cpu_id = ptr_to_cpu(v);
struct device *cpu_dev = get_cpu_device(cpu_id);
struct cpuinfo_arc info;
struct clk *cpu_clk;
unsigned long freq = 0;
@@ -629,7 +587,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (!str)
goto done;
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, &info, str, PAGE_SIZE));
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
@@ -646,9 +604,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_platform_smp_cpuinfo());
free_page((unsigned long)str);


@@ -53,6 +53,7 @@
#include <linux/sched/task_stack.h>
#include <asm/ucontext.h>
#include <asm/entry.h>
struct rt_sigframe {
struct siginfo info;


@@ -23,9 +23,10 @@
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/processor.h>
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -351,7 +352,7 @@ static inline int __do_IPI(unsigned long msg)
* arch-common ISR to handle inter-processor interrupts
* Has hooks for platform-specific IPI
*/
irqreturn_t do_IPI(int irq, void *dev_id)
static irqreturn_t do_IPI(int irq, void *dev_id)
{
unsigned long pending;
unsigned long __maybe_unused copy;


@@ -29,6 +29,7 @@
#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
/*-------------------------------------------------------------------------


@@ -16,6 +16,7 @@
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <asm/entry.h>
#include <asm/setup.h>
#include <asm/unaligned.h>
#include <asm/kprobes.h>
@@ -109,9 +110,7 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
*/
void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
{
unsigned int param = regs->ecr_param;
switch (param) {
switch (regs->ecr.param) {
case 1:
trap_is_brkpt(address, regs);
break;


@@ -115,8 +115,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
vec = regs->ecr_vec;
cause_code = regs->ecr_cause;
vec = regs->ecr.vec;
cause_code = regs->ecr.cause;
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
@@ -154,7 +154,7 @@ static void show_ecr_verbose(struct pt_regs *regs)
pr_cont("Misaligned r/w from 0x%08lx\n", address);
#endif
} else if (vec == ECR_V_TRAP) {
if (regs->ecr_param == 5)
if (regs->ecr.param == 5)
pr_cont("gcc generated __builtin_trap\n");
} else {
pr_cont("Check Programmer's Manual\n");
@@ -184,9 +184,10 @@ void show_regs(struct pt_regs *regs)
if (user_mode(regs))
show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\nSTAT: 0x%08lx",
regs->event, current->thread.fault_address, regs->ret,
regs->status32);
pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
regs->ecr.full, current->thread.fault_address, regs->ret);
pr_info("STAT32: 0x%08lx", regs->status32);
#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : ""


@@ -36,12 +36,13 @@
#endif
ENTRY_CFI(memset)
PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
;;; if length < 8
brls.d.nt r2, 8, .Lsmallchunk
mov.f lp_count,r2
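This hunk moves the first-line prefetch after the size-zero early return, so a zero-length memset no longer issues a write hint for a line it will never touch. The same reorder in C terms (illustrative only; __builtin_prefetch stands in for PREFETCHW):

#include <stddef.h>

void *memset_sketch(void *s, int c, size_t n)
{
	unsigned char *p = s;

	if (n == 0)			/* bail out before prefetching */
		return s;
	__builtin_prefetch(s, 1);	/* write hint for the first line */
	while (n--)
		*p++ = (unsigned char)c;
	return s;
}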


@@ -28,6 +28,10 @@ int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
static struct cpuinfo_arc_cache {
unsigned int sz_k, line_len, colors;
} ic_info, dc_info, slc_info;
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page);
@@ -35,78 +39,24 @@ void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
char *arc_cache_mumbojumbo(int c, char *buf, int len)
static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc_cache *p;
#define PR_CACHE(p, cfg, str) \
if (!(p)->line_len) \
n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
else \
n += scnprintf(buf + n, len - n, \
str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
(p)->sz_k, (p)->assoc, (p)->line_len, \
(p)->vipt ? "VIPT" : "PIPT", \
(p)->alias ? " aliasing" : "", \
IS_USED_CFG(cfg));
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
p = &cpuinfo_arc700[c].slc;
if (p->line_len)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
return buf;
}
/*
* Read the Cache Build Configuration Registers, decode them and save into
* the cpuinfo structure for later use.
* No Validation done here, simply read/convert the BCRs
*/
static void read_decode_cache_bcr_arcv2(int cpu)
{
struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
struct cpuinfo_arc_cache *p_slc = &slc_info;
struct bcr_identity ident;
struct bcr_generic sbcr;
struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:24, way:2, lsz:2, sz:4;
#else
unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
} slc_cfg;
struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
} cbcr;
struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
} vol;
struct bcr_clust_cfg cbcr;
struct bcr_volatile vol;
int n = 0;
READ_BCR(ARC_REG_SLC_BCR, sbcr);
if (sbcr.ver) {
struct bcr_slc_cfg slc_cfg;
READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
p_slc->sz_k = 128 << slc_cfg.sz;
l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
@@ -129,70 +79,83 @@ static void read_decode_cache_bcr_arcv2(int cpu)
ioc_enable = 0;
}
READ_BCR(AUX_IDENTITY, ident);
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
if (ident.family > 0x51) {
READ_BCR(AUX_VOL, vol);
perip_base = vol.start << 28;
/* HS 3.0 has limit and strict-ordering fields */
if (cpuinfo_arc700[cpu].core.family > 0x52)
if (ident.family > 0x52)
perip_end = (vol.limit << 28) - 1;
}
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
return n;
}
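/*
 * Worked example of the SLC decode above (encodings illustrative):
 *   slc_cfg.sz  = 2 -> 128 << 2 = 512K of SLC
 *   slc_cfg.lsz = 0 -> 128B lines; any other value -> 64B lines
 */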
void read_decode_cache_bcr(void)
int arc_cache_mumbojumbo(int c, char *buf, int len)
{
struct cpuinfo_arc_cache *p_ic, *p_dc;
unsigned int cpu = smp_processor_id();
struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
} ibcr, dbcr;
struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
struct bcr_cache ibcr, dbcr;
int vipt, assoc;
int n = 0;
p_ic = &cpuinfo_arc700[cpu].icache;
READ_BCR(ARC_REG_IC_BCR, ibcr);
if (!ibcr.ver)
goto dc_chk;
if (ibcr.ver <= 3) {
if (is_isa_arcompact() && (ibcr.ver <= 3)) {
BUG_ON(ibcr.config != 3);
p_ic->assoc = 2; /* Fixed to 2w set assoc */
} else if (ibcr.ver >= 4) {
p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
assoc = 2; /* Fixed to 2w set assoc */
} else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
assoc = 1 << ibcr.config; /* 1,2,4,8 */
}
p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz_k = 1 << (ibcr.sz - 1);
p_ic->vipt = 1;
p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
n += scnprintf(buf + n, len - n,
"I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
p_ic->sz_k, assoc, p_ic->line_len,
p_ic->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
dc_chk:
p_dc = &cpuinfo_arc700[cpu].dcache;
READ_BCR(ARC_REG_DC_BCR, dbcr);
if (!dbcr.ver)
goto slc_chk;
if (dbcr.ver <= 3) {
if (is_isa_arcompact() && (dbcr.ver <= 3)) {
BUG_ON(dbcr.config != 2);
p_dc->assoc = 4; /* Fixed to 4w set assoc */
p_dc->vipt = 1;
p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
} else if (dbcr.ver >= 4) {
p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
p_dc->vipt = 0;
p_dc->alias = 0; /* PIPT so can't VIPT alias */
vipt = 1;
assoc = 4; /* Fixed to 4w set assoc */
p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
} else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
vipt = 0;
assoc = 1 << dbcr.config; /* 1,2,4,8 */
p_dc->colors = 1; /* PIPT so can't VIPT alias */
}
p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz_k = 1 << (dbcr.sz - 1);
n += scnprintf(buf + n, len - n,
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
slc_chk:
if (is_isa_arcv2())
read_decode_cache_bcr_arcv2(cpu);
n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
return n;
}
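The colors computation repeated above is just the cache way-size divided by the page size; more than one color means a VIPT cache can alias. A minimal standalone sketch (the helper name is ours, not the kernel's):

static inline unsigned int cache_colors(unsigned int sz_k, unsigned int assoc,
					unsigned int page_sz_k)
{
	/* e.g. 32K, 2-way, 8K pages -> 32 / 2 / 8 = 2 colors (aliasing) */
	return sz_k / assoc / page_sz_k;
}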
/*
@@ -581,7 +544,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
#endif /* CONFIG_ARC_HAS_ICACHE */
noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -644,7 +607,7 @@ noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -1082,7 +1045,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
* 3. All Caches need to be disabled when setting up IOC to elide any in-flight
* Coherency transactions
*/
noinline void __init arc_ioc_setup(void)
static noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
@@ -1144,12 +1107,10 @@ noinline void __init arc_ioc_setup(void)
* one core suffices for all
* - IOC setup / dma callbacks only need to be done once
*/
void __init arc_cache_init_master(void)
static noinline void __init arc_cache_init_master(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
struct cpuinfo_arc_cache *ic = &ic_info;
if (!ic->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1162,14 +1123,14 @@ void __init arc_cache_init_master(void)
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->alias)
if (is_isa_arcv2() && ic->colors > 1)
_cache_line_loop_ic_fn = __cache_line_loop_v3;
else
_cache_line_loop_ic_fn = __cache_line_loop;
}
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
struct cpuinfo_arc_cache *dc = &dc_info;
if (!dc->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1181,14 +1142,13 @@ void __init arc_cache_init_master(void)
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
if (dc->alias) {
if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != num_colors)
if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (!dc->alias && handled) {
} else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
}
@@ -1231,9 +1191,6 @@ void __init arc_cache_init_master(void)
void __ref arc_cache_init(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
char str[256];
pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
if (!cpu)
arc_cache_init_master();


@@ -22,14 +22,3 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n)
{
return __arc_clear_user(to, n);
}
EXPORT_SYMBOL(arc_clear_user_noinline);
#endif


@@ -13,6 +13,7 @@
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/entry.h>
#include <asm/mmu.h>
/*
@@ -99,10 +100,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
if (faulthandler_disabled() || !mm)
goto no_context;
if (regs->ecr_cause & ECR_C_PROTV_STORE) /* ST/EX */
if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
write = 1;
else if ((regs->ecr_vec == ECR_V_PROTV) &&
(regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
else if ((regs->ecr.vec == ECR_V_PROTV) &&
(regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
flags = FAULT_FLAG_DEFAULT;


@@ -15,6 +15,7 @@
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/arcregs.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);


@@ -18,7 +18,9 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
static int __read_mostly pae_exists;
static struct cpuinfo_arc_mmu {
unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;
/*
* Utility Routine to erase a J-TLB entry
@@ -131,7 +133,7 @@ static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
noinline void local_flush_tlb_all(void)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
unsigned int entry;
int num_tlb = mmu->sets * mmu->ways;
@@ -389,7 +391,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/*
* Routine to create a TLB entry
*/
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
@@ -564,89 +566,64 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
void read_decode_mmu_bcr(void)
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
unsigned int tmp;
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
ways:4, ver:8;
#endif
} *mmu3;
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned int bcr, u_dtlb, u_itlb, sasid;
struct bcr_mmu_3 *mmu3;
struct bcr_mmu_4 *mmu4;
char super_pg[64] = "";
int n = 0;
struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
/* DTLB ITLB JES JE JA */
unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
} *mmu4;
tmp = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (tmp >> 24);
bcr = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (bcr >> 24);
if (is_isa_arcompact() && mmu->ver == 3) {
mmu3 = (struct bcr_mmu_3 *)&tmp;
mmu3 = (struct bcr_mmu_3 *)&bcr;
mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
mmu->sets = 1 << mmu3->sets;
mmu->ways = 1 << mmu3->ways;
mmu->u_dtlb = mmu3->u_dtlb;
mmu->u_itlb = mmu3->u_itlb;
mmu->sasid = mmu3->sasid;
u_dtlb = mmu3->u_dtlb;
u_itlb = mmu3->u_itlb;
sasid = mmu3->sasid;
} else {
mmu4 = (struct bcr_mmu_4 *)&tmp;
mmu4 = (struct bcr_mmu_4 *)&bcr;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
mmu->sets = 64 << mmu4->n_entry;
mmu->ways = mmu4->n_ways * 2;
mmu->u_dtlb = mmu4->u_dtlb * 4;
mmu->u_itlb = mmu4->u_itlb * 4;
mmu->sasid = mmu4->sasid;
pae_exists = mmu->pae = mmu4->pae;
u_dtlb = mmu4->u_dtlb * 4;
u_itlb = mmu4->u_itlb * 4;
sasid = mmu4->sasid;
mmu->pae = mmu4->pae;
}
}
char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
char super_pg[64] = "";
if (p_mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
if (mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "/%dM%s",
mmu->s_pg_sz_m,
IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
n += scnprintf(buf + n, len - n,
"MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
"MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
mmu->sets, mmu->ways,
u_dtlb, u_itlb,
IS_AVAIL1(sasid, ", SASID"),
IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
return n;
}
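/*
 * Worked example of the MMUv4 decode above (encodings illustrative):
 *   sz0 = 3     -> pg_sz_k   = 1 << (3 - 1)   = 4    (4K pages)
 *   sz1 = 12    -> s_pg_sz_m = 1 << (12 - 11) = 2    (2M super pages)
 *   n_entry = 1 -> sets      = 64 << 1        = 128
 *   n_ways  = 2 -> ways      = 2 * 2          = 4    (128x4 JTLB)
 *   u_dtlb  = 2 -> 8 micro-DTLB entries (the field counts units of 4)
 */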
int pae40_exist_but_not_enab(void)
{
return pae_exists && !is_pae40_enabled();
return mmuinfo.pae && !is_pae40_enabled();
}
void arc_mmu_init(void)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
char str[256];
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
int compat = 0;
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
/*
* Can't be done in processor.h due to header include dependencies
*/
@@ -723,7 +700,7 @@ volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
int set, n_ways = mmu->ways;


@@ -6,7 +6,6 @@
*/
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/libfdt.h>
#include <asm/asm-offsets.h>