mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00
517af33237
This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address load/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load. Signed-off-by: David S. Miller <davem@davemloft.net>
39 lines
744 B
SPARC64 assembly (the hosting site's language detector mislabels this file "ArmAsm"; it is SPARC V9 trap-handler code)
/* DTLB ** ICACHE line 1: Context 0 check and TSB load */
! DTLB miss fast path, first of four instruction-cache-line-sized
! blocks.  Each block is padded with nops so it occupies exactly one
! I$ line; the trap-table layout depends on this, so the instruction
! count and order here must not change.
!
! Global register usage visible in this handler:
!   %g1 - TSB entry pointer for the faulting address (8K page size)
!   %g6 - TAG TARGET read from the DMMU (context number in bits 63:48)
!   %g5 - context extracted from %g6; later holds the TSB data word
!   %g4 - tag word loaded from the TSB entry
|
|
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer
|
|
ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET
|
|
srlx %g6, 48, %g5 ! Get context
|
|
brz,pn %g5, kvmap_dtlb ! Context 0 processing
|
|
nop ! Delay slot (fill me)
|
|
TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry
! NOTE(review): TSB_LOAD_QUAD appears to place the entry's tag in %g4
! and its data in %g5 (the cmp below tests %g4, the DTLB insert in the
! next I$ line stores %g5) -- confirm against the macro definition.
|
|
nop ! Push branch to next I$ line
|
|
cmp %g4, %g6 ! Compare TAG
|
|
|
|
/* DTLB ** ICACHE line 2: TSB compare and TLB load */
! Second I$ line: branch out to the slow path on a TSB tag mismatch,
! otherwise insert the translation into the DTLB and retry the
! faulting instruction.  The trailing nops pad the block to a full
! I$ line and must be kept.
|
|
bne,pn %xcc, tsb_miss_dtlb ! Miss
|
|
mov FAULT_CODE_DTLB, %g3 ! Delay slot: fault code for the miss path
|
|
stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load TLB
|
|
retry ! Trap done
|
|
nop ! Padding from here down to fill out this I$ line
|
|
nop
|
|
nop
|
|
nop
|
|
|
|
/* DTLB ** ICACHE line 3: */
! Entirely padding: this handler is allotted four I$ lines in the trap
! table, and lines 3 and 4 are unused.  The nop count reserves the
! space and must not be altered.
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
|
|
/* DTLB ** ICACHE line 4: */
! Entirely padding: final reserved I$ line of the four-line trap-table
! slot.  The nop count reserves the space and must not be altered.
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|
|
nop
|