Merge tag 'mips_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Thomas Bogendoerfer:

 - added support for Mobileye SoCs

 - unified GPR/CP0 regs handling for uasm

 - cleanups and fixes

* tag 'mips_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (56 commits)
  mips: cm: Convert __mips_cm_phys_base() to weak function
  mips: cm: Convert __mips_cm_l2sync_phys_base() to weak function
  mips: dts: ralink: mt7621: add cell count properties to usb
  mips: dts: ralink: mt7621: add serial1 and serial2 nodes
  mips: dts: ralink: mt7621: reorder serial0 properties
  mips: dts: ralink: mt7621: associate uart1_pins with serial0
  MIPS: ralink: Don't use "proxy" headers
  mips: sibyte: make tb_class constant
  mips: mt: make mt_class constant
  MIPS: ralink: Remove unused of_gpio.h
  bus: bt1-apb: Remove duplicate include
  MAINTAINERS: remove entry to non-existing file in MOBILEYE MIPS SOCS
  MIPS: mipsregs: Parse fp and sp register by name in parse_r
  tty: mips_ejtag_fdc: Fix passing incompatible pointer type warning
  mips: zboot: Fix "no previous prototype" build warning
  MIPS: mipsregs: Set proper ISA level for virt extensions
  MIPS: Implement microMIPS MT ASE helpers
  MIPS: Limit MIPS_MT_SMP support by ISA revision
  MIPS: Loongson64: test for -march=loongson3a cflag
  MIPS: BMIPS: Drop unnecessary assembler flag
  ...
commit 54f42d2ca8
Linus Torvalds, 2024-03-15 12:44:32 -07:00
57 changed files with 2014 additions and 925 deletions

View File

@ -23,22 +23,23 @@ properties:
- brcm,bmips4380
- brcm,bmips5000
- brcm,bmips5200
- ingenic,xburst-mxu1.0
- img,i6500
- ingenic,xburst-fpu1.0-mxu1.1
- ingenic,xburst-fpu2.0-mxu2.0
- ingenic,xburst-mxu1.0
- ingenic,xburst2-fpu2.1-mxu2.1-smt
- loongson,gs264
- mips,m14Kc
- mips,mips4Kc
- mips,mips4KEc
- mips,mips24Kc
- mips,mips24KEc
- mips,mips74Kc
- mips,mips1004Kc
- mips,mips24KEc
- mips,mips24Kc
- mips,mips4KEc
- mips,mips4Kc
- mips,mips74Kc
- mti,interaptiv
- mti,mips24KEc
- mti,mips14KEc
- mti,mips14Kc
- mti,mips24KEc
reg:
maxItems: 1

View File

@ -0,0 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
# Copyright 2023 Mobileye Vision Technologies Ltd.
%YAML 1.2
---
$id: http://devicetree.org/schemas/mips/mobileye.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Mobileye SoC series
maintainers:
- Vladimir Kondratiev <vladimir.kondratiev@intel.com>
- Gregory CLEMENT <gregory.clement@bootlin.com>
- Théo Lebrun <theo.lebrun@bootlin.com>
description:
Boards with a Mobileye SoC shall have the following properties.
properties:
$nodename:
const: '/'
compatible:
oneOf:
- description: Boards with Mobileye EyeQ5 SoC
items:
- enum:
- mobileye,eyeq5-epm5
- const: mobileye,eyeq5
additionalProperties: true
...

View File

@ -941,6 +941,8 @@ patternProperties:
description: Miyoo
"^mntre,.*":
description: MNT Research GmbH
"^mobileye,.*":
description: Mobileye Vision Technologies Ltd.
"^modtronix,.*":
description: Modtronix Engineering
"^moortec,.*":

View File

@ -14741,6 +14741,17 @@ F: arch/mips/
F: drivers/platform/mips/
F: include/dt-bindings/mips/
MIPS BAIKAL-T1 PLATFORM
M: Serge Semin <fancer.lancer@gmail.com>
L: linux-mips@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/bus/baikal,bt1-*.yaml
F: Documentation/devicetree/bindings/clock/baikal,bt1-*.yaml
F: drivers/bus/bt1-*.c
F: drivers/clk/baikal-t1/
F: drivers/memory/bt1-l2-ctl.c
F: drivers/mtd/maps/physmap-bt1-rom.[ch]
MIPS BOSTON DEVELOPMENT BOARD
M: Paul Burton <paulburton@kernel.org>
L: linux-mips@vger.kernel.org
@ -14861,6 +14872,17 @@ W: https://linuxtv.org
Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/mn88473*
MOBILEYE MIPS SOCS
M: Vladimir Kondratiev <vladimir.kondratiev@mobileye.com>
M: Gregory CLEMENT <gregory.clement@bootlin.com>
M: Théo Lebrun <theo.lebrun@bootlin.com>
L: linux-mips@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/mips/mobileye.yaml
F: arch/mips/boot/dts/mobileye/
F: arch/mips/configs/eyeq5_defconfig
F: arch/mips/mobileye/board-epm5.its.S
MODULE SUPPORT
M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-modules@vger.kernel.org

View File

@ -11,6 +11,7 @@ obj- := $(platform-y)
# mips object files
# The object files are linked as core-y files would be linked
obj-y += generic/
obj-y += kernel/
obj-y += mm/
obj-y += net/

View File

@ -17,6 +17,7 @@ platform-$(CONFIG_MACH_LOONGSON2EF) += loongson2ef/
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
platform-$(CONFIG_MACH_LOONGSON64) += loongson64/
platform-$(CONFIG_MIPS_MALTA) += mti-malta/
platform-$(CONFIG_MACH_EYEQ5) += mobileye/
platform-$(CONFIG_MACH_NINTENDO64) += n64/
platform-$(CONFIG_PIC32MZDA) += pic32/
platform-$(CONFIG_RALINK) += ralink/

View File

@ -113,6 +113,9 @@ config MIPS_FIXUP_BIGPHYS_ADDR
config MIPS_GENERIC
bool
config MACH_GENERIC_CORE
bool
config MACH_INGENIC
bool
select SYS_SUPPORTS_32BIT_KERNEL
@ -149,6 +152,7 @@ config MIPS_GENERIC_KERNEL
select DMA_NONCOHERENT
select HAVE_PCI
select IRQ_MIPS_CPU
select MACH_GENERIC_CORE
select MIPS_AUTO_PFN_OFFSET
select MIPS_CPU_SCACHE
select MIPS_GIC
@ -417,6 +421,7 @@ config MACH_INGENIC_SOC
bool "Ingenic SoC based machines"
select MIPS_GENERIC
select MACH_INGENIC
select MACH_GENERIC_CORE
select SYS_SUPPORTS_ZBOOT_UART16550
select CPU_SUPPORTS_CPUFREQ
select MIPS_EXTERNAL_TIMER
@ -570,6 +575,59 @@ config MACH_PIC32
Microchip PIC32 is a family of general-purpose 32 bit MIPS core
microcontrollers.
config MACH_EYEQ5
bool "Mobileye EyeQ5 SoC"
select MACH_GENERIC_CORE
select ARM_AMBA
select PHYSICAL_START_BOOL
select ARCH_SPARSEMEM_DEFAULT if 64BIT
select BOOT_RAW
select BUILTIN_DTB
select CEVT_R4K
select CLKSRC_MIPS_GIC
select COMMON_CLK
select CPU_MIPSR2_IRQ_EI
select CPU_MIPSR2_IRQ_VI
select CSRC_R4K
select DMA_NONCOHERENT
select HAVE_PCI
select IRQ_MIPS_CPU
select MIPS_AUTO_PFN_OFFSET
select MIPS_CPU_SCACHE
select MIPS_GIC
select MIPS_L1_CACHE_SHIFT_7
select PCI_DRIVERS_GENERIC
select SMP_UP if SMP
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS64_R6
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MIPS_CPS
select SYS_SUPPORTS_RELOCATABLE
select SYS_SUPPORTS_ZBOOT
select UHI_BOOT
select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USE_OF
help
Select this to build a kernel supporting EyeQ5 SoC from Mobileye.
bool
config FIT_IMAGE_FDT_EPM5
bool "Include FDT for Mobileye EyeQ5 development platforms"
depends on MACH_EYEQ5
default n
help
Enable this to include the FDT for the EyeQ5 development platforms
from Mobileye in the FIT kernel image.
This requires u-boot on the platform.
config MACH_NINTENDO64
bool "Nintendo 64 console"
select CEVT_R4K
@ -603,6 +661,7 @@ config RALINK
config MACH_REALTEK_RTL
bool "Realtek RTL838x/RTL839x based machines"
select MIPS_GENERIC
select MACH_GENERIC_CORE
select DMA_NONCOHERENT
select IRQ_MIPS_CPU
select CSRC_R4K
@ -1273,44 +1332,6 @@ config CPU_LOONGSON64
3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. However, old
Loongson-2E/2F is not covered here and will be removed in future.
config LOONGSON3_ENHANCEMENT
bool "New Loongson-3 CPU Enhancements"
default n
depends on CPU_LOONGSON64
help
New Loongson-3 cores (since Loongson-3A R2, as opposed to Loongson-3A
R1, Loongson-3B R1 and Loongson-3B R2) has many enhancements, such as
FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPr2 ASE, User
Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer),
Fast TLB refill support, etc.
This option enable those enhancements which are not probed at run
time. If you want a generic kernel to run on all Loongson 3 machines,
please say 'N' here. If you want a high-performance kernel to run on
new Loongson-3 machines only, please say 'Y' here.
config CPU_LOONGSON3_WORKAROUNDS
bool "Loongson-3 LLSC Workarounds"
default y if SMP
depends on CPU_LOONGSON64
help
Loongson-3 processors have the llsc issues which require workarounds.
Without workarounds the system may hang unexpectedly.
Say Y, unless you know what you are doing.
config CPU_LOONGSON3_CPUCFG_EMULATION
bool "Emulate the CPUCFG instruction on older Loongson cores"
default y
depends on CPU_LOONGSON64
help
Loongson-3A R4 and newer have the CPUCFG instruction available for
userland to query CPU capabilities, much like CPUID on x86. This
option provides emulation of the instruction on older Loongson
cores, back to Loongson-3A1000.
If unsure, please say Y.
config CPU_LOONGSON2E
bool "Loongson 2E"
depends on SYS_HAS_CPU_LOONGSON2E
@ -1650,6 +1671,44 @@ config CPU_BMIPS
endchoice
config LOONGSON3_ENHANCEMENT
bool "New Loongson-3 CPU Enhancements"
default n
depends on CPU_LOONGSON64
help
New Loongson-3 cores (since Loongson-3A R2, as opposed to Loongson-3A
R1, Loongson-3B R1 and Loongson-3B R2) has many enhancements, such as
FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPr2 ASE, User
Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer),
Fast TLB refill support, etc.
This option enable those enhancements which are not probed at run
time. If you want a generic kernel to run on all Loongson 3 machines,
please say 'N' here. If you want a high-performance kernel to run on
new Loongson-3 machines only, please say 'Y' here.
config CPU_LOONGSON3_WORKAROUNDS
bool "Loongson-3 LLSC Workarounds"
default y if SMP
depends on CPU_LOONGSON64
help
Loongson-3 processors have the llsc issues which require workarounds.
Without workarounds the system may hang unexpectedly.
Say Y, unless you know what you are doing.
config CPU_LOONGSON3_CPUCFG_EMULATION
bool "Emulate the CPUCFG instruction on older Loongson cores"
default y
depends on CPU_LOONGSON64
help
Loongson-3A R4 and newer have the CPUCFG instruction available for
userland to query CPU capabilities, much like CPUID on x86. This
option provides emulation of the instruction on older Loongson
cores, back to Loongson-3A1000.
If unsure, please say Y.
config CPU_MIPS32_3_5_FEATURES
bool "MIPS32 Release 3.5 Features"
depends on SYS_HAS_CPU_MIPS32_R3_5
@ -2124,7 +2183,8 @@ config CPU_R4K_CACHE_TLB
config MIPS_MT_SMP
bool "MIPS MT SMP support (1 TC on each available VPE)"
default y
depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
depends on TARGET_ISA_REV > 0 && TARGET_ISA_REV < 6
depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MICROMIPS
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select SYNC_R4K

View File

@ -148,10 +148,10 @@ cflags-y += $(call cc-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
#
# CPU-dependent compiler/assembler options for optimization.
#
cflags-$(CONFIG_CPU_R3000) += -march=r3000
cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_R3000) += $(call cc-option,-march=r3000,-march=mips1)
cflags-$(CONFIG_CPU_R4300) += $(call cc-option,-march=r4300,-march=mips3) -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += $(call cc-option,-march=r4600,-march=mips3) -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += $(call cc-option,-march=r4600,-march=mips3) -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R5) += -march=mips32r5 -Wa,--trap -modd-spreg
@ -160,37 +160,35 @@ cflags-$(CONFIG_CPU_MIPS64_R1) += -march=mips64 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R5) += -march=mips64r5 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
cflags-$(CONFIG_CPU_P5600) += -march=p5600 -Wa,--trap -modd-spreg
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
cflags-$(CONFIG_CPU_R5500) += $(call cc-option,-march=r5500,-march=r5000) \
cflags-$(CONFIG_CPU_P5600) += $(call cc-option,-march=p5600,-march=mips32r5) \
-Wa,--trap -modd-spreg
cflags-$(CONFIG_CPU_R5000) += $(call cc-option,-march=r5000,-march=mips4) \
-Wa,--trap
cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \
cflags-$(CONFIG_CPU_R5500) += $(call cc-option,-march=r5500,-march=mips4) \
-Wa,--trap
cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \
cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=mips4) \
-Wa,--trap
cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \
cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=mips4) \
-Wa,--trap
cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=mips64r1) \
-Wa,--trap
cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mdmx)
cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mips3d)
cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \
cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=mips4) \
-Wa,--trap
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap
ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON))))
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
endif
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -march=octeon -Wa,--trap
cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap
cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2E) += \
$(call cc-option,-march=loongson2e,-march=mips3) -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2F) += \
$(call cc-option,-march=loongson2f,-march=mips3) -Wa,--trap
# Some -march= flags enable MMI instructions, and GCC complains about that
# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-mno-loongson-mmi)
ifdef CONFIG_CPU_LOONGSON64
cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
cflags-$(CONFIG_CC_IS_GCC) += -march=loongson3a
cflags-$(CONFIG_CC_IS_CLANG) += -march=mips64r2
endif
cflags-$(CONFIG_CPU_LOONGSON64) += \
$(call cc-option,-march=loongson3a,-march=mips64r2) -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-mno-loongson-mmi)
cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
@ -299,7 +297,7 @@ drivers-$(CONFIG_PCI) += arch/mips/pci/
ifdef CONFIG_64BIT
ifndef KBUILD_SYM32
ifeq ($(shell expr $(load-y) \< 0xffffffff80000000), 0)
KBUILD_SYM32 = y
KBUILD_SYM32 = $(call cc-option-yn, -msym32)
endif
endif

View File

@ -771,7 +771,7 @@ static int __init alchemy_clk_init_fgens(int ctype)
}
id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
a = kcalloc(6, sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;

View File

@ -8,6 +8,8 @@
#include <asm/addrspace.h>
#include "decompress.h"
#if defined(CONFIG_MACH_LOONGSON64) || defined(CONFIG_MIPS_MALTA)
#define UART_BASE 0x1fd003f8
#define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))

View File

@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/mach-au1x00/au1000.h>
#include "decompress.h"
void putc(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);

View File

@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/setup.h>
#include "decompress.h"
void putc(char c)
{
prom_putchar(c);

View File

@ -8,6 +8,7 @@ subdir-$(CONFIG_LANTIQ) += lantiq
subdir-$(CONFIG_MACH_LOONGSON64) += loongson
subdir-$(CONFIG_SOC_VCOREIII) += mscc
subdir-$(CONFIG_MIPS_MALTA) += mti
subdir-$(CONFIG_MACH_EYEQ5) += mobileye
subdir-$(CONFIG_LEGACY_BOARD_SEAD3) += mti
subdir-$(CONFIG_FIT_IMAGE_FDT_NI169445) += ni
subdir-$(CONFIG_MACH_PIC32) += pic32

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright 2023 Mobileye Vision Technologies Ltd.
dtb-$(CONFIG_MACH_EYEQ5) += eyeq5-epm5.dtb

View File

@ -0,0 +1,23 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright 2023 Mobileye Vision Technologies Ltd.
*/
/dts-v1/;
#include "eyeq5.dtsi"
/ {
compatible = "mobileye,eyeq5-epm5", "mobileye,eyeq5";
model = "Mobile EyeQ5 MP5 Evaluation board";
chosen {
stdout-path = "serial2:115200n8";
};
memory@0 {
device_type = "memory";
reg = <0x0 0x40000000 0x0 0x02000000>,
<0x8 0x02000000 0x0 0x7E000000>;
};
};

View File

@ -0,0 +1,292 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright 2023 Mobileye Vision Technologies Ltd.
*/
/ {
/* Fixed clock */
pll_cpu: pll-cpu {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1500000000>;
};
pll_vdi: pll-vdi {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1280000000>;
};
pll_per: pll-per {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <2000000000>;
};
pll_ddr0: pll-ddr0 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1857210000>;
};
pll_ddr1: pll-ddr1 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <1857210000>;
};
/* PLL_CPU derivatives */
occ_cpu: occ-cpu {
compatible = "fixed-factor-clock";
clocks = <&pll_cpu>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
si_css0_ref_clk: si-css0-ref-clk { /* gate ClkRstGen_si_css0_ref */
compatible = "fixed-factor-clock";
clocks = <&occ_cpu>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
cpc_clk: cpc-clk {
compatible = "fixed-factor-clock";
clocks = <&si_css0_ref_clk>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
core0_clk: core0-clk {
compatible = "fixed-factor-clock";
clocks = <&si_css0_ref_clk>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
core1_clk: core1-clk {
compatible = "fixed-factor-clock";
clocks = <&si_css0_ref_clk>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
core2_clk: core2-clk {
compatible = "fixed-factor-clock";
clocks = <&si_css0_ref_clk>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
core3_clk: core3-clk {
compatible = "fixed-factor-clock";
clocks = <&si_css0_ref_clk>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
cm_clk: cm-clk {
compatible = "fixed-factor-clock";
clocks = <&si_css0_ref_clk>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
mem_clk: mem-clk {
compatible = "fixed-factor-clock";
clocks = <&si_css0_ref_clk>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
occ_isram: occ-isram {
compatible = "fixed-factor-clock";
clocks = <&pll_cpu>;
#clock-cells = <0>;
clock-div = <2>;
clock-mult = <1>;
};
isram_clk: isram-clk { /* gate ClkRstGen_isram */
compatible = "fixed-factor-clock";
clocks = <&occ_isram>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
occ_dbu: occ-dbu {
compatible = "fixed-factor-clock";
clocks = <&pll_cpu>;
#clock-cells = <0>;
clock-div = <10>;
clock-mult = <1>;
};
si_dbu_tp_pclk: si-dbu-tp-pclk { /* gate ClkRstGen_dbu */
compatible = "fixed-factor-clock";
clocks = <&occ_dbu>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
/* PLL_VDI derivatives */
occ_vdi: occ-vdi {
compatible = "fixed-factor-clock";
clocks = <&pll_vdi>;
#clock-cells = <0>;
clock-div = <2>;
clock-mult = <1>;
};
vdi_clk: vdi-clk { /* gate ClkRstGen_vdi */
compatible = "fixed-factor-clock";
clocks = <&occ_vdi>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
occ_can_ser: occ-can-ser {
compatible = "fixed-factor-clock";
clocks = <&pll_vdi>;
#clock-cells = <0>;
clock-div = <16>;
clock-mult = <1>;
};
can_ser_clk: can-ser-clk { /* gate ClkRstGen_can_ser */
compatible = "fixed-factor-clock";
clocks = <&occ_can_ser>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
i2c_ser_clk: i2c-ser-clk {
compatible = "fixed-factor-clock";
clocks = <&pll_vdi>;
#clock-cells = <0>;
clock-div = <20>;
clock-mult = <1>;
};
/* PLL_PER derivatives */
occ_periph: occ-periph {
compatible = "fixed-factor-clock";
clocks = <&pll_per>;
#clock-cells = <0>;
clock-div = <16>;
clock-mult = <1>;
};
periph_clk: periph-clk {
compatible = "fixed-factor-clock";
clocks = <&occ_periph>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
can_clk: can-clk {
compatible = "fixed-factor-clock";
clocks = <&occ_periph>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
spi_clk: spi-clk {
compatible = "fixed-factor-clock";
clocks = <&occ_periph>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
uart_clk: uart-clk {
compatible = "fixed-factor-clock";
clocks = <&occ_periph>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
};
i2c_clk: i2c-clk {
compatible = "fixed-factor-clock";
clocks = <&occ_periph>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
clock-output-names = "i2c_clk";
};
timer_clk: timer-clk {
compatible = "fixed-factor-clock";
clocks = <&occ_periph>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
clock-output-names = "timer_clk";
};
gpio_clk: gpio-clk {
compatible = "fixed-factor-clock";
clocks = <&occ_periph>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
clock-output-names = "gpio_clk";
};
emmc_sys_clk: emmc-sys-clk {
compatible = "fixed-factor-clock";
clocks = <&pll_per>;
#clock-cells = <0>;
clock-div = <10>;
clock-mult = <1>;
clock-output-names = "emmc_sys_clk";
};
ccf_ctrl_clk: ccf-ctrl-clk {
compatible = "fixed-factor-clock";
clocks = <&pll_per>;
#clock-cells = <0>;
clock-div = <4>;
clock-mult = <1>;
clock-output-names = "ccf_ctrl_clk";
};
occ_mjpeg_core: occ-mjpeg-core {
compatible = "fixed-factor-clock";
clocks = <&pll_per>;
#clock-cells = <0>;
clock-div = <2>;
clock-mult = <1>;
clock-output-names = "occ_mjpeg_core";
};
hsm_clk: hsm-clk { /* gate ClkRstGen_hsm */
compatible = "fixed-factor-clock";
clocks = <&occ_mjpeg_core>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
clock-output-names = "hsm_clk";
};
mjpeg_core_clk: mjpeg-core-clk { /* gate ClkRstGen_mjpeg_gen */
compatible = "fixed-factor-clock";
clocks = <&occ_mjpeg_core>;
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <1>;
clock-output-names = "mjpeg_core_clk";
};
fcmu_a_clk: fcmu-a-clk {
compatible = "fixed-factor-clock";
clocks = <&pll_per>;
#clock-cells = <0>;
clock-div = <20>;
clock-mult = <1>;
clock-output-names = "fcmu_a_clk";
};
occ_pci_sys: occ-pci-sys {
compatible = "fixed-factor-clock";
clocks = <&pll_per>;
#clock-cells = <0>;
clock-div = <8>;
clock-mult = <1>;
clock-output-names = "occ_pci_sys";
};
pclk: pclk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <250000000>; /* 250MHz */
};
tsu_clk: tsu-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <125000000>; /* 125MHz */
};
};

View File

@ -0,0 +1,124 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright 2023 Mobileye Vision Technologies Ltd.
*/
#include <dt-bindings/interrupt-controller/mips-gic.h>
#include "eyeq5-fixed-clocks.dtsi"
/ {
#address-cells = <2>;
#size-cells = <2>;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
compatible = "img,i6500";
reg = <0>;
clocks = <&core0_clk>;
};
};
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
/* These reserved memory regions are also defined in bootmanager
* for configuring inbound translation for BARS, don't change
* these without syncing with bootmanager
*/
shmem0_reserved: shmem@804000000 {
reg = <0x8 0x04000000 0x0 0x1000000>;
};
shmem1_reserved: shmem@805000000 {
reg = <0x8 0x05000000 0x0 0x1000000>;
};
pci0_msi_reserved: pci0-msi@806000000 {
reg = <0x8 0x06000000 0x0 0x100000>;
};
pci1_msi_reserved: pci1-msi@806100000 {
reg = <0x8 0x06100000 0x0 0x100000>;
};
mini_coredump0_reserved: mini-coredump0@806200000 {
reg = <0x8 0x06200000 0x0 0x100000>;
};
mhm_reserved_0: the-mhm-reserved-0@0 {
reg = <0x8 0x00000000 0x0 0x0000800>;
};
};
aliases {
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
};
cpu_intc: interrupt-controller {
compatible = "mti,cpu-interrupt-controller";
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <1>;
};
soc: soc {
#address-cells = <2>;
#size-cells = <2>;
ranges;
compatible = "simple-bus";
uart0: serial@800000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0 0x800000 0x0 0x1000>;
reg-io-width = <4>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&uart_clk>, <&occ_periph>;
clock-names = "uartclk", "apb_pclk";
};
uart1: serial@900000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0 0x900000 0x0 0x1000>;
reg-io-width = <4>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&uart_clk>, <&occ_periph>;
clock-names = "uartclk", "apb_pclk";
};
uart2: serial@a00000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0 0xa00000 0x0 0x1000>;
reg-io-width = <4>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&uart_clk>, <&occ_periph>;
clock-names = "uartclk", "apb_pclk";
};
gic: interrupt-controller@140000 {
compatible = "mti,gic";
reg = <0x0 0x140000 0x0 0x20000>;
interrupt-controller;
#interrupt-cells = <3>;
/*
* Declare the interrupt-parent even though the mti,gic
* binding doesn't require it, such that the kernel can
* figure out that cpu_intc is the root interrupt
* controller & should be probed first.
*/
interrupt-parent = <&cpu_intc>;
timer {
compatible = "mti,gic-timer";
interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
clocks = <&core0_clk>;
};
};
};
};

View File

@ -115,14 +115,58 @@
compatible = "ns16550a";
reg = <0xc00 0x100>;
reg-io-width = <4>;
reg-shift = <2>;
clocks = <&sysc MT7621_CLK_UART1>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 26 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
no-loopback-test;
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
};
serial1: serial@d00 {
compatible = "ns16550a";
reg = <0xd00 0x100>;
reg-io-width = <4>;
reg-shift = <2>;
clocks = <&sysc MT7621_CLK_UART2>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 27 IRQ_TYPE_LEVEL_HIGH>;
no-loopback-test;
pinctrl-names = "default";
pinctrl-0 = <&uart2_pins>;
status = "disabled";
};
serial2: serial@e00 {
compatible = "ns16550a";
reg = <0xe00 0x100>;
reg-io-width = <4>;
reg-shift = <2>;
clocks = <&sysc MT7621_CLK_UART3>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 28 IRQ_TYPE_LEVEL_HIGH>;
no-loopback-test;
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins>;
status = "disabled";
};
spi0: spi@b00 {
@ -263,6 +307,9 @@
0x1e1d0700 0x0100>;
reg-names = "mac", "ippc";
#address-cells = <1>;
#size-cells = <0>;
clocks = <&sysc MT7621_CLK_XTAL>;
clock-names = "sys_ck";

View File

@ -0,0 +1,108 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
CONFIG_TASKSTATS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_MACH_EYEQ5=y
CONFIG_FIT_IMAGE_FDT_EPM5=y
CONFIG_PAGE_SIZE_16KB=y
CONFIG_MIPS_CPS=y
CONFIG_CPU_HAS_MSA=y
CONFIG_NR_CPUS=16
CONFIG_MIPS_RAW_APPENDED_DTB=y
CONFIG_JUMP_LABEL=y
CONFIG_COMPAT_32BIT_TIME=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_TRIM_UNUSED_KSYMS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_USERFAULTFD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_NETFILTER=y
CONFIG_CAN=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
CONFIG_PCI_DEBUG=y
CONFIG_PCI_ENDPOINT=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_BLOCK=y
CONFIG_SCSI=y
CONFIG_NETDEVICES=y
CONFIG_MACVLAN=y
CONFIG_IPVLAN=y
CONFIG_MACB=y
CONFIG_MARVELL_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_CAN_M_CAN=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_HW_RANDOM=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_PINCTRL=y
CONFIG_MFD_SYSCON=y
CONFIG_HID_A4TECH=y
CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CYPRESS=y
CONFIG_HID_EZKEY=y
CONFIG_HID_ITE=y
CONFIG_HID_KENSINGTON=y
CONFIG_HID_REDRAGON=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_RESET_CONTROLLER=y
# CONFIG_NVMEM is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_CRC32_MIPS=y
CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon"

View File

@ -4,9 +4,9 @@
# Author: Paul Burton <paul.burton@mips.com>
#
obj-y += init.o
obj-y += irq.o
obj-y += proc.o
obj-$(CONFIG_MACH_GENERIC_CORE) += init.o
obj-$(CONFIG_MACH_GENERIC_CORE) += irq.o
obj-$(CONFIG_MACH_GENERIC_CORE) += proc.o
obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o
obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o

View File

@ -47,6 +47,11 @@
*/
#define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
/*
* Gives the size of each kernel segment
*/
#define CSEGX_SIZE 0x20000000
/*
* Returns the physical address of a CKSEGx / XKPHYS address
*/

View File

@ -216,27 +216,33 @@
* Temporary until all gas have MT ASE support
*/
.macro DMT reg=0
.word 0x41600bc1 | (\reg << 16)
insn_if_mips 0x41600bc1 | (\reg << 16)
insn32_if_mm 0x0000057C | (\reg << 21)
.endm
.macro EMT reg=0
.word 0x41600be1 | (\reg << 16)
insn_if_mips 0x41600be1 | (\reg << 16)
insn32_if_mm 0x0000257C | (\reg << 21)
.endm
.macro DVPE reg=0
.word 0x41600001 | (\reg << 16)
insn_if_mips 0x41600001 | (\reg << 16)
insn32_if_mm 0x0000157C | (\reg << 21)
.endm
.macro EVPE reg=0
.word 0x41600021 | (\reg << 16)
insn_if_mips 0x41600021 | (\reg << 16)
insn32_if_mm 0x0000357C | (\reg << 21)
.endm
.macro MFTR rt=0, rd=0, u=0, sel=0
.word 0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
.macro MFTR rs=0, rt=0, u=0, sel=0
insn_if_mips 0x41000000 | (\rt << 16) | (\rs << 11) | (\u << 5) | (\sel)
insn32_if_mm 0x0000000E | (\rt << 21) | (\rs << 16) | (\u << 10) | (\sel << 4)
.endm
.macro MTTR rt=0, rd=0, u=0, sel=0
.word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
.macro MTTR rt=0, rs=0, u=0, sel=0
insn_if_mips 0x41800000 | (\rt << 16) | (\rs << 11) | (\u << 5) | (\sel)
insn32_if_mm 0x00000006 | (\rt << 21) | (\rs << 16) | (\u << 10) | (\sel << 4)
.endm
#ifdef TOOLCHAIN_SUPPORTS_MSA

View File

@ -63,7 +63,7 @@ struct mips_cdmm_driver {
*/
phys_addr_t mips_cdmm_phys_base(void);
extern struct bus_type mips_cdmm_bustype;
extern const struct bus_type mips_cdmm_bustype;
void __iomem *mips_cdmm_early_probe(unsigned int dev_type);
#define to_mips_cdmm_device(d) container_of(d, struct mips_cdmm_device, dev)

View File

@ -49,6 +49,8 @@
#define HIGHMEM_START _AC(0x20000000, UL)
#endif
#define CKSEG0ADDR_OR_64BIT(x) CKSEG0ADDR(x)
#define CKSEG1ADDR_OR_64BIT(x) CKSEG1ADDR(x)
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
@ -82,6 +84,8 @@
#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK))
#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK))
#define CKSEG0ADDR_OR_64BIT(x) TO_CAC(x)
#define CKSEG1ADDR_OR_64BIT(x) TO_UNCAC(x)
#endif /* CONFIG_64BIT */
/*

View File

@ -22,16 +22,28 @@ extern void __iomem *mips_gcr_base;
extern void __iomem *mips_cm_l2sync_base;
/**
* __mips_cm_phys_base - retrieve the physical base address of the CM
* mips_cm_phys_base - retrieve the physical base address of the CM
*
* This function returns the physical base address of the Coherence Manager
* global control block, or 0 if no Coherence Manager is present. It provides
* a default implementation which reads the CMGCRBase register where available,
* and may be overridden by platforms which determine this address in a
* different way by defining a function with the same prototype except for the
* name mips_cm_phys_base (without underscores).
* different way by defining a function with the same prototype.
*/
extern phys_addr_t __mips_cm_phys_base(void);
extern phys_addr_t mips_cm_phys_base(void);
/**
* mips_cm_l2sync_phys_base - retrieve the physical base address of the CM
* L2-sync region
*
* This function returns the physical base address of the Coherence Manager
* L2-cache only region. It provides a default implementation which reads the
* CMGCRL2OnlySyncBase register where available or returns a 4K region just
* behind the CM GCR base address. It may be overridden by platforms which
* determine this address in a different way by defining a function with the
* same prototype.
*/
extern phys_addr_t mips_cm_l2sync_phys_base(void);
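Both accessors are now plain weak symbols (see the mips_cm.c hunks further down), so a platform can override them simply by defining a function with the same prototype. A minimal sketch of such an override, with a made-up base address:

phys_addr_t mips_cm_phys_base(void)
{
	/* hypothetical platform with the GCR block at a fixed address */
	return 0x1fbf8000;
}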
/*
* mips_cm_is64 - determine CM register width
@ -311,6 +323,7 @@ GCR_CX_ACCESSOR_RW(32, 0x018, other)
/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */
GCR_CX_ACCESSOR_RW(32, 0x020, reset_base)
#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE GENMASK(31, 12)
#define CM_GCR_Cx_RESET_BASE_MODE BIT(1)
/* GCR_Cx_ID - Identify the current core */
GCR_CX_ACCESSOR_RO(32, 0x028, id)

View File

@ -26,6 +26,6 @@ static inline void mips_mt_set_cpuoptions(void) { }
#endif
struct class;
extern struct class *mt_class;
extern const struct class mt_class;
#endif /* __ASM_MIPS_MT_H */

View File

@ -189,19 +189,24 @@ static inline unsigned core_nvpes(void)
return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
}
#define _ASM_SET_DVPE \
_ASM_MACRO_1R(dvpe, rt, \
_ASM_INSN_IF_MIPS(0x41600001 | __rt << 16) \
_ASM_INSN32_IF_MM(0x0000157C | __rt << 21))
#define _ASM_UNSET_DVPE ".purgem dvpe\n\t"
static inline unsigned int dvpe(void)
{
int res = 0;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set noat \n"
" .set mips32r2 \n"
" .word 0x41610001 # dvpe $1 \n"
" move %0, $1 \n"
" ehb \n"
" .set pop \n"
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
_ASM_SET_DVPE
" dvpe %0 \n"
" ehb \n"
_ASM_UNSET_DVPE
" .set pop \n"
: "=r" (res));
instruction_hazard();
@ -209,16 +214,22 @@ static inline unsigned int dvpe(void)
return res;
}
#define _ASM_SET_EVPE \
_ASM_MACRO_1R(evpe, rt, \
_ASM_INSN_IF_MIPS(0x41600021 | __rt << 16) \
_ASM_INSN32_IF_MM(0x0000357C | __rt << 21))
#define _ASM_UNSET_EVPE ".purgem evpe\n\t"
static inline void __raw_evpe(void)
{
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set noat \n"
" .set mips32r2 \n"
" .word 0x41600021 # evpe \n"
" ehb \n"
" .set pop \n");
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
_ASM_SET_EVPE
" evpe $0 \n"
" ehb \n"
_ASM_UNSET_EVPE
" .set pop \n");
}
/* Enable virtual processor execution if previous suggested it should be.
@ -232,18 +243,24 @@ static inline void evpe(int previous)
__raw_evpe();
}
#define _ASM_SET_DMT \
_ASM_MACRO_1R(dmt, rt, \
_ASM_INSN_IF_MIPS(0x41600bc1 | __rt << 16) \
_ASM_INSN32_IF_MM(0x0000057C | __rt << 21))
#define _ASM_UNSET_DMT ".purgem dmt\n\t"
static inline unsigned int dmt(void)
{
int res;
__asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n"
" .set noat \n"
" .word 0x41610BC1 # dmt $1 \n"
" ehb \n"
" move %0, $1 \n"
" .set pop \n"
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
_ASM_SET_DMT
" dmt %0 \n"
" ehb \n"
_ASM_UNSET_DMT
" .set pop \n"
: "=r" (res));
instruction_hazard();
@ -251,14 +268,21 @@ static inline unsigned int dmt(void)
return res;
}
#define _ASM_SET_EMT \
_ASM_MACRO_1R(emt, rt, \
_ASM_INSN_IF_MIPS(0x41600be1 | __rt << 16) \
_ASM_INSN32_IF_MM(0x0000257C | __rt << 21))
#define _ASM_UNSET_EMT ".purgem emt\n\t"
static inline void __raw_emt(void)
{
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips32r2 \n"
" .word 0x41600be1 # emt \n"
" ehb \n"
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
_ASM_SET_EMT
" emt $0 \n"
_ASM_UNSET_EMT
" ehb \n"
" .set pop");
}
@ -276,41 +300,55 @@ static inline void emt(int previous)
static inline void ehb(void)
{
__asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n"
" ehb \n"
" .set pop \n");
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
" ehb \n"
" .set pop \n");
}
#define mftc0(rt,sel) \
#define _ASM_SET_MFTC0 \
_ASM_MACRO_2R_1S(mftc0, rs, rt, sel, \
_ASM_INSN_IF_MIPS(0x41000000 | __rt << 16 | \
__rs << 11 | \\sel) \
_ASM_INSN32_IF_MM(0x0000000E | __rt << 21 | \
__rs << 16 | \\sel << 4))
#define _ASM_UNSET_MFTC0 ".purgem mftc0\n\t"
#define mftc0(rt, sel) \
({ \
unsigned long __res; \
unsigned long __res; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set mips32r2 \n" \
" .set noat \n" \
" # mftc0 $1, $" #rt ", " #sel " \n" \
" .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \
" move %0, $1 \n" \
" .set pop \n" \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MFTC0 \
" mftc0 $1, " #rt ", " #sel " \n" \
_ASM_UNSET_MFTC0 \
" .set pop \n" \
: "=r" (__res)); \
\
__res; \
})
#define _ASM_SET_MFTGPR \
_ASM_MACRO_2R(mftgpr, rs, rt, \
_ASM_INSN_IF_MIPS(0x41000020 | __rt << 16 | \
__rs << 11) \
_ASM_INSN32_IF_MM(0x0000040E | __rt << 21 | \
__rs << 16))
#define _ASM_UNSET_MFTGPR ".purgem mftgpr\n\t"
#define mftgpr(rt) \
({ \
unsigned long __res; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set mips32r2 \n" \
" # mftgpr $1," #rt " \n" \
" .word 0x41000820 | (" #rt " << 16) \n" \
" move %0, $1 \n" \
" .set pop \n" \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MFTGPR \
" mftgpr %0," #rt " \n" \
_ASM_UNSET_MFTGPR \
" .set pop \n" \
: "=r" (__res)); \
\
__res; \
@ -321,35 +359,49 @@ static inline void ehb(void)
unsigned long __res; \
\
__asm__ __volatile__( \
" mftr %0, " #rt ", " #u ", " #sel " \n" \
" mftr %0, " #rt ", " #u ", " #sel " \n" \
: "=r" (__res)); \
\
__res; \
})
#define mttgpr(rd,v) \
#define _ASM_SET_MTTGPR \
_ASM_MACRO_2R(mttgpr, rt, rs, \
_ASM_INSN_IF_MIPS(0x41800020 | __rt << 16 | \
__rs << 11) \
_ASM_INSN32_IF_MM(0x00000406 | __rt << 21 | \
__rs << 16))
#define _ASM_UNSET_MTTGPR ".purgem mttgpr\n\t"
#define mttgpr(rs, v) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set mips32r2 \n" \
" .set noat \n" \
" move $1, %0 \n" \
" # mttgpr $1, " #rd " \n" \
" .word 0x41810020 | (" #rd " << 11) \n" \
" .set pop \n" \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MTTGPR \
" mttgpr %0, " #rs " \n" \
_ASM_UNSET_MTTGPR \
" .set pop \n" \
: : "r" (v)); \
} while (0)
#define mttc0(rd, sel, v) \
#define _ASM_SET_MTTC0 \
_ASM_MACRO_2R_1S(mttc0, rt, rs, sel, \
_ASM_INSN_IF_MIPS(0x41800000 | __rt << 16 | \
__rs << 11 | \\sel) \
_ASM_INSN32_IF_MM(0x00000006 | __rt << 21 | \
__rs << 16 | \\sel << 4))
#define _ASM_UNSET_MTTC0 ".purgem mttc0\n\t"
#define mttc0(rs, sel, v) \
({ \
__asm__ __volatile__( \
" .set push \n" \
" .set mips32r2 \n" \
" .set noat \n" \
" move $1, %0 \n" \
" # mttc0 %0," #rd ", " #sel " \n" \
" .word 0x41810000 | (" #rd " << 11) | " #sel " \n" \
" .set pop \n" \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MTTC0 \
" mttc0 %0," #rs ", " #sel " \n" \
_ASM_UNSET_MTTC0 \
" .set pop \n" \
: \
: "r" (v)); \
})
@ -371,49 +423,49 @@ do { \
/* you *must* set the target tc (settc) before trying to use these */
#define read_vpe_c0_vpecontrol() mftc0(1, 1)
#define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val)
#define read_vpe_c0_vpeconf0() mftc0(1, 2)
#define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val)
#define read_vpe_c0_vpeconf1() mftc0(1, 3)
#define write_vpe_c0_vpeconf1(val) mttc0(1, 3, val)
#define read_vpe_c0_count() mftc0(9, 0)
#define write_vpe_c0_count(val) mttc0(9, 0, val)
#define read_vpe_c0_status() mftc0(12, 0)
#define write_vpe_c0_status(val) mttc0(12, 0, val)
#define read_vpe_c0_cause() mftc0(13, 0)
#define write_vpe_c0_cause(val) mttc0(13, 0, val)
#define read_vpe_c0_config() mftc0(16, 0)
#define write_vpe_c0_config(val) mttc0(16, 0, val)
#define read_vpe_c0_config1() mftc0(16, 1)
#define write_vpe_c0_config1(val) mttc0(16, 1, val)
#define read_vpe_c0_config7() mftc0(16, 7)
#define write_vpe_c0_config7(val) mttc0(16, 7, val)
#define read_vpe_c0_ebase() mftc0(15, 1)
#define write_vpe_c0_ebase(val) mttc0(15, 1, val)
#define write_vpe_c0_compare(val) mttc0(11, 0, val)
#define read_vpe_c0_badvaddr() mftc0(8, 0)
#define read_vpe_c0_epc() mftc0(14, 0)
#define write_vpe_c0_epc(val) mttc0(14, 0, val)
#define read_vpe_c0_vpecontrol() mftc0($1, 1)
#define write_vpe_c0_vpecontrol(val) mttc0($1, 1, val)
#define read_vpe_c0_vpeconf0() mftc0($1, 2)
#define write_vpe_c0_vpeconf0(val) mttc0($1, 2, val)
#define read_vpe_c0_vpeconf1() mftc0($1, 3)
#define write_vpe_c0_vpeconf1(val) mttc0($1, 3, val)
#define read_vpe_c0_count() mftc0($9, 0)
#define write_vpe_c0_count(val) mttc0($9, 0, val)
#define read_vpe_c0_status() mftc0($12, 0)
#define write_vpe_c0_status(val) mttc0($12, 0, val)
#define read_vpe_c0_cause() mftc0($13, 0)
#define write_vpe_c0_cause(val) mttc0($13, 0, val)
#define read_vpe_c0_config() mftc0($16, 0)
#define write_vpe_c0_config(val) mttc0($16, 0, val)
#define read_vpe_c0_config1() mftc0($16, 1)
#define write_vpe_c0_config1(val) mttc0($16, 1, val)
#define read_vpe_c0_config7() mftc0($16, 7)
#define write_vpe_c0_config7(val) mttc0($16, 7, val)
#define read_vpe_c0_ebase() mftc0($15, 1)
#define write_vpe_c0_ebase(val) mttc0($15, 1, val)
#define write_vpe_c0_compare(val) mttc0($11, 0, val)
#define read_vpe_c0_badvaddr() mftc0($8, 0)
#define read_vpe_c0_epc() mftc0($14, 0)
#define write_vpe_c0_epc(val) mttc0($14, 0, val)
/* TC */
#define read_tc_c0_tcstatus() mftc0(2, 1)
#define write_tc_c0_tcstatus(val) mttc0(2, 1, val)
#define read_tc_c0_tcbind() mftc0(2, 2)
#define write_tc_c0_tcbind(val) mttc0(2, 2, val)
#define read_tc_c0_tcrestart() mftc0(2, 3)
#define write_tc_c0_tcrestart(val) mttc0(2, 3, val)
#define read_tc_c0_tchalt() mftc0(2, 4)
#define write_tc_c0_tchalt(val) mttc0(2, 4, val)
#define read_tc_c0_tccontext() mftc0(2, 5)
#define write_tc_c0_tccontext(val) mttc0(2, 5, val)
#define read_tc_c0_tcstatus() mftc0($2, 1)
#define write_tc_c0_tcstatus(val) mttc0($2, 1, val)
#define read_tc_c0_tcbind() mftc0($2, 2)
#define write_tc_c0_tcbind(val) mttc0($2, 2, val)
#define read_tc_c0_tcrestart() mftc0($2, 3)
#define write_tc_c0_tcrestart(val) mttc0($2, 3, val)
#define read_tc_c0_tchalt() mftc0($2, 4)
#define write_tc_c0_tchalt(val) mttc0($2, 4, val)
#define read_tc_c0_tccontext() mftc0($2, 5)
#define write_tc_c0_tccontext(val) mttc0($2, 5, val)
/* GPR */
#define read_tc_gpr_sp() mftgpr(29)
#define write_tc_gpr_sp(val) mttgpr(29, val)
#define read_tc_gpr_gp() mftgpr(28)
#define write_tc_gpr_gp(val) mttgpr(28, val)
#define read_tc_gpr_sp() mftgpr($29)
#define write_tc_gpr_sp(val) mttgpr($29, val)
#define read_tc_gpr_gp() mftgpr($28)
#define write_tc_gpr_gp(val) mttgpr($28, val)
__BUILD_SET_C0(mvpcontrol)

View File

@ -42,59 +42,198 @@
/*
* Coprocessor 0 register names
*
* CP0_REGISTER variant is meant to be used in assembly code, C0_REGISTER
* variant is meant to be used in C (uasm) code.
*/
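For illustration, a sketch of the two variants in use (not from this diff; p is a hypothetical uasm output pointer, and GPR_T0 comes from the regdef.h change further down):

	/* In assembly, the CP0_ form expands to a register name:
	 *	mfc0	t0, CP0_STATUS		# i.e. mfc0 t0, $12
	 * In C, the C0_ form expands to the "12, 0" (register, select)
	 * pair consumed by the uasm helpers:
	 */
	uasm_i_mfc0(&p, GPR_T0, C0_STATUS);	/* == uasm_i_mfc0(&p, GPR_T0, 12, 0) */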
#define CP0_INDEX $0
#define CP0_RANDOM $1
#define CP0_ENTRYLO0 $2
#define CP0_ENTRYLO1 $3
#define CP0_CONF $3
#define CP0_GLOBALNUMBER $3, 1
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
#define CP0_PAGEGRAIN $5, 1
#define CP0_SEGCTL0 $5, 2
#define CP0_SEGCTL1 $5, 3
#define CP0_SEGCTL2 $5, 4
#define CP0_WIRED $6
#define CP0_INFO $7
#define CP0_HWRENA $7
#define CP0_BADVADDR $8
#define CP0_BADINSTR $8, 1
#define CP0_COUNT $9
#define CP0_ENTRYHI $10
#define CP0_GUESTCTL1 $10, 4
#define CP0_GUESTCTL2 $10, 5
#define CP0_GUESTCTL3 $10, 6
#define CP0_COMPARE $11
#define CP0_GUESTCTL0EXT $11, 4
#define CP0_STATUS $12
#define CP0_GUESTCTL0 $12, 6
#define CP0_GTOFFSET $12, 7
#define CP0_CAUSE $13
#define CP0_EPC $14
#define CP0_PRID $15
#define CP0_EBASE $15, 1
#define CP0_CMGCRBASE $15, 3
#define CP0_CONFIG $16
#define CP0_CONFIG3 $16, 3
#define CP0_CONFIG5 $16, 5
#define CP0_CONFIG6 $16, 6
#define CP0_LLADDR $17
#define CP0_WATCHLO $18
#define CP0_WATCHHI $19
#define CP0_XCONTEXT $20
#define CP0_FRAMEMASK $21
#define CP0_DIAGNOSTIC $22
#define CP0_DIAGNOSTIC1 $22, 1
#define CP0_DEBUG $23
#define CP0_DEPC $24
#define CP0_PERFORMANCE $25
#define CP0_ECC $26
#define CP0_CACHEERR $27
#define CP0_TAGLO $28
#define CP0_TAGHI $29
#define CP0_ERROREPC $30
#define CP0_DESAVE $31
#define CP0_INDEX $0
#define C0_INDEX 0, 0
#define CP0_RANDOM $1
#define C0_RANDOM 1, 0
#define CP0_ENTRYLO0 $2
#define C0_ENTRYLO0 2, 0
#define CP0_ENTRYLO1 $3
#define C0_ENTRYLO1 3, 0
#define CP0_CONF $3
#define C0_CONF 3, 0
#define CP0_GLOBALNUMBER $3, 1
#define C0_GLOBALNUMBER 3, 1
#define CP0_CONTEXT $4
#define C0_CONTEXT 4, 0
#define CP0_PAGEMASK $5
#define C0_PAGEMASK 5, 0
#define CP0_PAGEGRAIN $5, 1
#define C0_PAGEGRAIN 5, 1
#define CP0_SEGCTL0 $5, 2
#define C0_SEGCTL0 5, 2
#define CP0_SEGCTL1 $5, 3
#define C0_SEGCTL1 5, 3
#define CP0_SEGCTL2 $5, 4
#define C0_SEGCTL2 5, 4
#define CP0_PWBASE $5, 5
#define C0_PWBASE 5, 5
#define CP0_PWFIELD $5, 6
#define C0_PWFIELD 5, 6
#define CP0_PWCTL $5, 7
#define C0_PWCTL 5, 7
#define CP0_WIRED $6
#define C0_WIRED 6, 0
#define CP0_INFO $7
#define C0_INFO 7, 0
#define CP0_HWRENA $7
#define C0_HWRENA 7, 0
#define CP0_BADVADDR $8
#define C0_BADVADDR 8, 0
#define CP0_BADINSTR $8, 1
#define C0_BADINSTR 8, 1
#define CP0_BADINSTRP $8, 2
#define C0_BADINSTRP 8, 2
#define CP0_COUNT $9
#define C0_COUNT 9, 0
#define CP0_PGD $9, 7
#define C0_PGD 9, 7
#define CP0_ENTRYHI $10
#define C0_ENTRYHI 10, 0
#define CP0_GUESTCTL1 $10, 4
#define C0_GUESTCTL1 10, 4
#define CP0_GUESTCTL2 $10, 5
#define C0_GUESTCTL2 10, 5
#define CP0_GUESTCTL3 $10, 6
#define C0_GUESTCTL3 10, 6
#define CP0_COMPARE $11
#define C0_COMPARE 11, 0
#define CP0_GUESTCTL0EXT $11, 4
#define C0_GUESTCTL0EXT 11, 4
#define CP0_STATUS $12
#define C0_STATUS 12, 0
#define CP0_GUESTCTL0 $12, 6
#define C0_GUESTCTL0 12, 6
#define CP0_GTOFFSET $12, 7
#define C0_GTOFFSET 12, 7
#define CP0_CAUSE $13
#define C0_CAUSE 13, 0
#define CP0_EPC $14
#define C0_EPC 14, 0
#define CP0_PRID $15
#define C0_PRID 15, 0
#define CP0_EBASE $15, 1
#define C0_EBASE 15, 1
#define CP0_CMGCRBASE $15, 3
#define C0_CMGCRBASE 15, 3
#define CP0_CONFIG $16
#define C0_CONFIG 16, 0
#define CP0_CONFIG1 $16, 1
#define C0_CONFIG1 16, 1
#define CP0_CONFIG2 $16, 2
#define C0_CONFIG2 16, 2
#define CP0_CONFIG3 $16, 3
#define C0_CONFIG3 16, 3
#define CP0_CONFIG4 $16, 4
#define C0_CONFIG4 16, 4
#define CP0_CONFIG5 $16, 5
#define C0_CONFIG5 16, 5
#define CP0_CONFIG6 $16, 6
#define C0_CONFIG6 16, 6
#define CP0_LLADDR $17
#define C0_LLADDR 17, 0
#define CP0_WATCHLO $18
#define C0_WATCHLO 18, 0
#define CP0_WATCHHI $19
#define C0_WATCHHI 19, 0
#define CP0_XCONTEXT $20
#define C0_XCONTEXT 20, 0
#define CP0_FRAMEMASK $21
#define C0_FRAMEMASK 21, 0
#define CP0_DIAGNOSTIC $22
#define C0_DIAGNOSTIC 22, 0
#define CP0_DIAGNOSTIC1 $22, 1
#define C0_DIAGNOSTIC1 22, 1
#define CP0_DEBUG $23
#define C0_DEBUG 23, 0
#define CP0_DEPC $24
#define C0_DEPC 24, 0
#define CP0_PERFORMANCE $25
#define C0_PERFORMANCE 25, 0
#define CP0_ECC $26
#define C0_ECC 26, 0
#define CP0_CACHEERR $27
#define C0_CACHEERR 27, 0
#define CP0_TAGLO $28
#define C0_TAGLO 28, 0
#define CP0_DTAGLO $28, 2
#define C0_DTAGLO 28, 2
#define CP0_DDATALO $28, 3
#define C0_DDATALO 28, 3
#define CP0_STAGLO $28, 4
#define C0_STAGLO 28, 4
#define CP0_TAGHI $29
#define C0_TAGHI 29, 0
#define CP0_ERROREPC $30
#define C0_ERROREPC 30, 0
#define CP0_DESAVE $31
#define C0_DESAVE 31, 0
/*
* R4640/R4650 cp0 register names. These registers are listed
@ -291,6 +430,12 @@
#define ST0_DE 0x00010000
#define ST0_CE 0x00020000
#ifdef CONFIG_64BIT
#define ST0_KX_IF_64 ST0_KX
#else
#define ST0_KX_IF_64 0
#endif
/*
* Setting c0_status.co enables Hit_Writeback and Hit_Writeback_Invalidate
* cacheops in userspace. This bit exists only on RM7000 and RM9000
@ -1277,11 +1422,13 @@ static inline int mm_insn_16bit(u16 insn)
*/
/* Match an individual register number and assign to \var */
#define _IFC_REG(n) \
".ifc \\r, $" #n "\n\t" \
#define _IFC_REG_NAME(name, n) \
".ifc \\r, $" #name "\n\t" \
"\\var = " #n "\n\t" \
".endif\n\t"
#define _IFC_REG(n) _IFC_REG_NAME(n, n)
#define _ASM_SET_PARSE_R \
".macro parse_r var r\n\t" \
"\\var = -1\n\t" \
@ -1293,6 +1440,7 @@ static inline int mm_insn_16bit(u16 insn)
_IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) \
_IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) \
_IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) \
_IFC_REG_NAME(sp, 29) _IFC_REG_NAME(fp, 30) \
".iflt \\var\n\t" \
".error \"Unable to parse register name \\r\"\n\t" \
".endif\n\t" \
@ -1307,6 +1455,15 @@ static inline int mm_insn_16bit(u16 insn)
* the ENC encodings.
*/
/* Instructions with 1 register operand */
#define _ASM_MACRO_1R(OP, R1, ENC) \
".macro " #OP " " #R1 "\n\t" \
_ASM_SET_PARSE_R \
"parse_r __" #R1 ", \\" #R1 "\n\t" \
ENC \
_ASM_UNSET_PARSE_R \
".endm\n\t"
/* Instructions with 1 register operand & 1 immediate operand */
#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \
".macro " #OP " " #R1 ", " #I2 "\n\t" \
@ -2078,7 +2235,14 @@ do { \
_ASM_INSN_IF_MIPS(0x4200000c) \
_ASM_INSN32_IF_MM(0x0000517c)
#else /* !TOOLCHAIN_SUPPORTS_VIRT */
#define _ASM_SET_VIRT ".set\tvirt\n\t"
#if MIPS_ISA_REV >= 5
#define _ASM_SET_VIRT_ISA
#elif defined(CONFIG_64BIT)
#define _ASM_SET_VIRT_ISA ".set\tmips64r5\n\t"
#else
#define _ASM_SET_VIRT_ISA ".set\tmips32r5\n\t"
#endif
#define _ASM_SET_VIRT _ASM_SET_VIRT_ISA ".set\tvirt\n\t"
#define _ASM_SET_MFGC0 _ASM_SET_VIRT
#define _ASM_SET_DMFGC0 _ASM_SET_VIRT
#define _ASM_SET_MTGC0 _ASM_SET_VIRT
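For illustration (an expansion sketch, not text from the diff): on a 64-bit build whose base ISA is below r5, _ASM_SET_VIRT now expands to

	.set	mips64r5
	.set	virt

while a build already at ISA r5 or newer emits only ".set virt", so a kernel built for r5 or above never has its ISA level lowered behind its back. This is also why the hard-coded ".set mips32r5" and ".set mips64r5" lines disappear from the mfgc0/dmfgc0/mtgc0/dmtgc0 helpers in the hunks below.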
@ -2099,7 +2263,6 @@ do { \
({ int __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips32r5\n\t" \
_ASM_SET_MFGC0 \
"mfgc0\t%0, " #source ", %1\n\t" \
_ASM_UNSET_MFGC0 \
@ -2113,7 +2276,6 @@ do { \
({ unsigned long long __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64r5\n\t" \
_ASM_SET_DMFGC0 \
"dmfgc0\t%0, " #source ", %1\n\t" \
_ASM_UNSET_DMFGC0 \
@ -2127,7 +2289,6 @@ do { \
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips32r5\n\t" \
_ASM_SET_MTGC0 \
"mtgc0\t%z0, " #register ", %1\n\t" \
_ASM_UNSET_MTGC0 \
@ -2140,7 +2301,6 @@ do { \
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64r5\n\t" \
_ASM_SET_DMTGC0 \
"dmtgc0\t%z0, " #register ", %1\n\t" \
_ASM_UNSET_DMTGC0 \

View File

@ -16,6 +16,96 @@
#if _MIPS_SIM == _MIPS_SIM_ABI32
/*
* General purpose register numbers for 32 bit ABI
*/
#define GPR_ZERO 0 /* wired zero */
#define GPR_AT 1 /* assembler temp */
#define GPR_V0 2 /* return value */
#define GPR_V1 3
#define GPR_A0 4 /* argument registers */
#define GPR_A1 5
#define GPR_A2 6
#define GPR_A3 7
#define GPR_T0 8 /* caller saved */
#define GPR_T1 9
#define GPR_T2 10
#define GPR_T3 11
#define GPR_T4 12
#define GPR_TA0 12
#define GPR_T5 13
#define GPR_TA1 13
#define GPR_T6 14
#define GPR_TA2 14
#define GPR_T7 15
#define GPR_TA3 15
#define GPR_S0 16 /* callee saved */
#define GPR_S1 17
#define GPR_S2 18
#define GPR_S3 19
#define GPR_S4 20
#define GPR_S5 21
#define GPR_S6 22
#define GPR_S7 23
#define GPR_T8 24 /* caller saved */
#define GPR_T9 25
#define GPR_JP 25 /* PIC jump register */
#define GPR_K0 26 /* kernel scratch */
#define GPR_K1 27
#define GPR_GP 28 /* global pointer */
#define GPR_SP 29 /* stack pointer */
#define GPR_FP 30 /* frame pointer */
#define GPR_S8 30 /* same like fp! */
#define GPR_RA 31 /* return address */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define GPR_ZERO 0 /* wired zero */
#define GPR_AT 1 /* assembler temp */
#define GPR_V0 2 /* return value - caller saved */
#define GPR_V1 3
#define GPR_A0 4 /* argument registers */
#define GPR_A1 5
#define GPR_A2 6
#define GPR_A3 7
#define GPR_A4 8 /* arg reg 64 bit; caller saved in 32 bit */
#define GPR_TA0 8
#define GPR_A5 9
#define GPR_TA1 9
#define GPR_A6 10
#define GPR_TA2 10
#define GPR_A7 11
#define GPR_TA3 11
#define GPR_T0 12 /* caller saved */
#define GPR_T1 13
#define GPR_T2 14
#define GPR_T3 15
#define GPR_S0 16 /* callee saved */
#define GPR_S1 17
#define GPR_S2 18
#define GPR_S3 19
#define GPR_S4 20
#define GPR_S5 21
#define GPR_S6 22
#define GPR_S7 23
#define GPR_T8 24 /* caller saved */
#define GPR_T9 25 /* callee address for PIC/temp */
#define GPR_JP 25 /* PIC jump register */
#define GPR_K0 26 /* kernel temporary */
#define GPR_K1 27
#define GPR_GP 28 /* global pointer - caller saved for PIC */
#define GPR_SP 29 /* stack pointer */
#define GPR_FP 30 /* frame pointer */
#define GPR_S8 30 /* callee saved */
#define GPR_RA 31 /* return address */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
#ifdef __ASSEMBLY__
#if _MIPS_SIM == _MIPS_SIM_ABI32
/*
* Symbolic register names for 32 bit ABI
*/
@ -102,5 +192,6 @@
#define ra $31 /* return address */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_REGDEF_H */

View File

@ -24,7 +24,7 @@ struct core_boot_config {
extern struct core_boot_config *mips_cps_core_bootcfg;
extern void mips_cps_core_entry(void);
extern void mips_cps_core_boot(int cca, void __iomem *gcr_base);
extern void mips_cps_core_init(void);
extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe);
@ -32,7 +32,12 @@ extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe);
extern void mips_cps_pm_save(void);
extern void mips_cps_pm_restore(void);
extern void *mips_cps_core_entry_patch_end;
extern void excep_tlbfill(void);
extern void excep_xtlbfill(void);
extern void excep_cache(void);
extern void excep_genex(void);
extern void excep_intex(void);
extern void excep_ejtag(void);
#ifdef CONFIG_MIPS_CPS

View File

@ -4,6 +4,7 @@
* Author: Paul Burton <paul.burton@mips.com>
*/
#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
@ -82,39 +83,10 @@
.endm
.balign 0x1000
LEAF(mips_cps_core_entry)
/*
* These first several instructions will be patched by cps_smp_setup to load the
* CCA to use into register s0 and GCR base address to register s1.
*/
.rept CPS_ENTRY_PATCH_INSNS
nop
.endr
.global mips_cps_core_entry_patch_end
mips_cps_core_entry_patch_end:
/* Check whether we're here due to an NMI */
mfc0 k0, CP0_STATUS
and k0, k0, ST0_NMI
beqz k0, not_nmi
nop
/* This is an NMI */
PTR_LA k0, nmi_handler
jr k0
nop
not_nmi:
/* Setup Cause */
li t0, CAUSEF_IV
mtc0 t0, CP0_CAUSE
/* Setup Status */
li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
mtc0 t0, CP0_STATUS
LEAF(mips_cps_core_boot)
/* Save CCA and GCR base */
move s0, a0
move s1, a1
/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
@ -178,49 +150,45 @@ not_nmi:
PTR_L sp, VPEBOOTCFG_SP(v1)
jr t1
nop
END(mips_cps_core_entry)
END(mips_cps_core_boot)
.org 0x200
__INIT
LEAF(excep_tlbfill)
DUMP_EXCEP("TLB Fill")
b .
nop
END(excep_tlbfill)
.org 0x280
LEAF(excep_xtlbfill)
DUMP_EXCEP("XTLB Fill")
b .
nop
END(excep_xtlbfill)
.org 0x300
LEAF(excep_cache)
DUMP_EXCEP("Cache")
b .
nop
END(excep_cache)
.org 0x380
LEAF(excep_genex)
DUMP_EXCEP("General")
b .
nop
END(excep_genex)
.org 0x400
LEAF(excep_intex)
DUMP_EXCEP("Interrupt")
b .
nop
END(excep_intex)
.org 0x480
LEAF(excep_ejtag)
PTR_LA k0, ejtag_debug_handler
jr k0
nop
END(excep_ejtag)
__FINIT
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
@ -428,7 +396,7 @@ LEAF(mips_cps_boot_vpes)
/* Calculate a pointer to the VPEs struct vpe_boot_config */
li t0, VPEBOOTCFG_SIZE
mul t0, t0, ta1
addu t0, t0, ta3
PTR_ADDU t0, t0, ta3
/* Set the TC restart PC */
lw t1, VPEBOOTCFG_PC(t0)
@ -603,10 +571,10 @@ dcache_done:
lw $1, TI_CPU(gp)
sll $1, $1, LONGLOG
PTR_LA \dest, __per_cpu_offset
addu $1, $1, \dest
PTR_ADDU $1, $1, \dest
lw $1, 0($1)
PTR_LA \dest, cps_cpu_state
addu \dest, \dest, $1
PTR_ADDU \dest, \dest, $1
.set pop
.endm

View File

@ -179,7 +179,7 @@ static char *cm3_causes[32] = {
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
phys_addr_t __mips_cm_phys_base(void)
phys_addr_t __weak mips_cm_phys_base(void)
{
unsigned long cmgcr;
@ -198,10 +198,7 @@ phys_addr_t __mips_cm_phys_base(void)
return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
}
phys_addr_t mips_cm_phys_base(void)
__attribute__((weak, alias("__mips_cm_phys_base")));
static phys_addr_t __mips_cm_l2sync_phys_base(void)
phys_addr_t __weak mips_cm_l2sync_phys_base(void)
{
u32 base_reg;
@ -217,9 +214,6 @@ static phys_addr_t __mips_cm_l2sync_phys_base(void)
return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
}
phys_addr_t mips_cm_l2sync_phys_base(void)
__attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
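The hunks above replace the old two-step pattern — a hidden __mips_cm_* function plus an explicit weak alias — with a direct __weak definition that platforms can override. A minimal sketch of the idiom, using hypothetical names rather than the kernel symbols:

/* Default implementation; the weak attribute lets any non-weak
 * definition of the same name win at link time. */
#define __weak __attribute__((weak))

__weak unsigned long board_ram_base(void)
{
	return 0;			/* generic fallback */
}

/* A platform file simply provides a strong definition:
 *   unsigned long board_ram_base(void) { return 0x80000000UL; }
 * No alias boilerplate is needed. */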
static void mips_cm_probe_l2sync(void)
{
unsigned major_rev;


@ -229,19 +229,13 @@ void mips_mt_set_cpuoptions(void)
}
}
struct class *mt_class;
const struct class mt_class = {
.name = "mt",
};
static int __init mips_mt_init(void)
{
struct class *mtc;
mtc = class_create("mt");
if (IS_ERR(mtc))
return PTR_ERR(mtc);
mt_class = mtc;
return 0;
return class_register(&mt_class);
}
subsys_initcall(mips_mt_init);
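This follows the driver-core move away from class_create() toward statically defined, const struct class objects. A hedged sketch of the pattern, assuming the post-6.4 API where class_register() accepts a const class (names here are illustrative):

#include <linux/device/class.h>
#include <linux/init.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	/* registers /sys/class/example; no heap allocation involved */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

Call sites such as device_create() then take &example_class directly, as the rtlx changes below show.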


@ -18,6 +18,7 @@
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/regdef.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>
@ -69,13 +70,6 @@ DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];
enum mips_reg {
zero, at, v0, v1, a0, a1, a2, a3,
t0, t1, t2, t3, t4, t5, t6, t7,
s0, s1, s2, s3, s4, s5, s6, s7,
t8, t9, k0, k1, gp, sp, fp, ra,
};
bool cps_pm_support_state(enum cps_pm_state state)
{
return test_bit(state, state_support);
@ -203,13 +197,13 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
return;
/* Load base address */
UASM_i_LA(pp, t0, (long)CKSEG0);
UASM_i_LA(pp, GPR_T0, (long)CKSEG0);
/* Calculate end address */
if (cache_size < 0x8000)
uasm_i_addiu(pp, t1, t0, cache_size);
uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size);
else
UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size));
/* Start of cache op loop */
uasm_build_label(pl, *pp, lbl);
@ -217,19 +211,19 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
/* Generate the cache ops */
for (i = 0; i < unroll_lines; i++) {
if (cpu_has_mips_r6) {
uasm_i_cache(pp, op, 0, t0);
uasm_i_addiu(pp, t0, t0, cache->linesz);
uasm_i_cache(pp, op, 0, GPR_T0);
uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz);
} else {
uasm_i_cache(pp, op, i * cache->linesz, t0);
uasm_i_cache(pp, op, i * cache->linesz, GPR_T0);
}
}
if (!cpu_has_mips_r6)
/* Update the base address */
uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz);
/* Loop if we haven't reached the end address yet */
uasm_il_bne(pp, pr, t0, t1, lbl);
uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl);
uasm_i_nop(pp);
}
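The register arguments above now come from the shared GPR_* defines in asm/regdef.h instead of a per-file enum, so every uasm user spells registers the same way. A hedged sketch of emitting a short sequence with these names (buffer handling simplified; assumes the GPR_* constants this series introduces):

#include <linux/types.h>
#include <asm/regdef.h>
#include <asm/uasm.h>

static u32 buf[8];

static void emit_example(void)
{
	u32 *p = buf;

	UASM_i_LA(&p, GPR_T0, 0x80000000L);	/* t0 = KSEG0 base */
	uasm_i_addiu(&p, GPR_T1, GPR_T0, 64);	/* t1 = t0 + 64 */
	uasm_i_jr(&p, GPR_RA);			/* return to caller */
	uasm_i_nop(&p);				/* branch delay slot */
}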
@ -275,25 +269,25 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
*/
/* Preserve perf counter setup */
uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Setup perf counter to count FSB full pipeline stalls */
uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf);
uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
/* Base address for loads */
UASM_i_LA(pp, t0, (long)CKSEG0);
UASM_i_LA(pp, GPR_T0, (long)CKSEG0);
/* Start of clear loop */
uasm_build_label(pl, *pp, lbl);
/* Perform some loads to fill the FSB */
for (i = 0; i < num_loads; i++)
uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0);
/*
* Invalidate the new D-cache entries so that the cache will need
@ -301,9 +295,9 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
*/
for (i = 0; i < num_loads; i++) {
uasm_i_cache(pp, Hit_Invalidate_D,
i * line_size * line_stride, t0);
i * line_size * line_stride, GPR_T0);
uasm_i_cache(pp, Hit_Writeback_Inv_SD,
i * line_size * line_stride, t0);
i * line_size * line_stride, GPR_T0);
}
/* Barrier ensuring previous cache invalidates are complete */
@ -311,16 +305,16 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
uasm_i_ehb(pp);
/* Check whether the pipeline stalled due to the FSB being full */
uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_mfc0(pp, GPR_T1, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Loop if it didn't */
uasm_il_beqz(pp, pr, t1, lbl);
uasm_il_beqz(pp, pr, GPR_T1, lbl);
uasm_i_nop(pp);
/* Restore perf counter 1. The count may well now be wrong... */
uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
return 0;
@ -330,12 +324,12 @@ static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
unsigned r_addr, int lbl)
{
uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000));
uasm_build_label(pl, *pp, lbl);
uasm_i_ll(pp, t1, 0, r_addr);
uasm_i_or(pp, t1, t1, t0);
uasm_i_sc(pp, t1, 0, r_addr);
uasm_il_beqz(pp, pr, t1, lbl);
uasm_i_ll(pp, GPR_T1, 0, r_addr);
uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0);
uasm_i_sc(pp, GPR_T1, 0, r_addr);
uasm_il_beqz(pp, pr, GPR_T1, lbl);
uasm_i_nop(pp);
}
@ -344,9 +338,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
u32 *buf, *p;
const unsigned r_online = a0;
const unsigned r_nc_count = a1;
const unsigned r_pcohctl = t7;
const unsigned r_online = GPR_A0;
const unsigned r_nc_count = GPR_A1;
const unsigned r_pcohctl = GPR_T8;
const unsigned max_instrs = 256;
unsigned cpc_cmd;
int err;
@ -383,8 +377,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* with the return address placed in v0 to avoid clobbering
* the ra register before it is saved.
*/
UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
uasm_i_jalr(&p, v0, t0);
UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save);
uasm_i_jalr(&p, GPR_V0, GPR_T0);
uasm_i_nop(&p);
}
@ -399,11 +393,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
/* Increment ready_count */
uasm_i_sync(&p, __SYNC_mb);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, 1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_incready);
uasm_i_addiu(&p, t1, t1, 1);
uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
uasm_i_addiu(&p, GPR_T2, GPR_T1, 1);
uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
uasm_il_beqz(&p, &r, GPR_T2, lbl_incready);
uasm_i_addiu(&p, GPR_T1, GPR_T1, 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
uasm_i_sync(&p, __SYNC_mb);
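The ll/sc sequence above is a standard load-linked/store-conditional retry loop: reload and retry until the conditional store succeeds, leaving the incremented value behind. A C11 analogy of the same logic, purely illustrative:

#include <stdatomic.h>

static unsigned int inc_ready_count(atomic_uint *nc_count)
{
	unsigned int old = atomic_load(nc_count);

	/* CAS failure reloads 'old', mirroring a failed sc retrying ll */
	while (!atomic_compare_exchange_weak(nc_count, &old, old + 1))
		;
	return old + 1;		/* value the generated code keeps in t1 */
}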
@ -412,7 +406,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* If this is the last VPE to become ready for non-coherence
* then it should branch below.
*/
uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence);
uasm_i_nop(&p);
if (state < CPS_PM_POWER_GATED) {
@ -422,13 +416,13 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* has been disabled before proceeding, which it will do
* by polling for the top bit of ready_count being set.
*/
uasm_i_addiu(&p, t1, zero, -1);
uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1);
uasm_build_label(&l, p, lbl_poll_cont);
uasm_i_lw(&p, t0, 0, r_nc_count);
uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
uasm_i_lw(&p, GPR_T0, 0, r_nc_count);
uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont);
uasm_i_ehb(&p);
if (cpu_has_mipsmt)
uasm_i_yield(&p, zero, t1);
uasm_i_yield(&p, GPR_ZERO, GPR_T1);
uasm_il_b(&p, &r, lbl_poll_cont);
uasm_i_nop(&p);
} else {
@ -438,16 +432,16 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
*/
if (cpu_has_mipsmt) {
/* Halt the VPE via C0 tchalt register */
uasm_i_addiu(&p, t0, zero, TCHALT_H);
uasm_i_mtc0(&p, t0, 2, 4);
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H);
uasm_i_mtc0(&p, GPR_T0, 2, 4);
} else if (cpu_has_vp) {
/* Halt the VP via the CPC VP_STOP register */
unsigned int vpe_id;
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
uasm_i_sw(&p, t0, 0, t1);
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id);
UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop());
uasm_i_sw(&p, GPR_T0, 0, GPR_T1);
} else {
BUG();
}
@ -482,9 +476,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* defined by the interAptiv & proAptiv SUMs as ensuring that the
* operation resulting from the preceding store is complete.
*/
uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu]));
uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
/* Barrier to ensure write to coherence control is complete */
uasm_i_sync(&p, __SYNC_full);
@ -492,8 +486,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
}
/* Disable coherence */
uasm_i_sw(&p, zero, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl);
uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
if (state >= CPS_PM_CLOCK_GATED) {
err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
@ -515,9 +509,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
}
/* Issue the CPC command */
UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
uasm_i_addiu(&p, t1, zero, cpc_cmd);
uasm_i_sw(&p, t1, 0, t0);
UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd());
uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd);
uasm_i_sw(&p, GPR_T1, 0, GPR_T0);
if (state == CPS_PM_POWER_GATED) {
/* If anything goes wrong just hang */
@ -564,12 +558,12 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* will run this. The first will actually re-enable coherence & the
* rest will just be performing a rather unusual nop.
*/
uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3
? CM_GCR_Cx_COHERENCE_COHDOMAINEN
: CM3_GCR_Cx_COHERENCE_COHEN);
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
/* Barrier to ensure write to coherence control is complete */
uasm_i_sync(&p, __SYNC_full);
@ -579,11 +573,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
/* Decrement ready_count */
uasm_build_label(&l, p, lbl_decready);
uasm_i_sync(&p, __SYNC_mb);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, -1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_decready);
uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
uasm_i_addiu(&p, GPR_T2, GPR_T1, -1);
uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
uasm_il_beqz(&p, &r, GPR_T2, lbl_decready);
uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
uasm_i_sync(&p, __SYNC_mb);
@ -612,7 +606,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
}
/* The core is coherent, time to return to C code */
uasm_i_jr(&p, ra);
uasm_i_jr(&p, GPR_RA);
uasm_i_nop(&p);
gen_done:


@ -95,11 +95,11 @@ int __init rtlx_module_init(void)
atomic_set(&channel_wqs[i].in_open, 0);
mutex_init(&channel_wqs[i].mutex);
dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
dev = device_create(&mt_class, NULL, MKDEV(major, i), NULL,
"%s%d", RTLX_MODULE_NAME, i);
if (IS_ERR(dev)) {
while (i--)
device_destroy(mt_class, MKDEV(major, i));
device_destroy(&mt_class, MKDEV(major, i));
err = PTR_ERR(dev);
goto out_chrdev;
@ -127,7 +127,7 @@ int __init rtlx_module_init(void)
out_class:
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
device_destroy(&mt_class, MKDEV(major, i));
out_chrdev:
unregister_chrdev(major, RTLX_MODULE_NAME);
@ -139,7 +139,7 @@ void __exit rtlx_module_exit(void)
int i;
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
device_destroy(&mt_class, MKDEV(major, i));
unregister_chrdev(major, RTLX_MODULE_NAME);


@ -7,6 +7,7 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
@ -20,12 +21,24 @@
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/regdef.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
#define BEV_VEC_SIZE 0x500
#define BEV_VEC_ALIGN 0x1000
enum label_id {
label_not_nmi = 1,
};
UASM_L_LA(_not_nmi)
static DECLARE_BITMAP(core_power, NR_CPUS);
static uint32_t core_entry_reg;
static phys_addr_t cps_vec_pa;
struct core_boot_config *mips_cps_core_bootcfg;
@ -34,10 +47,100 @@ static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
return min(smp_max_threads, mips_cps_numvps(cluster, core));
}
static void __init *mips_cps_build_core_entry(void *addr)
{
extern void (*nmi_handler)(void);
u32 *p = addr;
u32 val;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
UASM_i_LA(&p, GPR_T9, ST0_NMI);
uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);
uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
uasm_i_nop(&p);
UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);
uasm_l_not_nmi(&l, p);
val = CAUSEF_IV;
uasm_i_lui(&p, GPR_K0, val >> 16);
uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
uasm_i_lui(&p, GPR_K0, val >> 16);
uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
#else
UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
#endif
uasm_i_jr(&p, GPR_T9);
uasm_i_nop(&p);
uasm_resolve_relocs(relocs, labels);
return p;
}
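The forward branch to label_not_nmi works through uasm's label/relocation machinery: branches to not-yet-emitted targets are recorded as relocations and patched once the target label is bound. A minimal sketch of that idiom (illustrative sequence, assuming the uasm API used above):

#include <linux/types.h>
#include <asm/regdef.h>
#include <asm/uasm.h>

enum { label_target = 1 };

static struct uasm_label labels[2];	/* zeroed static storage */
static struct uasm_reloc relocs[2];

static void emit(u32 *p)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	uasm_il_bnez(&p, &r, GPR_K0, label_target);	/* forward branch */
	uasm_i_nop(&p);					/* delay slot */
	uasm_build_label(&l, p, label_target);		/* bind target */
	uasm_resolve_relocs(relocs, labels);		/* patch branch */
}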
static int __init allocate_cps_vecs(void)
{
/* Try to allocate in KSEG1 first */
cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
0x0, CSEGX_SIZE - 1);
if (cps_vec_pa)
core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
CM_GCR_Cx_RESET_BASE_BEVEXCBASE;
if (!cps_vec_pa && mips_cm_is64) {
cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
0x0, SZ_4G - 1);
if (cps_vec_pa)
core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
CM_GCR_Cx_RESET_BASE_MODE;
}
if (!cps_vec_pa)
return -ENOMEM;
return 0;
}
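The allocator tries twice: first for memory that a 32-bit BEV exception base can reach through KSEG1, then — if the CM supports 64-bit addressing — anywhere in the low 4GB with the MODE bit set. A hedged sketch of that two-step strategy (bounds illustrative):

#include <linux/memblock.h>
#include <linux/sizes.h>

static phys_addr_t alloc_vec(phys_addr_t size, phys_addr_t align, bool cm_is64)
{
	phys_addr_t pa;

	/* first choice: the KSEG-addressable low window */
	pa = memblock_phys_alloc_range(size, align, 0, SZ_512M - 1);
	if (!pa && cm_is64)
		/* fall back to anywhere below 4GB */
		pa = memblock_phys_alloc_range(size, align, 0, SZ_4G - 1);

	return pa;	/* 0 means both attempts failed */
}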
static void __init setup_cps_vecs(void)
{
void *cps_vec;
cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
mips_cps_build_core_entry(cps_vec);
memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
memcpy(cps_vec + 0x300, &excep_cache, 0x80);
memcpy(cps_vec + 0x380, &excep_genex, 0x80);
memcpy(cps_vec + 0x400, &excep_intex, 0x80);
memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);
/* Make sure no prefetched data in cache */
blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
__sync();
}
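Because the vectors are written through an uncached alias while CPUs fetch through cached ones, the cached view must be invalidated before any core executes the code. A sketch of the publish step, reusing the helpers called above (arguments illustrative):

#include <asm/r4kcache.h>

static void publish_patched_code(unsigned long kva, unsigned long len)
{
	blast_inv_dcache_range(kva, kva + len);	/* drop stale L1 D lines */
	bc_inv(kva, len);			/* and the L2/blockcache */
	__sync();				/* order before first fetch */
}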
static void __init cps_smp_setup(void)
{
unsigned int nclusters, ncores, nvpes, core_vpes;
unsigned long core_entry;
int cl, c, v;
/* Detect & record VPE topology */
@ -94,10 +197,11 @@ static void __init cps_smp_setup(void)
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
if (mips_cm_revision() >= CM_REV_CM3) {
core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
write_gcr_bev_base(core_entry);
}
if (allocate_cps_vecs())
pr_err("Failed to allocate CPS vectors\n");
if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
write_gcr_bev_base(core_entry_reg);
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
@ -110,10 +214,14 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
{
unsigned ncores, core_vpes, c, cca;
bool cca_unsuitable, cores_limited;
u32 *entry_code;
mips_mt_set_cpuoptions();
if (!core_entry_reg) {
pr_err("core_entry address unsuitable, disabling smp-cps\n");
goto err_out;
}
/* Detect whether the CCA is unsuited to multi-core SMP */
cca = read_c0_config() & CONF_CM_CMASK;
switch (cca) {
@ -145,20 +253,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
cpu_has_dc_aliases ? "dcache aliasing" : "");
/*
* Patch the start of mips_cps_core_entry to provide:
*
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
uasm_i_addiu(&entry_code, 16, 0, cca);
UASM_i_LA(&entry_code, 17, (long)mips_gcr_base);
BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end);
blast_dcache_range((unsigned long)&mips_cps_core_entry,
(unsigned long)entry_code);
bc_wback_inv((unsigned long)&mips_cps_core_entry,
(void *)entry_code - (void *)&mips_cps_core_entry);
__sync();
setup_cps_vecs();
/* Allocate core boot configuration structs */
ncores = mips_cps_numcores(0);
@ -213,7 +308,7 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
write_gcr_co_reset_base(core_entry_reg);
/* Ensure its coherency is disabled */
write_gcr_co_coherence(0);
@ -290,7 +385,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
unsigned long core_entry;
unsigned int remote;
int err;
@ -314,8 +408,7 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
if (cpu_has_vp) {
mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
write_gcr_co_reset_base(core_entry);
write_gcr_co_reset_base(core_entry_reg);
mips_cm_unlock_other();
}

View File

@ -58,6 +58,7 @@
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/ptrace.h>
#include <asm/regdef.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
@ -2041,13 +2042,12 @@ void __init *set_except_vector(int n, void *addr)
unsigned long jump_mask = ~((1 << 28) - 1);
#endif
u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26;
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
uasm_i_j(&buf, handler & ~jump_mask);
uasm_i_nop(&buf);
} else {
UASM_i_LA(&buf, k0, handler);
uasm_i_jr(&buf, k0);
UASM_i_LA(&buf, GPR_K0, handler);
uasm_i_jr(&buf, GPR_K0);
uasm_i_nop(&buf);
}
local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
@ -2299,7 +2299,7 @@ static const char panic_null_cerr[] =
void set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
unsigned long uncached_ebase = CKSEG1ADDR(ebase);
unsigned long uncached_ebase = CKSEG1ADDR_OR_64BIT(__pa(ebase));
if (!addr)
panic(panic_null_cerr);
@ -2351,10 +2351,13 @@ void __init trap_init(void)
* EVA is special though as it allows segments to be rearranged
* and to become uncached during cache error handling.
*/
if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
if (!IS_ENABLED(CONFIG_EVA) && ebase_pa < 0x20000000)
ebase = CKSEG0ADDR(ebase_pa);
else
ebase = (unsigned long)phys_to_virt(ebase_pa);
if (ebase_pa >= 0x20000000)
pr_warn("ebase(%pa) should better be in KSeg0",
&ebase_pa);
}
if (cpu_has_mmips) {
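On 64-bit kernels ebase may no longer sit below 512MB, so the uncached alias is now derived from the physical address rather than assuming a KSEG1 mapping. A conceptual sketch of the address selection (64-bit build assumed; not the kernel macros themselves):

static unsigned long uncached_alias(unsigned long pa)
{
	if (pa < 0x20000000UL)			/* fits in the KSEG1 window */
		return 0xffffffffa0000000UL + pa;
	return 0x9000000000000000UL + pa;	/* XKPHYS, uncached CCA */
}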


@ -95,8 +95,8 @@ int vpe_run(struct vpe *v)
* We don't pass the memsize here, so VPE programs need to be
* compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined.
*/
mttgpr(7, 0);
mttgpr(6, v->ntcs);
mttgpr($7, 0);
mttgpr($6, v->ntcs);
/* set up VPE1 */
/*


@ -13,70 +13,17 @@
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/regdef.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>
/* Register names */
#define ZERO 0
#define AT 1
#define V0 2
#define V1 3
#define A0 4
#define A1 5
#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0 12
#define T1 13
#define T2 14
#define T3 15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
#define S0 16
#define S1 17
#define T9 25
#define K0 26
#define K1 27
#define GP 28
#define SP 29
#define RA 31
/* Some CP0 registers */
#define C0_PWBASE 5, 5
#define C0_HWRENA 7, 0
#define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1
#define C0_BADINSTRP 8, 2
#define C0_PGD 9, 7
#define C0_ENTRYHI 10, 0
#define C0_GUESTCTL1 10, 4
#define C0_STATUS 12, 0
#define C0_GUESTCTL0 12, 6
#define C0_CAUSE 13, 0
#define C0_EPC 14, 0
#define C0_EBASE 15, 1
#define C0_CONFIG5 16, 5
#define C0_DDATA_LO 28, 3
#define C0_ERROREPC 30, 0
#define CALLFRAME_SIZ 32
#ifdef CONFIG_64BIT
#define ST0_KX_IF_64 ST0_KX
#else
#define ST0_KX_IF_64 0
#endif
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_vcpu[2] = { C0_DDATALO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
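C0_DDATALO and C0_ERROREPC now come from asm/mipsregs.h and expand to a register/select pair, which is how a single macro can fill both slots of these two-element arrays. A sketch of the expansion (values illustrative):

/* hypothetical define in the style of the shared CP0 names */
#define C0_EXAMPLE 28, 3			/* reg 28, select 3 */

static unsigned int scratch[2] = { C0_EXAMPLE };/* becomes { 28, 3 } */

/* and the pair can be passed straight to a uasm accessor:
 *   UASM_i_MTC0(&p, GPR_K1, scratch[0], scratch[1]);
 */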
enum label_id {
@ -212,60 +159,60 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i;
/*
* A0: vcpu
* GPR_A0: vcpu
*/
/* k0/k1 not being used in host kernel context */
UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
UASM_i_ADDIU(&p, GPR_K1, GPR_SP, -(int)sizeof(struct pt_regs));
for (i = 16; i < 32; ++i) {
if (i == 24)
i = 28;
UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
}
/* Save host status */
uasm_i_mfc0(&p, V0, C0_STATUS);
UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
UASM_i_SW(&p, GPR_V0, offsetof(struct pt_regs, cp0_status), GPR_K1);
/* Save scratch registers, will be used to store pointer to vcpu etc */
kvm_mips_build_save_scratch(&p, V1, K1);
kvm_mips_build_save_scratch(&p, GPR_V1, GPR_K1);
/* VCPU scratch register has pointer to vcpu */
UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_MTC0(&p, GPR_A0, scratch_vcpu[0], scratch_vcpu[1]);
/* Offset into vcpu->arch */
UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
UASM_i_ADDIU(&p, GPR_K1, GPR_A0, offsetof(struct kvm_vcpu, arch));
/*
* Save the host stack to VCPU, used for exception processing
* when we exit from the Guest
*/
UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
UASM_i_SW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
/* Save the kernel gp as well */
UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
UASM_i_SW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
/*
* Setup status register for running the guest in UM, interrupts
* are disabled
*/
UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
uasm_i_mtc0(&p, K0, C0_STATUS);
UASM_i_LA(&p, GPR_K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
/* load up the new EBASE */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
build_set_exc_base(&p, K0);
UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
build_set_exc_base(&p, GPR_K0);
/*
* Now that the new EBASE has been loaded, unset BEV, set
* interrupt mask as it was but make sure that timer interrupts
* are enabled
*/
uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
uasm_i_andi(&p, V0, V0, ST0_IM);
uasm_i_or(&p, K0, K0, V0);
uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_addiu(&p, GPR_K0, GPR_ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
uasm_i_andi(&p, GPR_V0, GPR_V0, ST0_IM);
uasm_i_or(&p, GPR_K0, GPR_K0, GPR_V0);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
@ -296,15 +243,15 @@ static void *kvm_mips_build_enter_guest(void *addr)
memset(relocs, 0, sizeof(relocs));
/* Set Guest EPC */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MTC0(&p, T0, C0_EPC);
UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
UASM_i_MTC0(&p, GPR_T0, C0_EPC);
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
if (cpu_has_ldpte)
UASM_i_MFC0(&p, K0, C0_PWBASE);
UASM_i_MFC0(&p, GPR_K0, C0_PWBASE);
else
UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
UASM_i_MFC0(&p, GPR_K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
/*
* Set up KVM GPA pgd.
@ -312,24 +259,24 @@ static void *kvm_mips_build_enter_guest(void *addr)
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*
* We keep S0 pointing at struct kvm so we can load the ASID below.
* We keep GPR_S0 pointing at struct kvm so we can load the ASID below.
*/
UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
(int)offsetof(struct kvm_vcpu, arch), K1);
UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9);
UASM_i_LW(&p, GPR_S0, (int)offsetof(struct kvm_vcpu, kvm) -
(int)offsetof(struct kvm_vcpu, arch), GPR_K1);
UASM_i_LW(&p, GPR_A0, offsetof(struct kvm, arch.gpa_mm.pgd), GPR_S0);
UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, GPR_RA, GPR_T9);
/* delay slot */
if (cpu_has_htw)
UASM_i_MTC0(&p, A0, C0_PWBASE);
UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Set GM bit to setup eret to VZ guest context */
uasm_i_addiu(&p, V1, ZERO, 1);
uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
uasm_i_addiu(&p, GPR_V1, GPR_ZERO, 1);
uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
uasm_i_ins(&p, GPR_K0, GPR_V1, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
if (cpu_has_guestid) {
/*
@ -338,13 +285,13 @@ static void *kvm_mips_build_enter_guest(void *addr)
*/
/* Get current GuestID */
uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = GuestCtl1.ID */
uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
uasm_i_ext(&p, GPR_T1, GPR_T0, MIPS_GCTL1_ID_SHIFT,
MIPS_GCTL1_ID_WIDTH);
uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
uasm_i_ins(&p, GPR_T0, GPR_T1, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
/* GuestID handles dealiasing so we don't need to touch ASID */
goto skip_asid_restore;
@ -353,65 +300,65 @@ static void *kvm_mips_build_enter_guest(void *addr)
/* Root ASID Dealias (RAD) */
/* Save host ASID */
UASM_i_MFC0(&p, K0, C0_ENTRYHI);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
K1);
UASM_i_MFC0(&p, GPR_K0, C0_ENTRYHI);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
GPR_K1);
/* Set the root ASID for the Guest */
UASM_i_ADDIU(&p, T1, S0,
UASM_i_ADDIU(&p, GPR_T1, GPR_S0,
offsetof(struct kvm, arch.gpa_mm.context.asid));
/* t1: contains the base of the ASID array, need to get the cpu id */
/* smp_processor_id */
uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP);
/* index the ASID array */
uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
UASM_i_ADDU(&p, T3, T1, T2);
UASM_i_LW(&p, K0, 0, T3);
uasm_i_sll(&p, GPR_T2, GPR_T2, ilog2(sizeof(long)));
UASM_i_ADDU(&p, GPR_T3, GPR_T1, GPR_T2);
UASM_i_LW(&p, GPR_K0, 0, GPR_T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
/*
* reuse ASID array offset
* cpuinfo_mips is a multiple of sizeof(long)
*/
uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
uasm_i_mul(&p, T2, T2, T3);
uasm_i_addiu(&p, GPR_T3, GPR_ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
uasm_i_mul(&p, GPR_T2, GPR_T2, GPR_T3);
UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
UASM_i_ADDU(&p, AT, AT, T2);
UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
uasm_i_and(&p, K0, K0, T2);
UASM_i_LA_mostly(&p, GPR_AT, (long)&cpu_data[0].asid_mask);
UASM_i_ADDU(&p, GPR_AT, GPR_AT, GPR_T2);
UASM_i_LW(&p, GPR_T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), GPR_AT);
uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T2);
#else
uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
uasm_i_andi(&p, GPR_K0, GPR_K0, MIPS_ENTRYHI_ASID);
#endif
/* Set up KVM VZ root ASID (!guestid) */
uasm_i_mtc0(&p, K0, C0_ENTRYHI);
uasm_i_mtc0(&p, GPR_K0, C0_ENTRYHI);
skip_asid_restore:
uasm_i_ehb(&p);
/* Disable RDHWR access */
uasm_i_mtc0(&p, ZERO, C0_HWRENA);
uasm_i_mtc0(&p, GPR_ZERO, C0_HWRENA);
/* load the guest context from VCPU and return */
for (i = 1; i < 32; ++i) {
/* Guest k0/k1 loaded later */
if (i == K0 || i == K1)
if (i == GPR_K0 || i == GPR_K1)
continue;
UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
}
#ifndef CONFIG_CPU_MIPSR6
/* Restore hi/lo */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
uasm_i_mthi(&p, K0);
UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
uasm_i_mthi(&p, GPR_K0);
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
uasm_i_mtlo(&p, K0);
UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
uasm_i_mtlo(&p, GPR_K0);
#endif
/* Restore the guest's k0/k1 registers */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
/* Jump to guest */
uasm_i_eret(&p);
@ -444,13 +391,13 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
memset(relocs, 0, sizeof(relocs));
/* Save guest k1 into scratch register */
UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
/* Get the VCPU pointer from the VCPU scratch register */
UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
/* Save guest k0 into VCPU structure */
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
/*
* Some of the common tlbex code uses current_cpu_type(). For KVM we
@ -459,13 +406,13 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
preempt_disable();
#ifdef CONFIG_CPU_LOONGSON64
UASM_i_MFC0(&p, K1, C0_PGD);
uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
UASM_i_MFC0(&p, GPR_K1, C0_PGD);
uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */
#endif
uasm_i_ldpte(&p, K1, 0); /* even */
uasm_i_ldpte(&p, K1, 1); /* odd */
uasm_i_ldpte(&p, GPR_K1, 0); /* even */
uasm_i_ldpte(&p, GPR_K1, 1); /* odd */
uasm_i_tlbwr(&p);
#else
/*
@ -480,27 +427,27 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
*/
#ifdef CONFIG_64BIT
build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
#else
build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
#endif
/* we don't support huge pages yet */
build_get_ptep(&p, K0, K1);
build_update_entries(&p, K0, K1);
build_get_ptep(&p, GPR_K0, GPR_K1);
build_update_entries(&p, GPR_K0, GPR_K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
#endif
preempt_enable();
/* Get the VCPU pointer from the VCPU scratch register again */
UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
/* Restore the guest's k0/k1 registers */
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
uasm_i_ehb(&p);
UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
UASM_i_MFC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
/* Jump to guest */
uasm_i_eret(&p);
@ -530,14 +477,14 @@ void *kvm_mips_build_exception(void *addr, void *handler)
memset(relocs, 0, sizeof(relocs));
/* Save guest k1 into scratch register */
UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
/* Get the VCPU pointer from the VCPU scratch register */
UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
/* Save guest k0 into VCPU structure */
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
/* Branch to the common handler */
uasm_il_b(&p, &r, label_exit_common);
@ -585,85 +532,85 @@ void *kvm_mips_build_exit(void *addr)
/* Start saving Guest context to VCPU */
for (i = 0; i < 32; ++i) {
/* Guest k0/k1 saved later */
if (i == K0 || i == K1)
if (i == GPR_K0 || i == GPR_K1)
continue;
UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
}
#ifndef CONFIG_CPU_MIPSR6
/* We need to save hi/lo and restore them on the way out */
uasm_i_mfhi(&p, T0);
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
uasm_i_mfhi(&p, GPR_T0);
UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
uasm_i_mflo(&p, T0);
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
uasm_i_mflo(&p, GPR_T0);
UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
#endif
/* Finally save guest k1 to VCPU */
uasm_i_ehb(&p);
UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
UASM_i_MFC0(&p, GPR_T0, scratch_tmp[0], scratch_tmp[1]);
UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
/* Now that context has been saved, we can use other registers */
/* Restore vcpu */
UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_MFC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
/*
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
* the exception
*/
UASM_i_MFC0(&p, K0, C0_EPC);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MFC0(&p, GPR_K0, C0_EPC);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
UASM_i_MFC0(&p, K0, C0_BADVADDR);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
K1);
UASM_i_MFC0(&p, GPR_K0, C0_BADVADDR);
UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
GPR_K1);
uasm_i_mfc0(&p, K0, C0_CAUSE);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
uasm_i_mfc0(&p, GPR_K0, C0_CAUSE);
uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), GPR_K1);
if (cpu_has_badinstr) {
uasm_i_mfc0(&p, K0, C0_BADINSTR);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
host_cp0_badinstr), K1);
uasm_i_mfc0(&p, GPR_K0, C0_BADINSTR);
uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
host_cp0_badinstr), GPR_K1);
}
if (cpu_has_badinstrp) {
uasm_i_mfc0(&p, K0, C0_BADINSTRP);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
host_cp0_badinstrp), K1);
uasm_i_mfc0(&p, GPR_K0, C0_BADINSTRP);
uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
host_cp0_badinstrp), GPR_K1);
}
/* Now restore the host state just enough to run the handlers */
/* Switch EBASE to the one used by Linux */
/* load up the host EBASE */
uasm_i_mfc0(&p, V0, C0_STATUS);
uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
uasm_i_lui(&p, AT, ST0_BEV >> 16);
uasm_i_or(&p, K0, V0, AT);
uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
uasm_i_or(&p, GPR_K0, GPR_V0, GPR_AT);
uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
UASM_i_LA_mostly(&p, K0, (long)&ebase);
UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
build_set_exc_base(&p, K0);
UASM_i_LA_mostly(&p, GPR_K0, (long)&ebase);
UASM_i_LW(&p, GPR_K0, uasm_rel_lo((long)&ebase), GPR_K0);
build_set_exc_base(&p, GPR_K0);
if (raw_cpu_has_fpu) {
/*
* If FPU is enabled, save FCR31 and clear it so that later
* ctc1's don't trigger FPE for pending exceptions.
*/
uasm_i_lui(&p, AT, ST0_CU1 >> 16);
uasm_i_and(&p, V1, V0, AT);
uasm_il_beqz(&p, &r, V1, label_fpu_1);
uasm_i_lui(&p, GPR_AT, ST0_CU1 >> 16);
uasm_i_and(&p, GPR_V1, GPR_V0, GPR_AT);
uasm_il_beqz(&p, &r, GPR_V1, label_fpu_1);
uasm_i_nop(&p);
uasm_i_cfc1(&p, T0, 31);
uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
K1);
uasm_i_ctc1(&p, ZERO, 31);
uasm_i_cfc1(&p, GPR_T0, 31);
uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
GPR_K1);
uasm_i_ctc1(&p, GPR_ZERO, 31);
uasm_l_fpu_1(&l, p);
}
@ -672,22 +619,22 @@ void *kvm_mips_build_exit(void *addr)
* If MSA is enabled, save MSACSR and clear it so that later
* instructions don't trigger MSAFPE for pending exceptions.
*/
uasm_i_mfc0(&p, T0, C0_CONFIG5);
uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
uasm_il_beqz(&p, &r, T0, label_msa_1);
uasm_i_mfc0(&p, GPR_T0, C0_CONFIG5);
uasm_i_ext(&p, GPR_T0, GPR_T0, 27, 1); /* MIPS_CONF5_MSAEN */
uasm_il_beqz(&p, &r, GPR_T0, label_msa_1);
uasm_i_nop(&p);
uasm_i_cfcmsa(&p, T0, MSA_CSR);
uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
K1);
uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
uasm_i_cfcmsa(&p, GPR_T0, MSA_CSR);
uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
GPR_K1);
uasm_i_ctcmsa(&p, MSA_CSR, GPR_ZERO);
uasm_l_msa_1(&l, p);
}
/* Restore host ASID */
if (!cpu_has_guestid) {
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
K1);
UASM_i_MTC0(&p, K0, C0_ENTRYHI);
UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
GPR_K1);
UASM_i_MTC0(&p, GPR_K0, C0_ENTRYHI);
}
/*
@ -696,56 +643,56 @@ void *kvm_mips_build_exit(void *addr)
* - call tlbmiss_handler_setup_pgd(mm->pgd)
* - write mm->pgd into CP0_PWBase
*/
UASM_i_LW(&p, A0,
offsetof(struct kvm_vcpu_arch, host_pgd), K1);
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9);
UASM_i_LW(&p, GPR_A0,
offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, GPR_RA, GPR_T9);
/* delay slot */
if (cpu_has_htw)
UASM_i_MTC0(&p, A0, C0_PWBASE);
UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
else
uasm_i_nop(&p);
/* Clear GM bit so we don't enter guest mode when EXL is cleared */
uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
uasm_i_ins(&p, GPR_K0, GPR_ZERO, MIPS_GCTL0_GM_SHIFT, 1);
uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
/* Save GuestCtl0 so we can access GExcCode after CPU migration */
uasm_i_sw(&p, K0,
offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
uasm_i_sw(&p, GPR_K0,
offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), GPR_K1);
if (cpu_has_guestid) {
/*
* Clear root mode GuestID, so that root TLB operations use the
* root GuestID in the root TLB.
*/
uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
uasm_i_ins(&p, GPR_T0, GPR_ZERO, MIPS_GCTL1_RID_SHIFT,
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
}
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
uasm_i_and(&p, V0, V0, AT);
uasm_i_lui(&p, AT, ST0_CU0 >> 16);
uasm_i_or(&p, V0, V0, AT);
uasm_i_addiu(&p, GPR_AT, GPR_ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
uasm_i_and(&p, GPR_V0, GPR_V0, GPR_AT);
uasm_i_lui(&p, GPR_AT, ST0_CU0 >> 16);
uasm_i_or(&p, GPR_V0, GPR_V0, GPR_AT);
#ifdef CONFIG_64BIT
uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
uasm_i_ori(&p, GPR_V0, GPR_V0, ST0_SX | ST0_UX);
#endif
uasm_i_mtc0(&p, V0, C0_STATUS);
uasm_i_mtc0(&p, GPR_V0, C0_STATUS);
uasm_i_ehb(&p);
/* Load up host GP */
UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
/* Load up host GPR_GP */
UASM_i_LW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
/* Need a stack before we can jump to "C" */
UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
UASM_i_LW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
/* Saved host state */
UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -(int)sizeof(struct pt_regs));
/*
* XXXKYMA do we need to load the host ASID, maybe not because the
@ -753,12 +700,12 @@ void *kvm_mips_build_exit(void *addr)
*/
/* Restore host scratch registers, as we'll have clobbered them */
kvm_mips_build_restore_scratch(&p, K0, SP);
kvm_mips_build_restore_scratch(&p, GPR_K0, GPR_SP);
/* Restore RDHWR access */
UASM_i_LA_mostly(&p, K0, (long)&hwrena);
uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
uasm_i_mtc0(&p, K0, C0_HWRENA);
UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
/* Jump to handler */
/*
@ -766,10 +713,10 @@ void *kvm_mips_build_exit(void *addr)
* Now jump to the kvm_mips_handle_exit() to see if we can deal
* with this in the kernel
*/
uasm_i_move(&p, A0, S0);
UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, RA, T9);
UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
uasm_i_move(&p, GPR_A0, GPR_S0);
UASM_i_LA(&p, GPR_T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, GPR_RA, GPR_T9);
UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -CALLFRAME_SIZ);
uasm_resolve_relocs(relocs, labels);
@ -799,7 +746,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
memset(relocs, 0, sizeof(relocs));
/* Return from handler Make sure interrupts are disabled */
uasm_i_di(&p, ZERO);
uasm_i_di(&p, GPR_ZERO);
uasm_i_ehb(&p);
/*
@ -808,15 +755,15 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
* guest, reload k1
*/
uasm_i_move(&p, K1, S0);
UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
uasm_i_move(&p, GPR_K1, GPR_S0);
UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
/*
* Check return value, should tell us if we are returning to the
* host (handle I/O etc)or resuming the guest
*/
uasm_i_andi(&p, T0, V0, RESUME_HOST);
uasm_il_bnez(&p, &r, T0, label_return_to_host);
uasm_i_andi(&p, GPR_T0, GPR_V0, RESUME_HOST);
uasm_il_bnez(&p, &r, GPR_T0, label_return_to_host);
uasm_i_nop(&p);
p = kvm_mips_build_ret_to_guest(p);
@ -843,24 +790,24 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
u32 *p = addr;
/* Put the saved pointer to vcpu (s0) back into the scratch register */
UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
UASM_i_MTC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
/* Load up the Guest EBASE to minimize the window where BEV is set */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
/* Switch EBASE back to the one used by KVM */
uasm_i_mfc0(&p, V1, C0_STATUS);
uasm_i_lui(&p, AT, ST0_BEV >> 16);
uasm_i_or(&p, K0, V1, AT);
uasm_i_mtc0(&p, K0, C0_STATUS);
uasm_i_mfc0(&p, GPR_V1, C0_STATUS);
uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
uasm_i_or(&p, GPR_K0, GPR_V1, GPR_AT);
uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
uasm_i_ehb(&p);
build_set_exc_base(&p, T0);
build_set_exc_base(&p, GPR_T0);
/* Setup status register for running guest in UM */
uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
uasm_i_and(&p, V1, V1, AT);
uasm_i_mtc0(&p, V1, C0_STATUS);
uasm_i_ori(&p, GPR_V1, GPR_V1, ST0_EXL | KSU_USER | ST0_IE);
UASM_i_LA(&p, GPR_AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
uasm_i_and(&p, GPR_V1, GPR_V1, GPR_AT);
uasm_i_mtc0(&p, GPR_V1, C0_STATUS);
uasm_i_ehb(&p);
p = kvm_mips_build_enter_guest(p);
@ -884,31 +831,31 @@ static void *kvm_mips_build_ret_to_host(void *addr)
unsigned int i;
/* EBASE is already pointing to Linux */
UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
UASM_i_ADDIU(&p, GPR_K1, GPR_K1, -(int)sizeof(struct pt_regs));
/*
* r2/v0 is the return code, shift it down by 2 (arithmetic)
* to recover the err code
*/
uasm_i_sra(&p, K0, V0, 2);
uasm_i_move(&p, V0, K0);
uasm_i_sra(&p, GPR_K0, GPR_V0, 2);
uasm_i_move(&p, GPR_V0, GPR_K0);
/* Load context saved on the host stack */
for (i = 16; i < 31; ++i) {
if (i == 24)
i = 28;
UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
}
/* Restore RDHWR access */
UASM_i_LA_mostly(&p, K0, (long)&hwrena);
uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
uasm_i_mtc0(&p, K0, C0_HWRENA);
UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
/* Restore RA, which is the address we will return to */
UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
uasm_i_jr(&p, RA);
/* Restore GPR_RA, which is the address we will return to */
UASM_i_LW(&p, GPR_RA, offsetof(struct pt_regs, regs[GPR_RA]), GPR_K1);
uasm_i_jr(&p, GPR_RA);
uasm_i_nop(&p);
return p;


@ -24,6 +24,7 @@
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/regdef.h>
#include <asm/cpu.h>
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
@ -34,19 +35,6 @@
#include <asm/uasm.h>
/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31
/* Handle labels (which must be positive integers). */
enum label_id {
label_clear_nopref = 1,
@ -106,16 +94,16 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) &&
r4k_daddiu_bug()) {
if (off > 0x7fff) {
uasm_i_lui(buf, T9, uasm_rel_hi(off));
uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off));
uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off));
} else
uasm_i_addiu(buf, T9, ZERO, off);
uasm_i_daddu(buf, reg1, reg2, T9);
uasm_i_addiu(buf, GPR_T9, GPR_ZERO, off);
uasm_i_daddu(buf, reg1, reg2, GPR_T9);
} else {
if (off > 0x7fff) {
uasm_i_lui(buf, T9, uasm_rel_hi(off));
uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
UASM_i_ADDU(buf, reg1, reg2, T9);
uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off));
uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off));
UASM_i_ADDU(buf, reg1, reg2, GPR_T9);
} else
UASM_i_ADDIU(buf, reg1, reg2, off);
}
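pg_addiu() has to split large offsets because addiu/daddiu take only a 16-bit signed immediate; uasm_rel_hi/uasm_rel_lo perform the usual carry-corrected split. A small C model of that split (a sketch, not the uasm helpers themselves):

static int rel_lo(long v)
{
	return (short)v;			/* sign-extended low 16 bits */
}

static int rel_hi(long v)
{
	/* subtract the (possibly negative) low part so that
	 * (rel_hi << 16) + rel_lo reconstructs v exactly */
	return (int)((v - rel_lo(v)) >> 16);
}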
@ -233,9 +221,9 @@ static void set_prefetch_parameters(void)
static void build_clear_store(u32 **buf, int off)
{
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
uasm_i_sd(buf, ZERO, off, A0);
uasm_i_sd(buf, GPR_ZERO, off, GPR_A0);
} else {
uasm_i_sw(buf, ZERO, off, A0);
uasm_i_sw(buf, GPR_ZERO, off, GPR_A0);
}
}
@ -246,10 +234,10 @@ static inline void build_clear_pref(u32 **buf, int off)
if (pref_bias_clear_store) {
_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
A0);
GPR_A0);
} else if (cache_line_size == (half_clear_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0);
} else if (cpu_has_cache_cdex_p) {
if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
cpu_is_r4600_v1_x()) {
@ -261,9 +249,9 @@ static inline void build_clear_pref(u32 **buf, int off)
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0);
}
}
}
@ -301,12 +289,12 @@ void build_clear_page(void)
off = PAGE_SIZE - pref_bias_clear_store;
if (off > 0xffff || !pref_bias_clear_store)
pg_addiu(&buf, A2, A0, off);
pg_addiu(&buf, GPR_A2, GPR_A0, off);
else
uasm_i_ori(&buf, A2, A0, off);
uasm_i_ori(&buf, GPR_A2, GPR_A0, off);
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000));
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
* cache_line_size : 0;
@ -320,36 +308,36 @@ void build_clear_page(void)
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < half_clear_loop_size);
pg_addiu(&buf, A0, A0, 2 * off);
pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
build_clear_pref(&buf, off);
if (off == -clear_word_size)
uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_pref);
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < 0);
if (pref_bias_clear_store) {
pg_addiu(&buf, A2, A0, pref_bias_clear_store);
pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_clear_store);
uasm_l_clear_nopref(&l, buf);
off = 0;
do {
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < half_clear_loop_size);
pg_addiu(&buf, A0, A0, 2 * off);
pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
if (off == -clear_word_size)
uasm_il_bne(&buf, &r, A0, A2,
uasm_il_bne(&buf, &r, GPR_A0, GPR_A2,
label_clear_nopref);
build_clear_store(&buf, off);
off += clear_word_size;
} while (off < 0);
}
uasm_i_jr(&buf, RA);
uasm_i_jr(&buf, GPR_RA);
uasm_i_nop(&buf);
BUG_ON(buf > &__clear_page_end);
@ -369,18 +357,18 @@ void build_clear_page(void)
static void build_copy_load(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_ld(buf, reg, off, A1);
uasm_i_ld(buf, reg, off, GPR_A1);
} else {
uasm_i_lw(buf, reg, off, A1);
uasm_i_lw(buf, reg, off, GPR_A1);
}
}
static void build_copy_store(u32 **buf, int reg, int off)
{
if (cpu_has_64bit_gp_regs) {
uasm_i_sd(buf, reg, off, A0);
uasm_i_sd(buf, reg, off, GPR_A0);
} else {
uasm_i_sw(buf, reg, off, A0);
uasm_i_sw(buf, reg, off, GPR_A0);
}
}
@ -390,7 +378,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
return;
if (pref_bias_copy_load)
_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, GPR_A1);
}
static inline void build_copy_store_pref(u32 **buf, int off)
@ -400,10 +388,10 @@ static inline void build_copy_store_pref(u32 **buf, int off)
if (pref_bias_copy_store) {
_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
A0);
GPR_A0);
} else if (cache_line_size == (half_copy_loop_size << 1)) {
if (cpu_has_cache_cdex_s) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0);
} else if (cpu_has_cache_cdex_p) {
if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
cpu_is_r4600_v1_x()) {
@ -415,9 +403,9 @@ static inline void build_copy_store_pref(u32 **buf, int off)
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0);
}
}
}
@ -454,12 +442,12 @@ void build_copy_page(void)
off = PAGE_SIZE - pref_bias_copy_load;
if (off > 0xffff || !pref_bias_copy_load)
pg_addiu(&buf, A2, A0, off);
pg_addiu(&buf, GPR_A2, GPR_A0, off);
else
uasm_i_ori(&buf, A2, A0, off);
uasm_i_ori(&buf, GPR_A2, GPR_A0, off);
if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000));
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
cache_line_size : 0;
@ -476,126 +464,126 @@ void build_copy_page(void)
uasm_l_copy_pref_both(&l, buf);
do {
build_copy_load_pref(&buf, off);
build_copy_load(&buf, T0, off);
build_copy_load(&buf, GPR_T0, off);
build_copy_load_pref(&buf, off + copy_word_size);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load(&buf, GPR_T1, off + copy_word_size);
build_copy_load_pref(&buf, off + 2 * copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_load_pref(&buf, off + 3 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
build_copy_store(&buf, T0, off);
build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
pg_addiu(&buf, A1, A1, 2 * off);
pg_addiu(&buf, A0, A0, 2 * off);
pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
build_copy_load_pref(&buf, off);
build_copy_load(&buf, T0, off);
build_copy_load(&buf, GPR_T0, off);
build_copy_load_pref(&buf, off + copy_word_size);
build_copy_load(&buf, T1, off + copy_word_size);
build_copy_load(&buf, GPR_T1, off + copy_word_size);
build_copy_load_pref(&buf, off + 2 * copy_word_size);
build_copy_load(&buf, T2, off + 2 * copy_word_size);
build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_load_pref(&buf, off + 3 * copy_word_size);
build_copy_load(&buf, T3, off + 3 * copy_word_size);
build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
build_copy_store(&buf, T0, off);
build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
build_copy_store(&buf, T1, off + copy_word_size);
build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
build_copy_store(&buf, T2, off + 2 * copy_word_size);
build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
if (off == -(4 * copy_word_size))
uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
build_copy_store(&buf, T3, off + 3 * copy_word_size);
uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_both);
build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
if (pref_bias_copy_load - pref_bias_copy_store) {
-pg_addiu(&buf, A2, A0,
+pg_addiu(&buf, GPR_A2, GPR_A0,
pref_bias_copy_load - pref_bias_copy_store);
uasm_l_copy_pref_store(&l, buf);
off = 0;
do {
-build_copy_load(&buf, T0, off);
-build_copy_load(&buf, T1, off + copy_word_size);
-build_copy_load(&buf, T2, off + 2 * copy_word_size);
-build_copy_load(&buf, T3, off + 3 * copy_word_size);
+build_copy_load(&buf, GPR_T0, off);
+build_copy_load(&buf, GPR_T1, off + copy_word_size);
+build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
-build_copy_store(&buf, T0, off);
+build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
-build_copy_store(&buf, T1, off + copy_word_size);
+build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
-build_copy_store(&buf, T2, off + 2 * copy_word_size);
+build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
-build_copy_store(&buf, T3, off + 3 * copy_word_size);
+build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
-pg_addiu(&buf, A1, A1, 2 * off);
-pg_addiu(&buf, A0, A0, 2 * off);
+pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
+pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
-build_copy_load(&buf, T0, off);
-build_copy_load(&buf, T1, off + copy_word_size);
-build_copy_load(&buf, T2, off + 2 * copy_word_size);
-build_copy_load(&buf, T3, off + 3 * copy_word_size);
+build_copy_load(&buf, GPR_T0, off);
+build_copy_load(&buf, GPR_T1, off + copy_word_size);
+build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
build_copy_store_pref(&buf, off);
-build_copy_store(&buf, T0, off);
+build_copy_store(&buf, GPR_T0, off);
build_copy_store_pref(&buf, off + copy_word_size);
-build_copy_store(&buf, T1, off + copy_word_size);
+build_copy_store(&buf, GPR_T1, off + copy_word_size);
build_copy_store_pref(&buf, off + 2 * copy_word_size);
-build_copy_store(&buf, T2, off + 2 * copy_word_size);
+build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
build_copy_store_pref(&buf, off + 3 * copy_word_size);
if (off == -(4 * copy_word_size))
-uasm_il_bne(&buf, &r, A2, A0,
+uasm_il_bne(&buf, &r, GPR_A2, GPR_A0,
label_copy_pref_store);
-build_copy_store(&buf, T3, off + 3 * copy_word_size);
+build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
}
if (pref_bias_copy_store) {
-pg_addiu(&buf, A2, A0, pref_bias_copy_store);
+pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_store);
uasm_l_copy_nopref(&l, buf);
off = 0;
do {
-build_copy_load(&buf, T0, off);
-build_copy_load(&buf, T1, off + copy_word_size);
-build_copy_load(&buf, T2, off + 2 * copy_word_size);
-build_copy_load(&buf, T3, off + 3 * copy_word_size);
-build_copy_store(&buf, T0, off);
-build_copy_store(&buf, T1, off + copy_word_size);
-build_copy_store(&buf, T2, off + 2 * copy_word_size);
-build_copy_store(&buf, T3, off + 3 * copy_word_size);
+build_copy_load(&buf, GPR_T0, off);
+build_copy_load(&buf, GPR_T1, off + copy_word_size);
+build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
+build_copy_store(&buf, GPR_T0, off);
+build_copy_store(&buf, GPR_T1, off + copy_word_size);
+build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
+build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < half_copy_loop_size);
-pg_addiu(&buf, A1, A1, 2 * off);
-pg_addiu(&buf, A0, A0, 2 * off);
+pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
+pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
off = -off;
do {
-build_copy_load(&buf, T0, off);
-build_copy_load(&buf, T1, off + copy_word_size);
-build_copy_load(&buf, T2, off + 2 * copy_word_size);
-build_copy_load(&buf, T3, off + 3 * copy_word_size);
-build_copy_store(&buf, T0, off);
-build_copy_store(&buf, T1, off + copy_word_size);
-build_copy_store(&buf, T2, off + 2 * copy_word_size);
+build_copy_load(&buf, GPR_T0, off);
+build_copy_load(&buf, GPR_T1, off + copy_word_size);
+build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
+build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
+build_copy_store(&buf, GPR_T0, off);
+build_copy_store(&buf, GPR_T1, off + copy_word_size);
+build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
if (off == -(4 * copy_word_size))
-uasm_il_bne(&buf, &r, A2, A0,
+uasm_il_bne(&buf, &r, GPR_A2, GPR_A0,
label_copy_nopref);
-build_copy_store(&buf, T3, off + 3 * copy_word_size);
+build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
off += 4 * copy_word_size;
} while (off < 0);
}
-uasm_i_jr(&buf, RA);
+uasm_i_jr(&buf, GPR_RA);
uasm_i_nop(&buf);
BUG_ON(buf > &__copy_page_end);
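
The copy-page template above is emitted by uasm, which encodes instructions from raw register numbers, so the T*/A*/RA to GPR_* rename is a namespace move onto shared GPR_ macros rather than a behavioral change. A minimal sketch of what such numeric definitions look like, using o32 register numbering; illustrative values only, not the kernel's actual regdef.h contents:

	/*
	 * Illustrative only: numeric MIPS GPR indices of the kind the GPR_*
	 * names imply (o32 numbering assumed). uasm instruction builders
	 * consume these raw numbers directly when encoding instructions.
	 */
	#define GPR_AT	1	/* assembler temporary */
	#define GPR_A0	4	/* argument registers $4-$7 */
	#define GPR_A1	5
	#define GPR_A2	6
	#define GPR_T0	8	/* caller-saved temporaries */
	#define GPR_T1	9
	#define GPR_T2	10
	#define GPR_T3	11
	#define GPR_K0	26	/* reserved for kernel/exception use */
	#define GPR_K1	27
	#define GPR_RA	31	/* return address */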

@@ -32,7 +32,9 @@
#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
+#include <asm/regdef.h>
#include <asm/uasm.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
@@ -276,27 +278,6 @@ static inline void dump_handler(const char *symbol, const void *start, const voi
pr_debug("\tEND(%s)\n", symbol);
}
-/* The only general purpose registers allowed in TLB handlers. */
-#define K0 26
-#define K1 27
-/* Some CP0 registers */
-#define C0_INDEX 0, 0
-#define C0_ENTRYLO0 2, 0
-#define C0_TCBIND 2, 2
-#define C0_ENTRYLO1 3, 0
-#define C0_CONTEXT 4, 0
-#define C0_PAGEMASK 5, 0
-#define C0_PWBASE 5, 5
-#define C0_PWFIELD 5, 6
-#define C0_PWSIZE 5, 7
-#define C0_PWCTL 6, 6
-#define C0_BADVADDR 8, 0
-#define C0_PGD 9, 7
-#define C0_ENTRYHI 10, 0
-#define C0_EPC 14, 0
-#define C0_XCONTEXT 20, 0
#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
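
The removed C0_* defines rely on a small preprocessor idiom worth spelling out: a CP0 register is addressed by a (register number, select) pair, and defining the pair as one comma-containing macro lets a single token at the call site expand into two function arguments. A self-contained sketch of the trick, with a hypothetical emit_mfc0() helper standing in for the kernel's generated uasm builders:

	#include <stdio.h>

	/* A CP0 register is named by a (number, select) pair. */
	#define C0_BADVADDR	8, 0
	#define C0_ENTRYHI	10, 0

	/* A plain three-parameter function... */
	static void emit_mfc0(int rt, int c0reg, int sel)
	{
		printf("mfc0 $%d, $%d, %d\n", rt, c0reg, sel);
	}

	int main(void)
	{
		/* ...but the pair macro expands at the call site, so one
		 * argument in the source becomes two in the actual call. */
		emit_mfc0(26, C0_ENTRYHI);	/* -> mfc0 $26, $10, 0 */
		emit_mfc0(26, C0_BADVADDR);	/* -> mfc0 $26, $8, 0 */
		return 0;
	}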
@@ -356,30 +337,30 @@ static struct work_registers build_get_work_registers(u32 **p)
if (scratch_reg >= 0) {
/* Save in CPU local C0_KScratch? */
UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
-r.r1 = K0;
-r.r2 = K1;
-r.r3 = 1;
+r.r1 = GPR_K0;
+r.r2 = GPR_K1;
+r.r3 = GPR_AT;
return r;
}
if (num_possible_cpus() > 1) {
/* Get smp_processor_id */
-UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
-UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
+UASM_i_CPUID_MFC0(p, GPR_K0, SMP_CPUID_REG);
+UASM_i_SRL_SAFE(p, GPR_K0, GPR_K0, SMP_CPUID_REGSHIFT);
-/* handler_reg_save index in K0 */
-UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+/* handler_reg_save index in GPR_K0 */
+UASM_i_SLL(p, GPR_K0, GPR_K0, ilog2(sizeof(struct tlb_reg_save)));
-UASM_i_LA(p, K1, (long)&handler_reg_save);
-UASM_i_ADDU(p, K0, K0, K1);
+UASM_i_LA(p, GPR_K1, (long)&handler_reg_save);
+UASM_i_ADDU(p, GPR_K0, GPR_K0, GPR_K1);
} else {
-UASM_i_LA(p, K0, (long)&handler_reg_save);
+UASM_i_LA(p, GPR_K0, (long)&handler_reg_save);
}
-/* K0 now points to save area, save $1 and $2 */
-UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
-UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+/* GPR_K0 now points to save area, save $1 and $2 */
+UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
+UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
-r.r1 = K1;
+r.r1 = GPR_K1;
r.r2 = 1;
r.r3 = 2;
return r;
@@ -392,9 +373,9 @@ static void build_restore_work_registers(u32 **p)
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return;
}
-/* K0 already points to save area, restore $1 and $2 */
-UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
-UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+/* GPR_K0 already points to save area, restore $1 and $2 */
+UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
+UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -413,22 +394,22 @@ static void build_r3000_tlb_refill_handler(void)
memset(tlb_handler, 0, sizeof(tlb_handler));
p = tlb_handler;
-uasm_i_mfc0(&p, K0, C0_BADVADDR);
-uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
-uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
-uasm_i_srl(&p, K0, K0, 22); /* load delay */
-uasm_i_sll(&p, K0, K0, 2);
-uasm_i_addu(&p, K1, K1, K0);
-uasm_i_mfc0(&p, K0, C0_CONTEXT);
-uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
-uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
-uasm_i_addu(&p, K1, K1, K0);
-uasm_i_lw(&p, K0, 0, K1);
+uasm_i_mfc0(&p, GPR_K0, C0_BADVADDR);
+uasm_i_lui(&p, GPR_K1, uasm_rel_hi(pgdc)); /* cp0 delay */
+uasm_i_lw(&p, GPR_K1, uasm_rel_lo(pgdc), GPR_K1);
+uasm_i_srl(&p, GPR_K0, GPR_K0, 22); /* load delay */
+uasm_i_sll(&p, GPR_K0, GPR_K0, 2);
+uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
+uasm_i_mfc0(&p, GPR_K0, C0_CONTEXT);
+uasm_i_lw(&p, GPR_K1, 0, GPR_K1); /* cp0 delay */
+uasm_i_andi(&p, GPR_K0, GPR_K0, 0xffc); /* load delay */
+uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
+uasm_i_lw(&p, GPR_K0, 0, GPR_K1);
uasm_i_nop(&p); /* load delay */
-uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
-uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
+uasm_i_mtc0(&p, GPR_K0, C0_ENTRYLO0);
+uasm_i_mfc0(&p, GPR_K1, C0_EPC); /* cp0 delay */
uasm_i_tlbwr(&p); /* cp0 delay */
-uasm_i_jr(&p, K1);
+uasm_i_jr(&p, GPR_K1);
uasm_i_rfe(&p); /* branch delay */
if (p > tlb_handler + 32)
@@ -1276,11 +1257,11 @@ static void build_r4000_tlb_refill_handler(void)
memset(final_handler, 0, sizeof(final_handler));
if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
-htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
+htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, GPR_K0, GPR_K1,
scratch_reg);
vmalloc_mode = refill_scratch;
} else {
-htlb_info.huge_pte = K0;
+htlb_info.huge_pte = GPR_K0;
htlb_info.restore_scratch = 0;
htlb_info.need_reload_pte = true;
vmalloc_mode = refill_noscratch;
@@ -1290,29 +1271,29 @@
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
-uasm_i_dmfc0(&p, K0, C0_BADVADDR);
-uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
-uasm_i_xor(&p, K0, K0, K1);
-uasm_i_dsrl_safe(&p, K1, K0, 62);
-uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
-uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
-uasm_i_or(&p, K0, K0, K1);
-uasm_il_bnez(&p, &r, K0, label_leave);
+uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
+uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
+uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
+uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
+uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
+uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
+uasm_il_bnez(&p, &r, GPR_K0, label_leave);
/* No need for uasm_i_nop */
}
#ifdef CONFIG_64BIT
-build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
+build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
#else
-build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
+build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+build_is_huge_pte(&p, &r, GPR_K0, GPR_K1, label_tlb_huge_update);
#endif
-build_get_ptep(&p, K0, K1);
-build_update_entries(&p, K0, K1);
+build_get_ptep(&p, GPR_K0, GPR_K1);
+build_update_entries(&p, GPR_K0, GPR_K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
@@ -1320,14 +1301,14 @@
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
if (htlb_info.need_reload_pte)
-UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
-build_huge_update_entries(&p, htlb_info.huge_pte, K1);
-build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+UASM_i_LW(&p, htlb_info.huge_pte, 0, GPR_K1);
+build_huge_update_entries(&p, htlb_info.huge_pte, GPR_K1);
+build_huge_tlb_write_entry(&p, &l, &r, GPR_K0, tlb_random,
htlb_info.restore_scratch);
#endif
#ifdef CONFIG_64BIT
-build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
+build_get_pgd_vmalloc64(&p, &l, &r, GPR_K0, GPR_K1, vmalloc_mode);
#endif
/*
@@ -1500,34 +1481,35 @@ static void build_loongson3_tlb_refill_handler(void)
memset(tlb_handler, 0, sizeof(tlb_handler));
if (check_for_high_segbits) {
-uasm_i_dmfc0(&p, K0, C0_BADVADDR);
-uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
-uasm_il_beqz(&p, &r, K1, label_vmalloc);
+uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0,
+PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
+uasm_il_beqz(&p, &r, GPR_K1, label_vmalloc);
uasm_i_nop(&p);
-uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
+uasm_il_bgez(&p, &r, GPR_K0, label_large_segbits_fault);
uasm_i_nop(&p);
uasm_l_vmalloc(&l, p);
}
-uasm_i_dmfc0(&p, K1, C0_PGD);
+uasm_i_dmfc0(&p, GPR_K1, C0_PGD);
-uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
-uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */
#endif
-uasm_i_ldpte(&p, K1, 0); /* even */
-uasm_i_ldpte(&p, K1, 1); /* odd */
+uasm_i_ldpte(&p, GPR_K1, 0); /* even */
+uasm_i_ldpte(&p, GPR_K1, 1); /* odd */
uasm_i_tlbwr(&p);
/* restore page mask */
if (PM_DEFAULT_MASK >> 16) {
-uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
-uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
-uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+uasm_i_lui(&p, GPR_K0, PM_DEFAULT_MASK >> 16);
+uasm_i_ori(&p, GPR_K0, GPR_K0, PM_DEFAULT_MASK & 0xffff);
+uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
} else if (PM_DEFAULT_MASK) {
-uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
-uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+uasm_i_ori(&p, GPR_K0, 0, PM_DEFAULT_MASK);
+uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
} else {
uasm_i_mtc0(&p, 0, C0_PAGEMASK);
}
@@ -1536,8 +1518,8 @@
if (check_for_high_segbits) {
uasm_l_large_segbits_fault(&l, p);
-UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
-uasm_i_jr(&p, K1);
+UASM_i_LA(&p, GPR_K1, (unsigned long)tlb_do_page_fault_0);
+uasm_i_jr(&p, GPR_K1);
uasm_i_nop(&p);
}
uasm_i_nop(&p);
}
@@ -1903,11 +1885,11 @@ static void build_r3000_tlb_load_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
-build_r3000_tlbchange_handler_head(&p, K0, K1);
-build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
+build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+build_pte_present(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbl);
uasm_i_nop(&p); /* load delay */
-build_make_valid(&p, &r, K0, K1, -1);
-build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+build_make_valid(&p, &r, GPR_K0, GPR_K1, -1);
+build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
uasm_l_nopage_tlbl(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
@@ -1933,11 +1915,11 @@ static void build_r3000_tlb_store_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
-build_r3000_tlbchange_handler_head(&p, K0, K1);
-build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
+build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+build_pte_writable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbs);
uasm_i_nop(&p); /* load delay */
-build_make_write(&p, &r, K0, K1, -1);
-build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
+build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
+build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
uasm_l_nopage_tlbs(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -1963,11 +1945,11 @@ static void build_r3000_tlb_modify_handler(void)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
-build_r3000_tlbchange_handler_head(&p, K0, K1);
-build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
+build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
+build_pte_modifiable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
-build_make_write(&p, &r, K0, K1, -1);
-build_r3000_pte_reload_tlbwi(&p, K0, K1);
+build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
+build_r3000_pte_reload_tlbwi(&p, GPR_K0, GPR_K1);
uasm_l_nopage_tlbm(&l, p);
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2083,14 +2065,14 @@ static void build_r4000_tlb_load_handler(void)
if (bcm1250_m3_war()) {
unsigned int segbits = 44;
-uasm_i_dmfc0(&p, K0, C0_BADVADDR);
-uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
-uasm_i_xor(&p, K0, K0, K1);
-uasm_i_dsrl_safe(&p, K1, K0, 62);
-uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
-uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
-uasm_i_or(&p, K0, K0, K1);
-uasm_il_bnez(&p, &r, K0, label_leave);
+uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
+uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
+uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
+uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
+uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
+uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
+uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
+uasm_il_bnez(&p, &r, GPR_K0, label_leave);
/* No need for uasm_i_nop */
}
@@ -2233,9 +2215,9 @@ static void build_r4000_tlb_load_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_0 & 1) {
-uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
-uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
-uasm_i_jr(&p, K0);
+uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
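
The microMIPS special case above exists because the handler address carries the ISA mode bit in bit 0, which a plain j cannot encode, so the address is materialized with a lui/addiu pair and reached via jr. Since addiu sign-extends its 16-bit immediate, the low half is treated as signed and the high half is biased to compensate. A stand-alone sketch of that split; this is the underlying arithmetic, not the kernel's uasm_rel_hi()/uasm_rel_lo() implementation:

	#include <assert.h>
	#include <stdint.h>

	static int32_t rel_lo(uint32_t addr)
	{
		/* Low 16 bits taken as a signed value. */
		return (int32_t)(((addr & 0xffff) ^ 0x8000) - 0x8000);
	}

	static int32_t rel_hi(uint32_t addr)
	{
		/* High half, biased so that (hi << 16) + lo == addr. */
		return (int32_t)((addr - (uint32_t)rel_lo(addr)) >> 16);
	}

	int main(void)
	{
		uint32_t addr = 0x8000b7fd;	/* hypothetical handler, bit 0 = ISA mode */
		uint32_t reg;

		reg = (uint32_t)rel_hi(addr) << 16;	/* lui   k0, hi */
		reg += (uint32_t)rel_lo(addr);		/* addiu k0, k0, lo */
		assert(reg == addr);			/* jr k0 keeps bit 0 intact */
		return 0;
	}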
@@ -2289,9 +2271,9 @@ static void build_r4000_tlb_store_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
-uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
-uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
-uasm_i_jr(&p, K0);
+uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -2346,9 +2328,9 @@ static void build_r4000_tlb_modify_handler(void)
build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
if ((unsigned long)tlb_do_page_fault_1 & 1) {
-uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
-uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
-uasm_i_jr(&p, K0);
+uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+uasm_i_jr(&p, GPR_K0);
} else
#endif
uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);

@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0-or-later

@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2016 Imagination Technologies
+# Author: Paul Burton <paul.burton@mips.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+load-$(CONFIG_MACH_EYEQ5) = 0xa800000808000000
+all-$(CONFIG_MACH_EYEQ5) += vmlinux.gz.itb
+its-y := vmlinux.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_EPM5) += board-epm5.its.S

@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/ {
+images {
+fdt-mobileye-epm5 {
+description = "Mobileye MP5 Device Tree";
+data = /incbin/("boot/dts/mobileye/eyeq5-epm5.dtb");
+type = "flat_dt";
+arch = "mips";
+compression = "none";
+hash {
+algo = "sha1";
+};
+};
+};
+configurations {
+default = "conf-1";
+conf-1 {
+description = "Mobileye EPM5 Linux kernel";
+kernel = "kernel";
+fdt = "fdt-mobileye-epm5";
+};
+};
+};

@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/dts-v1/;
+/ {
+description = KERNEL_NAME;
+#address-cells = <ADDR_CELLS>;
+images {
+kernel {
+description = KERNEL_NAME;
+data = /incbin/(VMLINUX_BINARY);
+type = "kernel";
+arch = "mips";
+os = "linux";
+compression = VMLINUX_COMPRESSION;
+load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
+entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
+hash {
+algo = "sha1";
+};
+};
+};
+configurations {
+default = "conf-default";
+conf-default {
+description = "Generic Linux kernel";
+kernel = "kernel";
+};
+};
+};
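
A note on the template above: the .its.S suffix is what causes the build to run the file through the C preprocessor first, so the ALL-CAPS tokens (KERNEL_NAME, ADDR_CELLS, ADDR_BITS, VMLINUX_BINARY, VMLINUX_LOAD_ADDRESS, and so on) are expected to arrive as -D defines from the boot Makefile before the FIT image tool ever parses the resulting .its source.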

@@ -9,7 +9,7 @@
int pcibios_plat_dev_init(struct pci_dev *dev)
{
-return PCIBIOS_SUCCESSFUL;
+return 0;
}
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)

@@ -7,17 +7,8 @@
#include <linux/of_pci.h>
#include <linux/pci.h>
-int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
-int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
int pcibios_plat_dev_init(struct pci_dev *dev)
{
-if (ltq_pci_plat_arch_init)
-return ltq_pci_plat_arch_init(dev);
-if (ltq_pci_plat_dev_init)
-return ltq_pci_plat_dev_init(dev);
return 0;
}

@@ -60,7 +60,7 @@ static int mkaddr(struct pci_bus *bus, unsigned int devfn, int where,
{
if (bus->parent == NULL &&
devfn >= PCI_DEVFN(TX4927_PCIC_MAX_DEVNU, 0))
-return -1;
+return PCIBIOS_DEVICE_NOT_FOUND;
__raw_writel(((bus->number & 0xff) << 0x10)
| ((devfn & 0xff) << 0x08) | (where & 0xfc)
| (bus->parent ? 1 : 0),
@@ -69,7 +69,7 @@
__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
| (PCI_STATUS_REC_MASTER_ABORT << 16),
&pcicptr->pcistatus);
-return 0;
+return PCIBIOS_SUCCESSFUL;
}
static int check_abort(struct tx4927_pcic_reg __iomem *pcicptr)
@@ -140,10 +140,12 @@ static int tx4927_pci_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);
+int ret;
-if (mkaddr(bus, devfn, where, pcicptr)) {
-*val = 0xffffffff;
-return -1;
+ret = mkaddr(bus, devfn, where, pcicptr);
+if (ret != PCIBIOS_SUCCESSFUL) {
+PCI_SET_ERROR_RESPONSE(val);
+return ret;
}
switch (size) {
case 1:
@@ -162,9 +164,11 @@ static int tx4927_pci_config_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);
+int ret;
-if (mkaddr(bus, devfn, where, pcicptr))
-return -1;
+ret = mkaddr(bus, devfn, where, pcicptr);
+if (ret != PCIBIOS_SUCCESSFUL)
+return ret;
switch (size) {
case 1:
icd_writeb(val, where & 3, pcicptr);
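
The mkaddr() conversion above moves these accessors onto the standard contract: config-space ops return PCIBIOS_* codes, and failed reads fill *val with the all-ones error response via PCI_SET_ERROR_RESPONSE() instead of hand-rolling *val = 0xffffffff and returning -1. A minimal sketch of the resulting shape, for a hypothetical controller with made-up names:

	#include <linux/pci.h>

	static int demo_config_read(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 *val)
	{
		if (devfn > 0) {			/* hypothetical: single device */
			PCI_SET_ERROR_RESPONSE(val);	/* *val becomes all-ones */
			return PCIBIOS_DEVICE_NOT_FOUND;
		}
		*val = 0;				/* hypothetical register read */
		return PCIBIOS_SUCCESSFUL;
	}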

@@ -6,11 +6,16 @@
* Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/timer.h>
-#include <linux/of_gpio.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/types.h>
#include <asm/mach-ralink/ralink_regs.h>

@@ -12,7 +12,7 @@
#include <asm/sgi/mc.h>
#include <asm/sgi/ip22.h>
-static struct bus_type gio_bus_type;
+static const struct bus_type gio_bus_type;
static struct {
const char *name;
@@ -378,7 +378,7 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
printk(KERN_INFO "GIO: slot %d : Empty\n", slotno);
}
-static struct bus_type gio_bus_type = {
+static const struct bus_type gio_bus_type = {
.name = "gio",
.dev_groups = gio_dev_groups,
.match = gio_bus_match,

@@ -535,13 +535,14 @@ static const struct file_operations sbprof_tb_fops = {
.llseek = default_llseek,
};
-static struct class *tb_class;
+static const struct class tb_class = {
+.name = "sb_tracebuffer",
+};
static struct device *tb_dev;
static int __init sbprof_tb_init(void)
{
struct device *dev;
-struct class *tbc;
int err;
if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) {
@@ -550,15 +551,11 @@
return -EIO;
}
-tbc = class_create("sb_tracebuffer");
-if (IS_ERR(tbc)) {
-err = PTR_ERR(tbc);
+err = class_register(&tb_class);
+if (err)
goto out_chrdev;
-}
-tb_class = tbc;
-dev = device_create(tbc, NULL, MKDEV(SBPROF_TB_MAJOR, 0), NULL, "tb");
+dev = device_create(&tb_class, NULL, MKDEV(SBPROF_TB_MAJOR, 0), NULL, "tb");
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
goto out_class;
@@ -573,7 +570,7 @@
return 0;
out_class:
-class_destroy(tb_class);
+class_unregister(&tb_class);
out_chrdev:
unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
@@ -582,9 +579,9 @@
static void __exit sbprof_tb_cleanup(void)
{
-device_destroy(tb_class, MKDEV(SBPROF_TB_MAJOR, 0));
+device_destroy(&tb_class, MKDEV(SBPROF_TB_MAJOR, 0));
unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
-class_destroy(tb_class);
+class_unregister(&tb_class);
}
module_init(sbprof_tb_init);
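
The sbprof conversion above follows the pattern the driver core now encourages: the class becomes a static const object registered by reference, which removes the dynamically allocated handle and lets the structure live in rodata. A condensed sketch of the same pattern for a hypothetical driver:

	#include <linux/device/class.h>
	#include <linux/module.h>

	static const struct class demo_class = {
		.name = "demo",
	};

	static int __init demo_init(void)
	{
		/* class_register() and device_create() take the class by address. */
		return class_register(&demo_class);
	}

	static void __exit demo_exit(void)
	{
		class_unregister(&demo_class);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");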

@@ -762,7 +762,7 @@ void __init txx9_aclc_init(unsigned long baseaddr, int irq,
{
}
-static struct bus_type txx9_sramc_subsys = {
+static const struct bus_type txx9_sramc_subsys = {
.name = "txx9_sram",
.dev_name = "txx9_sram",
};

@@ -22,7 +22,6 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/time64.h>
-#include <linux/clk.h>
#include <linux/sysfs.h>
#define APB_EHB_ISR 0x00

@@ -118,7 +118,7 @@ static struct attribute *mips_cdmm_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(mips_cdmm_dev);
-struct bus_type mips_cdmm_bustype = {
+const struct bus_type mips_cdmm_bustype = {
.name = "cdmm",
.dev_groups = mips_cdmm_dev_groups,
.match = mips_cdmm_match,

@@ -95,7 +95,7 @@ static int tc_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-struct bus_type tc_bus_type = {
+const struct bus_type tc_bus_type = {
.name = "tc",
.match = tc_bus_match,
};

@@ -309,7 +309,7 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
unsigned int i, buf_len, cpu;
bool done_cr = false;
char buf[4];
-const char *buf_ptr = buf;
+const u8 *buf_ptr = buf;
/* Number of bytes of input data encoded up to each byte in buf */
u8 inc[4];

@@ -120,7 +120,7 @@ static inline unsigned long tc_get_speed(struct tc_bus *tbus)
#ifdef CONFIG_TC
-extern struct bus_type tc_bus_type;
+extern const struct bus_type tc_bus_type;
extern int tc_register_driver(struct tc_driver *tdrv);
extern void tc_unregister_driver(struct tc_driver *tdrv);
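
These extern and definition changes are the tail of the same constification series: bus_register() takes the bus by const pointer, so bus objects can be declared const and placed in rodata. A minimal sketch for a hypothetical bus, with made-up names:

	#include <linux/device/bus.h>

	static int demo_bus_match(struct device *dev, struct device_driver *drv)
	{
		return 0;	/* hypothetical: match nothing */
	}

	static const struct bus_type demo_bus_type = {
		.name  = "demo",
		.match = demo_bus_match,
	};

	static int __init demo_bus_init(void)
	{
		return bus_register(&demo_bus_type);	/* accepts a const pointer */
	}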