sh: remove sh5 support

sh5 never became a product and has probably never really worked.

Remove it by recursively deleting all associated Kconfig options
and all corresponding files.

Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Rich Felker <dalias@libc.org>
Arnd Bergmann, 2020-04-20 11:37:12 +02:00; committed by Rich Felker
parent d1f56f318d
commit 37744feebc
117 changed files with 67 additions and 11554 deletions
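Most of the patch is mechanical: headers that used to dispatch between the 32-bit and sh5 variants collapse to their 32-bit include, and CONFIG_SUPERH32/CONFIG_SUPERH64 guards around sh5-only code are deleted. A reader's sketch of the recurring pattern, using asm/string.h as it appears further down:

/* Before: dispatch between the 32-bit and sh5 variants */
#ifdef CONFIG_SUPERH32
# include <asm/string_32.h>
#else
# include <asm/string_64.h>	/* sh5 only, now unreachable */
#endif

/* After: only the 32-bit variant remains */
#include <asm/string_32.h>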

arch/sh/Kconfig

@@ -54,15 +54,6 @@ config SUPERH
select HAVE_NMI
select NEED_SG_DMA_LENGTH
select ARCH_HAS_GIGANTIC_PAGE
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
gaming console. The SuperH port has a home page at
<http://www.linux-sh.org/>.
config SUPERH32
def_bool "$(ARCH)" = "sh"
select ARCH_32BIT_OFF_T
select GUP_GET_PTE_LOW_HIGH if X2TLB
select HAVE_KPROBES
@@ -81,19 +72,15 @@ config SUPERH32
select ARCH_HIBERNATION_POSSIBLE if MMU
select SPARSE_IRQ
select HAVE_STACKPROTECTOR
config SUPERH64
def_bool "$(ARCH)" = "sh64"
select HAVE_EXIT_THREAD
select KALLSYMS
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
gaming console. The SuperH port has a home page at
<http://www.linux-sh.org/>.
config GENERIC_BUG
def_bool y
depends on BUG && SUPERH32
config GENERIC_CSUM
def_bool y
depends on SUPERH64
depends on BUG
config GENERIC_HWEIGHT
def_bool y
@@ -203,12 +190,6 @@ config CPU_SH4AL_DSP
select CPU_SH4A
select CPU_HAS_DSP
config CPU_SH5
bool
select CPU_HAS_FPU
select SYS_SUPPORTS_SH_TMU
select SYS_SUPPORTS_HUGETLBFS if MMU
config CPU_SHX2
bool
@@ -228,8 +209,6 @@ config CPU_HAS_PMU
default y
bool
if SUPERH32
choice
prompt "Processor sub-type selection"
@@ -518,27 +497,6 @@ config CPU_SUBTYPE_SH7366
endchoice
endif
if SUPERH64
choice
prompt "Processor sub-type selection"
# SH-5 Processor Support
config CPU_SUBTYPE_SH5_101
bool "Support SH5-101 processor"
select CPU_SH5
config CPU_SUBTYPE_SH5_103
bool "Support SH5-103 processor"
select CPU_SH5
endchoice
endif
source "arch/sh/mm/Kconfig"
source "arch/sh/Kconfig.cpu"
@@ -592,7 +550,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on SUPERH32 && MMU
depends on MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
@@ -610,7 +568,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
depends on SUPERH32 && BROKEN_ON_SMP
depends on BROKEN_ON_SMP
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
@@ -624,7 +582,7 @@ config CRASH_DUMP
config KEXEC_JUMP
bool "kexec jump (EXPERIMENTAL)"
depends on SUPERH32 && KEXEC && HIBERNATION
depends on KEXEC && HIBERNATION
help
Jump between original kernel and kexeced kernel and invoke
code via KEXEC
@@ -701,7 +659,7 @@ config HOTPLUG_CPU
config GUSA
def_bool y
depends on !SMP && SUPERH32
depends on !SMP
help
This enables support for gUSA (general UserSpace Atomicity).
This is the default implementation for both UP and non-ll/sc

arch/sh/Kconfig.cpu

@@ -13,7 +13,6 @@ config CPU_LITTLE_ENDIAN
config CPU_BIG_ENDIAN
bool "Big Endian"
depends on !CPU_SH5
endchoice
@@ -27,10 +26,6 @@ config SH_FPU
This option must be set in order to enable the FPU.
config SH64_FPU_DENORM_FLUSH
bool "Flush floating point denorms to zero"
depends on SH_FPU && SUPERH64
config SH_FPU_EMU
def_bool n
prompt "FPU emulation support"
@@ -77,10 +72,6 @@ config SPECULATIVE_EXECUTION
If unsure, say N.
config SH64_ID2815_WORKAROUND
bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
depends on CPU_SUBTYPE_SH5_101
config CPU_HAS_INTEVT
bool

arch/sh/Kconfig.debug

@@ -5,7 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT
config SH_STANDARD_BIOS
bool "Use LinuxSH standard BIOS"
depends on SUPERH32
help
Say Y here if your target has the gdb-sh-stub
package from www.m17n.org (or any conforming standard LinuxSH BIOS)
@@ -19,7 +18,7 @@ config SH_STANDARD_BIOS
config STACK_DEBUG
bool "Check for stack overflows"
depends on DEBUG_KERNEL && SUPERH32
depends on DEBUG_KERNEL
help
This option will cause messages to be printed if free stack space
drops below a certain limit. Saying Y here will add overhead to
@@ -38,7 +37,7 @@ config 4KSTACKS
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
depends on DEBUG_KERNEL && SUPERH32 && BROKEN
depends on DEBUG_KERNEL && BROKEN
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
@@ -46,7 +45,7 @@ config IRQSTACKS
config DUMP_CODE
bool "Show disassembly of nearby code in register dumps"
depends on DEBUG_KERNEL && SUPERH32
depends on DEBUG_KERNEL
default y if DEBUG_BUGVERBOSE
default n
help
@@ -59,7 +58,6 @@ config DUMP_CODE
config DWARF_UNWINDER
bool "Enable the DWARF unwinder for stacktraces"
select FRAME_POINTER
depends on SUPERH32
default n
help
Enabling this option will make stacktraces more accurate, at
@@ -77,11 +75,6 @@ config SH_NO_BSS_INIT
For all other cases, say N. If this option seems perplexing, or
you aren't sure, say N.
config SH64_SR_WATCH
bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
depends on SUPERH64
config MCOUNT
def_bool y
depends on SUPERH32
depends on STACK_DEBUG || FUNCTION_TRACER

arch/sh/Makefile

@@ -11,7 +11,7 @@
#
ifneq ($(SUBARCH),$(ARCH))
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
CROSS_COMPILE := $(call cc-cross-prefix, sh-linux- sh-linux-gnu- sh-unknown-linux-gnu-)
endif
endif
@@ -29,12 +26,9 @@ isa-$(CONFIG_CPU_SH3) := sh3
isa-$(CONFIG_CPU_SH4) := sh4
isa-$(CONFIG_CPU_SH4A) := sh4a
isa-$(CONFIG_CPU_SH4AL_DSP) := sh4al
isa-$(CONFIG_CPU_SH5) := shmedia
ifeq ($(CONFIG_SUPERH32),y)
isa-$(CONFIG_SH_DSP) := $(isa-y)-dsp
isa-y := $(isa-y)-up
endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,)
@@ -47,7 +44,6 @@ cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a,) \
$(call cc-option,-m4a-nofpu,)
cflags-$(CONFIG_CPU_SH4AL_DSP) += $(call cc-option,-m4al,)
cflags-$(CONFIG_CPU_SH5) := $(call cc-option,-m5-32media-nofpu,)
ifeq ($(cflags-y),)
#
@@ -88,7 +84,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment \
-R .stab -R .stabstr -S
# Give the various platforms the opportunity to set default image types
defaultimage-$(CONFIG_SUPERH32) := zImage
defaultimage-y := zImage
defaultimage-$(CONFIG_SH_SH7785LCR) := uImage
defaultimage-$(CONFIG_SH_RSK) := uImage
defaultimage-$(CONFIG_SH_URQUELL) := uImage
@@ -107,31 +103,22 @@ KBUILD_IMAGE := $(boot)/$(defaultimage-y)
# Choosing incompatible machines durings configuration will result in
# error messages during linking.
#
ifdef CONFIG_SUPERH32
UTS_MACHINE := sh
BITS := 32
LDFLAGS_vmlinux += -e _stext
else
UTS_MACHINE := sh64
BITS := 64
LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
--defsym phys_stext_shmedia=phys_stext+1 \
-e phys_stext_shmedia
endif
ifdef CONFIG_CPU_LITTLE_ENDIAN
ld-bfd := elf32-$(UTS_MACHINE)-linux
ld-bfd := elf32-sh-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EL
else
ld-bfd := elf32-$(UTS_MACHINE)big-linux
ld-bfd := elf32-shbig-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EB
endif
export ld-bfd BITS
export ld-bfd
head-y := arch/sh/kernel/head_$(BITS).o
head-y := arch/sh/kernel/head_32.o
core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
@@ -185,7 +172,6 @@ cpuincdir-$(CONFIG_CPU_SH2) += cpu-sh2
cpuincdir-$(CONFIG_CPU_SH3) += cpu-sh3
cpuincdir-$(CONFIG_CPU_SH4A) += cpu-sh4a
cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
cpuincdir-$(CONFIG_CPU_SH5) += cpu-sh5
cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
@@ -206,8 +192,7 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
KBUILD_CFLAGS += -fasynchronous-unwind-tables
endif
libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
libs-y := arch/sh/lib/ $(libs-y)
BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \

arch/sh/boot/compressed/Makefile

@@ -8,9 +8,9 @@
targets := vmlinux vmlinux.bin vmlinux.bin.gz \
vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo \
head_$(BITS).o misc.o piggy.o
head_32.o misc.o piggy.o
OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
OBJECTS = $(obj)/head_32.o $(obj)/misc.o $(obj)/cache.o
GCOV_PROFILE := n
@@ -39,15 +39,11 @@ LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
#
# Pull in the necessary libgcc bits from the in-kernel implementation.
#
lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
lshrsi3.S
lib1funcs-obj := \
lib1funcs-y := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S lshrsi3.S
lib1funcs-obj := \
$(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
ifeq ($(BITS),64)
lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir))
endif
KBUILD_CFLAGS += -I$(lib1funcs-dir) -DDISABLE_BRANCH_PROFILING

arch/sh/boot/compressed/misc.c

@@ -116,11 +116,7 @@ void ftrace_stub(void)
{
}
#ifdef CONFIG_SUPERH64
#define stackalign 8
#else
#define stackalign 4
#endif
#define STACK_SIZE (4096)
long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE];
@@ -130,13 +126,9 @@ void decompress_kernel(void)
{
unsigned long output_addr;
#ifdef CONFIG_SUPERH64
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT)
output_addr |= P2SEG;
#endif
#endif
output = (unsigned char *)output_addr;

arch/sh/drivers/pci/Makefile

@@ -10,7 +10,6 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7763) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7786) += pcie-sh7786.o ops-sh7786.o
obj-$(CONFIG_CPU_SH5) += pci-sh5.o ops-sh5.o
obj-$(CONFIG_SH_DREAMCAST) += ops-dreamcast.o fixups-dreamcast.o \
pci-dreamcast.o

arch/sh/drivers/pci/ops-sh5.c (deleted)

@@ -1,65 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Support functions for the SH5 PCI hardware.
*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/io.h>
#include "pci-sh5.h"
static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
switch (size) {
case 1:
*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
break;
case 2:
*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
break;
case 4:
*val = SH5PCI_READ(PDR);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
switch (size) {
case 1:
SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
break;
case 2:
SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
break;
case 4:
SH5PCI_WRITE(PDR, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops sh5_pci_ops = {
.read = sh5pci_read,
.write = sh5pci_write,
};

arch/sh/drivers/pci/pci-sh5.c (deleted)

@@ -1,217 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* Support functions for the SH5 PCI hardware.
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <cpu/irq.h>
#include <asm/io.h>
#include "pci-sh5.h"
unsigned long pcicr_virt;
unsigned long PCI_IO_AREA;
/* Rounds a number UP to the nearest power of two. Used for
* sizing the PCI window.
*/
static u32 __init r2p2(u32 num)
{
int i = 31;
u32 tmp = num;
if (num == 0)
return 0;
do {
if (tmp & (1 << 31))
break;
i--;
tmp <<= 1;
} while (i >= 0);
tmp = 1 << i;
/* If the original number isn't a power of 2, round it up */
if (tmp != num)
tmp <<= 1;
return tmp;
}
static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
unsigned pci_int, pci_air, pci_cir, pci_aint;
pci_int = SH5PCI_READ(INT);
pci_cir = SH5PCI_READ(CIR);
pci_air = SH5PCI_READ(AIR);
if (pci_int) {
printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
printk("PCI AIR -> 0x%x\n", pci_air);
printk("PCI CIR -> 0x%x\n", pci_cir);
SH5PCI_WRITE(INT, ~0);
}
pci_aint = SH5PCI_READ(AINT);
if (pci_aint) {
printk("PCI ARB INTERRUPT!\n");
printk("PCI AINT -> 0x%x\n", pci_aint);
printk("PCI AIR -> 0x%x\n", pci_air);
printk("PCI CIR -> 0x%x\n", pci_cir);
SH5PCI_WRITE(AINT, ~0);
}
return IRQ_HANDLED;
}
static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
{
printk("SERR IRQ\n");
return IRQ_NONE;
}
static struct resource sh5_pci_resources[2];
static struct pci_channel sh5pci_controller = {
.pci_ops = &sh5_pci_ops,
.resources = sh5_pci_resources,
.nr_resources = ARRAY_SIZE(sh5_pci_resources),
.mem_offset = 0x00000000,
.io_offset = 0x00000000,
};
static int __init sh5pci_init(void)
{
unsigned long memStart = __pa(memory_start);
unsigned long memSize = __pa(memory_end) - memStart;
u32 lsr0;
u32 uval;
if (request_irq(IRQ_ERR, pcish5_err_irq,
0, "PCI Error",NULL) < 0) {
printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
return -EINVAL;
}
if (request_irq(IRQ_SERR, pcish5_serr_irq,
0, "PCI SERR interrupt", NULL) < 0) {
printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
return -EINVAL;
}
pcicr_virt = (unsigned long)ioremap(SH5PCI_ICR_BASE, 1024);
if (!pcicr_virt) {
panic("Unable to remap PCICR\n");
}
PCI_IO_AREA = (unsigned long)ioremap(SH5PCI_IO_BASE, 0x10000);
if (!PCI_IO_AREA) {
panic("Unable to remap PCIIO\n");
}
/* Clear snoop registers */
SH5PCI_WRITE(CSCR0, 0);
SH5PCI_WRITE(CSCR1, 0);
/* Switch off interrupts */
SH5PCI_WRITE(INTM, 0);
SH5PCI_WRITE(AINTM, 0);
SH5PCI_WRITE(PINTM, 0);
/* Set bus active, take it out of reset */
uval = SH5PCI_READ(CR);
/* Set command Register */
SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE |
CR_PFCS | CR_BMAM);
uval=SH5PCI_READ(CR);
/* Allow it to be a master */
/* NB - WE DISABLE I/O ACCESS to stop overlap */
/* set WAIT bit to enable stepping, an attempt to improve stability */
SH5PCI_WRITE_SHORT(CSR_CMD,
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
PCI_COMMAND_WAIT);
/*
** Set translation mapping memory in order to convert the address
** used for the main bus, to the PCI internal address.
*/
SH5PCI_WRITE(MBR,0x40000000);
/* Always set the max size 512M */
SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
/*
** I/O addresses are mapped at internal PCI specific address
** as is described into the configuration bridge table.
** These are changed to 0, to allow cards that have legacy
** io such as vga to function correctly. We set the SH5 IOBAR to
** 256K, which is a bit big as we can only have 64K of address space
*/
SH5PCI_WRITE(IOBR,0x0);
/* Set up a 256K window. Totally pointless waste of address space */
SH5PCI_WRITE(IOBMR,0);
/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec.
* Ideally, we would want to map the I/O region somewhere, but it
* is so big this is not that easy!
*/
SH5PCI_WRITE(CSR_IBAR0,~0);
/* Set memory size value */
memSize = memory_end - memory_start;
/* Now we set up the mbars so the PCI bus can see the memory of
* the machine */
if (memSize < (1024 * 1024)) {
printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n",
memSize);
return -EINVAL;
}
/* Set LSR 0 */
lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 :
((r2p2(memSize) - 0x100000) | 0x1);
SH5PCI_WRITE(LSR0, lsr0);
/* Set MBAR 0 */
SH5PCI_WRITE(CSR_MBAR0, memory_start);
SH5PCI_WRITE(LAR0, memory_start);
SH5PCI_WRITE(CSR_MBAR1,0);
SH5PCI_WRITE(LAR1,0);
SH5PCI_WRITE(LSR1,0);
/* Enable the PCI interrupts on the device */
SH5PCI_WRITE(INTM, ~0);
SH5PCI_WRITE(AINTM, ~0);
SH5PCI_WRITE(PINTM, ~0);
sh5_pci_resources[0].start = PCI_IO_AREA;
sh5_pci_resources[0].end = PCI_IO_AREA + 0x10000;
sh5_pci_resources[1].start = memStart;
sh5_pci_resources[1].end = memStart + memSize;
return register_pci_controller(&sh5pci_controller);
}
arch_initcall(sh5pci_init);
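An aside on the r2p2() helper deleted above: it rounds a nonzero size up to the next power of two (and maps 0 to 0) in order to size the PCI window mask; the kernel's generic roundup_pow_of_two() from <linux/log2.h> serves the same purpose for nonzero inputs. A minimal standalone sketch of the same computation, with illustrative values:

#include <assert.h>
#include <stdint.h>

/* Same result as r2p2() above for the window sizes used here
 * (well below 2^31, so the shift cannot overflow). */
static uint32_t round_up_pow2(uint32_t num)
{
	uint32_t tmp = 1;

	if (num == 0)
		return 0;
	while (tmp < num)
		tmp <<= 1;
	return tmp;
}

int main(void)
{
	assert(round_up_pow2(5 << 20) == (8 << 20));	/* 5 MiB -> 8 MiB */
	assert(round_up_pow2(4 << 20) == (4 << 20));	/* already a power of two */
	return 0;
}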

arch/sh/drivers/pci/pci-sh5.h (deleted)

@@ -1,108 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
*
* Definitions for the SH5 PCI hardware.
*/
#ifndef __PCI_SH5_H
#define __PCI_SH5_H
/* Product ID */
#define PCISH5_PID 0x350d
/* vendor ID */
#define PCISH5_VID 0x1054
/* Configuration types */
#define ST_TYPE0 0x00 /* Configuration cycle type 0 */
#define ST_TYPE1 0x01 /* Configuration cycle type 1 */
/* VCR data */
#define PCISH5_VCR_STATUS 0x00
#define PCISH5_VCR_VERSION 0x08
/*
** ICR register offsets and bits
*/
#define PCISH5_ICR_CR 0x100 /* PCI control register values */
#define CR_PBAM (1<<12)
#define CR_PFCS (1<<11)
#define CR_FTO (1<<10)
#define CR_PFE (1<<9)
#define CR_TBS (1<<8)
#define CR_SPUE (1<<7)
#define CR_BMAM (1<<6)
#define CR_HOST (1<<5)
#define CR_CLKEN (1<<4)
#define CR_SOCS (1<<3)
#define CR_IOCS (1<<2)
#define CR_RSTCTL (1<<1)
#define CR_CFINT (1<<0)
#define CR_LOCK_MASK 0xa5000000
#define PCISH5_ICR_INT 0x114 /* Interrupt registert values */
#define INT_MADIM (1<<2)
#define PCISH5_ICR_LSR0 0X104 /* Local space register values */
#define PCISH5_ICR_LSR1 0X108 /* Local space register values */
#define PCISH5_ICR_LAR0 0x10c /* Local address register values */
#define PCISH5_ICR_LAR1 0x110 /* Local address register values */
#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */
#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */
#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */
#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */
#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */
#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */
#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */
#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */
#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */
#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */
#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */
#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */
#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */
#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */
#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */
#define PCISH5_ICR_PDR 0x220 /* Pio data register values */
/* These are configs space registers */
#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */
#define PCISH5_ICR_CSR_DID 0x002 /* Device id */
#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */
#define PCISH5_ICR_CSR_STATUS 0x006 /* Stautus */
#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */
#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */
#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */
/* Base address of registers */
#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000)
/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */
extern unsigned long pcicr_virt;
/* Register selection macro */
#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x))
/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
/* Write I/O functions */
#define SH5PCI_WRITE(reg,val) __raw_writel((u32)(val),PCISH5_ICR_REG(reg))
#define SH5PCI_WRITE_SHORT(reg,val) __raw_writew((u16)(val),PCISH5_ICR_REG(reg))
#define SH5PCI_WRITE_BYTE(reg,val) __raw_writeb((u8)(val),PCISH5_ICR_REG(reg))
/* Read I/O functions */
#define SH5PCI_READ(reg) __raw_readl(PCISH5_ICR_REG(reg))
#define SH5PCI_READ_SHORT(reg) __raw_readw(PCISH5_ICR_REG(reg))
#define SH5PCI_READ_BYTE(reg) __raw_readb(PCISH5_ICR_REG(reg))
/* Set PCI config bits */
#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
/* Set PCI command register */
#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where)
/* Size converters */
#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
extern struct pci_ops sh5_pci_ops;
#endif /* __PCI_SH5_H */

arch/sh/include/asm/barrier.h

@@ -6,7 +6,7 @@
#ifndef __ASM_SH_BARRIER_H
#define __ASM_SH_BARRIER_H
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#if defined(CONFIG_CPU_SH4A)
#include <asm/cache_insns.h>
#endif
@@ -24,7 +24,7 @@
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#if defined(CONFIG_CPU_SH4A)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()

arch/sh/include/asm/bitops.h

@@ -26,7 +26,6 @@
#include <asm-generic/bitops/non-atomic.h>
#endif
#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
@@ -60,31 +59,6 @@ static inline unsigned long __ffs(unsigned long word)
: "t");
return result;
}
#else
static inline unsigned long ffz(unsigned long word)
{
unsigned long result, __d2, __d3;
__asm__("gettr tr0, %2\n\t"
"pta $+32, tr0\n\t"
"andi %1, 1, %3\n\t"
"beq %3, r63, tr0\n\t"
"pta $+4, tr0\n"
"0:\n\t"
"shlri.l %1, 1, %1\n\t"
"addi %0, 1, %0\n\t"
"andi %1, 1, %3\n\t"
"beqi %3, 1, tr0\n"
"1:\n\t"
"ptabs %2, tr0\n\t"
: "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
: "0" (0L), "1" (word));
return result;
}
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
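The deleted SHmedia ffz() above scans for the first zero bit by shifting; the generic fallback gets the same answer from the first-set-bit helper, since asm-generic defines ffz(x) as __ffs(~(x)). A standalone illustration of the semantics, valid only when the word contains at least one zero bit:

#include <assert.h>

/* ffz(word): index of the least significant zero bit. */
static unsigned long ffz_demo(unsigned long word)
{
	unsigned long bit = 0;

	while (word & 1) {	/* caller guarantees a zero bit exists */
		word >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	assert(ffz_demo(0x0) == 0);	/* bit 0 already clear */
	assert(ffz_demo(0x7) == 3);	/* 0b0111 -> first zero at bit 3 */
	assert(ffz_demo(0xb) == 2);	/* 0b1011 -> first zero at bit 2 */
	return 0;
}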

arch/sh/include/asm/bl_bit.h

@@ -1,11 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_BL_BIT_H
#define __ASM_SH_BL_BIT_H
#ifdef CONFIG_SUPERH32
# include <asm/bl_bit_32.h>
#else
# include <asm/bl_bit_64.h>
#endif
#endif /* __ASM_SH_BL_BIT_H */
#include <asm/bl_bit_32.h>

arch/sh/include/asm/bl_bit_64.h (deleted)

@@ -1,37 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_BL_BIT_64_H
#define __ASM_SH_BL_BIT_64_H
#include <asm/processor.h>
#define SR_BL_LL 0x0000000010000000LL
static inline void set_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
static inline void clear_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
#endif /* __ASM_SH_BL_BIT_64_H */

arch/sh/include/asm/bugs.h

@@ -53,10 +53,6 @@ static void __init check_bugs(void)
*p++ = 's';
*p++ = 'p';
break;
case CPU_FAMILY_SH5:
*p++ = '6';
*p++ = '4';
break;
case CPU_FAMILY_UNKNOWN:
/*
* Specifically use CPU_FAMILY_UNKNOWN rather than

arch/sh/include/asm/cache_insns.h

@@ -1,12 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CACHE_INSNS_H
#define __ASM_SH_CACHE_INSNS_H
#ifdef CONFIG_SUPERH32
# include <asm/cache_insns_32.h>
#else
# include <asm/cache_insns_64.h>
#endif
#endif /* __ASM_SH_CACHE_INSNS_H */
#include <asm/cache_insns_32.h>

arch/sh/include/asm/cache_insns_64.h (deleted)

@@ -1,20 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_CACHE_INSNS_64_H
#define __ASM_SH_CACHE_INSNS_64_H
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
static inline reg_size_t register_align(void *val)
{
return (unsigned long long)(signed long long)(signed long)val;
}
#endif /* __ASM_SH_CACHE_INSNS_64_H */

arch/sh/include/asm/checksum.h

@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_SUPERH32
# include <asm/checksum_32.h>
#else
# include <asm-generic/checksum.h>
#endif
#include <asm/checksum_32.h>

arch/sh/include/asm/elf.h

@@ -133,28 +133,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#define ELF_PLATFORM (utsname()->machine)
#ifdef __SH5__
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
_r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
_r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
_r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
_r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
_r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
_r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
_r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
_r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
_r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
_r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
_r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
_r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
_r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
_r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
_r->sr = SR_FD | SR_MMU; } while (0)
#else
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
@@ -182,7 +160,6 @@ do { \
_r->regs[14] = 0; \
_r->sr = SR_FD; \
} while (0)
#endif
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))

arch/sh/include/asm/extable.h

@@ -4,8 +4,4 @@
#include <asm-generic/extable.h>
#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
#define ARCH_HAS_SEARCH_EXTABLE
#endif
#endif

arch/sh/include/asm/fixmap.h

@@ -83,11 +83,7 @@ extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#ifdef CONFIG_SUPERH32
#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)

arch/sh/include/asm/io.h

@@ -115,12 +115,8 @@ static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)
#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif
__BUILD_MEMORY_STRING(__raw_, q, u64)

arch/sh/include/asm/irq.h

@@ -66,8 +66,5 @@ extern void irq_finish(unsigned int irq);
#endif
#include <asm-generic/irq.h>
#ifdef CONFIG_CPU_SH5
#include <cpu/irq.h>
#endif
#endif /* __ASM_SH_IRQ_H */

arch/sh/include/asm/mmu_context.h

@@ -48,11 +48,7 @@
*/
#define MMU_VPN_MASK 0xfffff000
#if defined(CONFIG_SUPERH32)
#include <asm/mmu_context_32.h>
#else
#include <asm/mmu_context_64.h>
#endif
/*
* Get MMU context if needed.
@@ -74,14 +70,6 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
*/
local_flush_tlb_all();
#ifdef CONFIG_SUPERH64
/*
* The SH-5 cache uses the ASIDs, requiring both the I and D
* cache to be flushed when the ASID is exhausted. Weak.
*/
flush_cache_all();
#endif
/*
* Fix version; Note that we avoid version #0
* to distinguish NO_CONTEXT.

arch/sh/include/asm/mmu_context_64.h (deleted)

@@ -1,75 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_MMU_CONTEXT_64_H
#define __ASM_SH_MMU_CONTEXT_64_H
/*
* sh64-specific mmu_context interface.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*/
#include <cpu/registers.h>
#include <asm/cacheflush.h>
#define SR_ASID_MASK 0xffffffffff00ffffULL
#define SR_ASID_SHIFT 16
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Well, at least free TLB entries */
flush_tlb_mm(mm);
}
static inline unsigned long get_asid(void)
{
unsigned long long sr;
asm volatile ("getcon " __SR ", %0\n\t"
: "=r" (sr));
sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
return (unsigned long) sr;
}
/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
unsigned long long sr, pc;
asm volatile ("getcon " __SR ", %0" : "=r" (sr));
sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
/*
* It is possible that this function may be inlined and so to avoid
* the assembler reporting duplicate symbols we make use of the
* gas trick of generating symbols using numerics and forward
* reference.
*/
asm volatile ("movi 1, %1\n\t"
"shlli %1, 28, %1\n\t"
"or %0, %1, %1\n\t"
"putcon %1, " __SR "\n\t"
"putcon %0, " __SSR "\n\t"
"movi 1f, %1\n\t"
"ori %1, 1 , %1\n\t"
"putcon %1, " __SPC "\n\t"
"rte\n"
"1:\n\t"
: "=r" (sr), "=r" (pc) : "0" (sr));
}
/* arch/sh/kernel/cpu/sh5/entry.S */
extern unsigned long switch_and_save_asid(unsigned long new_asid);
/* No spare register to twiddle, so use a software cache */
extern pgd_t *mmu_pdtp_cache;
#define set_TTB(pgd) (mmu_pdtp_cache = (pgd))
#define get_TTB() (mmu_pdtp_cache)
#endif /* __ASM_SH_MMU_CONTEXT_64_H */

arch/sh/include/asm/page.h

@@ -35,8 +35,6 @@
#define HPAGE_SHIFT 22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT 26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT 29
#endif
#ifdef CONFIG_HUGETLB_PAGE
@@ -82,18 +80,12 @@ typedef struct { unsigned long long pgd; } pgd_t;
((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#endif
#define pgd_val(x) ((x).pgd)
@@ -191,15 +183,4 @@ typedef struct page *pgtable_t;
*/
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_SUPERH64
/*
* While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
* happily generate {ld/st}.q pairs, requiring us to have 8-byte
* alignment to avoid traps. The kmalloc alignment is guaranteed by
* virtue of L1_CACHE_BYTES, requiring this to only be special cased
* for slab caches.
*/
#define ARCH_SLAB_MINALIGN 8
#endif
#endif /* __ASM_SH_PAGE_H */

arch/sh/include/asm/pgtable.h

@@ -76,18 +76,10 @@ static inline unsigned long phys_addr_mask(void)
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
#ifdef CONFIG_SUPERH32
#define VMALLOC_START (P3SEG)
#else
#define VMALLOC_START (0xf0000000)
#endif
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
#else
#include <asm/pgtable_64.h>
#endif
/*
* SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
@@ -159,15 +151,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
return __pte_access_permitted(pte, prot);
}
#elif defined(CONFIG_SUPERH64)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
if (write)
prot |= _PAGE_WRITE;
return __pte_access_permitted(pte, prot);
}
#else
static inline bool pte_access_permitted(pte_t pte, bool write)
{

arch/sh/include/asm/pgtable_64.h (deleted)

@@ -1,307 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H
/*
* include/asm-sh/pgtable_64.h
*
* This file contains the functions and defines necessary to modify and use
* the SuperH page table tree.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
* Error outputs.
*/
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
* Table setting routines. Used within arch/mm only.
*/
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
unsigned long long x = ((unsigned long long) pteval.pte_low);
unsigned long long *xp = (unsigned long long *) pteptr;
/*
* Sign-extend based on NPHYS.
*/
*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
* PGD defines. Top level.
*/
/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/*
* PMD level access routines. Same notes as above.
*/
#define _PMD_EMPTY 0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_page_vaddr(pmd_entry) \
((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
#define pmd_page(pmd) \
(virt_to_page(pmd_val(pmd)))
/* PMD to PTE dereferencing */
#define pte_index(address) \
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define __pte_offset(address) pte_index(address)
#define pte_offset_kernel(dir, addr) \
((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_unmap(pte) do { } while (0)
#ifndef __ASSEMBLY__
/*
* PTEL coherent flags.
* See Chapter 17 ST50 CPU Core Volume 1, Architecture.
*/
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
positions, to avoid expensive bit shuffling on every refill. The remaining
bits are used for s/w purposes and masked out on each refill.
Note, the PTE slots are used to hold data of type swp_entry_t when a page is
swapped out. Only the _PAGE_PRESENT flag is significant when the page is
swapped out, and it must be placed so that it doesn't overlap either the
type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
[2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
into 2 pieces. That is handled by SWP_ENTRY and SWP_TYPE below. */
#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
#define _PAGE_PRESENT 0x004 /* software: page referenced */
#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
#define _PAGE_ACCESSED 0x800 /* software: page referenced */
/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
/*
* We can use the sign-extended bits in the PTEL to get 32 bits of
* software flags. This works for now because no implementations uses
* anything above the PPN field.
*/
#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL _PAGE_EXT(0x002)
#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_SHARED | \
_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
/*
* HugeTLB support
*/
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE (_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE (_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
#endif
/*
* Stub out _PAGE_SZHUGE if we don't have a good definition for it,
* to make pte_mkhuge() happy.
*/
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE (0)
#endif
/*
* Default flags for a Kernel page.
* This is fundametally also SHARED because the main use of this define
* (other than for PGD/PMD entries) is for the VMALLOC pool which is
* contextless.
*
* _PAGE_EXECUTE is required for modules
*
*/
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_EXECUTE | \
_PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SHARED)
/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SPECIAL)
/*
* We have full permissions (Read/Write/Execute/Shared).
*/
#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \
_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
_PAGE_SHARED)
#define PAGE_EXECREAD __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
/*
* We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
* protection mode for the stack.
*/
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_READONLY __pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX __pgprot(_PAGE_COMMON | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
#define PAGE_KERNEL_NOCACHE \
__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_EXECUTE | _PAGE_ACCESSED | \
_PAGE_DIRTY | _PAGE_SHARED)
/* Make it a device mapping for maximum safety (e.g. for mapping device
registers into user-space via /dev/map). */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
/*
* PTE level access routines.
*
* Note1:
* It's the tree walk leaf. This is physical address to be stored.
*
* Note 2:
* Regarding the choice of _PTE_EMPTY:
We must choose a bit pattern that cannot be valid, whether or not the page
is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
left for us to select. If we force bit[7]==0 when swapped out, we could use
the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
we force bit[7]==1 when swapped out, we can use all zeroes to indicate
empty. This is convenient, because the page tables get cleared to zero
when they are allocated.
*/
#define _PTE_EMPTY 0x0
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
/*
* Some definitions to translate between mem_map, PTEs, and page
* addresses:
*/
/*
* Given a PTE, return the index of the mem_map[] entry corresponding
* to the page frame the PTE. Get the absolute physical address, make
* a relative physical address and translate it to an index.
*/
#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
__MEMORY_START) >> PAGE_SHIFT)
/*
* Given a PTE, return the "struct page *".
*/
#define pte_page(x) (mem_map + pte_pagenr(x))
/*
* Return number of (down rounded) MB corresponding to x pages.
*/
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
* The following have defined behavior only work if pte_present() is true.
*/
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry.
*
* extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
*/
#define mk_pte(page,pgprot) \
({ \
pte_t __pte; \
\
set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
__MEMORY_START | pgprot_val((pgprot)))); \
__pte; \
})
/*
* This takes a (absolute) physical page address that is used
* by the remapping functions
*/
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
/* Encode and decode a swap entry */
#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x) ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#endif /* !__ASSEMBLY__ */
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif /* __ASM_SH_PGTABLE_64_H */
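The split swap-type encoding described in the comment block above can be sanity-checked with a round-trip: type bits [1:0] sit below _PAGE_PRESENT (bit 2) and type bits [5:2] are shifted up to [6:3], so bit 2 stays clear while a page is swapped out. A self-contained sketch using copies of the macros just deleted:

#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;

/* Copies of the encode/decode macros from the deleted header. */
#define demo_swp_type(x)	(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define demo_swp_offset(x)	((x).val >> 8)
#define demo_swp_entry(type, offset) \
	((swp_entry_t) { ((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3) })

int main(void)
{
	swp_entry_t e = demo_swp_entry(0x1f, 1234);

	assert(demo_swp_type(e) == 0x1f);	/* type survives the split */
	assert(demo_swp_offset(e) == 1234);	/* offset untouched in [.. :8] */
	assert(!(e.val & 0x4));			/* _PAGE_PRESENT (bit 2) stays clear */
	return 0;
}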

arch/sh/include/asm/posix_types.h

@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
# ifdef CONFIG_SUPERH32
# include <asm/posix_types_32.h>
# else
# include <asm/posix_types_64.h>
# endif
#include <asm/posix_types_32.h>

arch/sh/include/asm/processor.h

@@ -39,9 +39,6 @@ enum cpu_type {
/* SH4AL-DSP types */
CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
/* SH-5 types */
CPU_SH5_101, CPU_SH5_103,
/* Unknown subtype */
CPU_SH_NONE
};
@@ -53,7 +50,6 @@
CPU_FAMILY_SH4,
CPU_FAMILY_SH4A,
CPU_FAMILY_SH4AL_DSP,
CPU_FAMILY_SH5,
CPU_FAMILY_UNKNOWN,
};
@@ -167,18 +163,12 @@ int vsyscall_init(void);
*/
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#elif defined(CONFIG_SUPERH32)
#define instruction_size(insn) (2)
#else
#define instruction_size(insn) (4)
#define instruction_size(insn) (2)
#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SUPERH32
# include <asm/processor_32.h>
#else
# include <asm/processor_64.h>
#endif
#include <asm/processor_32.h>
#endif /* __ASM_SH_PROCESSOR_H */

arch/sh/include/asm/processor_64.h (deleted)

@@ -1,212 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_PROCESSOR_64_H
#define __ASM_SH_PROCESSOR_64_H
/*
* include/asm-sh/processor_64.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/types.h>
#include <cpu/registers.h>
#endif
/*
* User space process size: 2GB - 4k.
*/
#define TASK_SIZE 0x7ffff000UL
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register
*
* FD-bit:
* When it's set, it means the processor doesn't have right to use FPU,
* and it results exception when the floating operation is executed.
*
* IMASK-bit:
* Interrupt level mask
*
* STEP-bit:
* Single step bit
*
*/
#if defined(CONFIG_SH64_SR_WATCH)
#define SR_MMU 0x84000000
#else
#define SR_MMU 0x80000000
#endif
#define SR_IMASK 0x000000f0
#define SR_FD 0x00008000
#define SR_SSTEP 0x08000000
#ifndef __ASSEMBLY__
/*
* FPU structure and data : require 8-byte alignment as we need to access it
with fld.p, fst.p
*/
struct sh_fpu_hard_struct {
unsigned long fp_regs[64];
unsigned int fpscr;
/* long status; * software status information */
};
/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
unsigned long fp_regs[64];
unsigned int fpscr;
unsigned char lookahead;
unsigned long entry_pc;
};
union thread_xstate {
struct sh_fpu_hard_struct hardfpu;
struct sh_fpu_soft_struct softfpu;
/*
* The structure definitions only produce 32 bit alignment, yet we need
* to access them using 64 bit load/store as well.
*/
unsigned long long alignment_dummy;
};
struct thread_struct {
unsigned long sp;
unsigned long pc;
/* Various thread flags, see SH_THREAD_xxx */
unsigned long flags;
/* This stores the address of the pt_regs built during a context
switch, or of the register save area built for a kernel mode
exception. It is used for backtracing the stack of a sleeping task
or one that traps in kernel mode. */
struct pt_regs *kregs;
/* This stores the address of the pt_regs constructed on entry from
user mode. It is a fixed value over the lifetime of a process, or
NULL for a kernel thread. */
struct pt_regs *uregs;
unsigned long address;
/* Hardware debugging registers may come here */
/* floating point info */
union thread_xstate *xstate;
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
*/
unsigned char fpu_counter;
};
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
#define INIT_THREAD { \
.sp = sizeof(init_stack) + \
(long) &init_stack, \
.pc = 0, \
.kregs = &fake_swapper_regs, \
.uregs = NULL, \
.address = 0, \
.flags = 0, \
}
/*
* Do necessary setup to start up a newly executed thread.
*/
#define SR_USER (SR_MMU | SR_FD)
#define start_thread(_regs, new_pc, new_sp) \
_regs->sr = SR_USER; /* User mode. */ \
_regs->pc = new_pc - 4; /* Compensate syscall exit */ \
_regs->pc |= 1; /* Set SHmedia ! */ \
_regs->regs[18] = 0; \
_regs->regs[15] = new_sp
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/*
* FPU lazy state save handling.
*/
static inline void disable_fpu(void)
{
unsigned long long __dummy;
/* Set FD flag in SR */
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (SR_FD));
}
static inline void enable_fpu(void)
{
unsigned long long __dummy;
/* Clear out FD flag in SR */
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (~SR_FD));
}
/* Round to nearest, no exceptions on inexact, overflow, underflow,
zero-divide, invalid. Configure option for whether to flush denorms to
zero, or except if a denorm is encountered. */
#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
#define FPSCR_INIT 0x00040000
#else
#define FPSCR_INIT 0x00000000
#endif
#ifdef CONFIG_SH_FPU
/* Initialise the FP state of a task */
void fpinit(struct sh_fpu_hard_struct *fpregs);
#else
#define fpinit(fpregs) do { } while (0)
#endif
extern struct task_struct *last_task_used_math;
/*
* Return saved PC of a blocked thread.
*/
#define thread_saved_pc(tsk) (tsk->thread.pc)
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PROCESSOR_64_H */
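One detail of the deleted start_thread() worth flagging: much like Thumb interworking on ARM, SH-5 selected the instruction set via bit 0 of a branch-target or entry address (1 = SHmedia, 0 = SHcompact), which is why the macro ORs 1 into the new PC. A minimal sketch of that convention (helper names hypothetical):

/* Bit 0 of an SH-5 entry address picks the ISA: 1 = SHmedia, 0 = SHcompact. */
static inline unsigned long shmedia_entry(unsigned long pc)
{
	return pc | 1;	/* matches "_regs->pc |= 1" in start_thread() above */
}

static inline int entry_is_shmedia(unsigned long pc)
{
	return pc & 1;
}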

arch/sh/include/asm/ptrace_64.h (deleted)

@@ -1,14 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_PTRACE_64_H
#define __ASM_SH_PTRACE_64_H
#include <uapi/asm/ptrace_64.h>
#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->regs[3];
}
#endif /* __ASM_SH_PTRACE_64_H */

arch/sh/include/asm/string.h

@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_SUPERH32
# include <asm/string_32.h>
#else
# include <asm/string_64.h>
#endif
#include <asm/string_32.h>

arch/sh/include/asm/string_64.h (deleted)

@@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_STRING_64_H
#define __ASM_SH_STRING_64_H
#ifdef __KERNEL__
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *dest, const void *src, size_t count);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *);
#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *__dest, const char *__src);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_STRING_64_H */

arch/sh/include/asm/switch_to.h

@@ -4,13 +4,4 @@
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_SWITCH_TO_H
#define __ASM_SH_SWITCH_TO_H
#ifdef CONFIG_SUPERH32
# include <asm/switch_to_32.h>
#else
# include <asm/switch_to_64.h>
#endif
#endif /* __ASM_SH_SWITCH_TO_H */
#include <asm/switch_to_32.h>

arch/sh/include/asm/switch_to_64.h (deleted)

@@ -1,32 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_SWITCH_TO_64_H
#define __ASM_SH_SWITCH_TO_64_H
struct thread_struct;
struct task_struct;
/*
* switch_to() should switch tasks to task nr n, first
*/
struct task_struct *sh64_switch_to(struct task_struct *prev,
struct thread_struct *prev_thread,
struct task_struct *next,
struct thread_struct *next_thread);
#define switch_to(prev,next,last) \
do { \
if (last_task_used_math != next) { \
struct pt_regs *regs = next->thread.uregs; \
if (regs) regs->sr |= SR_FD; \
} \
last = sh64_switch_to(prev, &prev->thread, next, \
&next->thread); \
} while (0)
#endif /* __ASM_SH_SWITCH_TO_64_H */

arch/sh/include/asm/syscall.h

@@ -4,10 +4,6 @@
extern const unsigned long sys_call_table[];
#ifdef CONFIG_SUPERH32
# include <asm/syscall_32.h>
#else
# include <asm/syscall_64.h>
#endif
#include <asm/syscall_32.h>
#endif /* __ASM_SH_SYSCALL_H */

arch/sh/include/asm/syscall_64.h (deleted)

@@ -1,75 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_SYSCALL_64_H
#define __ASM_SH_SYSCALL_64_H
#include <uapi/linux/audit.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
/* The system call number is given by the user in R9 */
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
/*
* XXX: This needs some thought. On SH we don't
* save away the original R9 value anywhere.
*/
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
return IS_ERR_VALUE(regs->regs[9]) ? regs->regs[9] : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->regs[9];
}
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
if (error)
regs->regs[9] = -error;
else
regs->regs[9] = val;
}
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned long *args)
{
memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
const unsigned long *args)
{
memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
}
static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_SH;
#ifdef CONFIG_64BIT
arch |= __AUDIT_ARCH_64BIT;
#endif
#ifdef CONFIG_CPU_LITTLE_ENDIAN
arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
#endif /* __ASM_SH_SYSCALL_64_H */

arch/sh/include/asm/syscalls.h

@@ -2,8 +2,6 @@
#ifndef __ASM_SH_SYSCALLS_H
#define __ASM_SH_SYSCALLS_H
#ifdef __KERNEL__
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off);
@@ -11,11 +9,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
#ifdef CONFIG_SUPERH32
# include <asm/syscalls_32.h>
#else
# include <asm/syscalls_64.h>
#endif
#include <asm/syscalls_32.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_SYSCALLS_H */

arch/sh/include/asm/syscalls_64.h (deleted)

@@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_SYSCALLS_64_H
#define __ASM_SH_SYSCALLS_64_H
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
struct pt_regs;
/* Misc syscall related bits */
asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_SYSCALLS_64_H */

arch/sh/include/asm/thread_info.h

@@ -70,9 +70,7 @@ register unsigned long current_stack_pointer asm("r15") __used;
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
#if defined(CONFIG_SUPERH64)
__asm__ __volatile__ ("getcon cr17, %0" : "=r" (ti));
#elif defined(CONFIG_CPU_HAS_SR_RB)
#if defined(CONFIG_CPU_HAS_SR_RB)
__asm__ __volatile__ ("stc r7_bank, %0" : "=r" (ti));
#else
unsigned long __dummy;

arch/sh/include/asm/tlb.h

@@ -2,10 +2,6 @@
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H
#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/pagemap.h>
@@ -14,7 +10,7 @@
#include <asm-generic/tlb.h>
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
#if defined(CONFIG_CPU_SH4)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else

arch/sh/include/asm/tlb_64.h (deleted)

@@ -1,68 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* include/asm-sh/tlb_64.h
*
* Copyright (C) 2003 Paul Mundt
*/
#ifndef __ASM_SH_TLB_64_H
#define __ASM_SH_TLB_64_H
/* ITLB defines */
#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
/* DTLB defines */
#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
#ifndef __ASSEMBLY__
/**
* for_each_dtlb_entry - Iterate over free (non-wired) DTLB entries
*
* @tlb: TLB entry
*/
#define for_each_dtlb_entry(tlb) \
for (tlb = cpu_data->dtlb.first; \
tlb <= cpu_data->dtlb.last; \
tlb += cpu_data->dtlb.step)
/**
* for_each_itlb_entry - Iterate over free (non-wired) ITLB entries
*
* @tlb: TLB entry
*/
#define for_each_itlb_entry(tlb) \
for (tlb = cpu_data->itlb.first; \
tlb <= cpu_data->itlb.last; \
tlb += cpu_data->itlb.step)
/**
* __flush_tlb_slot - Flushes TLB slot @slot.
*
* @slot: Address of TLB slot.
*/
static inline void __flush_tlb_slot(unsigned long long slot)
{
__asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
}
#ifdef CONFIG_MMU
/* arch/sh64/mm/tlb.c */
int sh64_tlb_init(void);
unsigned long long sh64_next_free_dtlb_entry(void);
unsigned long long sh64_get_wired_dtlb_entry(void);
int sh64_put_wired_dtlb_entry(unsigned long long entry);
void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
unsigned long asid, unsigned long paddr);
void sh64_teardown_tlb_slot(unsigned long long config_addr);
#else
#define sh64_tlb_init() do { } while (0)
#define sh64_next_free_dtlb_entry() (0)
#define sh64_get_wired_dtlb_entry() (0)
#define sh64_put_wired_dtlb_entry(entry) do { } while (0)
#define sh64_setup_tlb_slot(conf, virt, asid, phys) do { } while (0)
#define sh64_teardown_tlb_slot(addr) do { } while (0)
#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_64_H */
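A usage sketch for the iterators above, modeled on how the deleted sh64 mm code flushed the variable TLB slots (a reconstruction under that assumption, not a quote from the tree):

/* Walk every non-wired I/D TLB slot and invalidate it. */
static void flush_all_var_entries(void)
{
	unsigned long long tlb;

	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);
}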

View File

@ -4,11 +4,7 @@
#include <linux/compiler.h>
#ifdef CONFIG_SUPERH32
# include <asm/traps_32.h>
#else
# include <asm/traps_64.h>
#endif
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);

View File

@ -1,35 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_TRAPS_64_H
#define __ASM_SH_TRAPS_64_H
#include <cpu/registers.h>
extern void phys_stext(void);
#define lookup_exception_vector() \
({ \
unsigned long _vec; \
\
__asm__ __volatile__ ( \
"getcon " __EXPEVT ", %0\n\t" \
: "=r" (_vec) \
); \
\
_vec; \
})
static inline void trigger_address_error(void)
{
phys_stext();
}
#define BUILD_TRAP_HANDLER(name) \
asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
#define TRAP_HANDLER_DECL
#endif /* __ASM_SH_TRAPS_64_H */

View File

@ -9,13 +9,8 @@
*/
#ifndef __ASSEMBLY__
#ifdef CONFIG_SUPERH32
typedef u16 insn_size_t;
typedef u32 reg_size_t;
#else
typedef u32 insn_size_t;
typedef u64 reg_size_t;
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TYPES_H */

View File

@ -96,11 +96,7 @@ struct __large_struct { unsigned long buf[100]; };
__pu_err; \
})
#ifdef CONFIG_SUPERH32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif
extern long strncpy_from_user(char *dest, const char __user *src, long count);

View File

@ -1,85 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_UACCESS_64_H
#define __ASM_SH_UACCESS_64_H
/*
* include/asm-sh/uaccess_64.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
* User space memory access functions
*
* Copyright (C) 1999 Niibe Yutaka
*
* Based on:
* MIPS implementation version 1.15 by
* Copyright (C) 1996, 1997, 1998 by Ralf Baechle
* and i386 version.
*/
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
x = 0; \
switch (size) { \
case 1: \
retval = __get_user_asm_b((void *)&x, \
(long)ptr); \
break; \
case 2: \
retval = __get_user_asm_w((void *)&x, \
(long)ptr); \
break; \
case 4: \
retval = __get_user_asm_l((void *)&x, \
(long)ptr); \
break; \
case 8: \
retval = __get_user_asm_q((void *)&x, \
(long)ptr); \
break; \
default: \
__get_user_unknown(); \
break; \
} \
} while (0)
extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
extern void __get_user_unknown(void);
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
switch (size) { \
case 1: \
retval = __put_user_asm_b((void *)&x, \
(__force long)ptr); \
break; \
case 2: \
retval = __put_user_asm_w((void *)&x, \
(__force long)ptr); \
break; \
case 4: \
retval = __put_user_asm_l((void *)&x, \
(__force long)ptr); \
break; \
case 8: \
retval = __put_user_asm_q((void *)&x, \
(__force long)ptr); \
break; \
default: \
__put_user_unknown(); \
} \
} while (0)
extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);
#endif /* __ASM_SH_UACCESS_64_H */

View File

@ -1,9 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
# ifdef CONFIG_SUPERH32
# include <asm/unistd_32.h>
# else
# include <asm/unistd_64.h>
# endif
#include <asm/unistd_32.h>
#define NR_syscalls __NR_syscalls

View File

@ -28,19 +28,12 @@
* to write an integer number of pages.
*/
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
struct user_fpu_struct {
unsigned long fp_regs[32];
unsigned int fpscr;
};
#else
struct user_fpu_struct {
unsigned long fp_regs[16];
unsigned long xfp_regs[16];
unsigned long fpscr;
unsigned long fpul;
};
#endif
struct user {
struct pt_regs regs; /* entire machine state */

View File

@ -10,8 +10,6 @@
# define MODULE_PROC_FAMILY "SH3LE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4LE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5LE "
# else
# error unknown processor family
# endif
@ -22,8 +20,6 @@
# define MODULE_PROC_FAMILY "SH3BE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4BE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5BE "
# else
# error unknown processor family
# endif

View File

@ -15,12 +15,4 @@
#define DWARF_EH_FRAME
#endif
#ifdef CONFIG_SUPERH64
#define EXTRA_TEXT \
*(.text64) \
*(.text..SHmedia32)
#else
#define EXTRA_TEXT
#endif
#endif /* __ASM_SH_VMLINUX_LDS_H */

View File

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_ADDRSPACE_H
#define __ASM_SH_CPU_SH5_ADDRSPACE_H
#define PHYS_PERIPHERAL_BLOCK 0x09000000
#define PHYS_DMAC_BLOCK 0x0e000000
#define PHYS_PCI_BLOCK 0x60000000
#define PHYS_EMI_BLOCK 0xff000000
/* No segmentation.. */
#endif /* __ASM_SH_CPU_SH5_ADDRSPACE_H */

View File

@ -1,94 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_CACHE_H
#define __ASM_SH_CPU_SH5_CACHE_H
/*
* include/asm-sh/cpu-sh5/cache.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*/
#define L1_CACHE_SHIFT 5
/* Valid and Dirty bits */
#define SH_CACHE_VALID (1LL<<0)
#define SH_CACHE_UPDATED (1LL<<57)
/* Unimplemented compat bits.. */
#define SH_CACHE_COMBINED 0
#define SH_CACHE_ASSOC 0
/* Cache flags */
#define SH_CACHE_MODE_WT (1LL<<0)
#define SH_CACHE_MODE_WB (1LL<<1)
/*
* Control Registers.
*/
#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */
#define ICCR_REG0 0 /* Register 0 offset */
#define ICCR_REG1 1 /* Register 1 offset */
#define ICCR0 ICCR_BASE+ICCR_REG0
#define ICCR1 ICCR_BASE+ICCR_REG1
#define ICCR0_OFF 0x0 /* Set ICACHE off */
#define ICCR0_ON 0x1 /* Set ICACHE on */
#define ICCR0_ICI 0x2 /* Invalidate all in IC */
#define ICCR1_NOLOCK 0x0 /* Set No Locking */
#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */
#define OCCR_REG0 0 /* Register 0 offset */
#define OCCR_REG1 1 /* Register 1 offset */
#define OCCR0 OCCR_BASE+OCCR_REG0
#define OCCR1 OCCR_BASE+OCCR_REG1
#define OCCR0_OFF 0x0 /* Set OCACHE off */
#define OCCR0_ON 0x1 /* Set OCACHE on */
#define OCCR0_OCI 0x2 /* Invalidate all in OC */
#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */
#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */
#define OCCR1_NOLOCK 0x0 /* Set No Locking */
/*
* SH-5
* A bit of description here, for neff=32.
*
*  |<--------- tag (19 bits) --------->|
* +-----------------------------+-----------------+------+----------+------+
* |                             |                 | ways |set index |offset|
* +-----------------------------+-----------------+------+----------+------+
* ^                                                2 bits  8 bits    5 bits
* +- Bit 31
*
* Cacheline size is based on offset: 5 bits = 32 bytes per line
* A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
* have a broader space for registers. These are outlined by
* CACHE_?C_*_STEP below.
*
*/
/* Instruction cache */
#define CACHE_IC_ADDRESS_ARRAY 0x01000000
/* Operand Cache */
#define CACHE_OC_ADDRESS_ARRAY 0x01800000
/* These declarations relate to cache 'synonyms' in the operand cache. A
'synonym' occurs where effective address bits overlap between those used for
indexing the cache sets and those passed to the MMU for translation. In the
case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */
#define CACHE_OC_SYN_SHIFT 12
/* Mask to select synonym bit(s) */
#define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
/*
* The instruction cache can't be invalidated based on physical addresses,
* so no instruction cache defines are required here.
*/
#endif /* __ASM_SH_CPU_SH5_CACHE_H */
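As a quick check on the synonym arithmetic above (plain arithmetic, not code from the tree): with CACHE_OC_N_SYNBITS = 1 and CACHE_OC_SYN_SHIFT = 12, CACHE_OC_SYN_MASK expands to ((1UL << 1) - 1) << 12 = 0x1000, i.e. exactly bit 12, matching the note that only bit 12 aliases for 4k pages on SH5-101/103.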

View File

@ -1,113 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_IRQ_H
#define __ASM_SH_CPU_SH5_IRQ_H
/*
* include/asm-sh/cpu-sh5/irq.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*/
/*
* Encoded IRQs are not considered worth supporting.
* The main reason is that there is no per-encoded-interrupt
* enable/disable mechanism (as there was on SH3/4).
* An all-enabled/all-disabled scheme is only worthwhile with
* a cascaded IC to disable/enable/ack on; until such an IC
* is available there is no such support.
*
* Presumably encoded IRQs may use extra IRQs beyond the 64
* below. Some logic must be added to cope with IRQ_IRL?
* in an exclusive way.
*
* Priorities are set at platform level; when IRQ_IRL0-3 are
* set to 0, encoding is allowed. Otherwise it is not.
*/
/* Independent IRQs */
#define IRQ_IRL0 0
#define IRQ_IRL1 1
#define IRQ_IRL2 2
#define IRQ_IRL3 3
#define IRQ_INTA 4
#define IRQ_INTB 5
#define IRQ_INTC 6
#define IRQ_INTD 7
#define IRQ_SERR 12
#define IRQ_ERR 13
#define IRQ_PWR3 14
#define IRQ_PWR2 15
#define IRQ_PWR1 16
#define IRQ_PWR0 17
#define IRQ_DMTE0 18
#define IRQ_DMTE1 19
#define IRQ_DMTE2 20
#define IRQ_DMTE3 21
#define IRQ_DAERR 22
#define IRQ_TUNI0 32
#define IRQ_TUNI1 33
#define IRQ_TUNI2 34
#define IRQ_TICPI2 35
#define IRQ_ATI 36
#define IRQ_PRI 37
#define IRQ_CUI 38
#define IRQ_ERI 39
#define IRQ_RXI 40
#define IRQ_BRI 41
#define IRQ_TXI 42
#define IRQ_ITI 63
#define NR_INTC_IRQS 64
#ifdef CONFIG_SH_CAYMAN
#define NR_EXT_IRQS 32
#define START_EXT_IRQS 64
/* PCI bus 2 uses encoded external interrupts on the Cayman board */
#define IRQ_P2INTA (START_EXT_IRQS + (3*8) + 0)
#define IRQ_P2INTB (START_EXT_IRQS + (3*8) + 1)
#define IRQ_P2INTC (START_EXT_IRQS + (3*8) + 2)
#define IRQ_P2INTD (START_EXT_IRQS + (3*8) + 3)
#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
#define I8042_AUX_IRQ (START_EXT_IRQS + 6)
#define IRQ_CFCARD (START_EXT_IRQS + 7)
#define IRQ_PCMCIA (0)
#else
#define NR_EXT_IRQS 0
#endif
/* Default IRQs, fixed */
#define TIMER_IRQ IRQ_TUNI0
#define RTC_IRQ IRQ_CUI
/* Default Priorities, Platform may choose differently */
#define NO_PRIORITY 0 /* Disabled */
#define TIMER_PRIORITY 2
#define RTC_PRIORITY TIMER_PRIORITY
#define SCIF_PRIORITY 3
#define INTD_PRIORITY 3
#define IRL3_PRIORITY 4
#define INTC_PRIORITY 6
#define IRL2_PRIORITY 7
#define INTB_PRIORITY 9
#define IRL1_PRIORITY 10
#define INTA_PRIORITY 12
#define IRL0_PRIORITY 13
#define TOP_PRIORITY 15
extern int intc_evt_to_irq[(0xE20/0x20)+1];
extern int platform_int_priority[NR_INTC_IRQS];
#endif /* __ASM_SH_CPU_SH5_IRQ_H */
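To make the Cayman numbering above concrete (plain arithmetic): with START_EXT_IRQS = 64, PCI bus 2 occupies the fourth group of eight external lines, so IRQ_P2INTA = 64 + 3*8 + 0 = 88 through IRQ_P2INTD = 91, while I8042_KBD_IRQ = 66, I8042_AUX_IRQ = 70 and IRQ_CFCARD = 71.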

View File

@ -1,22 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H
#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H
/* Common defines */
#define TLB_STEP 0x00000010
#define TLB_PTEH 0x00000000
#define TLB_PTEL 0x00000008
/* PTEH defines */
#define PTEH_ASID_SHIFT 2
#define PTEH_VALID 0x0000000000000001
#define PTEH_SHARED 0x0000000000000002
#define PTEH_MATCH_ASID 0x00000000000003ff
#ifndef __ASSEMBLY__
/* This has to be a common function because the bookkeeping for the next
 * TLB location to fill is shared. */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */

View File

@ -1,103 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_REGISTERS_H
#define __ASM_SH_CPU_SH5_REGISTERS_H
/*
* include/asm-sh/cpu-sh5/registers.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 Richard Curnow
*/
#ifdef __ASSEMBLY__
/* =====================================================================
**
** Section 1: acts on assembly sources pre-processed by GPP ( <source.S>).
** Assigns symbolic names to control & target registers.
*/
/*
* Define some useful aliases for control registers.
*/
#define SR cr0
#define SSR cr1
#define PSSR cr2
/* cr3 UNDEFINED */
#define INTEVT cr4
#define EXPEVT cr5
#define PEXPEVT cr6
#define TRA cr7
#define SPC cr8
#define PSPC cr9
#define RESVEC cr10
#define VBR cr11
/* cr12 UNDEFINED */
#define TEA cr13
/* cr14-cr15 UNDEFINED */
#define DCR cr16
#define KCR0 cr17
#define KCR1 cr18
/* cr19-cr31 UNDEFINED */
/* cr32-cr61 RESERVED */
#define CTC cr62
#define USR cr63
/*
* ABI dependent registers (general purpose set)
*/
#define RET r2
#define ARG1 r2
#define ARG2 r3
#define ARG3 r4
#define ARG4 r5
#define ARG5 r6
#define ARG6 r7
#define SP r15
#define LINK r18
#define ZERO r63
/*
* Status register defines: used only by assembly sources (and
* syntax independent)
*/
#define SR_RESET_VAL 0x0000000050008000
#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */
#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */
#if defined (CONFIG_SH64_SR_WATCH)
#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */
#else
#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */
#endif
#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */
#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */
#else /* Not __ASSEMBLY__ syntax */
/*
** Stringify reg. name
*/
#define __str(x) #x
/* Stringify control register names for use in inline assembly */
#define __SR __str(SR)
#define __SSR __str(SSR)
#define __PSSR __str(PSSR)
#define __INTEVT __str(INTEVT)
#define __EXPEVT __str(EXPEVT)
#define __PEXPEVT __str(PEXPEVT)
#define __TRA __str(TRA)
#define __SPC __str(SPC)
#define __PSPC __str(PSPC)
#define __RESVEC __str(RESVEC)
#define __VBR __str(VBR)
#define __TEA __str(TEA)
#define __DCR __str(DCR)
#define __KCR0 __str(KCR0)
#define __KCR1 __str(KCR1)
#define __CTC __str(CTC)
#define __USR __str(USR)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_CPU_SH5_REGISTERS_H */

View File

@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_RTC_H
#define __ASM_SH_CPU_SH5_RTC_H
#define rtc_reg_size sizeof(u32)
#define RTC_BIT_INVERTED 0 /* The SH-5 RTC is surprisingly sane! */
#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
#endif /* __ASM_SH_CPU_SH5_RTC_H */

View File

@ -1,8 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __KERNEL__
# ifdef __SH5__
# include <asm/posix_types_64.h>
# else
# include <asm/posix_types_32.h>
# endif
#endif /* __KERNEL__ */
#include <asm/posix_types_32.h>

View File

@ -1,29 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_SH_POSIX_TYPES_64_H
#define __ASM_SH_POSIX_TYPES_64_H
typedef unsigned short __kernel_mode_t;
#define __kernel_mode_t __kernel_mode_t
typedef unsigned short __kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
typedef unsigned short __kernel_uid_t;
#define __kernel_uid_t __kernel_uid_t
typedef unsigned short __kernel_gid_t;
#define __kernel_gid_t __kernel_gid_t
typedef long unsigned int __kernel_size_t;
#define __kernel_size_t __kernel_size_t
typedef int __kernel_ssize_t;
#define __kernel_ssize_t __kernel_ssize_t
typedef int __kernel_ptrdiff_t;
#define __kernel_ptrdiff_t __kernel_ptrdiff_t
typedef unsigned short __kernel_old_uid_t;
#define __kernel_old_uid_t __kernel_old_uid_t
typedef unsigned short __kernel_old_gid_t;
#define __kernel_old_gid_t __kernel_old_gid_t
typedef unsigned short __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
#include <asm-generic/posix_types.h>
#endif /* __ASM_SH_POSIX_TYPES_64_H */

View File

@ -25,11 +25,6 @@
#define PT_DATA_ADDR 248 /* &(struct user)->start_data */
#define PT_TEXT_LEN 252
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
#include <asm/ptrace_64.h>
#else
#include <asm/ptrace_32.h>
#endif
#endif /* _UAPI__ASM_SH_PTRACE_H */

View File

@ -1,15 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_SH_PTRACE_64_H
#define _UAPI__ASM_SH_PTRACE_64_H
struct pt_regs {
unsigned long long pc;
unsigned long long sr;
long long syscall_nr;
unsigned long long regs[63];
unsigned long long tregs[8];
unsigned long long pad[2];
};
#endif /* _UAPI__ASM_SH_PTRACE_64_H */

View File

@ -5,18 +5,6 @@
struct sigcontext {
unsigned long oldmask;
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
/* CPU registers */
unsigned long long sc_regs[63];
unsigned long long sc_tregs[8];
unsigned long long sc_pc;
unsigned long long sc_sr;
/* FPU registers */
unsigned long long sc_fpregs[32];
unsigned int sc_fpscr;
unsigned int sc_fpvalid;
#else
/* CPU registers */
unsigned long sc_regs[16];
unsigned long sc_pc;
@ -32,7 +20,6 @@ struct sigcontext {
unsigned int sc_fpscr;
unsigned int sc_fpul;
unsigned int sc_ownedfp;
#endif
};
#endif /* __ASM_SH_SIGCONTEXT_H */

View File

@ -16,66 +16,6 @@ struct __old_kernel_stat {
unsigned long st_ctime;
};
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
struct stat {
unsigned short st_dev;
unsigned short __pad1;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
unsigned short __pad2;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned short st_dev;
unsigned char __pad0[10];
unsigned long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned long st_uid;
unsigned long st_gid;
unsigned short st_rdev;
unsigned char __pad3[10];
long long st_size;
unsigned long st_blksize;
unsigned long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long __pad4; /* future possible st_blocks high bits */
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
unsigned long __unused1;
unsigned long __unused2;
};
#else
struct stat {
unsigned long st_dev;
unsigned long st_ino;
@ -134,6 +74,5 @@ struct stat64 {
};
#define STAT_HAVE_NSEC 1
#endif
#endif /* __ASM_SH_STAT_H */

View File

@ -13,14 +13,9 @@
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__(
#ifdef __SH5__
"byterev %1, %0\n\t"
"shari %0, 32, %0"
#else
"swap.b %1, %0\n\t"
"swap.w %0, %0\n\t"
"swap.b %0, %0"
#endif
: "=r" (x)
: "r" (x));
@ -31,12 +26,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__(
#ifdef __SH5__
"byterev %1, %0\n\t"
"shari %0, 32, %0"
#else
"swap.b %1, %0"
#endif
: "=r" (x)
: "r" (x));

View File

@ -1,8 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __KERNEL__
# ifdef __SH5__
# include <asm/unistd_64.h>
# else
# include <asm/unistd_32.h>
# endif
#endif
#include <asm/unistd_32.h>

View File

@ -1,423 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_SH_UNISTD_64_H
#define __ASM_SH_UNISTD_64_H
/*
* include/asm-sh/unistd_64.h
*
* This file contains the system call numbers.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
* Copyright (C) 2004 Sean McGoogan
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define __NR_restart_syscall 0
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_open 5
#define __NR_close 6
#define __NR_waitpid 7
#define __NR_creat 8
#define __NR_link 9
#define __NR_unlink 10
#define __NR_execve 11
#define __NR_chdir 12
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_lchown 16
/* 17 was sys_break */
#define __NR_oldstat 18
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
#define __NR_umount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_ptrace 26
#define __NR_alarm 27
#define __NR_oldfstat 28
#define __NR_pause 29
#define __NR_utime 30
/* 31 was sys_stty */
/* 32 was sys_gtty */
#define __NR_access 33
#define __NR_nice 34
/* 35 was sys_ftime */
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
#define __NR_mkdir 39
#define __NR_rmdir 40
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
/* 44 was sys_prof */
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
#define __NR_signal 48
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
/* 53 was sys_lock */
#define __NR_ioctl 54
#define __NR_fcntl 55
/* 56 was sys_mpx */
#define __NR_setpgid 57
/* 58 was sys_ulimit */
/* 59 was sys_olduname */
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
#define __NR_dup2 63
#define __NR_getppid 64
#define __NR_getpgrp 65
#define __NR_setsid 66
#define __NR_sigaction 67
#define __NR_sgetmask 68
#define __NR_ssetmask 69
#define __NR_setreuid 70
#define __NR_setregid 71
#define __NR_sigsuspend 72
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
/* 82 was sys_select */
#define __NR_symlink 83
#define __NR_oldlstat 84
#define __NR_readlink 85
#define __NR_uselib 86
#define __NR_swapon 87
#define __NR_reboot 88
#define __NR_readdir 89
#define __NR_mmap 90
#define __NR_munmap 91
#define __NR_truncate 92
#define __NR_ftruncate 93
#define __NR_fchmod 94
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
/* 98 was sys_profil */
#define __NR_statfs 99
#define __NR_fstatfs 100
/* 101 was sys_ioperm */
#define __NR_socketcall 102 /* old implementation of socket system call */
#define __NR_syslog 103
#define __NR_setitimer 104
#define __NR_getitimer 105
#define __NR_stat 106
#define __NR_lstat 107
#define __NR_fstat 108
#define __NR_olduname 109
/* 110 was sys_iopl */
#define __NR_vhangup 111
/* 112 was sys_idle */
/* 113 was sys_vm86old */
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
#define __NR_ipc 117
#define __NR_fsync 118
#define __NR_sigreturn 119
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
#define __NR_cacheflush 123
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
/* 127 was sys_create_module */
#define __NR_init_module 128
#define __NR_delete_module 129
/* 130 was sys_get_kernel_syms */
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
#define __NR_sysfs 135
#define __NR_personality 136
/* 137 was sys_afs_syscall */
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
#define __NR_getdents 141
#define __NR__newselect 142
#define __NR_flock 143
#define __NR_msync 144
#define __NR_readv 145
#define __NR_writev 146
#define __NR_getsid 147
#define __NR_fdatasync 148
#define __NR__sysctl 149
#define __NR_mlock 150
#define __NR_munlock 151
#define __NR_mlockall 152
#define __NR_munlockall 153
#define __NR_sched_setparam 154
#define __NR_sched_getparam 155
#define __NR_sched_setscheduler 156
#define __NR_sched_getscheduler 157
#define __NR_sched_yield 158
#define __NR_sched_get_priority_max 159
#define __NR_sched_get_priority_min 160
#define __NR_sched_rr_get_interval 161
#define __NR_nanosleep 162
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
/* 166 was sys_vm86 */
/* 167 was sys_query_module */
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
#define __NR_getresgid 171
#define __NR_prctl 172
#define __NR_rt_sigreturn 173
#define __NR_rt_sigaction 174
#define __NR_rt_sigprocmask 175
#define __NR_rt_sigpending 176
#define __NR_rt_sigtimedwait 177
#define __NR_rt_sigqueueinfo 178
#define __NR_rt_sigsuspend 179
#define __NR_pread64 180
#define __NR_pwrite64 181
#define __NR_chown 182
#define __NR_getcwd 183
#define __NR_capget 184
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
/* 188 reserved for getpmsg */
/* 189 reserved for putpmsg */
#define __NR_vfork 190
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
#define __NR_mmap2 192
#define __NR_truncate64 193
#define __NR_ftruncate64 194
#define __NR_stat64 195
#define __NR_lstat64 196
#define __NR_fstat64 197
#define __NR_lchown32 198
#define __NR_getuid32 199
#define __NR_getgid32 200
#define __NR_geteuid32 201
#define __NR_getegid32 202
#define __NR_setreuid32 203
#define __NR_setregid32 204
#define __NR_getgroups32 205
#define __NR_setgroups32 206
#define __NR_fchown32 207
#define __NR_setresuid32 208
#define __NR_getresuid32 209
#define __NR_setresgid32 210
#define __NR_getresgid32 211
#define __NR_chown32 212
#define __NR_setuid32 213
#define __NR_setgid32 214
#define __NR_setfsuid32 215
#define __NR_setfsgid32 216
#define __NR_pivot_root 217
#define __NR_mincore 218
#define __NR_madvise 219
/* Non-multiplexed socket family */
#define __NR_socket 220
#define __NR_bind 221
#define __NR_connect 222
#define __NR_listen 223
#define __NR_accept 224
#define __NR_getsockname 225
#define __NR_getpeername 226
#define __NR_socketpair 227
#define __NR_send 228
#define __NR_sendto 229
#define __NR_recv 230
#define __NR_recvfrom 231
#define __NR_shutdown 232
#define __NR_setsockopt 233
#define __NR_getsockopt 234
#define __NR_sendmsg 235
#define __NR_recvmsg 236
/* Non-multiplexed IPC family */
#define __NR_semop 237
#define __NR_semget 238
#define __NR_semctl 239
#define __NR_msgsnd 240
#define __NR_msgrcv 241
#define __NR_msgget 242
#define __NR_msgctl 243
#define __NR_shmat 244
#define __NR_shmdt 245
#define __NR_shmget 246
#define __NR_shmctl 247
#define __NR_getdents64 248
#define __NR_fcntl64 249
/* 250 is reserved for tux */
/* 251 is unused */
#define __NR_gettid 252
#define __NR_readahead 253
#define __NR_setxattr 254
#define __NR_lsetxattr 255
#define __NR_fsetxattr 256
#define __NR_getxattr 257
#define __NR_lgetxattr 258
#define __NR_fgetxattr 259
#define __NR_listxattr 260
#define __NR_llistxattr 261
#define __NR_flistxattr 262
#define __NR_removexattr 263
#define __NR_lremovexattr 264
#define __NR_fremovexattr 265
#define __NR_tkill 266
#define __NR_sendfile64 267
#define __NR_futex 268
#define __NR_sched_setaffinity 269
#define __NR_sched_getaffinity 270
/* 271 is reserved for set_thread_area */
/* 272 is reserved for get_thread_area */
#define __NR_io_setup 273
#define __NR_io_destroy 274
#define __NR_io_getevents 275
#define __NR_io_submit 276
#define __NR_io_cancel 277
#define __NR_fadvise64 278
/* 279 is unused */
#define __NR_exit_group 280
#define __NR_lookup_dcookie 281
#define __NR_epoll_create 282
#define __NR_epoll_ctl 283
#define __NR_epoll_wait 284
#define __NR_remap_file_pages 285
#define __NR_set_tid_address 286
#define __NR_timer_create 287
#define __NR_timer_settime (__NR_timer_create+1)
#define __NR_timer_gettime (__NR_timer_create+2)
#define __NR_timer_getoverrun (__NR_timer_create+3)
#define __NR_timer_delete (__NR_timer_create+4)
#define __NR_clock_settime (__NR_timer_create+5)
#define __NR_clock_gettime (__NR_timer_create+6)
#define __NR_clock_getres (__NR_timer_create+7)
#define __NR_clock_nanosleep (__NR_timer_create+8)
#define __NR_statfs64 296
#define __NR_fstatfs64 297
#define __NR_tgkill 298
#define __NR_utimes 299
#define __NR_fadvise64_64 300
/* 301 is reserved for vserver */
/* 302 is reserved for mbind */
/* 303 is reserved for get_mempolicy */
/* 304 is reserved for set_mempolicy */
#define __NR_mq_open 305
#define __NR_mq_unlink (__NR_mq_open+1)
#define __NR_mq_timedsend (__NR_mq_open+2)
#define __NR_mq_timedreceive (__NR_mq_open+3)
#define __NR_mq_notify (__NR_mq_open+4)
#define __NR_mq_getsetattr (__NR_mq_open+5)
/* 311 is reserved for kexec */
#define __NR_waitid 312
#define __NR_add_key 313
#define __NR_request_key 314
#define __NR_keyctl 315
#define __NR_ioprio_set 316
#define __NR_ioprio_get 317
#define __NR_inotify_init 318
#define __NR_inotify_add_watch 319
#define __NR_inotify_rm_watch 320
/* 321 is unused */
#define __NR_migrate_pages 322
#define __NR_openat 323
#define __NR_mkdirat 324
#define __NR_mknodat 325
#define __NR_fchownat 326
#define __NR_futimesat 327
#define __NR_fstatat64 328
#define __NR_unlinkat 329
#define __NR_renameat 330
#define __NR_linkat 331
#define __NR_symlinkat 332
#define __NR_readlinkat 333
#define __NR_fchmodat 334
#define __NR_faccessat 335
#define __NR_pselect6 336
#define __NR_ppoll 337
#define __NR_unshare 338
#define __NR_set_robust_list 339
#define __NR_get_robust_list 340
#define __NR_splice 341
#define __NR_sync_file_range 342
#define __NR_tee 343
#define __NR_vmsplice 344
#define __NR_move_pages 345
#define __NR_getcpu 346
#define __NR_epoll_pwait 347
#define __NR_utimensat 348
#define __NR_signalfd 349
#define __NR_timerfd_create 350
#define __NR_eventfd 351
#define __NR_fallocate 352
#define __NR_timerfd_settime 353
#define __NR_timerfd_gettime 354
#define __NR_signalfd4 355
#define __NR_eventfd2 356
#define __NR_epoll_create1 357
#define __NR_dup3 358
#define __NR_pipe2 359
#define __NR_inotify_init1 360
#define __NR_preadv 361
#define __NR_pwritev 362
#define __NR_rt_tgsigqueueinfo 363
#define __NR_perf_event_open 364
#define __NR_recvmmsg 365
#define __NR_accept4 366
#define __NR_fanotify_init 367
#define __NR_fanotify_mark 368
#define __NR_prlimit64 369
#define __NR_name_to_handle_at 370
#define __NR_open_by_handle_at 371
#define __NR_clock_adjtime 372
#define __NR_syncfs 373
#define __NR_sendmmsg 374
#define __NR_setns 375
#define __NR_process_vm_readv 376
#define __NR_process_vm_writev 377
#define __NR_kcmp 378
#define __NR_finit_module 379
#define __NR_sched_getattr 380
#define __NR_sched_setattr 381
#define __NR_renameat2 382
#define __NR_seccomp 383
#define __NR_getrandom 384
#define __NR_memfd_create 385
#define __NR_bpf 386
#define __NR_execveat 387
#define __NR_userfaultfd 388
#define __NR_membarrier 389
#define __NR_mlock2 390
#define __NR_copy_file_range 391
#define __NR_preadv2 392
#define __NR_pwritev2 393
#ifdef __KERNEL__
#define __NR_syscalls 394
#endif
#endif /* __ASM_SH_UNISTD_64_H */

View File

@ -3,7 +3,7 @@
# Makefile for the Linux/SuperH kernel.
#
extra-y := head_$(BITS).o vmlinux.lds
extra-y := head_32.o vmlinux.lds
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@ -13,26 +13,26 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
obj-y := debugtraps.o dumpstack.o \
idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
idle.o io.o irq.o irq_32.o kdebugfs.o \
machvec.o nmi_debug.o process.o \
process_$(BITS).o ptrace.o ptrace_$(BITS).o \
process_32.o ptrace.o ptrace_32.o \
reboot.o return_address.o \
setup.o signal_$(BITS).o sys_sh.o \
syscalls_$(BITS).o time.o topology.o traps.o \
traps_$(BITS).o unwinder.o
setup.o signal_32.o sys_sh.o \
syscalls_32.o time.o topology.o traps.o \
traps_32.o unwinder.o
ifndef CONFIG_GENERIC_IOMAP
obj-y += iomap.o
obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o
endif
obj-$(CONFIG_SUPERH32) += sys_sh32.o
obj-y += sys_sh32.o
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o

View File

@ -7,7 +7,6 @@ obj-$(CONFIG_CPU_SH2) = sh2/
obj-$(CONFIG_CPU_SH2A) = sh2a/
obj-$(CONFIG_CPU_SH3) = sh3/
obj-$(CONFIG_CPU_SH4) = sh4/
obj-$(CONFIG_CPU_SH5) = sh5/
# Special cases for family ancestry.

View File

@ -103,7 +103,7 @@ void __attribute__ ((weak)) l2_cache_init(void)
/*
* Generic first-level cache init
*/
#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
unsigned long ccr, flags;

View File

@ -2,6 +2,5 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
obj-$(CONFIG_SUPERH32) += imask.o
obj-$(CONFIG_CPU_SH5) += intc-sh5.o
obj-y += imask.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o

View File

@ -1,194 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/irq/intc-sh5.c
*
* Interrupt Controller support for SH5 INTC.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
* Per-interrupt selective. IRLM=0 (fixed priority) is not
* supported, as it is useless without a cascaded interrupt
* controller.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <cpu/irq.h>
#include <asm/page.h>
/*
* Maybe the generic Peripheral block could move to a more
* generic include file. INTC Block will be defined here
* and only here to make INTC self-contained in a single
* file.
*/
#define INTC_BLOCK_OFFSET 0x01000000
/* Base */
#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
INTC_BLOCK_OFFSET
/* Address */
#define INTC_ICR_SET (intc_virt + 0x0)
#define INTC_ICR_CLEAR (intc_virt + 0x8)
#define INTC_INTPRI_0 (intc_virt + 0x10)
#define INTC_INTSRC_0 (intc_virt + 0x50)
#define INTC_INTSRC_1 (intc_virt + 0x58)
#define INTC_INTREQ_0 (intc_virt + 0x60)
#define INTC_INTREQ_1 (intc_virt + 0x68)
#define INTC_INTENB_0 (intc_virt + 0x70)
#define INTC_INTENB_1 (intc_virt + 0x78)
#define INTC_INTDSB_0 (intc_virt + 0x80)
#define INTC_INTDSB_1 (intc_virt + 0x88)
#define INTC_ICR_IRLM 0x1
#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
/*
* Mapper between the vector ordinal and the IRQ number
* passed to kernel/device drivers.
*/
int intc_evt_to_irq[(0xE20/0x20)+1] = {
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
-1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
-1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
-1, -1 /* 0xE00 - 0xE20 */
};
static unsigned long intc_virt;
static int irlm; /* IRL mode */
static void enable_intc_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
printk("Trying to use straight IRL0-3 with an encoding platform.\n");
if (irq < 32) {
reg = INTC_INTENB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTENB_1;
bitmask = 1 << (irq - 32);
}
__raw_writel(bitmask, reg);
}
static void disable_intc_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
if (irq < 32) {
reg = INTC_INTDSB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTDSB_1;
bitmask = 1 << (irq - 32);
}
__raw_writel(bitmask, reg);
}
static struct irq_chip intc_irq_type = {
.name = "INTC",
.irq_enable = enable_intc_irq,
.irq_disable = disable_intc_irq,
};
void __init plat_irq_setup(void)
{
unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
unsigned long reg;
int i;
intc_virt = (unsigned long)ioremap(INTC_BASE, 1024);
if (!intc_virt) {
panic("Unable to remap INTC\n");
}
/* Set default: per-line enable/disable, priority driven ack/eoi */
for (i = 0; i < NR_INTC_IRQS; i++)
irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
__raw_writel(-1, INTC_INTDSB_0);
__raw_writel(-1, INTC_INTDSB_1);
for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
__raw_writel( NO_PRIORITY, reg);
#ifdef CONFIG_SH_CAYMAN
{
unsigned long data;
/* Set IRLM */
/* If all the priorities are set to 'no priority', then
* assume we are using encoded mode.
*/
irlm = platform_int_priority[IRQ_IRL0] +
platform_int_priority[IRQ_IRL1] +
platform_int_priority[IRQ_IRL2] +
platform_int_priority[IRQ_IRL3];
if (irlm == NO_PRIORITY) {
/* IRLM = 0 */
reg = INTC_ICR_CLEAR;
i = IRQ_INTA;
printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
} else {
/* IRLM = 1 */
reg = INTC_ICR_SET;
i = IRQ_IRL0;
}
__raw_writel(INTC_ICR_IRLM, reg);
/* Set interrupt priorities according to platform description */
for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
data |= platform_int_priority[i] <<
((i % INTC_INTPRI_PPREG) * 4);
if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
/* On every eighth entry, write out the Priority Register */
__raw_writel(data, reg);
data = 0;
reg += 8;
}
}
}
#endif
/*
* And now let interrupts come in.
* sti() is not enough, we need to
* lower priority, too.
*/
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
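The loop at the end of plat_irq_setup above packs eight 4-bit priorities into each 32-bit INTPRI register before writing it out; the packing step in isolation looks like this (illustration only, not code from the tree):

/* Accumulate eight 4-bit priorities into one INTPRI word. */
static unsigned long pack_intpri(const int prio[8])
{
	unsigned long data = 0;
	int i;

	for (i = 0; i < 8; i++)
		data |= (unsigned long)(prio[i] & 0xf) << (i * 4);
	return data;
}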

View File

@ -24,7 +24,6 @@ static const char *cpu_name[] = {
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
[CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
[CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",

View File

@ -1,16 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux/SuperH SH-5 backends.
#
obj-y := entry.o probe.o switchto.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_KALLSYMS) += unwind.o
# CPU subtype setup
obj-$(CONFIG_CPU_SH5) += setup-sh5.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH5) := clock-sh5.o
obj-y += $(clock-y)

View File

@ -1,76 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/clock-sh5.c
*
* SH-5 support for the clock framework
*
* Copyright (C) 2008 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/io.h>
static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
/* Clock, Power and Reset Controller */
#define CPRC_BLOCK_OFF 0x01010000
#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
static unsigned long cprc_base;
static void master_clk_init(struct clk *clk)
{
int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
clk->rate *= ifc_table[idx];
}
static struct sh_clk_ops sh5_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) & 0x0007);
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh5_clk_ops[] = {
&sh5_master_clk_ops,
&sh5_module_clk_ops,
&sh5_bus_clk_ops,
&sh5_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
cprc_base = (unsigned long)ioremap(CPRC_BASE, 1024);
BUG_ON(!cprc_base);
if (idx < ARRAY_SIZE(sh5_clk_ops))
*ops = sh5_clk_ops[idx];
}
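As a worked example of the divisor lookups above (the register value is hypothetical): if __raw_readw(cprc_base) returned 0x2041, cpu_clk_recalc would use idx = 0x2041 & 7 = 1 (divide by ifc_table[1] = 4), bus_clk_recalc idx = (0x2041 >> 3) & 7 = 0 (divide by 2), and module_clk_recalc idx = (0x2041 >> 12) & 7 = 2 (divide by 6); with a 400 MHz parent that yields 100, 200 and roughly 66.7 MHz respectively.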

File diff suppressed because it is too large

View File

@ -1,106 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/fpu.c
*
* Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
* Copyright (C) 2002 STMicroelectronics Limited
* Author : Stuart Menefy
*
* Started from SH4 version:
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
void save_fpu(struct task_struct *tsk)
{
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
"fst.p %0, (2*8), fp4\n\t"
"fst.p %0, (3*8), fp6\n\t"
"fst.p %0, (4*8), fp8\n\t"
"fst.p %0, (5*8), fp10\n\t"
"fst.p %0, (6*8), fp12\n\t"
"fst.p %0, (7*8), fp14\n\t"
"fst.p %0, (8*8), fp16\n\t"
"fst.p %0, (9*8), fp18\n\t"
"fst.p %0, (10*8), fp20\n\t"
"fst.p %0, (11*8), fp22\n\t"
"fst.p %0, (12*8), fp24\n\t"
"fst.p %0, (13*8), fp26\n\t"
"fst.p %0, (14*8), fp28\n\t"
"fst.p %0, (15*8), fp30\n\t"
"fst.p %0, (16*8), fp32\n\t"
"fst.p %0, (17*8), fp34\n\t"
"fst.p %0, (18*8), fp36\n\t"
"fst.p %0, (19*8), fp38\n\t"
"fst.p %0, (20*8), fp40\n\t"
"fst.p %0, (21*8), fp42\n\t"
"fst.p %0, (22*8), fp44\n\t"
"fst.p %0, (23*8), fp46\n\t"
"fst.p %0, (24*8), fp48\n\t"
"fst.p %0, (25*8), fp50\n\t"
"fst.p %0, (26*8), fp52\n\t"
"fst.p %0, (27*8), fp54\n\t"
"fst.p %0, (28*8), fp56\n\t"
"fst.p %0, (29*8), fp58\n\t"
"fst.p %0, (30*8), fp60\n\t"
"fst.p %0, (31*8), fp62\n\t"
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
void restore_fpu(struct task_struct *tsk)
{
asm volatile("fld.p %0, (0*8), fp0\n\t"
"fld.p %0, (1*8), fp2\n\t"
"fld.p %0, (2*8), fp4\n\t"
"fld.p %0, (3*8), fp6\n\t"
"fld.p %0, (4*8), fp8\n\t"
"fld.p %0, (5*8), fp10\n\t"
"fld.p %0, (6*8), fp12\n\t"
"fld.p %0, (7*8), fp14\n\t"
"fld.p %0, (8*8), fp16\n\t"
"fld.p %0, (9*8), fp18\n\t"
"fld.p %0, (10*8), fp20\n\t"
"fld.p %0, (11*8), fp22\n\t"
"fld.p %0, (12*8), fp24\n\t"
"fld.p %0, (13*8), fp26\n\t"
"fld.p %0, (14*8), fp28\n\t"
"fld.p %0, (15*8), fp30\n\t"
"fld.p %0, (16*8), fp32\n\t"
"fld.p %0, (17*8), fp34\n\t"
"fld.p %0, (18*8), fp36\n\t"
"fld.p %0, (19*8), fp38\n\t"
"fld.p %0, (20*8), fp40\n\t"
"fld.p %0, (21*8), fp42\n\t"
"fld.p %0, (22*8), fp44\n\t"
"fld.p %0, (23*8), fp46\n\t"
"fld.p %0, (24*8), fp48\n\t"
"fld.p %0, (25*8), fp50\n\t"
"fld.p %0, (26*8), fp52\n\t"
"fld.p %0, (27*8), fp54\n\t"
"fld.p %0, (28*8), fp56\n\t"
"fld.p %0, (29*8), fp58\n\t"
"fld.p %0, (30*8), fp60\n\t"
"fld.s %0, (32*8), fr63\n\t"
"fputscr fr63\n\t"
"fld.p %0, (31*8), fp62\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
regs->pc += 4;
force_sig(SIGFPE);
}

View File

@ -1,72 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/probe.c
*
* CPU Subtype Probing for SH-5.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/tlb.h>
void cpu_probe(void)
{
unsigned long long cir;
/*
* Do peeks in real mode to avoid having to set up a mapping for
* the WPC registers. On SH5-101 cut2, such a mapping would be
* exposed to an address translation erratum which would make it
* hard to set up correctly.
*/
cir = peek_real_address_q(0x0d000008);
if ((cir & 0xffff) == 0x5103)
boot_cpu_data.type = CPU_SH5_103;
else if (((cir >> 32) & 0xffff) == 0x51e2)
/* CPU.VCR aliased at CIR address on SH5-101 */
boot_cpu_data.type = CPU_SH5_101;
boot_cpu_data.family = CPU_FAMILY_SH5;
/*
* First, setup some sane values for the I-cache.
*/
boot_cpu_data.icache.ways = 4;
boot_cpu_data.icache.sets = 256;
boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
boot_cpu_data.icache.way_incr = (1 << 13);
boot_cpu_data.icache.entry_shift = 5;
boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
boot_cpu_data.icache.linesz;
boot_cpu_data.icache.entry_mask = 0x1fe0;
boot_cpu_data.icache.flags = 0;
/*
* Next, setup some sane values for the D-cache.
*
* On the SH5, these are pretty consistent with the I-cache settings,
* so we just copy over the existing definitions.. these can be fixed
* up later, especially if we add runtime CPU probing.
*
* Though in the meantime it saves us from having to duplicate all of
* the above definitions..
*/
boot_cpu_data.dcache = boot_cpu_data.icache;
/*
* Setup any cache-related flags here
*/
#if defined(CONFIG_CACHE_WRITETHROUGH)
set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
#elif defined(CONFIG_CACHE_WRITEBACK)
set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
#endif
/* Setup some I/D TLB defaults */
sh64_tlb_init();
}

View File

@ -1,121 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SH5-101/SH5-103 CPU Setup
*
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sh_timer.h>
#include <asm/addrspace.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.flags = UPF_IOREMAP,
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100),
DEFINE_RES_IRQ(39),
DEFINE_RES_IRQ(40),
DEFINE_RES_IRQ(42),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
.end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Period IRQ */
.start = IRQ_PRI,
.flags = IORESOURCE_IRQ,
},
[2] = {
/* Carry IRQ */
.start = IRQ_CUI,
.flags = IORESOURCE_IRQ,
},
[3] = {
/* Alarm IRQ */
.start = IRQ_ATI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
#define TMU_BLOCK_OFF 0x01020000
#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(TMU_BASE, 0x30),
DEFINE_RES_IRQ(IRQ_TUNI0),
DEFINE_RES_IRQ(IRQ_TUNI1),
DEFINE_RES_IRQ(IRQ_TUNI2),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh5_early_devices[] __initdata = {
&scif0_device,
&tmu0_device,
};
static struct platform_device *sh5_devices[] __initdata = {
&rtc_device,
};
static int __init sh5_devices_setup(void)
{
int ret;
ret = platform_add_devices(sh5_early_devices,
ARRAY_SIZE(sh5_early_devices));
if (unlikely(ret != 0))
return ret;
return platform_add_devices(sh5_devices,
ARRAY_SIZE(sh5_devices));
}
arch_initcall(sh5_devices_setup);
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh5_early_devices,
ARRAY_SIZE(sh5_early_devices));
}

View File

@ -1,195 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh5/switchto.S
*
* sh64 context switch
*
* Copyright (C) 2004 Richard Curnow
*/
.section .text..SHmedia32,"ax"
.little
.balign 32
.type sh64_switch_to,@function
.global sh64_switch_to
.global __sh64_switch_to_end
sh64_switch_to:
/* Incoming args
r2 - prev
r3 - &prev->thread
r4 - next
r5 - &next->thread
Outgoing results
r2 - last (=prev) : this just stays in r2 throughout
Want to create a full (struct pt_regs) on the stack to allow backtracing
functions to work. However, we only need to populate the callee-save
register slots in this structure; since we're a function our ancestors must
have themselves preserved all caller saved state in the stack. This saves
some wasted effort since we won't need to look at the values.
In particular, all caller-save registers are immediately available for
scratch use.
*/
#define FRAME_SIZE (76*8 + 8)
movi FRAME_SIZE, r0
sub.l r15, r0, r15
! Do normal-style register save to support backtrace
st.l r15, 0, r18 ! save link reg
st.l r15, 4, r14 ! save fp
add.l r15, r63, r14 ! setup frame pointer
! hopefully this looks normal to the backtrace now.
addi.l r15, 8, r1 ! base of pt_regs
addi.l r1, 24, r0 ! base of pt_regs.regs
addi.l r0, (63*8), r8 ! base of pt_regs.trregs
/* Note : to be fixed?
struct pt_regs is really designed for holding the state on entry
to an exception, i.e. pc,sr,regs etc. However, for the context
switch state, some of this is not required. But the unwinder takes
struct pt_regs * as an arg so we have to build this structure
to allow unwinding switched tasks in show_state() */
st.q r0, ( 9*8), r9
st.q r0, (10*8), r10
st.q r0, (11*8), r11
st.q r0, (12*8), r12
st.q r0, (13*8), r13
st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
! the point where the process is left in suspended animation, i.e. current
! fp here, not the saved one.
st.q r0, (16*8), r16
st.q r0, (24*8), r24
st.q r0, (25*8), r25
st.q r0, (26*8), r26
st.q r0, (27*8), r27
st.q r0, (28*8), r28
st.q r0, (29*8), r29
st.q r0, (30*8), r30
st.q r0, (31*8), r31
st.q r0, (32*8), r32
st.q r0, (33*8), r33
st.q r0, (34*8), r34
st.q r0, (35*8), r35
st.q r0, (44*8), r44
st.q r0, (45*8), r45
st.q r0, (46*8), r46
st.q r0, (47*8), r47
st.q r0, (48*8), r48
st.q r0, (49*8), r49
st.q r0, (50*8), r50
st.q r0, (51*8), r51
st.q r0, (52*8), r52
st.q r0, (53*8), r53
st.q r0, (54*8), r54
st.q r0, (55*8), r55
st.q r0, (56*8), r56
st.q r0, (57*8), r57
st.q r0, (58*8), r58
st.q r0, (59*8), r59
! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
! Use a local label to avoid creating a symbol that will confuse the
! backtrace
pta .Lsave_pc, tr0
gettr tr5, r45
gettr tr6, r46
gettr tr7, r47
st.q r8, (5*8), r45
st.q r8, (6*8), r46
st.q r8, (7*8), r47
! Now switch context
gettr tr0, r9
st.l r3, 0, r15 ! prev->thread.sp
st.l r3, 8, r1 ! prev->thread.kregs
st.l r3, 4, r9 ! prev->thread.pc
st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
! Load PC for next task (init value or save_pc later)
ld.l r5, 4, r18 ! next->thread.pc
! Switch stacks
ld.l r5, 0, r15 ! next->thread.sp
ptabs r18, tr0
! Update current
ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
putcon r9, kcr0 ! current = next->thread_info
! go to save_pc for a reschedule, or the initial thread.pc for a new process
blink tr0, r63
! Restore (when we come back to a previously saved task)
.Lsave_pc:
addi.l r15, 32, r0 ! r0 = next's regs
addi.l r0, (63*8), r8 ! r8 = next's tr_regs
ld.q r8, (5*8), r45
ld.q r8, (6*8), r46
ld.q r8, (7*8), r47
ptabs r45, tr5
ptabs r46, tr6
ptabs r47, tr7
ld.q r0, ( 9*8), r9
ld.q r0, (10*8), r10
ld.q r0, (11*8), r11
ld.q r0, (12*8), r12
ld.q r0, (13*8), r13
ld.q r0, (14*8), r14
ld.q r0, (16*8), r16
ld.q r0, (24*8), r24
ld.q r0, (25*8), r25
ld.q r0, (26*8), r26
ld.q r0, (27*8), r27
ld.q r0, (28*8), r28
ld.q r0, (29*8), r29
ld.q r0, (30*8), r30
ld.q r0, (31*8), r31
ld.q r0, (32*8), r32
ld.q r0, (33*8), r33
ld.q r0, (34*8), r34
ld.q r0, (35*8), r35
ld.q r0, (44*8), r44
ld.q r0, (45*8), r45
ld.q r0, (46*8), r46
ld.q r0, (47*8), r47
ld.q r0, (48*8), r48
ld.q r0, (49*8), r49
ld.q r0, (50*8), r50
ld.q r0, (51*8), r51
ld.q r0, (52*8), r52
ld.q r0, (53*8), r53
ld.q r0, (54*8), r54
ld.q r0, (55*8), r55
ld.q r0, (56*8), r56
ld.q r0, (57*8), r57
ld.q r0, (58*8), r58
ld.q r0, (59*8), r59
! epilogue
ld.l r15, 0, r18
ld.l r15, 4, r14
ptabs r18, tr0
movi FRAME_SIZE, r0
add r15, r0, r15
blink tr0, r63
__sh64_switch_to_end:
.LFE1:
.size sh64_switch_to,.LFE1-sh64_switch_to

View File

@ -1,342 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/unwind.c
*
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
static u8 regcache[63];
/*
* Finding the previous stack frame isn't as straightforward as it is on
* some other platforms. In the sh64 case, we don't have "linked" stack
* frames, so we need to do a bit of work to determine the previous frame,
* and in turn, the previous r14/r18 pair.
*
* There are generally a few cases which determine where we can find out
* the r14/r18 values. In the general case, this can be determined by poking
* around the prologue of the symbol PC is in (note that we absolutely must
* have frame pointer support as well as the kernel symbol table mapped,
* otherwise we can't even get this far).
*
* In other cases, such as the interrupt/exception path, we can poke around
* the sp/fp.
*
* Notably, this entire approach is somewhat error prone, and in the event
* that the previous frame cannot be determined, that's all we can do.
* Either way, this still leaves us with a more correct backtrace than what
* we would be able to come up with by walking the stack (which is garbage
* for anything beyond the first frame).
* -- PFM.
*/
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
unsigned long *pprev_fp, unsigned long *pprev_pc,
struct pt_regs *regs)
{
const char *sym;
char namebuf[128];
unsigned long offset;
unsigned long prologue = 0;
unsigned long fp_displacement = 0;
unsigned long fp_prev = 0;
unsigned long offset_r14 = 0, offset_r18 = 0;
int i, found_prologue_end = 0;
sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
if (!sym)
return -EINVAL;
prologue = pc - offset;
if (!prologue)
return -EINVAL;
/* Validate fp, to avoid risk of dereferencing a bad pointer later.
Assume 128Mb since that's the amount of RAM on a Cayman. Modify
when there is an SH-5 board with more. */
if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
(fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
((fp & 7) != 0)) {
return -EINVAL;
}
/*
* Depth to walk, depth is completely arbitrary.
*/
for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
unsigned long op;
u8 major, minor;
u8 src, dest, disp;
op = *(unsigned long *)prologue;
major = (op >> 26) & 0x3f;
src = (op >> 20) & 0x3f;
minor = (op >> 16) & 0xf;
disp = (op >> 10) & 0x3f;
dest = (op >> 4) & 0x3f;
/*
* Stack frame creation happens in a number of ways.. in the
* general case when the stack frame is less than 511 bytes,
* it's generally created by an addi or addi.l:
*
* addi/addi.l r15, -FRAME_SIZE, r15
*
* in the event that the frame size is bigger than this, it's
* typically created using a movi/sub pair as follows:
*
* movi FRAME_SIZE, rX
* sub r15, rX, r15
*/
switch (major) {
case (0x00 >> 2):
switch (minor) {
case 0x8: /* add.l */
case 0x9: /* add */
/* Look for r15, r63, r14 */
if (src == 15 && disp == 63 && dest == 14)
found_prologue_end = 1;
break;
case 0xa: /* sub.l */
case 0xb: /* sub */
if (src != 15 || dest != 15)
continue;
fp_displacement -= regcache[disp];
fp_prev = fp - fp_displacement;
break;
}
break;
case (0xa8 >> 2): /* st.l */
if (src != 15)
continue;
switch (dest) {
case 14:
if (offset_r14 || fp_displacement == 0)
continue;
offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r14 *= sizeof(unsigned long);
offset_r14 += fp_displacement;
break;
case 18:
if (offset_r18 || fp_displacement == 0)
continue;
offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r18 *= sizeof(unsigned long);
offset_r18 += fp_displacement;
break;
}
break;
case (0xcc >> 2): /* movi */
if (dest >= 63) {
printk(KERN_NOTICE "%s: Invalid dest reg %d "
"specified in movi handler. Failed "
"opcode was 0x%lx: ", __func__,
dest, op);
continue;
}
/* Sign extend */
regcache[dest] =
sign_extend64((((u64)op >> 10) & 0xffff), 9);
break;
case (0xd0 >> 2): /* addi */
case (0xd4 >> 2): /* addi.l */
/* Look for r15, -FRAME_SIZE, r15 */
if (src != 15 || dest != 15)
continue;
/* Sign extended frame size.. */
fp_displacement +=
(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
fp_prev = fp - fp_displacement;
break;
}
if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
break;
}
if (offset_r14 == 0 || fp_prev == 0) {
if (!offset_r14)
pr_debug("Unable to find r14 offset\n");
if (!fp_prev)
pr_debug("Unable to find previous fp\n");
return -EINVAL;
}
/* For innermost leaf function, there might not be a offset_r18 */
if (!*pprev_pc && (offset_r18 == 0))
return -EINVAL;
*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
if (offset_r18)
*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
*pprev_pc &= ~1;
return 0;
}
/*
* Don't put this on the stack since we'll want to call in to
* sh64_unwinder_dump() when we're close to underflowing the stack
* anyway.
*/
static struct pt_regs here_regs;
extern const char syscall_ret;
extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;
static void sh64_unwind_inner(const struct stacktrace_ops *ops,
void *data, struct pt_regs *regs);
static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
unsigned long pc, unsigned long fp)
{
if ((fp >= __MEMORY_START) &&
((fp & 7) == 0))
sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
}
static void sh64_unwind_inner(const struct stacktrace_ops *ops,
void *data, struct pt_regs *regs)
{
unsigned long pc, fp;
int ofs = 0;
int first_pass;
pc = regs->pc & ~1;
fp = regs->regs[14];
first_pass = 1;
for (;;) {
int cond;
unsigned long next_fp, next_pc;
if (pc == ((unsigned long)&syscall_ret & ~1)) {
printk("SYSCALL\n");
unwind_nested(ops, data, pc, fp);
return;
}
if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
printk("SYSCALL (PREEMPTED)\n");
unwind_nested(ops, data, pc, fp);
return;
}
/* In this case, the PC is discovered by lookup_prev_stack_frame but
it has 4 taken off it to look like the 'caller' */
if (pc == ((unsigned long)&ret_from_exception & ~1)) {
printk("EXCEPTION\n");
unwind_nested(ops, data, pc, fp);
return;
}
if (pc == ((unsigned long)&ret_from_irq & ~1)) {
printk("IRQ\n");
unwind_nested(ops, data, pc, fp);
return;
}
cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
((pc & 3) == 0) && ((fp & 7) == 0));
pc -= ofs;
ops->address(data, pc, 1);
if (first_pass) {
/* If the innermost frame is a leaf function, it's
* possible that r18 is never saved out to the stack.
*/
next_pc = regs->regs[18];
} else {
next_pc = 0;
}
if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
ofs = sizeof(unsigned long);
pc = next_pc & ~1;
fp = next_fp;
} else {
printk("Unable to lookup previous stack frame\n");
break;
}
first_pass = 0;
}
printk("\n");
}
static void sh64_unwinder_dump(struct task_struct *task,
struct pt_regs *regs,
unsigned long *sp,
const struct stacktrace_ops *ops,
void *data)
{
if (!regs) {
/*
* Fetch current regs if we have no other saved state to back
* trace from.
*/
regs = &here_regs;
__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
__asm__ __volatile__ (
"pta 0f, tr0\n\t"
"blink tr0, %0\n\t"
"0: nop"
: "=r" (regs->pc)
);
}
sh64_unwind_inner(ops, data, regs);
}
static struct unwinder sh64_unwinder = {
.name = "sh64-unwinder",
.dump = sh64_unwinder_dump,
.rating = 150,
};
static int __init sh64_unwinder_init(void)
{
return unwinder_register(&sh64_unwinder);
}
early_initcall(sh64_unwinder_init);

View File

@ -1,346 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/head_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*/
#include <linux/init.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/registers.h>
#include <cpu/mmu_context.h>
#include <asm/thread_info.h>
/*
* MMU defines: TLB boundaries.
*/
#define MMUIR_FIRST ITLB_FIXED
#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP TLB_STEP
#define MMUDR_FIRST DTLB_FIXED
#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP TLB_STEP
/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif
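The check works because 512Mb is a power of two: ANDing with ((1UL<<29)-1) keeps only the bits below the boundary, so the result is zero exactly when the value is 512Mb-aligned. A standalone sketch of the same predicate (aligned_to is a made-up helper):

#include <stdio.h>

/*
 * A value is a multiple of a power-of-two boundary iff ANDing with
 * (boundary - 1) leaves no low bits set, the same test the
 * preprocessor check above applies to CONFIG_PAGE_OFFSET.
 */
static int aligned_to(unsigned long x, unsigned long boundary)
{
	return (x & (boundary - 1)) == 0;
}

int main(void)
{
	printf("%d\n", aligned_to(0x80000000UL, 1UL << 29)); /* 1 */
	printf("%d\n", aligned_to(0x88000000UL, 1UL << 29)); /* 0 */
	return 0;
}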
/*
* MMU defines: Fixed TLBs.
*/
/* Deal safely with the case where the base of RAM is not 512Mb aligned */
#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
#ifdef CONFIG_CACHE_OFF
#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
#else
#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
#endif
#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
#if defined (CONFIG_CACHE_OFF)
#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
/* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
/* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif
#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
.section .empty_zero_page, "aw"
.global empty_zero_page
empty_zero_page:
.long 1 /* MOUNT_ROOT_RDONLY */
.long 0 /* RAMDISK_FLAGS */
.long 0x0200 /* ORIG_ROOT_DEV */
.long 1 /* LOADER_TYPE */
.long 0x00800000 /* INITRD_START */
.long 0x00800000 /* INITRD_SIZE */
.long 0
.text
.balign 4096,0,4096
.section .data, "aw"
.balign PAGE_SIZE
.section .data, "aw"
.balign PAGE_SIZE
.global mmu_pdtp_cache
mmu_pdtp_cache:
.space PAGE_SIZE, 0
.global fpu_in_use
fpu_in_use: .quad 0
__HEAD
.balign L1_CACHE_BYTES
/*
* Condition at the entry of __stext:
* . Reset state:
* . SR.FD = 1 (FPU disabled)
* . SR.BL = 1 (Exceptions disabled)
* . SR.MD = 1 (Privileged Mode)
* . SR.MMU = 0 (MMU Disabled)
* . SR.CD = 0 (CTC User Visible)
* . SR.IMASK = Undefined (Interrupt Mask)
*
* Operations supposed to be performed by __stext:
* . prevent speculative fetch onto device memory while MMU is off
* . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
* . first, save CPU state and set it to something harmless
* . any CPU detection and/or endianness settings (?)
* . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
* . set initial TLB entries for cached and uncached regions
* (no fine granularity paging)
* . set initial cache state
* . enable MMU and caches
* . set CPU to a consistent state
* . registers (including stack pointer and current/KCR0)
* . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
* at this stage. This is all to later Linux initialization steps.
* . initialize FPU
* . clear BSS
* . jump into start_kernel()
* . be prepared for the hopeless case that start_kernel() returns.
*
*/
.global _stext
_stext:
/*
* Prevent speculative fetch on device memory due to
* uninitialized target registers.
*/
ptabs/u ZERO, tr0
ptabs/u ZERO, tr1
ptabs/u ZERO, tr2
ptabs/u ZERO, tr3
ptabs/u ZERO, tr4
ptabs/u ZERO, tr5
ptabs/u ZERO, tr6
ptabs/u ZERO, tr7
synci
/*
* Read/Set CPU state. After this block:
* r29 = Initial SR
*/
getcon SR, r29
movi SR_HARMLESS, r20
putcon r20, SR
/*
* Initialize EMI/LMI. To Be Done.
*/
/*
* CPU detection and/or endianness settings (?). To Be Done.
* Pure PIC code here, please ! Just save state into r30.
* After this block:
* r30 = CPU type/Platform Endianness
*/
/*
* Set initial TLB entries for cached and uncached regions.
* Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
*/
/* Clear ITLBs */
pta clear_ITLB, tr1
movi MMUIR_FIRST, r21
movi MMUIR_END, r22
clear_ITLB:
putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
addi r21, MMUIR_STEP, r21
bne r21, r22, tr1
/* Clear DTLBs */
pta clear_DTLB, tr1
movi MMUDR_FIRST, r21
movi MMUDR_END, r22
clear_DTLB:
putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
addi r21, MMUDR_STEP, r21
bne r21, r22, tr1
/* Map one big (512Mb) page for ITLB */
movi MMUIR_FIRST, r21
movi MMUIR_TEXT_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
movi MMUIR_TEXT_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
/* Map one big CACHED (512Mb) page for DTLB */
movi MMUDR_FIRST, r21
movi MMUDR_CACHED_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
movi MMUDR_CACHED_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
/*
* Setup a DTLB translation for SCIF phys.
*/
addi r21, MMUDR_STEP, r21
movi 0x0a03, r22 /* SCIF phys */
shori 0x0148, r22
putcfg r21, 1, r22 /* PTEL first */
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
/*
* Set cache behaviours.
*/
/* ICache */
movi ICCR_BASE, r21
movi ICCR0_INIT_VAL, r22
movi ICCR1_INIT_VAL, r23
putcfg r21, ICCR_REG0, r22
putcfg r21, ICCR_REG1, r23
/* OCache */
movi OCCR_BASE, r21
movi OCCR0_INIT_VAL, r22
movi OCCR1_INIT_VAL, r23
putcfg r21, OCCR_REG0, r22
putcfg r21, OCCR_REG1, r23
/*
* Enable Caches and MMU. Do the first non-PIC jump.
* Now head.S global variables, constants and externs
* can be used.
*/
getcon SR, r21
movi SR_ENABLE_MMU, r22
or r21, r22, r21
putcon r21, SSR
movi hyperspace, r22
ori r22, 1, r22 /* Make it SHmedia, not required but..*/
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
hyperspace: /* ... that's the next instruction ! */
/*
* Set CPU to a consistent state.
* r31 = FPU support flag
* tr0/tr7 in use. Others give a chance to loop somewhere safe
*/
movi start_kernel, r32
ori r32, 1, r32
ptabs r32, tr0 /* r32 = _start_kernel address */
pta/u hopeless, tr1
pta/u hopeless, tr2
pta/u hopeless, tr3
pta/u hopeless, tr4
pta/u hopeless, tr5
pta/u hopeless, tr6
pta/u hopeless, tr7
gettr tr1, r28 /* r28 = hopeless address */
/* Set initial stack pointer */
movi init_thread_union, SP
putcon SP, KCR0 /* Set current to init_task */
movi THREAD_SIZE, r22 /* Point to the end */
add SP, r22, SP
/*
* Initialize FPU.
* Keep FPU flag in r31. After this block:
* r31 = FPU flag
*/
movi fpu_in_use, r31 /* Temporary */
#ifdef CONFIG_SH_FPU
getcon SR, r21
movi SR_ENABLE_FPU, r22
and r21, r22, r22
putcon r22, SR /* Try to enable */
getcon SR, r22
xor r21, r22, r21
shlri r21, 15, r21 /* Supposedly 0/1 */
st.q r31, 0 , r21 /* Set fpu_in_use */
#else
movi 0, r21
st.q r31, 0 , r21 /* Set fpu_in_use */
#endif
or r21, ZERO, r31 /* Set FPU flag at last */
#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
remote memory via SHdebug link, etc. For these the memory can be guaranteed
to be all zero on boot anyway. */
/*
* Clear bss
*/
pta clear_quad, tr1
movi __bss_start, r22
movi _end, r23
clear_quad:
st.q r22, 0, ZERO
addi r22, 8, r22
bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
#endif
pta/u hopeless, tr1
/* Say bye to head.S but be prepared to wrongly get back ... */
blink tr0, LINK
/* If we ever get back here through LINK/tr1-tr7 */
pta/u hopeless, tr7
hopeless:
/*
* Something's badly wrong here. Loop endlessly,
* there's nothing more we can do about it.
*
* Note on hopeless: it can be jumped into either
* before or after jumping into hyperspace. The only
* requirement is to be PIC called (PTA) before and
* any way (PTA/PTABS) after. According to Virtual
* to Physical mapping a simulator/emulator can easily
* tell where we came here from just looking at hopeless
* (PC) address.
*
* For debugging purposes:
* (r28) hopeless/loop address
* (r29) Original SR
* (r30) CPU type/Platform endianness
* (r31) FPU Support
* (r32) _start_kernel address
*/
blink tr7, ZERO

View File

@ -1,48 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SHmedia irqflags support
*
* Copyright (C) 2006 - 2009 Paul Mundt
*/
#include <linux/irqflags.h>
#include <linux/module.h>
#include <cpu/registers.h>
void notrace arch_local_irq_restore(unsigned long flags)
{
unsigned long long __dummy;
if (flags == ARCH_IRQ_DISABLED) {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (ARCH_IRQ_DISABLED)
);
} else {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (~ARCH_IRQ_DISABLED)
);
}
}
EXPORT_SYMBOL(arch_local_irq_restore);
unsigned long notrace arch_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"and %0, %1, %0"
: "=&r" (flags)
: "r" (ARCH_IRQ_DISABLED)
);
return flags;
}
EXPORT_SYMBOL(arch_local_save_flags);
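Both branches above do the same read-modify-write of SR: the saved flags are just the interrupt-mask bits, so restoring either ORs them back in or ANDs them out. A userland model of that protocol (the static sr variable and the IRQ_DISABLED_BITS value are made-up stand-ins; the real mask lives in the sh irqflags headers):

#include <stdio.h>

#define IRQ_DISABLED_BITS 0x00f0UL /* assumed, not the real value */

static unsigned long sr; /* models the status register */

static unsigned long save_flags(void)
{
	return sr & IRQ_DISABLED_BITS; /* report only the mask bits */
}

static void restore_flags(unsigned long flags)
{
	if (flags == IRQ_DISABLED_BITS)
		sr |= IRQ_DISABLED_BITS;  /* or-in: disable IRQs */
	else
		sr &= ~IRQ_DISABLED_BITS; /* and-out: enable IRQs */
}

int main(void)
{
	unsigned long f = save_flags();   /* IRQs currently enabled */
	restore_flags(IRQ_DISABLED_BITS); /* disable */
	restore_flags(f);                 /* back to the saved state */
	printf("%#lx\n", sr);             /* 0: IRQs enabled again */
	return 0;
}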

View File

@ -46,15 +46,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ ELF32_R_SYM(rel[i].r_info);
relocation = sym->st_value + rel[i].r_addend;
#ifdef CONFIG_SUPERH64
/* For text addresses, bit2 of the st_other field indicates
* whether the symbol is SHmedia (1) or SHcompact (0). If
* SHmedia, the LSB of the symbol needs to be asserted
* for the CPU to be in SHmedia mode when it starts executing
* the branch target. */
relocation |= !!(sym->st_other & 4);
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_NONE:
break;

View File

@ -23,9 +23,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
unlazy_fpu(src, task_pt_regs(src));
#endif
*dst = *src;
if (src->thread.xstate) {

View File

@ -1,461 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/process_64.c
*
* This file handles the architecture-dependent parts of process handling..
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* Started from SH3/4 version:
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
* In turn started from i386 version:
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/switch_to.h>
struct task_struct *last_task_used_math = NULL;
struct pt_regs fake_swapper_regs = { 0, };
void show_regs(struct pt_regs *regs)
{
unsigned long long ah, al, bh, bl, ch, cl;
printk("\n");
show_regs_print_info(KERN_DEFAULT);
ah = (regs->pc) >> 32;
al = (regs->pc) & 0xffffffff;
bh = (regs->regs[18]) >> 32;
bl = (regs->regs[18]) & 0xffffffff;
ch = (regs->regs[15]) >> 32;
cl = (regs->regs[15]) & 0xffffffff;
printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->sr) >> 32;
al = (regs->sr) & 0xffffffff;
asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
bh = (bh) >> 32;
bl = (bl) & 0xffffffff;
asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
ch = (ch) >> 32;
cl = (cl) & 0xffffffff;
printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[0]) >> 32;
al = (regs->regs[0]) & 0xffffffff;
bh = (regs->regs[1]) >> 32;
bl = (regs->regs[1]) & 0xffffffff;
ch = (regs->regs[2]) >> 32;
cl = (regs->regs[2]) & 0xffffffff;
printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[3]) >> 32;
al = (regs->regs[3]) & 0xffffffff;
bh = (regs->regs[4]) >> 32;
bl = (regs->regs[4]) & 0xffffffff;
ch = (regs->regs[5]) >> 32;
cl = (regs->regs[5]) & 0xffffffff;
printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[6]) >> 32;
al = (regs->regs[6]) & 0xffffffff;
bh = (regs->regs[7]) >> 32;
bl = (regs->regs[7]) & 0xffffffff;
ch = (regs->regs[8]) >> 32;
cl = (regs->regs[8]) & 0xffffffff;
printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[9]) >> 32;
al = (regs->regs[9]) & 0xffffffff;
bh = (regs->regs[10]) >> 32;
bl = (regs->regs[10]) & 0xffffffff;
ch = (regs->regs[11]) >> 32;
cl = (regs->regs[11]) & 0xffffffff;
printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[12]) >> 32;
al = (regs->regs[12]) & 0xffffffff;
bh = (regs->regs[13]) >> 32;
bl = (regs->regs[13]) & 0xffffffff;
ch = (regs->regs[14]) >> 32;
cl = (regs->regs[14]) & 0xffffffff;
printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[16]) >> 32;
al = (regs->regs[16]) & 0xffffffff;
bh = (regs->regs[17]) >> 32;
bl = (regs->regs[17]) & 0xffffffff;
ch = (regs->regs[19]) >> 32;
cl = (regs->regs[19]) & 0xffffffff;
printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[20]) >> 32;
al = (regs->regs[20]) & 0xffffffff;
bh = (regs->regs[21]) >> 32;
bl = (regs->regs[21]) & 0xffffffff;
ch = (regs->regs[22]) >> 32;
cl = (regs->regs[22]) & 0xffffffff;
printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[23]) >> 32;
al = (regs->regs[23]) & 0xffffffff;
bh = (regs->regs[24]) >> 32;
bl = (regs->regs[24]) & 0xffffffff;
ch = (regs->regs[25]) >> 32;
cl = (regs->regs[25]) & 0xffffffff;
printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[26]) >> 32;
al = (regs->regs[26]) & 0xffffffff;
bh = (regs->regs[27]) >> 32;
bl = (regs->regs[27]) & 0xffffffff;
ch = (regs->regs[28]) >> 32;
cl = (regs->regs[28]) & 0xffffffff;
printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[29]) >> 32;
al = (regs->regs[29]) & 0xffffffff;
bh = (regs->regs[30]) >> 32;
bl = (regs->regs[30]) & 0xffffffff;
ch = (regs->regs[31]) >> 32;
cl = (regs->regs[31]) & 0xffffffff;
printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[32]) >> 32;
al = (regs->regs[32]) & 0xffffffff;
bh = (regs->regs[33]) >> 32;
bl = (regs->regs[33]) & 0xffffffff;
ch = (regs->regs[34]) >> 32;
cl = (regs->regs[34]) & 0xffffffff;
printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[35]) >> 32;
al = (regs->regs[35]) & 0xffffffff;
bh = (regs->regs[36]) >> 32;
bl = (regs->regs[36]) & 0xffffffff;
ch = (regs->regs[37]) >> 32;
cl = (regs->regs[37]) & 0xffffffff;
printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[38]) >> 32;
al = (regs->regs[38]) & 0xffffffff;
bh = (regs->regs[39]) >> 32;
bl = (regs->regs[39]) & 0xffffffff;
ch = (regs->regs[40]) >> 32;
cl = (regs->regs[40]) & 0xffffffff;
printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[41]) >> 32;
al = (regs->regs[41]) & 0xffffffff;
bh = (regs->regs[42]) >> 32;
bl = (regs->regs[42]) & 0xffffffff;
ch = (regs->regs[43]) >> 32;
cl = (regs->regs[43]) & 0xffffffff;
printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[44]) >> 32;
al = (regs->regs[44]) & 0xffffffff;
bh = (regs->regs[45]) >> 32;
bl = (regs->regs[45]) & 0xffffffff;
ch = (regs->regs[46]) >> 32;
cl = (regs->regs[46]) & 0xffffffff;
printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[47]) >> 32;
al = (regs->regs[47]) & 0xffffffff;
bh = (regs->regs[48]) >> 32;
bl = (regs->regs[48]) & 0xffffffff;
ch = (regs->regs[49]) >> 32;
cl = (regs->regs[49]) & 0xffffffff;
printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[50]) >> 32;
al = (regs->regs[50]) & 0xffffffff;
bh = (regs->regs[51]) >> 32;
bl = (regs->regs[51]) & 0xffffffff;
ch = (regs->regs[52]) >> 32;
cl = (regs->regs[52]) & 0xffffffff;
printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[53]) >> 32;
al = (regs->regs[53]) & 0xffffffff;
bh = (regs->regs[54]) >> 32;
bl = (regs->regs[54]) & 0xffffffff;
ch = (regs->regs[55]) >> 32;
cl = (regs->regs[55]) & 0xffffffff;
printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[56]) >> 32;
al = (regs->regs[56]) & 0xffffffff;
bh = (regs->regs[57]) >> 32;
bl = (regs->regs[57]) & 0xffffffff;
ch = (regs->regs[58]) >> 32;
cl = (regs->regs[58]) & 0xffffffff;
printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[59]) >> 32;
al = (regs->regs[59]) & 0xffffffff;
bh = (regs->regs[60]) >> 32;
bl = (regs->regs[60]) & 0xffffffff;
ch = (regs->regs[61]) >> 32;
cl = (regs->regs[61]) & 0xffffffff;
printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[62]) >> 32;
al = (regs->regs[62]) & 0xffffffff;
bh = (regs->tregs[0]) >> 32;
bl = (regs->tregs[0]) & 0xffffffff;
ch = (regs->tregs[1]) >> 32;
cl = (regs->tregs[1]) & 0xffffffff;
printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[2]) >> 32;
al = (regs->tregs[2]) & 0xffffffff;
bh = (regs->tregs[3]) >> 32;
bl = (regs->tregs[3]) & 0xffffffff;
ch = (regs->tregs[4]) >> 32;
cl = (regs->tregs[4]) & 0xffffffff;
printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[5]) >> 32;
al = (regs->tregs[5]) & 0xffffffff;
bh = (regs->tregs[6]) >> 32;
bl = (regs->tregs[6]) & 0xffffffff;
ch = (regs->tregs[7]) >> 32;
cl = (regs->tregs[7]) & 0xffffffff;
printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
/*
* If we're in kernel mode, dump the stack too..
*/
if (!user_mode(regs)) {
void show_stack(struct task_struct *tsk, unsigned long *sp);
unsigned long sp = regs->regs[15] & 0xffffffff;
struct task_struct *tsk = get_current();
tsk->thread.kregs = regs;
show_stack(tsk, (unsigned long *)sp);
}
}
/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
/*
* See arch/sparc/kernel/process.c for the precedent for doing
* this -- RPC.
*
* The SH-5 FPU save/restore approach relies on
* last_task_used_math pointing to a live task_struct. When
* another task tries to use the FPU for the 1st time, the FPUDIS
* trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
* existing FPU state to the FP regs field within
* last_task_used_math before re-loading the new task's FPU state
* (or initialising it if the FPU has been used before). So if
* last_task_used_math is stale, and its page has already been
* re-allocated for another use, the consequences are rather
* grim. Unless we null it here, there is no other path through
* which it would get safely nulled.
*/
#ifdef CONFIG_SH_FPU
if (last_task_used_math == tsk)
last_task_used_math = NULL;
#endif
}
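The hand-off the comment describes is easier to see in isolation. A userland model of the lazy-FPU protocol; struct task, fpu_trap() and the 32-entry register file are made-up stand-ins for task_struct, the FPUDIS trap handler and the SH-5 FPU state:

#include <stdio.h>

struct task { int used_math; double fpu_state[32]; };

static struct task *last_owner; /* models last_task_used_math */
static double hw_regs[32];      /* models the live FPU registers */

static void fpu_trap(struct task *next)
{
	int i;

	if (last_owner == next)
		return; /* still the owner, nothing to do */

	/* Spill the previous owner first; this is why a stale
	 * last_owner pointer into a freed task would be fatal. */
	if (last_owner)
		for (i = 0; i < 32; i++)
			last_owner->fpu_state[i] = hw_regs[i];

	if (next->used_math)
		for (i = 0; i < 32; i++)
			hw_regs[i] = next->fpu_state[i]; /* reload */
	else
		for (i = 0; i < 32; i++)
			hw_regs[i] = 0.0; /* first use: clean state */

	next->used_math = 1;
	last_owner = next;
}

int main(void)
{
	struct task a = {0}, b = {0};

	fpu_trap(&a);
	hw_regs[0] = 1.5;               /* task a computes something */
	fpu_trap(&b);                   /* b faults in; a's state spilled */
	printf("%g\n", a.fpu_state[0]); /* 1.5 */
	return 0;
}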
void flush_thread(void)
{
/* Called by fs/exec.c (setup_new_exec) to remove traces of a
* previously running executable. */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
last_task_used_math = NULL;
}
/* Force FPU state to be reinitialised after exec */
clear_used_math();
#endif
/* if we are a kernel thread, about to change to user thread,
* update kreg
*/
if (current->thread.kregs == &fake_swapper_regs) {
current->thread.kregs =
((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
current->thread.uregs = current->thread.kregs;
}
}
void release_thread(struct task_struct *dead_task)
{
/* do nothing */
}
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#ifdef CONFIG_SH_FPU
int fpvalid;
struct task_struct *tsk = current;
fpvalid = !!tsk_used_math(tsk);
if (fpvalid) {
if (current == last_task_used_math) {
enable_fpu();
save_fpu(tsk);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
}
return fpvalid;
#else
return 0; /* Task didn't use the fpu at all. */
#endif
}
EXPORT_SYMBOL(dump_fpu);
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs;
#ifdef CONFIG_SH_FPU
/* can't happen for a kernel thread */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
current_pt_regs()->sr |= SR_FD;
}
#endif
/* Copy from sh version */
childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
p->thread.sp = (unsigned long) childregs;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->regs[2] = (unsigned long)arg;
childregs->regs[3] = (unsigned long)usp;
childregs->sr = (1 << 30); /* not user_mode */
childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.pc = (unsigned long) ret_from_kernel_thread;
return 0;
}
*childregs = *current_pt_regs();
/*
* Sign extend the edited stack.
* Note that thread.pc and thread.sp will stay
* 32-bit wide and context switch must take care
* of NEFF sign extension.
*/
if (usp)
childregs->regs[15] = neff_sign_extend(usp);
p->thread.uregs = childregs;
childregs->regs[9] = 0; /* Set return value for child */
childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.pc = (unsigned long) ret_from_fork;
return 0;
}
#ifdef CONFIG_FRAME_POINTER
static int in_sh64_switch_to(unsigned long pc)
{
extern char __sh64_switch_to_end;
/* For a sleeping task, the PC is somewhere in the middle of the function,
so we don't have to worry about masking the LSB off */
return (pc >= (unsigned long) sh64_switch_to) &&
(pc < (unsigned long) &__sh64_switch_to_end);
}
#endif
unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* The same comment as on the Alpha applies here, too ...
*/
pc = thread_saved_pc(p);
#ifdef CONFIG_FRAME_POINTER
if (in_sh64_switch_to(pc)) {
unsigned long schedule_fp;
unsigned long sh64_switch_to_fp;
unsigned long schedule_caller_pc;
sh64_switch_to_fp = (long) p->thread.sp;
/* r14 is saved at offset 4 in the sh64_switch_to frame */
schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
/* and the caller of 'schedule' is (currently!) saved at offset 24
in the frame of schedule (from disasm) */
schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
return schedule_caller_pc;
}
#endif
return pc;
}

View File

@ -1,576 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/ptrace_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2008 Paul Mundt
*
* Started from SH3/4 version:
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* Original x86 implementation:
* By Ross Biro 1/23/92
* edited by Linus Torvalds
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/* This mask defines the bits of the SR which the user is not allowed to
change, which are everything except S, Q, M, PR, SZ, FR. */
#define SR_MASK (0xffff8cfd)
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
/*
* This routine will get a word from the user area in the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
return (*((int *)stack));
}
static inline unsigned long
get_fpu_long(struct task_struct *task, unsigned long addr)
{
unsigned long tmp;
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
if (addr == offsetof(struct user_fpu_struct, fpscr)) {
tmp = FPSCR_INIT;
} else {
tmp = 0xffffffffUL; /* matches initial value in fpu.c */
}
return tmp;
}
if (last_task_used_math == task) {
enable_fpu();
save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
return tmp;
}
/*
* This routine will put a word into the user area in the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
static inline int
put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
{
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
init_fpu(task);
} else if (last_task_used_math == task) {
enable_fpu();
save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
struct pt_regs *regs = child->thread.uregs;
regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
struct pt_regs *regs = child->thread.uregs;
regs->sr &= ~SR_SSTEP;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret;
/* PC, SR, SYSCALL */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->pc,
0, 3 * sizeof(unsigned long long));
/* R1 -> R63 */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->regs,
offsetof(struct pt_regs, regs[0]),
63 * sizeof(unsigned long long));
/* TR0 -> TR7 */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->tregs,
offsetof(struct pt_regs, tregs[0]),
8 * sizeof(unsigned long long));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
/* PC, SR, SYSCALL */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
0, 3 * sizeof(unsigned long long));
/* R1 -> R63 */
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
offsetof(struct pt_regs, regs[0]),
63 * sizeof(unsigned long long));
/* TR0 -> TR7 */
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->tregs,
offsetof(struct pt_regs, tregs[0]),
8 * sizeof(unsigned long long));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(syscall_nr),
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REGS_OFFSET_NAME(16),
REGS_OFFSET_NAME(17),
REGS_OFFSET_NAME(18),
REGS_OFFSET_NAME(19),
REGS_OFFSET_NAME(20),
REGS_OFFSET_NAME(21),
REGS_OFFSET_NAME(22),
REGS_OFFSET_NAME(23),
REGS_OFFSET_NAME(24),
REGS_OFFSET_NAME(25),
REGS_OFFSET_NAME(26),
REGS_OFFSET_NAME(27),
REGS_OFFSET_NAME(28),
REGS_OFFSET_NAME(29),
REGS_OFFSET_NAME(30),
REGS_OFFSET_NAME(31),
REGS_OFFSET_NAME(32),
REGS_OFFSET_NAME(33),
REGS_OFFSET_NAME(34),
REGS_OFFSET_NAME(35),
REGS_OFFSET_NAME(36),
REGS_OFFSET_NAME(37),
REGS_OFFSET_NAME(38),
REGS_OFFSET_NAME(39),
REGS_OFFSET_NAME(40),
REGS_OFFSET_NAME(41),
REGS_OFFSET_NAME(42),
REGS_OFFSET_NAME(43),
REGS_OFFSET_NAME(44),
REGS_OFFSET_NAME(45),
REGS_OFFSET_NAME(46),
REGS_OFFSET_NAME(47),
REGS_OFFSET_NAME(48),
REGS_OFFSET_NAME(49),
REGS_OFFSET_NAME(50),
REGS_OFFSET_NAME(51),
REGS_OFFSET_NAME(52),
REGS_OFFSET_NAME(53),
REGS_OFFSET_NAME(54),
REGS_OFFSET_NAME(55),
REGS_OFFSET_NAME(56),
REGS_OFFSET_NAME(57),
REGS_OFFSET_NAME(58),
REGS_OFFSET_NAME(59),
REGS_OFFSET_NAME(60),
REGS_OFFSET_NAME(61),
REGS_OFFSET_NAME(62),
REGS_OFFSET_NAME(63),
TREGS_OFFSET_NAME(0),
TREGS_OFFSET_NAME(1),
TREGS_OFFSET_NAME(2),
TREGS_OFFSET_NAME(3),
TREGS_OFFSET_NAME(4),
TREGS_OFFSET_NAME(5),
TREGS_OFFSET_NAME(6),
TREGS_OFFSET_NAME(7),
REG_OFFSET_END,
};
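regoffset_table is a sentinel-terminated name-to-offset map; consumers scan it linearly, in the style of the generic regs_query_register_offset() helper. A minimal model of that lookup (names and offsets here are illustrative; the real offsets come from offsetof on struct pt_regs):

#include <stdio.h>
#include <string.h>

struct reg_off { const char *name; int offset; };

static const struct reg_off table[] = {
	{ "pc", 0 }, { "sr", 8 }, { "r0", 24 }, /* illustrative */
	{ NULL, -1 },                           /* REG_OFFSET_END */
};

static int query_offset(const char *name)
{
	const struct reg_off *t;

	for (t = table; t->name != NULL; t++)
		if (strcmp(t->name, name) == 0)
			return t->offset;
	return -1; /* unknown register */
}

int main(void)
{
	printf("%d\n", query_offset("sr")); /* 8 */
	return 0;
}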
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* PC, SR, SYSCALL,
* R1 --> R63,
* TR0 --> TR7,
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long long),
.align = sizeof(long long),
.get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) /
sizeof(long long),
.size = sizeof(long long),
.align = sizeof(long long),
.get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
};
static const struct user_regset_view user_sh64_native_view = {
.name = "sh64",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh64_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area. We must
disallow any changes to certain SR bits or u_fpvalid, since
this could crash the kernel or result in a security
loophole. */
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs)) {
/* Ignore change of top 32 bits of SR */
if (addr == offsetof (struct pt_regs, sr)+4)
{
ret = 0;
break;
}
/* If lower 32 bits of SR, ignore non-user bits */
if (addr == offsetof (struct pt_regs, sr))
{
long cursr = get_stack_long(child, addr);
data &= ~(SR_MASK);
data |= (cursr & SR_MASK);
}
ret = put_stack_long(child, addr, data);
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
ret = put_fpu_long(child, index, data);
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int sh64_ptrace(long request, long pid,
unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
static unsigned long first_call;
if (!test_and_set_bit(0, &first_call)) {
/* Set WPC.DBRMODE to 0. This makes all debug events get
* delivered through RESVEC, i.e. into the handlers in entry.S.
* (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
* would normally be left set to 1, which makes debug events get
* delivered through DBRVEC, i.e. into the remote gdb's
* handlers. This prevents ptrace getting them, and confuses
* the remote gdb.) */
printk("DBRMODE set to 0 to permit native debugging\n");
poke_real_address_q(WPC_DBRMODE, 0);
}
return sys_ptrace(request, pid, addr, data);
}
asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
{
long long ret = 0;
secure_computing_strict(regs->regs[9]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1LL;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[9]);
audit_syscall_entry(regs->regs[1], regs->regs[2], regs->regs[3],
regs->regs[4], regs->regs[5]);
return ret ?: regs->regs[9];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[9]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
/* Called with interrupts disabled */
asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
{
/* This is called after a single step exception (DEBUGSS).
There is no need to change the PC, as it is a post-execution
exception, as entry.S does not do anything to the PC for DEBUGSS.
We need to clear the Single Step setting in SR to avoid
continually stepping. */
local_irq_enable();
regs->sr &= ~SR_SSTEP;
force_sig(SIGTRAP);
}
/* Called with interrupts disabled */
BUILD_TRAP_HANDLER(breakpoint)
{
TRAP_HANDLER_DECL;
/* We need to forward step the PC, to counteract the backstep done
in signal.c. */
local_irq_enable();
force_sig(SIGTRAP);
regs->pc += 4;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}

View File

@ -4,9 +4,7 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
#ifdef CONFIG_SUPERH32
#include <asm/watchdog.h>
#endif
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/tlbflush.h>
@ -15,13 +13,11 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
#ifdef CONFIG_SUPERH32
static void watchdog_trigger_immediate(void)
{
sh_wdt_write_cnt(0xFF);
sh_wdt_write_csr(0xC2);
}
#endif
static void native_machine_restart(char * __unused)
{
@ -33,10 +29,8 @@ static void native_machine_restart(char * __unused)
/* Address error with SR.BL=1 first. */
trigger_address_error();
#ifdef CONFIG_SUPERH32
/* If that fails or is unsupported, go for the watchdog next. */
watchdog_trigger_immediate();
#endif
/*
* Give up and sleep.

View File

@ -1,51 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/sh_ksyms_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*/
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/screen_info.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
EXPORT_SYMBOL(__put_user_asm_b);
EXPORT_SYMBOL(__put_user_asm_w);
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__put_user_asm_q);
EXPORT_SYMBOL(__get_user_asm_b);
EXPORT_SYMBOL(__get_user_asm_w);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_q);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcpy);
/* Ugh. These come in from libgcc.a at link time. */
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
DECLARE_EXPORT(__sdivsi3);
DECLARE_EXPORT(__sdivsi3_1);
DECLARE_EXPORT(__sdivsi3_2);
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__div_table);

View File

@ -1,567 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/signal_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2008 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#define REG_RET 9
#define REG_ARG1 2
#define REG_ARG2 3
#define REG_ARG3 4
#define REG_SP 15
#define REG_PR 18
#define REF_REG_RET regs->regs[REG_RET]
#define REF_REG_SP regs->regs[REG_SP]
#define DEREF_REG_PR regs->regs[REG_PR]
#define DEBUG_SIG 0
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs);
static inline void
handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
{
/* If we're not from a syscall, bail out */
if (regs->syscall_nr < 0)
return;
/* check for system call restart.. */
switch (regs->regs[REG_RET]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
no_system_call_restart:
regs->regs[REG_RET] = -EINTR;
break;
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
/* fallthrough */
case -ERESTARTNOINTR:
/* Decode syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
break;
}
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
static void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
/*
* We want the common case to go fast, which
* is why we may in certain cases get here from
* kernel mode. Just return without doing anything
* if so.
*/
if (!user_mode(regs))
return;
if (get_signal(&ksig)) {
handle_syscall_restart(regs, &ksig.ka.sa);
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
}
/* Did we come from a system call? */
if (regs->syscall_nr >= 0) {
/* Restart the system call - no handlers present */
switch (regs->regs[REG_RET]) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
/* Decode Syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
break;
case -ERESTART_RESTARTBLOCK:
regs->regs[REG_RET] = __NR_restart_syscall;
regs->pc -= 4;
break;
}
}
/* No signal to deliver -- put the saved sigmask back */
restore_saved_sigmask();
}
/*
* Do a signal return; undo the signal stack.
*/
struct sigframe {
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
long long retcode[2];
};
struct rt_sigframe {
struct siginfo __user *pinfo;
void *puc;
struct siginfo info;
struct ucontext uc;
long long retcode[2];
};
#ifdef CONFIG_SH_FPU
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
err |= __get_user (fpvalid, &sc->sc_fpvalid);
conditional_used_math(fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
(sizeof(long long) * 32) + (sizeof(int) * 1));
return err;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
fpvalid = !!used_math();
err |= __put_user(fpvalid, &sc->sc_fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
(sizeof(long long) * 32) + (sizeof(int) * 1));
clear_used_math();
return err;
}
#else
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
return 0;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
return 0;
}
#endif
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
{
unsigned int err = 0;
unsigned long long current_sr, new_sr;
#define SR_MASK 0xffff8cfd
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
/* Prevent the signal handler manipulating SR in a way that can
crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
modified */
current_sr = regs->sr;
err |= __get_user(new_sr, &sc->sc_sr);
regs->sr &= SR_MASK;
regs->sr |= (new_sr & ~SR_MASK);
COPY(pc);
#undef COPY
/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
* has been restored above.) */
err |= restore_sigcontext_fpu(regs, sc);
regs->syscall_nr = -1; /* disable syscall checks */
err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
return err;
}
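The two-step SR update above amounts to a masked merge: kernel-owned bits survive sigreturn, and only the user-modifiable bits (S, Q, M, PR, SZ, FR) come from the saved context. A standalone sketch of that merge, restricted to the low 32 bits that SR_MASK covers (merge_sr is a made-up name):

#include <stdio.h>
#include <stdint.h>

#define SR_MASK 0xffff8cfdu /* kernel-owned SR bits, as defined above */

static uint32_t merge_sr(uint32_t cur, uint32_t new_sr)
{
	return (cur & SR_MASK) | (new_sr & ~SR_MASK);
}

int main(void)
{
	/* user tries to flip every bit; only ~SR_MASK bits get through */
	printf("%#x\n", merge_sr(0x40000000u, 0xffffffffu)); /* 0x40007302 */
	return 0;
}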
asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
sigset_t set;
long long ret;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &ret))
goto badframe;
regs->pc -= 4;
return (int) ret;
badframe:
force_sig(SIGSEGV);
return 0;
}
asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
sigset_t set;
long long ret;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
goto badframe;
regs->pc -= 4;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return (int) ret;
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Set up a signal frame.
*/
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
/* Do this first, otherwise if this sets SR.FD, that value isn't preserved. */
err |= setup_sigcontext_fpu(regs, sc);
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
COPY(sr); COPY(pc);
#undef COPY
err |= __put_user(mask, &sc->oldmask);
return err;
}
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -8ul);
}
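The & -8ul on the return above rounds the frame address down to an 8-byte boundary after reserving frame_size bytes, since -8ul is just ~7ul. A standalone sketch (place_frame is a made-up name):

#include <stdio.h>

/* Reserve size bytes below sp, then clear the low three bits so the
 * resulting frame address is 8-byte aligned. */
static unsigned long place_frame(unsigned long sp, unsigned long size)
{
	return (sp - size) & -8ul;
}

int main(void)
{
	printf("%#lx\n", place_frame(0x7fff1005UL, 0x20UL)); /* 0x7fff0fe0 */
	return 0;
}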
void sa_default_restorer(void); /* See comments below */
void sa_default_rt_restorer(void); /* See comments below */
static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
struct sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
if (_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
}
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
ksig->ka.sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
return -EFAULT;
/* Cohere the trampoline with the I-cache. */
flush_cache_sigtramp(DEREF_REG_PR-1);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
/* FIXME:
The glibc profiling support for SH-5 needs to be passed a sigcontext
so it can retrieve the PC. At some point during 2003 the glibc
support was changed to receive the sigcontext through the 2nd
argument, but there are still versions of libc.so in use that use
the 3rd argument. Until libc.so is stabilised, pass the sigcontext
through both 2nd and 3rd arguments.
*/
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
/* Broken %016Lx */
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
return 0;
}
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, regs->regs[REG_SP]);
err |= setup_sigcontext(&frame->uc.uc_mcontext,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Give up early, as i386 does, in case of error */
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
ksig->ka.sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
return -EFAULT;
/* Cohere the trampoline with the I-cache. */
flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
return 0;
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
/* Set up the stack frame */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(ksig, oldset, regs);
else
ret = setup_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}

View File

@ -1,419 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/syscalls_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 - 2007 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/sys.h>
.section .data, "aw"
.balign 32
/*
* System calls jump table
*/
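/*
 * Illustrative dispatch sketch (an assumption -- the actual lookup lives
 * in the SH-5 entry code, not in this file): the trap handler
 * bounds-checks the syscall number and indexes this table, roughly:
 *
 *	if (nr < NR_syscalls)
 *		ret = sys_call_table[nr](arg0, ..., arg5);
 *	else
 *		ret = -ENOSYS;
 */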
.globl sys_call_table
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sh64_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* sys_olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_ni_syscall /* sys_oldselect */
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall /* Obsolete implementation of socket syscall */
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
.long sys_ni_syscall /* 110 */ /* iopl */
.long sys_vhangup
.long sys_ni_syscall /* idle */
.long sys_ni_syscall /* vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc /* Obsolete ipc syscall implementation */
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
.long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* getpmsg */
.long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
/* Broken-out socket family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_socket /* 220 */
.long sys_bind
.long sys_connect
.long sys_listen
.long sys_accept
.long sys_getsockname /* 225 */
.long sys_getpeername
.long sys_socketpair
.long sys_send
.long sys_sendto
.long sys_recv /* 230 */
.long sys_recvfrom
.long sys_shutdown
.long sys_setsockopt
.long sys_getsockopt
.long sys_sendmsg /* 235 */
.long sys_recvmsg
/* Broken-out IPC family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_semop
.long sys_semget
.long sys_semctl
.long sys_msgsnd /* 240 */
.long sys_msgrcv
.long sys_msgget
.long sys_msgctl
.long sys_shmat
.long sys_shmdt /* 245 */
.long sys_shmget
.long sys_shmctl
/* Rest of syscalls listed in 2.4 i386 unistd.h */
.long sys_getdents64
.long sys_fcntl64
.long sys_ni_syscall /* 250 reserved for TUX */
.long sys_ni_syscall /* Reserved for Security */
.long sys_gettid
.long sys_readahead
.long sys_setxattr
.long sys_lsetxattr /* 255 */
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr /* 260 */
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr /* 265 */
.long sys_tkill
.long sys_sendfile64
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity /* 270 */
.long sys_ni_syscall /* reserved for set_thread_area */
.long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents /* 275 */
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64
.long sys_ni_syscall
.long sys_exit_group /* 280 */
/* Rest of new 2.6 syscalls */
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl
.long sys_epoll_wait
.long sys_remap_file_pages /* 285 */
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime
.long sys_timer_gettime
.long sys_timer_getoverrun /* 290 */
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep /* 295 */
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill
.long sys_utimes
.long sys_fadvise64_64 /* 300 */
.long sys_ni_syscall /* Reserved for vserver */
.long sys_ni_syscall /* Reserved for mbind */
.long sys_ni_syscall /* get_mempolicy */
.long sys_ni_syscall /* set_mempolicy */
.long sys_mq_open /* 305 */
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify
.long sys_mq_getsetattr /* 310 */
.long sys_ni_syscall /* Reserved for kexec */
.long sys_waitid
.long sys_add_key
.long sys_request_key
.long sys_keyctl /* 315 */
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch /* 320 */
.long sys_ni_syscall
.long sys_migrate_pages
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 325 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 330 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 335 */
.long sys_pselect6
.long sys_ppoll
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 340 */
.long sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_move_pages /* 345 */
.long sys_getcpu
.long sys_epoll_pwait
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create /* 350 */
.long sys_eventfd
.long sys_fallocate
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4 /* 355 */
.long sys_eventfd2
.long sys_epoll_create1
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1 /* 360 */
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_recvmmsg /* 365 */
.long sys_accept4
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
.long sys_name_to_handle_at /* 370 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
.long sys_setns /* 375 */
.long sys_process_vm_readv
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
.long sys_sched_getattr /* 380 */
.long sys_sched_setattr
.long sys_renameat2
.long sys_seccomp
.long sys_getrandom
.long sys_memfd_create /* 385 */
.long sys_bpf
.long sys_execveat
.long sys_userfaultfd
.long sys_membarrier
.long sys_mlock2 /* 390 */
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2

View File

@ -1,814 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/traps_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
insn_size_t opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
if (!access_ok(aligned_pc, sizeof(insn_size_t))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
} else {
/* If the fault was in the kernel, we can either read
* this directly, or if not, we fault.
*/
*result_opcode = *(insn_size_t *)aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
/* SHcompact */
/* TODO : provide handling for this. We don't really support
user-mode SHcompact yet, and for a kernel fault, this would
have to come from a module built for SHcompact. */
return -EFAULT;
} else {
/* misaligned */
return -EFAULT;
}
}
static int address_is_sign_extended(__u64 a)
{
__u64 b;
#if (NEFF == 32)
b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
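/*
 * Example (illustration only): with NEFF == 32, a = 0x0000000080000000
 * fails this check, since sign-extending its low 32 bits gives
 * 0xffffffff80000000 != a; only the sign-extended form is a valid
 * effective address.
 */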
/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
__u64 base_address, addr;
int basereg;
switch (1 << width_shift) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
__s64 displacement;
displacement = (opcode >> 10) & 0x3ff;
displacement = sign_extend64(displacement, 9);
addr = (__u64)((__s64)base_address + (displacement << width_shift));
} else {
__u64 offset;
int offsetreg;
offsetreg = (opcode >> 10) & 0x3f;
offset = regs->regs[offsetreg];
addr = base_address + offset;
}
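/*
 * Worked example (illustration only): a displacement field of 0x3ff
 * sign-extends to -1; with width_shift == 2 (e.g. an LD.L) the byte
 * displacement is -1 << 2 = -4, so addr = base_address - 4.
 */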
/* Check sign extended */
if (!address_is_sign_extended(addr))
return -1;
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
inc_unaligned_user_access();
if (addr >= TASK_SIZE)
return -1;
} else
inc_unaligned_kernel_access();
*address = addr;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
unaligned_fixups_notify(current, opcode, regs);
return 0;
}
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
q[0] = p[0];
q[1] = p[1];
if (do_sign_extend) {
*result = (__u64)(__s64) *(short *) &x;
} else {
*result = (__u64) x;
}
}
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
x = (__u16) value;
p[0] = q[0];
p[1] = q[1];
}
static int misaligned_load(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
switch (width_shift) {
case 1:
if (do_sign_extend) {
regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
} else {
regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
}
break;
case 2:
regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
break;
case 3:
regs->regs[destreg] = buffer;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 lo, hi;
switch (width_shift) {
case 1:
misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
break;
case 2:
asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
case 3:
asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
static int misaligned_store(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
switch (width_shift) {
case 1:
*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
break;
case 2:
*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
break;
case 3:
buffer = regs->regs[srcreg];
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 val = regs->regs[srcreg];
switch (width_shift) {
case 1:
misaligned_kernel_word_store(address, val);
break;
case 2:
asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
break;
case 3:
asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
__u32 buflo, bufhi;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
buflo = *(__u32*) &buffer;
bufhi = *(1 + (__u32*) &buffer);
switch (width_shift) {
case 2:
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fpu_store(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
/* Initialise these to NaNs. */
__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
switch (width_shift) {
case 2:
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
*(__u32*) &buffer = buflo;
*(1 + (__u32*) &buffer) = bufhi;
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fixup(struct pt_regs *regs)
{
insn_size_t opcode;
int error;
int major, minor;
unsigned int user_action;
user_action = unaligned_user_action();
if (!(user_action & UM_FIXUP))
return -1;
error = read_opcode(regs->pc, &opcode, user_mode(regs));
if (error < 0) {
return error;
}
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
break;
case (0xb0>>2): /* LD.UW */
error = misaligned_load(regs, opcode, 1, 1, 0);
break;
case (0x88>>2): /* LD.L */
error = misaligned_load(regs, opcode, 1, 2, 1);
break;
case (0x8c>>2): /* LD.Q */
error = misaligned_load(regs, opcode, 1, 3, 0);
break;
case (0xa4>>2): /* ST.W */
error = misaligned_store(regs, opcode, 1, 1);
break;
case (0xa8>>2): /* ST.L */
error = misaligned_store(regs, opcode, 1, 2);
break;
case (0xac>>2): /* ST.Q */
error = misaligned_store(regs, opcode, 1, 3);
break;
case (0x40>>2): /* indexed loads */
switch (minor) {
case 0x1: /* LDX.W */
error = misaligned_load(regs, opcode, 0, 1, 1);
break;
case 0x5: /* LDX.UW */
error = misaligned_load(regs, opcode, 0, 1, 0);
break;
case 0x2: /* LDX.L */
error = misaligned_load(regs, opcode, 0, 2, 1);
break;
case 0x3: /* LDX.Q */
error = misaligned_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0x60>>2): /* indexed stores */
switch (minor) {
case 0x1: /* STX.W */
error = misaligned_store(regs, opcode, 0, 1);
break;
case 0x2: /* STX.L */
error = misaligned_store(regs, opcode, 0, 2);
break;
case 0x3: /* STX.Q */
error = misaligned_store(regs, opcode, 0, 3);
break;
default:
error = -1;
break;
}
break;
case (0x94>>2): /* FLD.S */
error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
break;
case (0x98>>2): /* FLD.P */
error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
break;
case (0x9c>>2): /* FLD.D */
error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
break;
case (0x1c>>2): /* floating indexed loads */
switch (minor) {
case 0x8: /* FLDX.S */
error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FLDX.P */
error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FLDX.D */
error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0xb4>>2): /* FST.S */
error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
break;
case (0xb8>>2): /* FST.P */
error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
break;
case (0xbc>>2): /* FST.D */
error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
break;
case (0x3c>>2): /* floating indexed stores */
switch (minor) {
case 0x8: /* FSTX.S */
error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FSTX.P */
error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FSTX.D */
error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
default:
/* Fault */
error = -1;
break;
}
if (error < 0) {
return error;
} else {
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
}
static void do_unhandled_exception(int signr, char *str, unsigned long error,
struct pt_regs *regs)
{
if (user_mode(regs))
force_sig(signr);
die_if_no_fixup(str, regs, error);
}
#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
do_unhandled_exception(signr, str, error_code, regs); \
}
DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
#if defined(CONFIG_SH64_ID2815_WORKAROUND)
#define OPCODE_INVALID 0
#define OPCODE_USER_VALID 1
#define OPCODE_PRIV_VALID 2
/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG 3
/* Table of valid opcodes for SHmedia mode.
Form a 10-bit value by concatenating the major/minor opcodes i.e.
opcode[31:26,19:16]. The 6 MSBs of this value index into the following
array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
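/*
 * Worked example (illustration only): GETCON encodes major = 0x9 and
 * minor = 0xf, so combined = 0x9f. The lookup below computes index = 0x9
 * and shift = 0xf << 1 = 30, giving
 * (shmedia_opcode_table[0x9] >> 30) & 0x3 = (0xc0000000 >> 30) & 0x3 = 3,
 * i.e. OPCODE_CTRL_REG -- matching the combined == 0x9f case handled
 * further down.
 */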
/* Workaround SH5-101 cut2 silicon defect #2815 :
in some situations, inter-mode branches from SHcompact -> SHmedia
which should take ITLBMISS or EXECPROT exceptions at the target
falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
unsigned long pc, aligned_pc;
unsigned long index, shift;
unsigned long major, minor, combined;
unsigned long reserved_field;
int opcode_state;
int get_user_error;
int signr = SIGILL;
char *exception_name = "reserved_instruction";
pc = regs->pc;
/* SHcompact is not handled */
if (unlikely((pc & 3) == 0))
goto out;
/* SHmedia : check for defect. This requires executable vmas
to be readable too. */
aligned_pc = pc & ~3;
if (!access_ok(aligned_pc, sizeof(insn_size_t)))
get_user_error = -EFAULT;
else
get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
if (get_user_error < 0) {
/*
* Error trying to read opcode. This typically means a
* real fault, not a RESINST any more. So change the
* codes.
*/
exception_name = "address error (exec)";
signr = SIGSEGV;
goto out;
}
/* These bits are currently reserved as zero in all valid opcodes */
reserved_field = opcode & 0xf;
if (unlikely(reserved_field))
goto out; /* invalid opcode */
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
combined = (major << 4) | minor;
index = major;
shift = minor << 1;
opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
switch (opcode_state) {
case OPCODE_INVALID:
/* Trap. */
break;
case OPCODE_USER_VALID:
/*
* Restart the instruction: the branch to the instruction
* will now be from an RTE not from SHcompact so the
* silicon defect won't be triggered.
*/
return;
case OPCODE_PRIV_VALID:
if (!user_mode(regs)) {
/*
* Should only ever get here if a module has
* SHcompact code inside it. If so, the same fix
* up is needed.
*/
return; /* same reason */
}
/*
* Otherwise, user mode trying to execute a privileged
* instruction - fall through to trap.
*/
break;
case OPCODE_CTRL_REG:
/* If in privileged mode, return as above. */
if (!user_mode(regs))
return;
/* In user mode ... */
if (combined == 0x9f) { /* GETCON */
unsigned long regno = (opcode >> 20) & 0x3f;
if (regno >= 62)
return;
/* reserved/privileged control register => trap */
} else if (combined == 0x1bf) { /* PUTCON */
unsigned long regno = (opcode >> 4) & 0x3f;
if (regno >= 62)
return;
/* reserved/privileged control register => trap */
}
break;
default:
/* Fall through to trap. */
break;
}
out:
do_unhandled_exception(signr, exception_name, error_code, regs);
}
#else /* CONFIG_SH64_ID2815_WORKAROUND */
/* If the workaround isn't needed, this is just a straightforward reserved
instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
#endif /* CONFIG_SH64_ID2815_WORKAROUND */
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
die_if_kernel("exception", regs, ex);
}
asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
/* Syscall debug */
printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
die_if_kernel("unknown trapa", regs, scId);
return -ENOSYS;
}
/* Implement misaligned load/store handling for kernel (and optionally for user
mode too). Limitation: only SHmedia mode code is handled - there is no
handling at all for misaligned accesses occurring in SHcompact code yet. */
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0)
do_unhandled_exception(SIGSEGV, "address error(load)",
error_code, regs);
}
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0)
do_unhandled_exception(SIGSEGV, "address error(store)",
error_code, regs);
}
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
u64 peek_real_address_q(u64 addr);
u64 poke_real_address_q(u64 addr, u64 val);
unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
unsigned long long exp_cause;
/* It's not worth ioremapping the debug module registers for the amount
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
if (exp_cause & ~4)
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
void per_cpu_trap_init(void)
{
/* Nothing to do for now, VBR initialization later. */
}

View File

@ -3,14 +3,7 @@
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka and Paul Mundt
*/
#ifdef CONFIG_SUPERH64
#define LOAD_OFFSET PAGE_OFFSET
OUTPUT_ARCH(sh:sh5)
#else
#define LOAD_OFFSET 0
OUTPUT_ARCH(sh)
#endif
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
@ -28,14 +21,13 @@ SECTIONS
_text = .; /* Text and read-only data */
.empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
.empty_zero_page : AT(ADDR(.empty_zero_page)) {
*(.empty_zero_page)
} = 0
.text : AT(ADDR(.text) - LOAD_OFFSET) {
.text : AT(ADDR(.text)) {
HEAD_TEXT
TEXT_TEXT
EXTRA_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@ -62,7 +54,7 @@ SECTIONS
INIT_DATA_SECTION(16)
. = ALIGN(4);
.machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
.machvec.init : AT(ADDR(.machvec.init)) {
__machvec_start = .;
*(.machvec.init)
__machvec_end = .;
@ -74,8 +66,8 @@ SECTIONS
* .exit.text is discarded at runtime, not link time, to deal with
* references from __bug_table
*/
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
.exit.text : AT(ADDR(.exit.text)) { EXIT_TEXT }
.exit.data : AT(ADDR(.exit.data)) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
__init_end = .;

View File

@ -1,17 +0,0 @@
#
# Makefile for the SH-5 specific library files.
#
# Copyright (C) 2000, 2001 Paolo Alberelli
# Copyright (C) 2003 - 2008 Paul Mundt
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Panic should really be compiled as PIC
lib-y := udelay.o panic.o memcpy.o memset.o \
copy_user_memcpy.o copy_page.o strcpy.o strlen.o
# Extracted from libgcc
lib-y += udivsi3.o udivdi3.o sdivsi3.o

View File

@ -1,89 +0,0 @@
/*
Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
This file is subject to the terms and conditions of the GNU General Public
License. See the file "COPYING" in the main directory of this archive
for more details.
Tight version of memcpy for the case of just copying a page.
Prefetch strategy empirically optimised against RTL simulations
of SH5-101 cut2 eval chip with Cayman board DDR memory.
Parameters:
r2 : destination effective address (start of page)
r3 : source effective address (start of page)
Always copies 4096 bytes.
Points to review.
* Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
It seems like the prefetch needs to be at least 4 lines ahead to get
the data into the cache in time, and the allocos contend with outstanding
prefetches for the same cache set, so it's better to have the numbers
different.
*/
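/*
 * Rough C-level shape of the loop below (an illustration, not part of
 * the original file; prefetch()/alloco() stand in for the ldx.q-to-r63
 * and alloco instructions, with 32-byte cache lines -- note the actual
 * prefetches are compiled out under #if 0 for the TAKum03020 erratum):
 *
 *	for (off = 0; off < 4096; off += 32) {
 *		prefetch(src + off + 4*32);	// 4 lines ahead
 *		alloco(dst + off + 2*32);	// allocate 2 lines ahead
 *		copy32(dst + off, src + off);	// 4 quad loads + stores
 *	}
 */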
.section .text..SHmedia32,"ax"
.little
.balign 8
.global copy_page
copy_page:
/* Copy 4096 bytes worth of data from r3 to r2.
Do prefetches 4 lines ahead.
Do alloco 2 lines ahead */
pta 1f, tr1
pta 2f, tr2
pta 3f, tr3
ptabs r18, tr0
#if 0
/* TAKum03020 */
ld.q r3, 0x00, r63
ld.q r3, 0x20, r63
ld.q r3, 0x40, r63
ld.q r3, 0x60, r63
#endif
alloco r2, 0x00
synco ! TAKum03020
alloco r2, 0x20
synco ! TAKum03020
movi 3968, r6
add r2, r6, r6
addi r6, 64, r7
addi r7, 64, r8
sub r3, r2, r60
addi r60, 8, r61
addi r61, 8, r62
addi r62, 8, r23
addi r60, 0x80, r22
/* Minimal code size. The extra branches inside the loop don't cost much
because they overlap with the time spent waiting for prefetches to
complete. */
1:
#if 0
/* TAKum03020 */
bge/u r2, r6, tr2 ! skip prefetch for last 4 lines
ldx.q r2, r22, r63 ! prefetch 4 lines hence
#endif
2:
bge/u r2, r7, tr3 ! skip alloco for last 2 lines
alloco r2, 0x40 ! alloc destination line 2 lines ahead
synco ! TAKum03020
3:
ldx.q r2, r60, r36
ldx.q r2, r61, r37
ldx.q r2, r62, r38
ldx.q r2, r23, r39
st.q r2, 0, r36
st.q r2, 8, r37
st.q r2, 16, r38
st.q r2, 24, r39
addi r2, 32, r2
bgt/l r8, r2, tr1
blink tr0, r63 ! return

View File

@ -1,218 +0,0 @@
! SPDX-License-Identifier: GPL-2.0
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG2: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to be copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and making the amount subtracted from r7 before L_2l_loop be 2;
! however, this would cost a few extra cycles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
/* Imported into Linux kernel by Richard Curnow. This is used to implement the
__copy_user function in the general case, so it has to be a distinct
function from intra-kernel memcpy to allow for exception fix-ups in the
event that the user pointer is bad somewhere in the copy (e.g. due to
running off the end of the vma).
Note, this algorithm will be slightly wasteful in the case where the source
and destination pointers are equally aligned, because the stlo/sthi pairs
could then be merged back into single stores. If there are a lot of cache
misses, this is probably offset by the stall lengths on the preloads.
*/
/* NOTE: Prefetches removed and allocos guarded by synco to avoid TAKum03020
* erratum. The first two prefetches are nop-ed out to avoid upsetting the
* instruction counts used in the jump address calculation.
* */
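/* Illustrative C model (an assumption, not from this file) of the
 * ldlo.q/ldhi.q technique behind the LDUAQ macro below: a misaligned
 * 64-bit load built from two aligned loads, little-endian layout.
 *
 *	static uint64_t load_unaligned_u64(uintptr_t p)
 *	{
 *		unsigned int off = p & 7;
 *		const uint64_t *base = (const uint64_t *)(p & ~7UL);
 *
 *		if (off == 0)
 *			return base[0];			// already aligned
 *		return (base[0] >> (8 * off)) |		// ldlo.q part
 *		       (base[1] << (8 * (8 - off)));	// ldhi.q part
 *	}
 */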
.section .text..SHmedia32,"ax"
.little
.balign 32
.global copy_user_memcpy
.global copy_user_memcpy_end
copy_user_memcpy:
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
nop ! ld.b r3,0,r63 ! TAKum03020
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
/* Rearranged to make cut2 safe */
.balign 8
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
.balign 8
L1: /* 0 byte memcpy */
nop
blink tr1,r63
nop
nop
nop
nop
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
nop ! ld.b r2,0,r63 ! TAKum03020
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
! ld.b r2, 0, r63 ! TAKum03020
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 ! could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
! ldx.q r22, r36, r63 ! TAKum03020
alloco r22, 32
synco
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
copy_user_memcpy_end:
nop

View File

@ -1,202 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG2: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to be copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and making the amount subtracted from r7 before L_2l_loop be 2;
! however, this would cost a few extra cycles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
!
.section .text..SHmedia32,"ax"
.globl memcpy
.type memcpy, @function
.align 5
memcpy:
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
ld.b r3,0,r63
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
/* Rearranged to make cut2 safe */
.balign 8
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
.balign 8
L1: /* 0 byte memcpy */
nop
blink tr1,r63
nop
nop
nop
nop
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
ld.b r2,0,r63
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
ld.b r2, 0, r63
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 // could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
ldx.q r22, r36, r63
alloco r22, 32
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
.size memcpy,.-memcpy

View File

@ -1,92 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
!
! Fast SH memset
!
! by Toshiyasu Morita (tm@netcom.com)
!
! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
! Copyright 2002 SuperH Ltd.
!
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define SHHI shlld
#define SHLO shlrd
#else
#define SHHI shlrd
#define SHLO shlld
#endif
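/* Illustrative C equivalent (an assumption, not part of this file) of the
 * mshflo.b/mperm.w pair below, which replicate the fill byte across all
 * eight bytes of a register:
 *
 *	uint64_t pattern = (uint64_t)(uint8_t)c * 0x0101010101010101ULL;
 */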
.section .text..SHmedia32,"ax"
.globl memset
.type memset, @function
.align 5
memset:
pta/l multiquad, tr0
andi r2, 7, r22
ptabs r18, tr2
mshflo.b r3,r3,r3
add r4, r22, r23
mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
movi 8, r9
bgtu/u r23, r9, tr0 // multiquad
beqi/u r4, 0, tr2 // Return with size 0 - ensures no mem accesses
ldlo.q r2, 0, r7
shlli r4, 2, r4
movi -1, r8
SHHI r8, r4, r8
SHHI r8, r4, r8
mcmv r7, r8, r3
stlo.q r2, 0, r3
blink tr2, r63
multiquad:
pta/l lastquad, tr0
stlo.q r2, 0, r3
shlri r23, 3, r24
add r2, r4, r5
beqi/u r24, 1, tr0 // lastquad
pta/l loop, tr1
sub r2, r22, r25
andi r5, -8, r20 // calculate end address and
addi r20, -7*8, r8 // loop end address; this might overflow, so we need
// to use a different test before we start the loop
bge/u r24, r9, tr1 // loop
st.q r25, 8, r3
st.q r20, -8, r3
shlri r24, 1, r24
beqi/u r24, 1, tr0 // lastquad
st.q r25, 16, r3
st.q r20, -16, r3
beqi/u r24, 2, tr0 // lastquad
st.q r25, 24, r3
st.q r20, -24, r3
lastquad:
sthi.q r5, -1, r3
blink tr2,r63
loop:
!!! alloco r25, 32 // QQQ comment out for short-term fix to SHUK #3895.
// QQQ commenting out is logically correct, but sub-optimal
// QQQ Sean McGoogan - 4th April 2003.
st.q r25, 8, r3
st.q r25, 16, r3
st.q r25, 24, r3
st.q r25, 32, r3
addi r25, 32, r25
bgeu/l r8, r25, tr1 // loop
st.q r20, -40, r3
st.q r20, -32, r3
st.q r20, -24, r3
st.q r20, -16, r3
st.q r20, -8, r3
sthi.q r5, -1, r3
blink tr2,r63
.size memset,.-memset

View File

@ -1,15 +0,0 @@
/*
* Copyright (C) 2003 Richard Curnow, SuperH UK Limited
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
void
panic_handler(unsigned long panicPC, unsigned long panicSSR,
unsigned long panicEXPEVT)
{
/* Never return from the panic handler */
for (;;) ;
}

View File

@ -1,136 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
.global __sdivsi3
.global __sdivsi3_1
.global __sdivsi3_2
.section .text..SHmedia32,"ax"
.align 2
/* inputs: r4,r5 */
/* clobbered: r1,r18,r19,r20,r21,r25,tr0 */
/* result in r0 */
__sdivsi3:
__sdivsi3_1:
ptb __div_table,tr0
gettr tr0,r20
__sdivsi3_2:
nsb r5, r1
shlld r5, r1, r25 /* normalize; [-2 ..1, 1..2) in s2.62 */
shari r25, 58, r21 /* extract 5(6) bit index (s2.4 with hole -1..1) */
/* bubble */
ldx.ub r20, r21, r19 /* u0.8 */
shari r25, 32, r25 /* normalize to s2.30 */
shlli r21, 1, r21
muls.l r25, r19, r19 /* s2.38 */
ldx.w r20, r21, r21 /* s2.14 */
ptabs r18, tr0
shari r19, 24, r19 /* truncate to s2.14 */
sub r21, r19, r19 /* some 11 bit inverse in s1.14 */
muls.l r19, r19, r21 /* u0.28 */
sub r63, r1, r1
addi r1, 92, r1
muls.l r25, r21, r18 /* s2.58 */
shlli r19, 45, r19 /* multiply by two and convert to s2.58 */
/* bubble */
sub r19, r18, r18
shari r18, 28, r18 /* some 22 bit inverse in s1.30 */
muls.l r18, r25, r0 /* s2.60 */
muls.l r18, r4, r25 /* s32.30 */
/* bubble */
shari r0, 16, r19 /* s-16.44 */
muls.l r19, r18, r19 /* s-16.74 */
shari r25, 63, r0
shari r4, 14, r18 /* s19.-14 */
shari r19, 30, r19 /* s-16.44 */
muls.l r19, r18, r19 /* s15.30 */
xor r21, r0, r21 /* You could also use the constant 1 << 27. */
add r21, r25, r21
sub r21, r19, r21
shard r21, r1, r21
sub r21, r0, r0
blink tr0, r63
/* This table has been generated by divtab.c .
Defects for bias -330:
Max defect: 6.081536e-07 at -1.000000e+00
Min defect: 2.849516e-08 at 1.030651e+00
Max 2nd step defect: 9.606539e-12 at -1.000000e+00
Min 2nd step defect: 0.000000e+00 at 0.000000e+00
Defect at 1: 1.238659e-07
Defect at -2: 1.061708e-07 */
.balign 2
.type __div_table,@object
.size __div_table,128
/* negative division constants */
.word -16638
.word -17135
.word -17737
.word -18433
.word -19103
.word -19751
.word -20583
.word -21383
.word -22343
.word -23353
.word -24407
.word -25582
.word -26863
.word -28382
.word -29965
.word -31800
/* negative division factors */
.byte 66
.byte 70
.byte 75
.byte 81
.byte 87
.byte 93
.byte 101
.byte 109
.byte 119
.byte 130
.byte 142
.byte 156
.byte 172
.byte 192
.byte 214
.byte 241
.skip 16
.global __div_table
__div_table:
.skip 16
/* positive division factors */
.byte 241
.byte 214
.byte 192
.byte 172
.byte 156
.byte 142
.byte 130
.byte 119
.byte 109
.byte 101
.byte 93
.byte 87
.byte 81
.byte 75
.byte 70
.byte 66
/* positive division constants */
.word 31801
.word 29966
.word 28383
.word 26864
.word 25583
.word 24408
.word 23354
.word 22344
.word 21384
.word 20584
.word 19752
.word 19104
.word 18434
.word 17738
.word 17136
.word 16639

Some files were not shown because too many files have changed in this diff.