Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 56eddc3cb1
Author: Paolo Abeni <pabeni@redhat.com>
Date:   2023-11-16 16:05:44 +01:00
524 changed files with 9182 additions and 8581 deletions


@ -174,7 +174,7 @@ HWCAP2_DCPODP
Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
HWCAP2_SVE2
Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0001.
HWCAP2_SVEAES
Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
@ -222,7 +222,7 @@ HWCAP2_RNG
Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
HWCAP2_BTI
Functionality implied by ID_AA64PFR0_EL1.BT == 0b0001.
Functionality implied by ID_AA64PFR1_EL1.BT == 0b0001.
HWCAP2_MTE
Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010, as described
@ -232,7 +232,7 @@ HWCAP2_ECV
Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001.
HWCAP2_AFP
Functionality implied by ID_AA64MFR1_EL1.AFP == 0b0001.
Functionality implied by ID_AA64MMFR1_EL1.AFP == 0b0001.
HWCAP2_RPRES
Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.
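As an aside (not part of the patch above), the HWCAP2_* bits documented here are what userspace sees in the auxiliary vector; a minimal sketch of checking a few of them, assuming an arm64 build where <asm/hwcap.h> provides these definitions:

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_HWCAP2 */
#include <asm/hwcap.h>		/* HWCAP2_* bit definitions (arm64 uapi) */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* Each bit maps to one HWCAP2_* entry from the list above. */
	printf("SVE2: %s\n", (hwcap2 & HWCAP2_SVE2) ? "yes" : "no");
	printf("BTI:  %s\n", (hwcap2 & HWCAP2_BTI) ? "yes" : "no");
	printf("MTE:  %s\n", (hwcap2 & HWCAP2_MTE) ? "yes" : "no");
	return 0;
}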


@ -42,6 +42,26 @@ An example string following the order is::
rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
"isa" and "hart isa" lines in /proc/cpuinfo
-------------------------------------------
The "isa" line in /proc/cpuinfo describes the lowest common denominator of
RISC-V ISA extensions recognized by the kernel and implemented on all harts. The
"hart isa" line, in contrast, describes the set of extensions recognized by the
kernel on the particular hart being described, even if those extensions may not
be present on all harts in the system.
In both lines, the presence of an extension guarantees only that the hardware
has the described capability. Additional kernel support or policy changes may be
required before an extension's capability is fully usable by userspace programs.
Similarly, for S-mode extensions, presence in one of these lines does not
guarantee that the kernel is taking advantage of the extension, or that the
feature will be visible in guest VMs managed by this kernel.
Conversely, the absence of an extension in these lines does not necessarily mean
the hardware does not support that feature. The running kernel may not recognize
the extension, or may have deliberately removed it from the listing.
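As an illustration (not part of the patched documentation), a userspace consumer is expected to read these lines rather than assume a fixed extension set; a minimal C sketch that prints both the common "isa" line and each per-hart "hart isa" line:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* "isa" is the lowest common denominator across harts;
		 * "hart isa" describes the hart currently being listed. */
		if (!strncmp(line, "isa", 3) || !strncmp(line, "hart isa", 8))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}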
Misaligned accesses
-------------------


@ -275,12 +275,12 @@ allOf:
properties:
rx-internal-delay-ps:
description:
RGMII Receive Clock Delay defined in pico seconds.This is used for
RGMII Receive Clock Delay defined in pico seconds. This is used for
controllers that have configurable RX internal delays. If this
property is present then the MAC applies the RX delay.
tx-internal-delay-ps:
description:
RGMII Transmit Clock Delay defined in pico seconds.This is used for
RGMII Transmit Clock Delay defined in pico seconds. This is used for
controllers that have configurable TX internal delays. If this
property is present then the MAC applies the TX delay.


@ -59,8 +59,12 @@ Synopsis of fprobe-events
and bitfield are supported.
(\*1) This is available only when BTF is enabled.
(\*2) only for the probe on function entry (offs == 0).
(\*3) only for return probe.
(\*2) only for the probe on function entry (offs == 0). Note that this argument access
is best effort, because depending on the argument type, it may be passed on
the stack, while only arguments passed via registers are supported.
(\*3) only for return probe. Note that this is also best effort. Depending on the
return value type, it might be passed via a pair of registers, but this only
accesses one register.
(\*4) this is useful for fetching a field of data structures.
(\*5) "u" means user-space dereference.


@ -61,8 +61,12 @@ Synopsis of kprobe_events
(x8/x16/x32/x64), "char", "string", "ustring", "symbol", "symstr"
and bitfield are supported.
(\*1) only for the probe on function entry (offs == 0).
(\*2) only for return probe.
(\*1) only for the probe on function entry (offs == 0). Note that this argument access
is best effort, because depending on the argument type, it may be passed on
the stack, while only arguments passed via registers are supported.
(\*2) only for return probe. Note that this is also best effort. Depending on the
return value type, it might be passed via a pair of registers, but this only
accesses one register.
(\*3) this is useful for fetching a field of data structures.
(\*4) "u" means user-space dereference. See :ref:`user_mem_access`.


@ -8950,7 +8950,6 @@ S: Maintained
F: scripts/get_maintainer.pl
GFS2 FILE SYSTEM
M: Bob Peterson <rpeterso@redhat.com>
M: Andreas Gruenbacher <agruenba@redhat.com>
L: gfs2@lists.linux.dev
S: Supported
@ -21769,7 +21768,9 @@ F: Documentation/devicetree/bindings/counter/ti-eqep.yaml
F: drivers/counter/ti-eqep.c
TI ETHERNET SWITCH DRIVER (CPSW)
R: Grygorii Strashko <grygorii.strashko@ti.com>
R: Siddharth Vadapalli <s-vadapalli@ti.com>
R: Ravi Gunasekaran <r-gunasekaran@ti.com>
R: Roger Quadros <rogerq@kernel.org>
L: linux-omap@vger.kernel.org
L: netdev@vger.kernel.org
S: Maintained
@ -21793,6 +21794,15 @@ F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
F: drivers/media/i2c/ds90*
F: include/media/i2c/ds90*
TI ICSSG ETHERNET DRIVER (ICSSG)
R: MD Danish Anwar <danishanwar@ti.com>
R: Roger Quadros <rogerq@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/ti,icss*.yaml
F: drivers/net/ethernet/ti/icssg/*
TI J721E CSI2RX DRIVER
M: Jai Luthra <j-luthra@ti.com>
L: linux-media@vger.kernel.org
@ -23198,13 +23208,6 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/test-drivers/vivid/*
VLYNQ BUS
M: Florian Fainelli <f.fainelli@gmail.com>
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Maintained
F: drivers/vlynq/vlynq.c
F: include/linux/vlynq.h
VM SOCKETS (AF_VSOCK)
M: Stefano Garzarella <sgarzare@redhat.com>
L: virtualization@lists.linux.dev


@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION =
EXTRAVERSION = -rc1
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -32,9 +32,6 @@ struct kprobe;
void arch_remove_kprobe(struct kprobe *p);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;


@ -100,7 +100,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_AR7_PARTS=m
CONFIG_MTD_CMDLINE_PARTS=m
CONFIG_MTD_OF_PARTS=m
CONFIG_MTD_AFS_PARTS=m


@ -23,6 +23,8 @@
#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
#define PMCEID2 __ACCESS_CP15(c9, 0, c14, 4)
#define PMCEID3 __ACCESS_CP15(c9, 0, c14, 5)
#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void)
return read_sysreg(PMCCNTR);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, PMXEVCNTR);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(PMXEVCNTR);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, PMXEVTYPER);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, PMCNTENSET);
@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val)
write_sysreg(val, PMUSERENR);
}
static inline u32 read_pmceid0(void)
{
return read_sysreg(PMCEID0);
}
static inline u32 read_pmceid1(void)
{
return read_sysreg(PMCEID1);
}
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
@ -231,6 +208,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
/* PMU Version in DFR Register */
#define ARMV8_PMU_DFR_VER_NI 0
#define ARMV8_PMU_DFR_VER_V3P1 0x4
#define ARMV8_PMU_DFR_VER_V3P4 0x5
#define ARMV8_PMU_DFR_VER_V3P5 0x6
#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
@ -251,4 +229,24 @@ static inline bool is_pmuv3p5(int pmuver)
return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
}
static inline u64 read_pmceid0(void)
{
u64 val = read_sysreg(PMCEID0);
if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
val |= (u64)read_sysreg(PMCEID2) << 32;
return val;
}
static inline u64 read_pmceid1(void)
{
u64 val = read_sysreg(PMCEID1);
if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
val |= (u64)read_sysreg(PMCEID3) << 32;
return val;
}
#endif


@ -40,8 +40,6 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
/* optinsn template addresses */
extern __visible kprobe_opcode_t optprobe_template_entry[];


@ -46,12 +46,12 @@ static inline u32 read_pmuver(void)
ID_AA64DFR0_EL1_PMUVer_SHIFT);
}
static inline void write_pmcr(u32 val)
static inline void write_pmcr(u64 val)
{
write_sysreg(val, pmcr_el0);
}
static inline u32 read_pmcr(void)
static inline u64 read_pmcr(void)
{
return read_sysreg(pmcr_el0);
}
@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void)
return read_sysreg(pmccntr_el0);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, pmxevcntr_el0);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(pmxevcntr_el0);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, pmxevtyper_el0);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, pmcntenset_el0);
@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val)
write_sysreg(val, pmintenclr_el1);
}
static inline void write_pmccfiltr(u32 val)
static inline void write_pmccfiltr(u64 val)
{
write_sysreg(val, pmccfiltr_el0);
}
@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val)
write_sysreg(val, pmuserenr_el0);
}
static inline u32 read_pmceid0(void)
static inline u64 read_pmceid0(void)
{
return read_sysreg(pmceid0_el0);
}
static inline u32 read_pmceid1(void)
static inline u64 read_pmceid1(void)
{
return read_sysreg(pmceid1_el0);
}


@ -37,8 +37,6 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
void __kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);


@ -54,7 +54,6 @@
ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
{ \
return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \


@ -999,6 +999,37 @@ static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}
#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool enable_pseudo_nmi;
static int __init early_enable_pseudo_nmi(char *p)
{
return kstrtobool(p, &enable_pseudo_nmi);
}
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
static __init void detect_system_supports_pseudo_nmi(void)
{
struct device_node *np;
if (!enable_pseudo_nmi)
return;
/*
* Detect broken MediaTek firmware that doesn't properly save and
* restore GIC priorities.
*/
np = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) {
pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n");
enable_pseudo_nmi = false;
}
of_node_put(np);
}
#else /* CONFIG_ARM64_PSEUDO_NMI */
static inline void detect_system_supports_pseudo_nmi(void) { }
#endif
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
/* Before we start using the tables, make sure it is sorted */
@ -1057,6 +1088,13 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
*/
init_cpucap_indirect_list();
/*
* Detect broken pseudo-NMI. Must be called _before_ the call to
* setup_boot_cpu_capabilities() since it interacts with
* can_use_gic_priorities().
*/
detect_system_supports_pseudo_nmi();
/*
* Detect and enable early CPU capabilities based on the boot CPU,
* after we have initialised the CPU feature infrastructure.
@ -2085,14 +2123,6 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
#endif /* CONFIG_ARM64_E0PD */
#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool enable_pseudo_nmi;
static int __init early_enable_pseudo_nmi(char *p)
{
return kstrtobool(p, &enable_pseudo_nmi);
}
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
int scope)
{


@ -965,10 +965,7 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
{
DECLARE_STATIC_KEY_FALSE(supports_pseudo_nmis);
if (!system_uses_irq_prio_masking() ||
!static_branch_likely(&supports_pseudo_nmis))
if (!system_uses_irq_prio_masking())
return false;
switch (ipi) {


@ -136,6 +136,7 @@ config LOONGARCH
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_PREEMPT_DYNAMIC_KEY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK
select HAVE_RSEQ


@ -68,6 +68,8 @@ LDFLAGS_vmlinux += -static -n -nostdlib
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += $(call cc-option,-mexplicit-relocs)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
else


@ -36,19 +36,19 @@
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op"_db.w" " $zero, %1, %0 \n" \
"am"#asm_op".w" " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op"_db.w" " %1, %2, %0 \n" \
"am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@ -56,13 +56,13 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
return result c_op I; \
}
#define ATOMIC_FETCH_OP(op, I, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op"_db.w" " %1, %2, %0 \n" \
"am"#asm_op#mb".w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@ -72,29 +72,53 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
#define ATOMIC_OPS(op, I, asm_op, c_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
ATOMIC_FETCH_OP(op, I, asm_op)
ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_add_return_acquire arch_atomic_add_return
#define arch_atomic_add_return_release arch_atomic_add_return
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_sub_return_acquire arch_atomic_sub_return
#define arch_atomic_sub_return_release arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
#define arch_atomic_fetch_add_release arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, I, asm_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_FETCH_OP(op, I, asm_op)
ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
#define arch_atomic_fetch_and_release arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
#define arch_atomic_fetch_or_release arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
@ -172,18 +196,18 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op"_db.d " " $zero, %1, %0 \n" \
"am"#asm_op".d " " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
{ \
long result; \
__asm__ __volatile__( \
"am"#asm_op"_db.d " " %1, %2, %0 \n" \
"am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@ -191,13 +215,13 @@ static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
return result c_op I; \
}
#define ATOMIC64_FETCH_OP(op, I, asm_op) \
static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
{ \
long result; \
\
__asm__ __volatile__( \
"am"#asm_op"_db.d " " %1, %2, %0 \n" \
"am"#asm_op#mb".d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
@ -207,29 +231,53 @@ static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
#define ATOMIC64_OPS(op, I, asm_op, c_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
ATOMIC64_FETCH_OP(op, I, asm_op)
ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)
#define arch_atomic64_add_return arch_atomic64_add_return
#define arch_atomic64_add_return_acquire arch_atomic64_add_return
#define arch_atomic64_add_return_release arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return arch_atomic64_sub_return
#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
#define arch_atomic64_sub_return_release arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, I, asm_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_FETCH_OP(op, I, asm_op)
ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS


@ -65,6 +65,8 @@ enum reg2_op {
revbd_op = 0x0f,
revh2w_op = 0x10,
revhd_op = 0x11,
extwh_op = 0x16,
extwb_op = 0x17,
iocsrrdb_op = 0x19200,
iocsrrdh_op = 0x19201,
iocsrrdw_op = 0x19202,
@ -572,6 +574,8 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op)
DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op)
DEF_EMIT_REG2_FORMAT(revbd, revbd_op)
DEF_EMIT_REG2_FORMAT(extwh, extwh_op)
DEF_EMIT_REG2_FORMAT(extwb, extwb_op)
#define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
@ -623,6 +627,9 @@ DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op)
DEF_EMIT_REG2I12_FORMAT(andi, andi_op)
DEF_EMIT_REG2I12_FORMAT(ori, ori_op)
DEF_EMIT_REG2I12_FORMAT(xori, xori_op)
DEF_EMIT_REG2I12_FORMAT(ldb, ldb_op)
DEF_EMIT_REG2I12_FORMAT(ldh, ldh_op)
DEF_EMIT_REG2I12_FORMAT(ldw, ldw_op)
DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op)
DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op)
DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op)
@ -701,9 +708,12 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
insn->reg3_format.rk = rk; \
}
DEF_EMIT_REG3_FORMAT(addw, addw_op)
DEF_EMIT_REG3_FORMAT(addd, addd_op)
DEF_EMIT_REG3_FORMAT(subd, subd_op)
DEF_EMIT_REG3_FORMAT(muld, muld_op)
DEF_EMIT_REG3_FORMAT(divd, divd_op)
DEF_EMIT_REG3_FORMAT(modd, modd_op)
DEF_EMIT_REG3_FORMAT(divdu, divdu_op)
DEF_EMIT_REG3_FORMAT(moddu, moddu_op)
DEF_EMIT_REG3_FORMAT(and, and_op)
@ -715,6 +725,9 @@ DEF_EMIT_REG3_FORMAT(srlw, srlw_op)
DEF_EMIT_REG3_FORMAT(srld, srld_op)
DEF_EMIT_REG3_FORMAT(sraw, sraw_op)
DEF_EMIT_REG3_FORMAT(srad, srad_op)
DEF_EMIT_REG3_FORMAT(ldxb, ldxb_op)
DEF_EMIT_REG3_FORMAT(ldxh, ldxh_op)
DEF_EMIT_REG3_FORMAT(ldxw, ldxw_op)
DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op)
DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op)
DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op)


@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
#define __my_cpu_offset __my_cpu_offset
#define PERCPU_OP(op, asm_op, c_op) \
static inline unsigned long __percpu_##op(void *ptr, \
static __always_inline unsigned long __percpu_##op(void *ptr, \
unsigned long val, int size) \
{ \
unsigned long ret; \
@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
static inline unsigned long __percpu_read(void *ptr, int size)
static __always_inline unsigned long __percpu_read(void *ptr, int size)
{
unsigned long ret;
@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
return ret;
}
static inline void __percpu_write(void *ptr, unsigned long val, int size)
static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
}
}
static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
int size)
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
int size)
{
switch (size) {
case 1:


@ -504,8 +504,9 @@ asmlinkage void start_secondary(void)
unsigned int cpu;
sync_counter();
cpu = smp_processor_id();
cpu = raw_smp_processor_id();
set_my_cpu_offset(per_cpu_offset(cpu));
rcutree_report_cpu_starting(cpu);
cpu_probe();
constant_clockevent_init();


@ -411,7 +411,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
off_t offset;
struct exception_table_entry *ex;
if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
if (!ctx->image || !ctx->prog->aux->extable)
return 0;
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
return 0;
if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
@ -450,7 +454,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
u8 tm = -1;
u64 func_addr;
bool func_addr_fixed;
bool func_addr_fixed, sign_extend;
int i = insn - ctx->prog->insnsi;
int ret, jmp_offset;
const u8 code = insn->code;
@ -468,8 +472,23 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
move_reg(ctx, dst, src);
emit_zext_32(ctx, dst, is32);
switch (off) {
case 0:
move_reg(ctx, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case 8:
move_reg(ctx, t1, src);
emit_insn(ctx, extwb, dst, t1);
break;
case 16:
move_reg(ctx, t1, src);
emit_insn(ctx, extwh, dst, t1);
break;
case 32:
emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
break;
}
break;
/* dst = imm */
@ -534,39 +553,71 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* dst = dst / src */
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
emit_zext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_zext_32(ctx, t1, is32);
emit_insn(ctx, divdu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
if (!off) {
emit_zext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_zext_32(ctx, t1, is32);
emit_insn(ctx, divdu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
} else {
emit_sext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_sext_32(ctx, t1, is32);
emit_insn(ctx, divd, dst, dst, t1);
emit_sext_32(ctx, dst, is32);
}
break;
/* dst = dst / imm */
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
move_imm(ctx, t1, imm, is32);
emit_zext_32(ctx, dst, is32);
emit_insn(ctx, divdu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
if (!off) {
move_imm(ctx, t1, imm, is32);
emit_zext_32(ctx, dst, is32);
emit_insn(ctx, divdu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
} else {
move_imm(ctx, t1, imm, false);
emit_sext_32(ctx, t1, is32);
emit_sext_32(ctx, dst, is32);
emit_insn(ctx, divd, dst, dst, t1);
emit_sext_32(ctx, dst, is32);
}
break;
/* dst = dst % src */
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
emit_zext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_zext_32(ctx, t1, is32);
emit_insn(ctx, moddu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
if (!off) {
emit_zext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_zext_32(ctx, t1, is32);
emit_insn(ctx, moddu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
} else {
emit_sext_32(ctx, dst, is32);
move_reg(ctx, t1, src);
emit_sext_32(ctx, t1, is32);
emit_insn(ctx, modd, dst, dst, t1);
emit_sext_32(ctx, dst, is32);
}
break;
/* dst = dst % imm */
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
move_imm(ctx, t1, imm, is32);
emit_zext_32(ctx, dst, is32);
emit_insn(ctx, moddu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
if (!off) {
move_imm(ctx, t1, imm, is32);
emit_zext_32(ctx, dst, is32);
emit_insn(ctx, moddu, dst, dst, t1);
emit_zext_32(ctx, dst, is32);
} else {
move_imm(ctx, t1, imm, false);
emit_sext_32(ctx, t1, is32);
emit_sext_32(ctx, dst, is32);
emit_insn(ctx, modd, dst, dst, t1);
emit_sext_32(ctx, dst, is32);
}
break;
/* dst = -dst */
@ -712,6 +763,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
case BPF_ALU64 | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
emit_insn(ctx, revb2h, dst, dst);
@ -828,7 +880,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* PC += off */
case BPF_JMP | BPF_JA:
jmp_offset = bpf2la_offset(i, off, ctx);
case BPF_JMP32 | BPF_JA:
if (BPF_CLASS(code) == BPF_JMP)
jmp_offset = bpf2la_offset(i, off, ctx);
else
jmp_offset = bpf2la_offset(i, imm, ctx);
if (emit_uncond_jmp(ctx, jmp_offset) < 0)
goto toofar;
break;
@ -879,31 +935,56 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
/* dst_reg = (s64)*(signed size *)(src_reg + off) */
case BPF_LDX | BPF_MEMSX | BPF_B:
case BPF_LDX | BPF_MEMSX | BPF_H:
case BPF_LDX | BPF_MEMSX | BPF_W:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_signed_imm12(off)) {
emit_insn(ctx, ldbu, dst, src, off);
if (sign_extend)
emit_insn(ctx, ldb, dst, src, off);
else
emit_insn(ctx, ldbu, dst, src, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, ldxbu, dst, src, t1);
if (sign_extend)
emit_insn(ctx, ldxb, dst, src, t1);
else
emit_insn(ctx, ldxbu, dst, src, t1);
}
break;
case BPF_H:
if (is_signed_imm12(off)) {
emit_insn(ctx, ldhu, dst, src, off);
if (sign_extend)
emit_insn(ctx, ldh, dst, src, off);
else
emit_insn(ctx, ldhu, dst, src, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, ldxhu, dst, src, t1);
if (sign_extend)
emit_insn(ctx, ldxh, dst, src, t1);
else
emit_insn(ctx, ldxhu, dst, src, t1);
}
break;
case BPF_W:
if (is_signed_imm12(off)) {
emit_insn(ctx, ldwu, dst, src, off);
} else if (is_signed_imm14(off)) {
emit_insn(ctx, ldptrw, dst, src, off);
if (sign_extend)
emit_insn(ctx, ldw, dst, src, off);
else
emit_insn(ctx, ldwu, dst, src, off);
} else {
move_imm(ctx, t1, off, is32);
emit_insn(ctx, ldxwu, dst, src, t1);
if (sign_extend)
emit_insn(ctx, ldxw, dst, src, t1);
else
emit_insn(ctx, ldxwu, dst, src, t1);
}
break;
case BPF_DW:


@ -2,7 +2,6 @@
# All platforms listed in alphabetic order
platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/
platform-$(CONFIG_AR7) += ar7/
platform-$(CONFIG_ATH25) += ath25/
platform-$(CONFIG_ATH79) += ath79/
platform-$(CONFIG_BCM47XX) += bcm47xx/


@ -202,28 +202,6 @@ config MIPS_ALCHEMY
select SYS_SUPPORTS_ZBOOT
select COMMON_CLK
config AR7
bool "Texas Instruments AR7"
select BOOT_ELF32
select COMMON_CLK
select DMA_NONCOHERENT
select CEVT_R4K
select CSRC_R4K
select IRQ_MIPS_CPU
select NO_EXCEPT_FILL
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_ZBOOT_UART16550
select GPIOLIB
select VLYNQ
help
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
config ATH25
bool "Atheros AR231x/AR531x SoC support"
select CEVT_R4K


@ -1,11 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
obj-y := \
prom.o \
setup.o \
memory.o \
irq.o \
time.o \
platform.o \
gpio.o \
clock.o


@ -1,5 +0,0 @@
#
# Texas Instruments AR7
#
cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
load-$(CONFIG_AR7) += 0xffffffff94100000


@ -1,439 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
* Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/gcd.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <asm/addrspace.h>
#include <asm/mach-ar7/ar7.h>
#define BOOT_PLL_SOURCE_MASK 0x3
#define CPU_PLL_SOURCE_SHIFT 16
#define BUS_PLL_SOURCE_SHIFT 14
#define USB_PLL_SOURCE_SHIFT 18
#define DSP_PLL_SOURCE_SHIFT 22
#define BOOT_PLL_SOURCE_AFE 0
#define BOOT_PLL_SOURCE_BUS 0
#define BOOT_PLL_SOURCE_REF 1
#define BOOT_PLL_SOURCE_XTAL 2
#define BOOT_PLL_SOURCE_CPU 3
#define BOOT_PLL_BYPASS 0x00000020
#define BOOT_PLL_ASYNC_MODE 0x02000000
#define BOOT_PLL_2TO1_MODE 0x00008000
#define TNETD7200_CLOCK_ID_CPU 0
#define TNETD7200_CLOCK_ID_DSP 1
#define TNETD7200_CLOCK_ID_USB 2
#define TNETD7200_DEF_CPU_CLK 211000000
#define TNETD7200_DEF_DSP_CLK 125000000
#define TNETD7200_DEF_USB_CLK 48000000
struct tnetd7300_clock {
u32 ctrl;
#define PREDIV_MASK 0x001f0000
#define PREDIV_SHIFT 16
#define POSTDIV_MASK 0x0000001f
u32 unused1[3];
u32 pll;
#define MUL_MASK 0x0000f000
#define MUL_SHIFT 12
#define PLL_MODE_MASK 0x00000001
#define PLL_NDIV 0x00000800
#define PLL_DIV 0x00000002
#define PLL_STATUS 0x00000001
u32 unused2[3];
};
struct tnetd7300_clocks {
struct tnetd7300_clock bus;
struct tnetd7300_clock cpu;
struct tnetd7300_clock usb;
struct tnetd7300_clock dsp;
};
struct tnetd7200_clock {
u32 ctrl;
u32 unused1[3];
#define DIVISOR_ENABLE_MASK 0x00008000
u32 mul;
u32 prediv;
u32 postdiv;
u32 postdiv2;
u32 unused2[6];
u32 cmd;
u32 status;
u32 cmden;
u32 padding[15];
};
struct tnetd7200_clocks {
struct tnetd7200_clock cpu;
struct tnetd7200_clock dsp;
struct tnetd7200_clock usb;
};
struct clk_rate {
u32 rate;
};
static struct clk_rate bus_clk = {
.rate = 125000000,
};
static struct clk_rate cpu_clk = {
.rate = 150000000,
};
static void approximate(int base, int target, int *prediv,
int *postdiv, int *mul)
{
int i, j, k, freq, res = target;
for (i = 1; i <= 16; i++)
for (j = 1; j <= 32; j++)
for (k = 1; k <= 32; k++) {
freq = abs(base / j * i / k - target);
if (freq < res) {
res = freq;
*mul = i;
*prediv = j;
*postdiv = k;
}
}
}
static void calculate(int base, int target, int *prediv, int *postdiv,
int *mul)
{
int tmp_gcd, tmp_base, tmp_freq;
for (*prediv = 1; *prediv <= 32; (*prediv)++) {
tmp_base = base / *prediv;
tmp_gcd = gcd(target, tmp_base);
*mul = target / tmp_gcd;
*postdiv = tmp_base / tmp_gcd;
if ((*mul < 1) || (*mul >= 16))
continue;
if ((*postdiv > 0) & (*postdiv <= 32))
break;
}
if (base / *prediv * *mul / *postdiv != target) {
approximate(base, target, prediv, postdiv, mul);
tmp_freq = base / *prediv * *mul / *postdiv;
printk(KERN_WARNING
"Adjusted requested frequency %d to %d\n",
target, tmp_freq);
}
printk(KERN_DEBUG "Clocks: prediv: %d, postdiv: %d, mul: %d\n",
*prediv, *postdiv, *mul);
}
static int tnetd7300_dsp_clock(void)
{
u32 didr1, didr2;
u8 rev = ar7_chip_rev();
didr1 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x18));
didr2 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x1c));
if (didr2 & (1 << 23))
return 0;
if ((rev >= 0x23) && (rev != 0x57))
return 250000000;
if ((((didr2 & 0x1fff) << 10) | ((didr1 & 0xffc00000) >> 22))
> 4208000)
return 250000000;
return 0;
}
static int tnetd7300_get_clock(u32 shift, struct tnetd7300_clock *clock,
u32 *bootcr, u32 bus_clock)
{
int product;
int base_clock = AR7_REF_CLOCK;
u32 ctrl = readl(&clock->ctrl);
u32 pll = readl(&clock->pll);
int prediv = ((ctrl & PREDIV_MASK) >> PREDIV_SHIFT) + 1;
int postdiv = (ctrl & POSTDIV_MASK) + 1;
int divisor = prediv * postdiv;
int mul = ((pll & MUL_MASK) >> MUL_SHIFT) + 1;
switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
case BOOT_PLL_SOURCE_BUS:
base_clock = bus_clock;
break;
case BOOT_PLL_SOURCE_REF:
base_clock = AR7_REF_CLOCK;
break;
case BOOT_PLL_SOURCE_XTAL:
base_clock = AR7_XTAL_CLOCK;
break;
case BOOT_PLL_SOURCE_CPU:
base_clock = cpu_clk.rate;
break;
}
if (*bootcr & BOOT_PLL_BYPASS)
return base_clock / divisor;
if ((pll & PLL_MODE_MASK) == 0)
return (base_clock >> (mul / 16 + 1)) / divisor;
if ((pll & (PLL_NDIV | PLL_DIV)) == (PLL_NDIV | PLL_DIV)) {
product = (mul & 1) ?
(base_clock * mul) >> 1 :
(base_clock * (mul - 1)) >> 2;
return product / divisor;
}
if (mul == 16)
return base_clock / divisor;
return base_clock * mul / divisor;
}
static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
u32 *bootcr, u32 frequency)
{
int prediv, postdiv, mul;
int base_clock = bus_clk.rate;
switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
case BOOT_PLL_SOURCE_BUS:
base_clock = bus_clk.rate;
break;
case BOOT_PLL_SOURCE_REF:
base_clock = AR7_REF_CLOCK;
break;
case BOOT_PLL_SOURCE_XTAL:
base_clock = AR7_XTAL_CLOCK;
break;
case BOOT_PLL_SOURCE_CPU:
base_clock = cpu_clk.rate;
break;
}
calculate(base_clock, frequency, &prediv, &postdiv, &mul);
writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
mdelay(1);
writel(4, &clock->pll);
while (readl(&clock->pll) & PLL_STATUS)
;
writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
mdelay(75);
}
static void __init tnetd7300_init_clocks(void)
{
u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
struct tnetd7300_clocks *clocks =
ioremap(UR8_REGS_CLOCKS,
sizeof(struct tnetd7300_clocks));
u32 dsp_clk;
struct clk *clk;
bus_clk.rate = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
&clocks->bus, bootcr, AR7_AFE_CLOCK);
if (*bootcr & BOOT_PLL_ASYNC_MODE)
cpu_clk.rate = tnetd7300_get_clock(CPU_PLL_SOURCE_SHIFT,
&clocks->cpu, bootcr, AR7_AFE_CLOCK);
else
cpu_clk.rate = bus_clk.rate;
dsp_clk = tnetd7300_dsp_clock();
if (dsp_clk == 250000000)
tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp,
bootcr, dsp_clk);
iounmap(clocks);
iounmap(bootcr);
clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
clkdev_create(clk, "cpu", NULL);
clk = clk_register_fixed_rate(NULL, "dsp", NULL, 0, dsp_clk);
clkdev_create(clk, "dsp", NULL);
}
static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock,
int prediv, int postdiv, int postdiv2, int mul, u32 frequency)
{
printk(KERN_INFO
"Clocks: base = %d, frequency = %u, prediv = %d, "
"postdiv = %d, postdiv2 = %d, mul = %d\n",
base, frequency, prediv, postdiv, postdiv2, mul);
writel(0, &clock->ctrl);
writel(DIVISOR_ENABLE_MASK | ((prediv - 1) & 0x1F), &clock->prediv);
writel((mul - 1) & 0xF, &clock->mul);
while (readl(&clock->status) & 0x1)
; /* nop */
writel(DIVISOR_ENABLE_MASK | ((postdiv - 1) & 0x1F), &clock->postdiv);
writel(readl(&clock->cmden) | 1, &clock->cmden);
writel(readl(&clock->cmd) | 1, &clock->cmd);
while (readl(&clock->status) & 0x1)
; /* nop */
writel(DIVISOR_ENABLE_MASK | ((postdiv2 - 1) & 0x1F), &clock->postdiv2);
writel(readl(&clock->cmden) | 1, &clock->cmden);
writel(readl(&clock->cmd) | 1, &clock->cmd);
while (readl(&clock->status) & 0x1)
; /* nop */
writel(readl(&clock->ctrl) | 1, &clock->ctrl);
}
static int tnetd7200_get_clock_base(int clock_id, u32 *bootcr)
{
if (*bootcr & BOOT_PLL_ASYNC_MODE)
/* Async */
switch (clock_id) {
case TNETD7200_CLOCK_ID_DSP:
return AR7_REF_CLOCK;
default:
return AR7_AFE_CLOCK;
}
else
/* Sync */
if (*bootcr & BOOT_PLL_2TO1_MODE)
/* 2:1 */
switch (clock_id) {
case TNETD7200_CLOCK_ID_DSP:
return AR7_REF_CLOCK;
default:
return AR7_AFE_CLOCK;
}
else
/* 1:1 */
return AR7_REF_CLOCK;
}
static void __init tnetd7200_init_clocks(void)
{
u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
struct tnetd7200_clocks *clocks =
ioremap(AR7_REGS_CLOCKS,
sizeof(struct tnetd7200_clocks));
int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
int usb_base, usb_mul, usb_prediv, usb_postdiv;
struct clk *clk;
cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr);
if (*bootcr & BOOT_PLL_ASYNC_MODE) {
printk(KERN_INFO "Clocks: Async mode\n");
printk(KERN_INFO "Clocks: Setting DSP clock\n");
calculate(dsp_base, TNETD7200_DEF_DSP_CLK,
&dsp_prediv, &dsp_postdiv, &dsp_mul);
bus_clk.rate =
((dsp_base / dsp_prediv) * dsp_mul) / dsp_postdiv;
tnetd7200_set_clock(dsp_base, &clocks->dsp,
dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2,
bus_clk.rate);
printk(KERN_INFO "Clocks: Setting CPU clock\n");
calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
&cpu_postdiv, &cpu_mul);
cpu_clk.rate =
((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv;
tnetd7200_set_clock(cpu_base, &clocks->cpu,
cpu_prediv, cpu_postdiv, -1, cpu_mul,
cpu_clk.rate);
} else
if (*bootcr & BOOT_PLL_2TO1_MODE) {
printk(KERN_INFO "Clocks: Sync 2:1 mode\n");
printk(KERN_INFO "Clocks: Setting CPU clock\n");
calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
&cpu_postdiv, &cpu_mul);
cpu_clk.rate = ((cpu_base / cpu_prediv) * cpu_mul)
/ cpu_postdiv;
tnetd7200_set_clock(cpu_base, &clocks->cpu,
cpu_prediv, cpu_postdiv, -1, cpu_mul,
cpu_clk.rate);
printk(KERN_INFO "Clocks: Setting DSP clock\n");
calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
&dsp_postdiv, &dsp_mul);
bus_clk.rate = cpu_clk.rate / 2;
tnetd7200_set_clock(dsp_base, &clocks->dsp,
dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
dsp_mul * 2, bus_clk.rate);
} else {
printk(KERN_INFO "Clocks: Sync 1:1 mode\n");
printk(KERN_INFO "Clocks: Setting DSP clock\n");
calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
&dsp_postdiv, &dsp_mul);
bus_clk.rate = ((dsp_base / dsp_prediv) * dsp_mul)
/ dsp_postdiv;
tnetd7200_set_clock(dsp_base, &clocks->dsp,
dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
dsp_mul * 2, bus_clk.rate);
cpu_clk.rate = bus_clk.rate;
}
printk(KERN_INFO "Clocks: Setting USB clock\n");
usb_base = bus_clk.rate;
calculate(usb_base, TNETD7200_DEF_USB_CLK, &usb_prediv,
&usb_postdiv, &usb_mul);
tnetd7200_set_clock(usb_base, &clocks->usb,
usb_prediv, usb_postdiv, -1, usb_mul,
TNETD7200_DEF_USB_CLK);
iounmap(clocks);
iounmap(bootcr);
clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
clkdev_create(clk, "cpu", NULL);
clkdev_create(clk, "dsp", NULL);
}
void __init ar7_init_clocks(void)
{
struct clk *clk;
switch (ar7_chip_id()) {
case AR7_CHIP_7100:
case AR7_CHIP_7200:
tnetd7200_init_clocks();
break;
case AR7_CHIP_7300:
tnetd7300_init_clocks();
break;
default:
break;
}
clk = clk_register_fixed_rate(NULL, "bus", NULL, 0, bus_clk.rate);
clkdev_create(clk, "bus", NULL);
/* adjust vbus clock rate */
clk = clk_register_fixed_factor(NULL, "vbus", "bus", 0, 1, 2);
clkdev_create(clk, "vbus", NULL);
clkdev_create(clk, "cpmac", "cpmac.1");
clkdev_create(clk, "cpmac", "cpmac.1");
}


@ -1,332 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
* Copyright (C) 2009-2010 Florian Fainelli <florian@openwrt.org>
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/gpio/driver.h>
#include <asm/mach-ar7/ar7.h>
#define AR7_GPIO_MAX 32
#define TITAN_GPIO_MAX 51
struct ar7_gpio_chip {
void __iomem *regs;
struct gpio_chip chip;
};
static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_in = gpch->regs + AR7_GPIO_INPUT;
return !!(readl(gpio_in) & (1 << gpio));
}
static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_in0 = gpch->regs + TITAN_GPIO_INPUT_0;
void __iomem *gpio_in1 = gpch->regs + TITAN_GPIO_INPUT_1;
return readl(gpio >> 5 ? gpio_in1 : gpio_in0) & (1 << (gpio & 0x1f));
}
static void ar7_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_out = gpch->regs + AR7_GPIO_OUTPUT;
unsigned tmp;
tmp = readl(gpio_out) & ~(1 << gpio);
if (value)
tmp |= 1 << gpio;
writel(tmp, gpio_out);
}
static void titan_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_out0 = gpch->regs + TITAN_GPIO_OUTPUT_0;
void __iomem *gpio_out1 = gpch->regs + TITAN_GPIO_OUTPUT_1;
unsigned tmp;
tmp = readl(gpio >> 5 ? gpio_out1 : gpio_out0) & ~(1 << (gpio & 0x1f));
if (value)
tmp |= 1 << (gpio & 0x1f);
writel(tmp, gpio >> 5 ? gpio_out1 : gpio_out0);
}
static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
writel(readl(gpio_dir) | (1 << gpio), gpio_dir);
return 0;
}
static int titan_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
if (gpio >= TITAN_GPIO_MAX)
return -EINVAL;
writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) | (1 << (gpio & 0x1f)),
gpio >> 5 ? gpio_dir1 : gpio_dir0);
return 0;
}
static int ar7_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
ar7_gpio_set_value(chip, gpio, value);
writel(readl(gpio_dir) & ~(1 << gpio), gpio_dir);
return 0;
}
static int titan_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
if (gpio >= TITAN_GPIO_MAX)
return -EINVAL;
titan_gpio_set_value(chip, gpio, value);
writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) & ~(1 <<
(gpio & 0x1f)), gpio >> 5 ? gpio_dir1 : gpio_dir0);
return 0;
}
static struct ar7_gpio_chip ar7_gpio_chip = {
.chip = {
.label = "ar7-gpio",
.direction_input = ar7_gpio_direction_input,
.direction_output = ar7_gpio_direction_output,
.set = ar7_gpio_set_value,
.get = ar7_gpio_get_value,
.base = 0,
.ngpio = AR7_GPIO_MAX,
}
};
static struct ar7_gpio_chip titan_gpio_chip = {
.chip = {
.label = "titan-gpio",
.direction_input = titan_gpio_direction_input,
.direction_output = titan_gpio_direction_output,
.set = titan_gpio_set_value,
.get = titan_gpio_get_value,
.base = 0,
.ngpio = TITAN_GPIO_MAX,
}
};
static inline int ar7_gpio_enable_ar7(unsigned gpio)
{
void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
writel(readl(gpio_en) | (1 << gpio), gpio_en);
return 0;
}
static inline int ar7_gpio_enable_titan(unsigned gpio)
{
void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) | (1 << (gpio & 0x1f)),
gpio >> 5 ? gpio_en1 : gpio_en0);
return 0;
}
int ar7_gpio_enable(unsigned gpio)
{
return ar7_is_titan() ? ar7_gpio_enable_titan(gpio) :
ar7_gpio_enable_ar7(gpio);
}
EXPORT_SYMBOL(ar7_gpio_enable);
static inline int ar7_gpio_disable_ar7(unsigned gpio)
{
void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
writel(readl(gpio_en) & ~(1 << gpio), gpio_en);
return 0;
}
static inline int ar7_gpio_disable_titan(unsigned gpio)
{
void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) & ~(1 << (gpio & 0x1f)),
gpio >> 5 ? gpio_en1 : gpio_en0);
return 0;
}
int ar7_gpio_disable(unsigned gpio)
{
return ar7_is_titan() ? ar7_gpio_disable_titan(gpio) :
ar7_gpio_disable_ar7(gpio);
}
EXPORT_SYMBOL(ar7_gpio_disable);
struct titan_gpio_cfg {
u32 reg;
u32 shift;
u32 func;
};
static const struct titan_gpio_cfg titan_gpio_table[] = {
/* reg, start bit, mux value */
{4, 24, 1},
{4, 26, 1},
{4, 28, 1},
{4, 30, 1},
{5, 6, 1},
{5, 8, 1},
{5, 10, 1},
{5, 12, 1},
{7, 14, 3},
{7, 16, 3},
{7, 18, 3},
{7, 20, 3},
{7, 22, 3},
{7, 26, 3},
{7, 28, 3},
{7, 30, 3},
{8, 0, 3},
{8, 2, 3},
{8, 4, 3},
{8, 10, 3},
{8, 14, 3},
{8, 16, 3},
{8, 18, 3},
{8, 20, 3},
{9, 8, 3},
{9, 10, 3},
{9, 12, 3},
{9, 14, 3},
{9, 18, 3},
{9, 20, 3},
{9, 24, 3},
{9, 26, 3},
{9, 28, 3},
{9, 30, 3},
{10, 0, 3},
{10, 2, 3},
{10, 8, 3},
{10, 10, 3},
{10, 12, 3},
{10, 14, 3},
{13, 12, 3},
{13, 14, 3},
{13, 16, 3},
{13, 18, 3},
{13, 24, 3},
{13, 26, 3},
{13, 28, 3},
{13, 30, 3},
{14, 2, 3},
{14, 6, 3},
{14, 8, 3},
{14, 12, 3}
};
static int titan_gpio_pinsel(unsigned gpio)
{
struct titan_gpio_cfg gpio_cfg;
u32 mux_status, pin_sel_reg, tmp;
void __iomem *pin_sel = (void __iomem *)KSEG1ADDR(AR7_REGS_PINSEL);
if (gpio >= ARRAY_SIZE(titan_gpio_table))
return -EINVAL;
gpio_cfg = titan_gpio_table[gpio];
pin_sel_reg = gpio_cfg.reg - 1;
mux_status = (readl(pin_sel + pin_sel_reg) >> gpio_cfg.shift) & 0x3;
/* Check the mux status */
if (!((mux_status == 0) || (mux_status == gpio_cfg.func)))
return 0;
/* Set the pin sel value */
tmp = readl(pin_sel + pin_sel_reg);
tmp |= ((gpio_cfg.func & 0x3) << gpio_cfg.shift);
writel(tmp, pin_sel + pin_sel_reg);
return 0;
}
/* Perform minimal Titan GPIO configuration */
static void titan_gpio_init(void)
{
unsigned i;
for (i = 44; i < 48; i++) {
titan_gpio_pinsel(i);
ar7_gpio_enable_titan(i);
titan_gpio_direction_input(&titan_gpio_chip.chip, i);
}
}
int __init ar7_gpio_init(void)
{
int ret;
struct ar7_gpio_chip *gpch;
unsigned size;
if (!ar7_is_titan()) {
gpch = &ar7_gpio_chip;
size = 0x10;
} else {
gpch = &titan_gpio_chip;
size = 0x1f;
}
gpch->regs = ioremap(AR7_REGS_GPIO, size);
if (!gpch->regs) {
printk(KERN_ERR "%s: failed to ioremap regs\n",
gpch->chip.label);
return -ENOMEM;
}
ret = gpiochip_add_data(&gpch->chip, gpch);
if (ret) {
printk(KERN_ERR "%s: failed to add gpiochip\n",
gpch->chip.label);
iounmap(gpch->regs);
return ret;
}
printk(KERN_INFO "%s: registered %d GPIOs\n",
gpch->chip.label, gpch->chip.ngpio);
if (ar7_is_titan())
titan_gpio_init();
return ret;
}


@ -1,165 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mach-ar7/ar7.h>
#define EXCEPT_OFFSET 0x80
#define PACE_OFFSET 0xA0
#define CHNLS_OFFSET 0x200
#define REG_OFFSET(irq, reg) ((irq) / 32 * 0x4 + reg * 0x10)
#define SEC_REG_OFFSET(reg) (EXCEPT_OFFSET + reg * 0x8)
#define SEC_SR_OFFSET (SEC_REG_OFFSET(0)) /* 0x80 */
#define CR_OFFSET(irq) (REG_OFFSET(irq, 1)) /* 0x10 */
#define SEC_CR_OFFSET (SEC_REG_OFFSET(1)) /* 0x88 */
#define ESR_OFFSET(irq) (REG_OFFSET(irq, 2)) /* 0x20 */
#define SEC_ESR_OFFSET (SEC_REG_OFFSET(2)) /* 0x90 */
#define ECR_OFFSET(irq) (REG_OFFSET(irq, 3)) /* 0x30 */
#define SEC_ECR_OFFSET (SEC_REG_OFFSET(3)) /* 0x98 */
#define PIR_OFFSET (0x40)
#define MSR_OFFSET (0x44)
#define PM_OFFSET(irq) (REG_OFFSET(irq, 5)) /* 0x50 */
#define TM_OFFSET(irq) (REG_OFFSET(irq, 6)) /* 0x60 */
#define REG(addr) ((u32 *)(KSEG1ADDR(AR7_REGS_IRQ) + addr))
#define CHNL_OFFSET(chnl) (CHNLS_OFFSET + (chnl * 4))
static int ar7_irq_base;
static void ar7_unmask_irq(struct irq_data *d)
{
writel(1 << ((d->irq - ar7_irq_base) % 32),
REG(ESR_OFFSET(d->irq - ar7_irq_base)));
}
static void ar7_mask_irq(struct irq_data *d)
{
writel(1 << ((d->irq - ar7_irq_base) % 32),
REG(ECR_OFFSET(d->irq - ar7_irq_base)));
}
static void ar7_ack_irq(struct irq_data *d)
{
writel(1 << ((d->irq - ar7_irq_base) % 32),
REG(CR_OFFSET(d->irq - ar7_irq_base)));
}
static void ar7_unmask_sec_irq(struct irq_data *d)
{
writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ESR_OFFSET));
}
static void ar7_mask_sec_irq(struct irq_data *d)
{
writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ECR_OFFSET));
}
static void ar7_ack_sec_irq(struct irq_data *d)
{
writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_CR_OFFSET));
}
static struct irq_chip ar7_irq_type = {
.name = "AR7",
.irq_unmask = ar7_unmask_irq,
.irq_mask = ar7_mask_irq,
.irq_ack = ar7_ack_irq
};
static struct irq_chip ar7_sec_irq_type = {
.name = "AR7",
.irq_unmask = ar7_unmask_sec_irq,
.irq_mask = ar7_mask_sec_irq,
.irq_ack = ar7_ack_sec_irq,
};
static void __init ar7_irq_init(int base)
{
int i;
/*
* Disable interrupts and clear pending
*/
writel(0xffffffff, REG(ECR_OFFSET(0)));
writel(0xff, REG(ECR_OFFSET(32)));
writel(0xffffffff, REG(SEC_ECR_OFFSET));
writel(0xffffffff, REG(CR_OFFSET(0)));
writel(0xff, REG(CR_OFFSET(32)));
writel(0xffffffff, REG(SEC_CR_OFFSET));
ar7_irq_base = base;
for (i = 0; i < 40; i++) {
writel(i, REG(CHNL_OFFSET(i)));
/* Primary IRQ's */
irq_set_chip_and_handler(base + i, &ar7_irq_type,
handle_level_irq);
/* Secondary IRQ's */
if (i < 32)
irq_set_chip_and_handler(base + i + 40,
&ar7_sec_irq_type,
handle_level_irq);
}
if (request_irq(2, no_action, IRQF_NO_THREAD, "AR7 cascade interrupt",
NULL))
pr_err("Failed to request irq 2 (AR7 cascade interrupt)\n");
if (request_irq(ar7_irq_base, no_action, IRQF_NO_THREAD,
"AR7 cascade interrupt", NULL)) {
pr_err("Failed to request irq %d (AR7 cascade interrupt)\n",
ar7_irq_base);
}
set_c0_status(IE_IRQ0);
}
void __init arch_init_irq(void)
{
mips_cpu_irq_init();
ar7_irq_init(8);
}
static void ar7_cascade(void)
{
u32 status;
int i, irq;
/* Primary IRQ's */
irq = readl(REG(PIR_OFFSET)) & 0x3f;
if (irq) {
do_IRQ(ar7_irq_base + irq);
return;
}
/* Secondary IRQ's are cascaded through primary '0' */
writel(1, REG(CR_OFFSET(irq)));
status = readl(REG(SEC_SR_OFFSET));
for (i = 0; i < 32; i++) {
if (status & 1) {
do_IRQ(ar7_irq_base + i + 40);
return;
}
status >>= 1;
}
spurious_interrupt();
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP7) /* cpu timer */
do_IRQ(7);
else if (pending & STATUSF_IP2) /* int0 hardware line */
ar7_cascade();
else
spurious_interrupt();
}


@ -1,51 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
*/
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/mach-ar7/ar7.h>
static int __init memsize(void)
{
u32 size = (64 << 20);
u32 *addr = (u32 *)KSEG1ADDR(AR7_SDRAM_BASE + size - 4);
u32 *kernel_end = (u32 *)KSEG1ADDR(CPHYSADDR((u32)&_end));
u32 *tmpaddr = addr;
while (tmpaddr > kernel_end) {
*tmpaddr = (u32)tmpaddr;
size >>= 1;
tmpaddr -= size >> 2;
}
do {
tmpaddr += size >> 2;
if (*tmpaddr != (u32)tmpaddr)
break;
size <<= 1;
} while (size < (64 << 20));
writel((u32)tmpaddr, &addr);
return size;
}
void __init prom_meminit(void)
{
unsigned long pages;
pages = memsize() >> PAGE_SHIFT;
memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT);
}


@ -1,722 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/vlynq.h>
#include <linux/leds.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <asm/addrspace.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
/*****************************************************************************
* VLYNQ Bus
****************************************************************************/
struct plat_vlynq_data {
struct plat_vlynq_ops ops;
int gpio_bit;
int reset_bit;
};
static int vlynq_on(struct vlynq_device *dev)
{
int ret;
struct plat_vlynq_data *pdata = dev->dev.platform_data;
ret = gpio_request(pdata->gpio_bit, "vlynq");
if (ret)
goto out;
ar7_device_reset(pdata->reset_bit);
ret = ar7_gpio_disable(pdata->gpio_bit);
if (ret)
goto out_enabled;
ret = ar7_gpio_enable(pdata->gpio_bit);
if (ret)
goto out_enabled;
ret = gpio_direction_output(pdata->gpio_bit, 0);
if (ret)
goto out_gpio_enabled;
msleep(50);
gpio_set_value(pdata->gpio_bit, 1);
msleep(50);
return 0;
out_gpio_enabled:
ar7_gpio_disable(pdata->gpio_bit);
out_enabled:
ar7_device_disable(pdata->reset_bit);
gpio_free(pdata->gpio_bit);
out:
return ret;
}
static void vlynq_off(struct vlynq_device *dev)
{
struct plat_vlynq_data *pdata = dev->dev.platform_data;
ar7_gpio_disable(pdata->gpio_bit);
gpio_free(pdata->gpio_bit);
ar7_device_disable(pdata->reset_bit);
}
static struct resource vlynq_low_res[] = {
{
.name = "regs",
.flags = IORESOURCE_MEM,
.start = AR7_REGS_VLYNQ0,
.end = AR7_REGS_VLYNQ0 + 0xff,
},
{
.name = "irq",
.flags = IORESOURCE_IRQ,
.start = 29,
.end = 29,
},
{
.name = "mem",
.flags = IORESOURCE_MEM,
.start = 0x04000000,
.end = 0x04ffffff,
},
{
.name = "devirq",
.flags = IORESOURCE_IRQ,
.start = 80,
.end = 111,
},
};
static struct resource vlynq_high_res[] = {
{
.name = "regs",
.flags = IORESOURCE_MEM,
.start = AR7_REGS_VLYNQ1,
.end = AR7_REGS_VLYNQ1 + 0xff,
},
{
.name = "irq",
.flags = IORESOURCE_IRQ,
.start = 33,
.end = 33,
},
{
.name = "mem",
.flags = IORESOURCE_MEM,
.start = 0x0c000000,
.end = 0x0cffffff,
},
{
.name = "devirq",
.flags = IORESOURCE_IRQ,
.start = 112,
.end = 143,
},
};
static struct plat_vlynq_data vlynq_low_data = {
.ops = {
.on = vlynq_on,
.off = vlynq_off,
},
.reset_bit = 20,
.gpio_bit = 18,
};
static struct plat_vlynq_data vlynq_high_data = {
.ops = {
.on = vlynq_on,
.off = vlynq_off,
},
.reset_bit = 16,
.gpio_bit = 19,
};
static struct platform_device vlynq_low = {
.id = 0,
.name = "vlynq",
.dev = {
.platform_data = &vlynq_low_data,
},
.resource = vlynq_low_res,
.num_resources = ARRAY_SIZE(vlynq_low_res),
};
static struct platform_device vlynq_high = {
.id = 1,
.name = "vlynq",
.dev = {
.platform_data = &vlynq_high_data,
},
.resource = vlynq_high_res,
.num_resources = ARRAY_SIZE(vlynq_high_res),
};
/*****************************************************************************
* Flash
****************************************************************************/
static struct resource physmap_flash_resource = {
.name = "mem",
.flags = IORESOURCE_MEM,
.start = 0x10000000,
.end = 0x107fffff,
};
static const char *ar7_probe_types[] = { "ar7part", NULL };
static struct physmap_flash_data physmap_flash_data = {
.width = 2,
.part_probe_types = ar7_probe_types,
};
static struct platform_device physmap_flash = {
.name = "physmap-flash",
.dev = {
.platform_data = &physmap_flash_data,
},
.resource = &physmap_flash_resource,
.num_resources = 1,
};
/*****************************************************************************
* Ethernet
****************************************************************************/
static struct resource cpmac_low_res[] = {
{
.name = "regs",
.flags = IORESOURCE_MEM,
.start = AR7_REGS_MAC0,
.end = AR7_REGS_MAC0 + 0x7ff,
},
{
.name = "irq",
.flags = IORESOURCE_IRQ,
.start = 27,
.end = 27,
},
};
static struct resource cpmac_high_res[] = {
{
.name = "regs",
.flags = IORESOURCE_MEM,
.start = AR7_REGS_MAC1,
.end = AR7_REGS_MAC1 + 0x7ff,
},
{
.name = "irq",
.flags = IORESOURCE_IRQ,
.start = 41,
.end = 41,
},
};
static struct fixed_phy_status fixed_phy_status __initdata = {
.link = 1,
.speed = 100,
.duplex = 1,
};
static struct plat_cpmac_data cpmac_low_data = {
.reset_bit = 17,
.power_bit = 20,
.phy_mask = 0x80000000,
};
static struct plat_cpmac_data cpmac_high_data = {
.reset_bit = 21,
.power_bit = 22,
.phy_mask = 0x7fffffff,
};
static u64 cpmac_dma_mask = DMA_BIT_MASK(32);
static struct platform_device cpmac_low = {
.id = 0,
.name = "cpmac",
.dev = {
.dma_mask = &cpmac_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &cpmac_low_data,
},
.resource = cpmac_low_res,
.num_resources = ARRAY_SIZE(cpmac_low_res),
};
static struct platform_device cpmac_high = {
.id = 1,
.name = "cpmac",
.dev = {
.dma_mask = &cpmac_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &cpmac_high_data,
},
.resource = cpmac_high_res,
.num_resources = ARRAY_SIZE(cpmac_high_res),
};
static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
{
char name[5], *mac;
sprintf(name, "mac%c", 'a' + instance);
mac = prom_getenv(name);
if (!mac && instance) {
sprintf(name, "mac%c", 'a');
mac = prom_getenv(name);
}
if (mac) {
if (!mac_pton(mac, dev_addr)) {
pr_warn("cannot parse mac address, using random address\n");
eth_random_addr(dev_addr);
}
} else
eth_random_addr(dev_addr);
}
/*****************************************************************************
* USB
****************************************************************************/
static struct resource usb_res[] = {
{
.name = "regs",
.flags = IORESOURCE_MEM,
.start = AR7_REGS_USB,
.end = AR7_REGS_USB + 0xff,
},
{
.name = "irq",
.flags = IORESOURCE_IRQ,
.start = 32,
.end = 32,
},
{
.name = "mem",
.flags = IORESOURCE_MEM,
.start = 0x03400000,
.end = 0x03401fff,
},
};
static struct platform_device ar7_udc = {
.name = "ar7_udc",
.resource = usb_res,
.num_resources = ARRAY_SIZE(usb_res),
};
/*****************************************************************************
* LEDs
****************************************************************************/
static const struct gpio_led default_leds[] = {
{
.name = "status",
.gpio = 8,
.active_low = 1,
},
};
static const struct gpio_led titan_leds[] = {
{ .name = "status", .gpio = 8, .active_low = 1, },
{ .name = "wifi", .gpio = 13, .active_low = 1, },
};
static const struct gpio_led dsl502t_leds[] = {
{
.name = "status",
.gpio = 9,
.active_low = 1,
},
{
.name = "ethernet",
.gpio = 7,
.active_low = 1,
},
{
.name = "usb",
.gpio = 12,
.active_low = 1,
},
};
static const struct gpio_led dg834g_leds[] = {
{
.name = "ppp",
.gpio = 6,
.active_low = 1,
},
{
.name = "status",
.gpio = 7,
.active_low = 1,
},
{
.name = "adsl",
.gpio = 8,
.active_low = 1,
},
{
.name = "wifi",
.gpio = 12,
.active_low = 1,
},
{
.name = "power",
.gpio = 14,
.active_low = 1,
.default_trigger = "default-on",
},
};
static const struct gpio_led fb_sl_leds[] = {
{
.name = "1",
.gpio = 7,
},
{
.name = "2",
.gpio = 13,
.active_low = 1,
},
{
.name = "3",
.gpio = 10,
.active_low = 1,
},
{
.name = "4",
.gpio = 12,
.active_low = 1,
},
{
.name = "5",
.gpio = 9,
.active_low = 1,
},
};
static const struct gpio_led fb_fon_leds[] = {
{
.name = "1",
.gpio = 8,
},
{
.name = "2",
.gpio = 3,
.active_low = 1,
},
{
.name = "3",
.gpio = 5,
},
{
.name = "4",
.gpio = 4,
.active_low = 1,
},
{
.name = "5",
.gpio = 11,
.active_low = 1,
},
};
static const struct gpio_led gt701_leds[] = {
{
.name = "inet:green",
.gpio = 13,
.active_low = 1,
},
{
.name = "usb",
.gpio = 12,
.active_low = 1,
},
{
.name = "inet:red",
.gpio = 9,
.active_low = 1,
},
{
.name = "power:red",
.gpio = 7,
.active_low = 1,
},
{
.name = "power:green",
.gpio = 8,
.active_low = 1,
.default_trigger = "default-on",
},
{
.name = "ethernet",
.gpio = 10,
.active_low = 1,
},
};
static struct gpio_led_platform_data ar7_led_data;
static struct platform_device ar7_gpio_leds = {
.name = "leds-gpio",
.dev = {
.platform_data = &ar7_led_data,
}
};
static void __init detect_leds(void)
{
char *prid, *usb_prod;
/* Default LEDs */
ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
ar7_led_data.leds = default_leds;
/* FIXME: the whole thing is unreliable */
prid = prom_getenv("ProductID");
usb_prod = prom_getenv("usb_prod");
/* If we can't get the product id from PROM, use the default LEDs */
if (!prid)
return;
if (strstr(prid, "Fritz_Box_FON")) {
ar7_led_data.num_leds = ARRAY_SIZE(fb_fon_leds);
ar7_led_data.leds = fb_fon_leds;
} else if (strstr(prid, "Fritz_Box_")) {
ar7_led_data.num_leds = ARRAY_SIZE(fb_sl_leds);
ar7_led_data.leds = fb_sl_leds;
} else if ((!strcmp(prid, "AR7RD") || !strcmp(prid, "AR7DB"))
&& usb_prod != NULL && strstr(usb_prod, "DSL-502T")) {
ar7_led_data.num_leds = ARRAY_SIZE(dsl502t_leds);
ar7_led_data.leds = dsl502t_leds;
} else if (strstr(prid, "DG834")) {
ar7_led_data.num_leds = ARRAY_SIZE(dg834g_leds);
ar7_led_data.leds = dg834g_leds;
} else if (strstr(prid, "CYWM") || strstr(prid, "CYWL")) {
ar7_led_data.num_leds = ARRAY_SIZE(titan_leds);
ar7_led_data.leds = titan_leds;
} else if (strstr(prid, "GT701")) {
ar7_led_data.num_leds = ARRAY_SIZE(gt701_leds);
ar7_led_data.leds = gt701_leds;
}
}
/*****************************************************************************
* Watchdog
****************************************************************************/
static struct resource ar7_wdt_res = {
.name = "regs",
.flags = IORESOURCE_MEM,
.start = -1, /* Filled at runtime */
.end = -1, /* Filled at runtime */
};
static struct platform_device ar7_wdt = {
.name = "ar7_wdt",
.resource = &ar7_wdt_res,
.num_resources = 1,
};
/*****************************************************************************
* Init
****************************************************************************/
static int __init ar7_register_uarts(void)
{
#ifdef CONFIG_SERIAL_8250
static struct uart_port uart_port __initdata;
struct clk *bus_clk;
int res;
memset(&uart_port, 0, sizeof(struct uart_port));
bus_clk = clk_get(NULL, "bus");
if (IS_ERR(bus_clk))
panic("unable to get bus clk");
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
uart_port.regshift = 2;
uart_port.line = 0;
uart_port.irq = AR7_IRQ_UART0;
uart_port.mapbase = AR7_REGS_UART0;
uart_port.membase = ioremap(uart_port.mapbase, 256);
res = early_serial_setup(&uart_port);
if (res)
return res;
/* Only TNETD73xx have a second serial port */
if (ar7_has_second_uart()) {
uart_port.line = 1;
uart_port.irq = AR7_IRQ_UART1;
uart_port.mapbase = UR8_REGS_UART1;
uart_port.membase = ioremap(uart_port.mapbase, 256);
res = early_serial_setup(&uart_port);
if (res)
return res;
}
#endif
return 0;
}
static void __init titan_fixup_devices(void)
{
/* Set vlynq0 data */
vlynq_low_data.reset_bit = 15;
vlynq_low_data.gpio_bit = 14;
/* Set vlynq1 data */
vlynq_high_data.reset_bit = 16;
vlynq_high_data.gpio_bit = 7;
/* Set vlynq0 resources */
vlynq_low_res[0].start = TITAN_REGS_VLYNQ0;
vlynq_low_res[0].end = TITAN_REGS_VLYNQ0 + 0xff;
vlynq_low_res[1].start = 33;
vlynq_low_res[1].end = 33;
vlynq_low_res[2].start = 0x0c000000;
vlynq_low_res[2].end = 0x0fffffff;
vlynq_low_res[3].start = 80;
vlynq_low_res[3].end = 111;
/* Set vlynq1 resources */
vlynq_high_res[0].start = TITAN_REGS_VLYNQ1;
vlynq_high_res[0].end = TITAN_REGS_VLYNQ1 + 0xff;
vlynq_high_res[1].start = 34;
vlynq_high_res[1].end = 34;
vlynq_high_res[2].start = 0x40000000;
vlynq_high_res[2].end = 0x43ffffff;
vlynq_high_res[3].start = 112;
vlynq_high_res[3].end = 143;
/* Set cpmac0 data */
cpmac_low_data.phy_mask = 0x40000000;
/* Set cpmac1 data */
cpmac_high_data.phy_mask = 0x80000000;
/* Set cpmac0 resources */
cpmac_low_res[0].start = TITAN_REGS_MAC0;
cpmac_low_res[0].end = TITAN_REGS_MAC0 + 0x7ff;
/* Set cpmac1 resources */
cpmac_high_res[0].start = TITAN_REGS_MAC1;
cpmac_high_res[0].end = TITAN_REGS_MAC1 + 0x7ff;
}
static int __init ar7_register_devices(void)
{
void __iomem *bootcr;
u32 val;
int res;
res = ar7_gpio_init();
if (res)
pr_warn("unable to register gpios: %d\n", res);
res = ar7_register_uarts();
if (res)
pr_err("unable to setup uart(s): %d\n", res);
res = platform_device_register(&physmap_flash);
if (res)
pr_warn("unable to register physmap-flash: %d\n", res);
if (ar7_is_titan())
titan_fixup_devices();
ar7_device_disable(vlynq_low_data.reset_bit);
res = platform_device_register(&vlynq_low);
if (res)
pr_warn("unable to register vlynq-low: %d\n", res);
if (ar7_has_high_vlynq()) {
ar7_device_disable(vlynq_high_data.reset_bit);
res = platform_device_register(&vlynq_high);
if (res)
pr_warn("unable to register vlynq-high: %d\n", res);
}
if (ar7_has_high_cpmac()) {
res = fixed_phy_add(PHY_POLL, cpmac_high.id,
&fixed_phy_status);
if (!res) {
cpmac_get_mac(1, cpmac_high_data.dev_addr);
res = platform_device_register(&cpmac_high);
if (res)
pr_warn("unable to register cpmac-high: %d\n",
res);
} else
pr_warn("unable to add cpmac-high phy: %d\n", res);
} else
cpmac_low_data.phy_mask = 0xffffffff;
res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
if (!res) {
cpmac_get_mac(0, cpmac_low_data.dev_addr);
res = platform_device_register(&cpmac_low);
if (res)
pr_warn("unable to register cpmac-low: %d\n", res);
} else
pr_warn("unable to add cpmac-low phy: %d\n", res);
detect_leds();
res = platform_device_register(&ar7_gpio_leds);
if (res)
pr_warn("unable to register leds: %d\n", res);
res = platform_device_register(&ar7_udc);
if (res)
pr_warn("unable to register usb slave: %d\n", res);
/* Register watchdog only if enabled in hardware */
bootcr = ioremap(AR7_REGS_DCL, 4);
val = readl(bootcr);
iounmap(bootcr);
if (val & AR7_WDT_HW_ENA) {
if (ar7_has_high_vlynq())
ar7_wdt_res.start = UR8_REGS_WDT;
else
ar7_wdt_res.start = AR7_REGS_WDT;
ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
res = platform_device_register(&ar7_wdt);
if (res)
pr_warn("unable to register watchdog: %d\n", res);
}
return 0;
}
device_initcall(ar7_register_devices);

View file

@ -1,256 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* Putting things on the screen/serial line using YAMON's facilities.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/io.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
#define MAX_ENTRY 80
struct env_var {
char *name;
char *value;
};
static struct env_var adam2_env[MAX_ENTRY];
char *prom_getenv(const char *name)
{
int i;
for (i = 0; (i < MAX_ENTRY) && adam2_env[i].name; i++)
if (!strcmp(name, adam2_env[i].name))
return adam2_env[i].value;
return NULL;
}
EXPORT_SYMBOL(prom_getenv);
static void __init ar7_init_cmdline(int argc, char *argv[])
{
int i;
for (i = 1; i < argc; i++) {
strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
if (i < (argc - 1))
strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
}
}
struct psbl_rec {
u32 psbl_size;
u32 env_base;
u32 env_size;
u32 ffs_base;
u32 ffs_size;
};
static const char psp_env_version[] __initconst = "TIENV0.8";
struct psp_env_chunk {
u8 num;
u8 ctrl;
u16 csum;
u8 len;
char data[11];
} __packed;
struct psp_var_map_entry {
u8 num;
char *value;
};
static const struct psp_var_map_entry psp_var_map[] = {
{ 1, "cpufrequency" },
{ 2, "memsize" },
{ 3, "flashsize" },
{ 4, "modetty0" },
{ 5, "modetty1" },
{ 8, "maca" },
{ 9, "macb" },
{ 28, "sysfrequency" },
{ 38, "mipsfrequency" },
};
/*
Well-known variable (num is looked up in table above for matching variable name)
Example: cpufrequency=211968000
+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
| 01 |CTRL|CHECKSUM | 01 | _2 | _1 | _1 | _9 | _6 | _8 | _0 | _0 | _0 | \0 | FF
+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
Name=Value pair in a single chunk
Example: NAME=VALUE
+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
| 00 |CTRL|CHECKSUM | 01 | _N | _A | _M | _E | _0 | _V | _A | _L | _U | _E | \0
+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
Name=Value pair in 2 chunks (len is the number of chunks)
Example: bootloaderVersion=1.3.7.15
+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
| 00 |CTRL|CHECKSUM | 02 | _b | _o | _o | _t | _l | _o | _a | _d | _e | _r | _V
+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
| _e | _r | _s | _i | _o | _n | \0 | _1 | _. | _3 | _. | _7 | _. | _1 | _5 | \0
+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
Data is padded with 0xFF
*/
#define PSP_ENV_SIZE 4096
static char psp_env_data[PSP_ENV_SIZE] = { 0, };
static char * __init lookup_psp_var_map(u8 num)
{
int i;
for (i = 0; i < ARRAY_SIZE(psp_var_map); i++)
if (psp_var_map[i].num == num)
return psp_var_map[i].value;
return NULL;
}
static void __init add_adam2_var(char *name, char *value)
{
int i;
for (i = 0; i < MAX_ENTRY; i++) {
if (!adam2_env[i].name) {
adam2_env[i].name = name;
adam2_env[i].value = value;
return;
} else if (!strcmp(adam2_env[i].name, name)) {
adam2_env[i].value = value;
return;
}
}
}
static int __init parse_psp_env(void *psp_env_base)
{
int i, n;
char *name, *value;
struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;
memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);
i = 1;
n = PSP_ENV_SIZE / sizeof(struct psp_env_chunk);
while (i < n) {
if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))
break;
value = chunks[i].data;
if (chunks[i].num) {
name = lookup_psp_var_map(chunks[i].num);
} else {
name = value;
value += strlen(name) + 1;
}
if (name)
add_adam2_var(name, value);
i += chunks[i].len;
}
return 0;
}
static void __init ar7_init_env(struct env_var *env)
{
int i;
struct psbl_rec *psbl = (struct psbl_rec *)(KSEG1ADDR(0x14000300));
void *psp_env = (void *)KSEG1ADDR(psbl->env_base);
if (strcmp(psp_env, psp_env_version) == 0) {
parse_psp_env(psp_env);
} else {
for (i = 0; i < MAX_ENTRY; i++, env++)
if (env->name)
add_adam2_var(env->name, env->value);
}
}
static void __init console_config(void)
{
#ifdef CONFIG_SERIAL_8250_CONSOLE
char console_string[40];
int baud = 0;
char parity = '\0', bits = '\0', flow = '\0';
char *s, *p;
if (strstr(arcs_cmdline, "console="))
return;
s = prom_getenv("modetty0");
if (s) {
baud = simple_strtoul(s, &p, 10);
s = p;
if (*s == ',')
s++;
if (*s)
parity = *s++;
if (*s == ',')
s++;
if (*s)
bits = *s++;
if (*s == ',')
s++;
if (*s == 'h')
flow = 'r';
}
if (baud == 0)
baud = 38400;
if (parity != 'n' && parity != 'o' && parity != 'e')
parity = 'n';
if (bits != '7' && bits != '8')
bits = '8';
if (flow == 'r')
sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
parity, bits, flow);
else
sprintf(console_string, " console=ttyS0,%d%c%c", baud, parity,
bits);
strlcat(arcs_cmdline, console_string, COMMAND_LINE_SIZE);
#endif
}
void __init prom_init(void)
{
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
static inline unsigned int serial_in(int offset)
{
return readl((void *)PORT(offset));
}
static inline void serial_out(int offset, int value)
{
writel(value, (void *)PORT(offset));
}
void prom_putchar(char c)
{
while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
;
serial_out(UART_TX, c);
}
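
The chunk layouts documented in the comment block earlier in this file map directly onto the walk parse_psp_env() performs. Here is a hedged user-space sketch that builds the two-chunk bootloaderVersion example from that comment and decodes it the same way; the buffer contents and main() are made up for illustration, and only the struct layout and the loop mirror the code above:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct psp_env_chunk {
	uint8_t  num;
	uint8_t  ctrl;
	uint16_t csum;
	uint8_t  len;
	char     data[11];
} __attribute__((packed));              /* 16 bytes per chunk */

int main(void)
{
	unsigned char raw[4 * 16];
	struct psp_env_chunk *chunks = (struct psp_env_chunk *)raw;

	memset(raw, 0xff, sizeof(raw));     /* PSP data is padded with 0xFF */

	/* chunk 1: num == 0 means the name is stored inline, len counts chunks */
	chunks[1].num = 0;
	chunks[1].len = 2;
	memcpy(chunks[1].data, "bootloaderV", 11);
	/* chunk 2: pure continuation, all 16 bytes are payload */
	memcpy(&raw[2 * 16], "ersion\0" "1.3.7.15", 16);

	/* same walk as parse_psp_env(); chunk 0 would carry the TIENV0.8 signature */
	for (unsigned int i = 1; i < 4; ) {
		if (chunks[i].num == 0xff || i + chunks[i].len > 4)
			break;
		const char *name = chunks[i].data;          /* num == 0 case only */
		const char *value = name + strlen(name) + 1;
		printf("%s=%s\n", name, value);
		i += chunks[i].len;
	}
	return 0;
}

Running it prints bootloaderVersion=1.3.7.15; the value string deliberately runs past data[11] into the continuation chunk, exactly as the kernel parser relies on the chunks being contiguous in psp_env_data.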

View file

@ -1,93 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pm.h>
#include <linux/time.h>
#include <asm/reboot.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
static void ar7_machine_restart(char *command)
{
u32 *softres_reg = ioremap(AR7_REGS_RESET + AR7_RESET_SOFTWARE, 1);
writel(1, softres_reg);
}
static void ar7_machine_halt(void)
{
while (1)
;
}
static void ar7_machine_power_off(void)
{
u32 *power_reg = (u32 *)ioremap(AR7_REGS_POWER, 1);
u32 power_state = readl(power_reg) | (3 << 30);
writel(power_state, power_reg);
ar7_machine_halt();
}
const char *get_system_type(void)
{
u16 chip_id = ar7_chip_id();
u16 titan_variant_id = titan_chip_id();
switch (chip_id) {
case AR7_CHIP_7100:
return "TI AR7 (TNETD7100)";
case AR7_CHIP_7200:
return "TI AR7 (TNETD7200)";
case AR7_CHIP_7300:
return "TI AR7 (TNETD7300)";
case AR7_CHIP_TITAN:
switch (titan_variant_id) {
case TITAN_CHIP_1050:
return "TI AR7 (TNETV1050)";
case TITAN_CHIP_1055:
return "TI AR7 (TNETV1055)";
case TITAN_CHIP_1056:
return "TI AR7 (TNETV1056)";
case TITAN_CHIP_1060:
return "TI AR7 (TNETV1060)";
}
fallthrough;
default:
return "TI AR7 (unknown)";
}
}
static int __init ar7_init_console(void)
{
return 0;
}
console_initcall(ar7_init_console);
/*
* Initializes basic routines and structure pointers, memory size (as
* given by the BIOS), and saves the command line.
*/
void __init plat_mem_setup(void)
{
unsigned long io_base;
_machine_restart = ar7_machine_restart;
_machine_halt = ar7_machine_halt;
pm_power_off = ar7_machine_power_off;
io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000);
if (!io_base)
panic("Can't remap IO base!");
set_io_port_base(io_base);
prom_meminit();
printk(KERN_INFO "%s, ID: 0x%04x, Revision: 0x%02x\n",
get_system_type(), ar7_chip_id(), ar7_chip_rev());
}

View file

@ -1,31 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* Setting up the clock on the MIPS boards.
*/
#include <linux/init.h>
#include <linux/time.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <asm/time.h>
#include <asm/mach-ar7/ar7.h>
void __init plat_time_init(void)
{
struct clk *cpu_clk;
/* Initialize ar7 clocks so the CPU clock frequency is correct */
ar7_init_clocks();
cpu_clk = clk_get(NULL, "cpu");
if (IS_ERR(cpu_clk)) {
printk(KERN_ERR "unable to get cpu clock\n");
return;
}
mips_hpt_frequency = clk_get_rate(cpu_clk) / 2;
}

View file

@ -13,11 +13,6 @@
#define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))
#endif
#ifdef CONFIG_AR7
#include <ar7.h>
#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
#endif
#ifdef CONFIG_MACH_INGENIC
#define INGENIC_UART_BASE_ADDR (0x10030000 + 0x1000 * CONFIG_ZBOOT_INGENIC_UART)
#define PORT(offset) (CKSEG1ADDR(INGENIC_UART_BASE_ADDR) + (4 * offset))

View file

@ -366,7 +366,6 @@ bch: ecc-controller@130d0000 {
rom: memory@1fc00000 {
compatible = "mtd-rom";
probe-type = "map_rom";
reg = <0x1fc00000 0x2000>;
bank-width = <4>;

View file

@ -461,7 +461,6 @@ usb_otg: usb@13440000 {
rom: memory@1fc00000 {
compatible = "mtd-rom";
probe-type = "map_rom";
reg = <0x1fc00000 0x2000>;
bank-width = <4>;

View file

@ -8,7 +8,7 @@
/ {
compatible = "gnubee,gb-pc1", "mediatek,mt7621-soc";
model = "GB-PC1";
model = "GnuBee GB-PC1";
memory@0 {
device_type = "memory";

View file

@ -8,7 +8,7 @@
/ {
compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc";
model = "GB-PC2";
model = "GnuBee GB-PC2";
memory@0 {
device_type = "memory";

View file

@ -300,14 +300,13 @@ ethernet: ethernet@1e100000 {
compatible = "mediatek,mt7621-eth";
reg = <0x1e100000 0x10000>;
clocks = <&sysc MT7621_CLK_FE>,
<&sysc MT7621_CLK_ETH>;
clocks = <&sysc MT7621_CLK_FE>, <&sysc MT7621_CLK_ETH>;
clock-names = "fe", "ethif";
#address-cells = <1>;
#size-cells = <0>;
resets = <&sysc MT7621_RST_FE &sysc MT7621_RST_ETH>;
resets = <&sysc MT7621_RST_FE>, <&sysc MT7621_RST_ETH>;
reset-names = "fe", "eth";
interrupt-parent = <&gic>;

View file

@ -1,119 +0,0 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_AR7=y
CONFIG_HZ_100=y
CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_CUBIC is not set
CONFIG_TCP_CONG_WESTWOOD=y
# CONFIG_TCP_CONG_HTCP is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_BRIDGE_NETFILTER is not set
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_RAW=m
CONFIG_ATM=m
CONFIG_ATM_BR2684=m
CONFIG_ATM_BR2684_IPFILTER=y
CONFIG_BRIDGE=y
CONFIG_VLAN_8021Q=y
CONFIG_NET_SCHED=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_HAMRADIO=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_NETDEVICES=y
CONFIG_CPMAC=y
CONFIG_FIXED_PHY=y
CONFIG_PPP=m
CONFIG_PPP_FILTER=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOATM=m
CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_HW_RANDOM=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_AR7_WDT=y
# CONFIG_USB_SUPPORT is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
# CONFIG_DNOTIFY is not set
CONFIG_PROC_KCORE=y
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_SUMMARY=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
# CONFIG_CRYPTO_HW is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="rootfstype=squashfs,jffs2"

View file

@ -177,7 +177,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_REISERFS_FS=m
CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=m

View file

@ -70,10 +70,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HWMON is not set
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_AUTOFS_FS=m

View file

@ -229,9 +229,6 @@ CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_XFS_FS=m

View file

@ -317,11 +317,6 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y

View file

@ -323,11 +323,6 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y

View file

@ -323,11 +323,6 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y

View file

@ -310,10 +310,6 @@ CONFIG_USB_LD=m
CONFIG_USB_TEST=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_AUTOFS_FS=m

View file

@ -71,8 +71,6 @@ struct kprobe_ctlblk {
struct prev_kprobe prev_kprobe;
};
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
#endif /* CONFIG_KPROBES */
#endif /* _ASM_KPROBES_H */

View file

@ -1,191 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
*/
#ifndef __AR7_H__
#define __AR7_H__
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <asm/addrspace.h>
#define AR7_SDRAM_BASE 0x14000000
#define AR7_REGS_BASE 0x08610000
#define AR7_REGS_MAC0 (AR7_REGS_BASE + 0x0000)
#define AR7_REGS_GPIO (AR7_REGS_BASE + 0x0900)
/* 0x08610A00 - 0x08610BFF (512 bytes, 128 bytes / clock) */
#define AR7_REGS_POWER (AR7_REGS_BASE + 0x0a00)
#define AR7_REGS_CLOCKS (AR7_REGS_POWER + 0x80)
#define UR8_REGS_CLOCKS (AR7_REGS_POWER + 0x20)
#define AR7_REGS_UART0 (AR7_REGS_BASE + 0x0e00)
#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
#define AR7_REGS_MDIO (AR7_REGS_BASE + 0x1e00)
#define AR7_REGS_IRQ (AR7_REGS_BASE + 0x2400)
#define AR7_REGS_MAC1 (AR7_REGS_BASE + 0x2800)
#define AR7_REGS_WDT (AR7_REGS_BASE + 0x1f00)
#define UR8_REGS_WDT (AR7_REGS_BASE + 0x0b00)
#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
/* Titan registers */
#define TITAN_REGS_ESWITCH_BASE (0x08640000)
#define TITAN_REGS_MAC0 (TITAN_REGS_ESWITCH_BASE)
#define TITAN_REGS_MAC1 (TITAN_REGS_ESWITCH_BASE + 0x0800)
#define TITAN_REGS_MDIO (TITAN_REGS_ESWITCH_BASE + 0x02000)
#define TITAN_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1c00)
#define TITAN_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1300)
#define AR7_RESET_PERIPHERAL 0x0
#define AR7_RESET_SOFTWARE 0x4
#define AR7_RESET_STATUS 0x8
#define AR7_RESET_BIT_CPMAC_LO 17
#define AR7_RESET_BIT_CPMAC_HI 21
#define AR7_RESET_BIT_MDIO 22
#define AR7_RESET_BIT_EPHY 26
#define TITAN_RESET_BIT_EPHY1 28
/* GPIO control registers */
#define AR7_GPIO_INPUT 0x0
#define AR7_GPIO_OUTPUT 0x4
#define AR7_GPIO_DIR 0x8
#define AR7_GPIO_ENABLE 0xc
#define TITAN_GPIO_INPUT_0 0x0
#define TITAN_GPIO_INPUT_1 0x4
#define TITAN_GPIO_OUTPUT_0 0x8
#define TITAN_GPIO_OUTPUT_1 0xc
#define TITAN_GPIO_DIR_0 0x10
#define TITAN_GPIO_DIR_1 0x14
#define TITAN_GPIO_ENBL_0 0x18
#define TITAN_GPIO_ENBL_1 0x1c
#define AR7_CHIP_7100 0x18
#define AR7_CHIP_7200 0x2b
#define AR7_CHIP_7300 0x05
#define AR7_CHIP_TITAN 0x07
#define TITAN_CHIP_1050 0x0f
#define TITAN_CHIP_1055 0x0e
#define TITAN_CHIP_1056 0x0d
#define TITAN_CHIP_1060 0x07
/* Interrupts */
#define AR7_IRQ_UART0 15
#define AR7_IRQ_UART1 16
/* Clocks */
#define AR7_AFE_CLOCK 35328000
#define AR7_REF_CLOCK 25000000
#define AR7_XTAL_CLOCK 24000000
/* DCL */
#define AR7_WDT_HW_ENA 0x10
struct plat_cpmac_data {
int reset_bit;
int power_bit;
u32 phy_mask;
char dev_addr[6];
};
struct plat_dsl_data {
int reset_bit_dsl;
int reset_bit_sar;
};
static inline int ar7_is_titan(void)
{
return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x24)) & 0xffff) ==
AR7_CHIP_TITAN;
}
static inline u16 ar7_chip_id(void)
{
return ar7_is_titan() ? AR7_CHIP_TITAN : (readl((void *)
KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff);
}
static inline u16 titan_chip_id(void)
{
unsigned int val = readl((void *)KSEG1ADDR(AR7_REGS_GPIO +
TITAN_GPIO_INPUT_1));
return ((val >> 12) & 0x0f);
}
static inline u8 ar7_chip_rev(void)
{
return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + (ar7_is_titan() ? 0x24 :
0x14))) >> 16) & 0xff;
}
static inline int ar7_has_high_cpmac(void)
{
u16 chip_id = ar7_chip_id();
switch (chip_id) {
case AR7_CHIP_7100:
case AR7_CHIP_7200:
return 0;
case AR7_CHIP_7300:
return 1;
default:
return -ENXIO;
}
}
#define ar7_has_high_vlynq ar7_has_high_cpmac
#define ar7_has_second_uart ar7_has_high_cpmac
static inline void ar7_device_enable(u32 bit)
{
void *reset_reg =
(void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
writel(readl(reset_reg) | (1 << bit), reset_reg);
msleep(20);
}
static inline void ar7_device_disable(u32 bit)
{
void *reset_reg =
(void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
writel(readl(reset_reg) & ~(1 << bit), reset_reg);
msleep(20);
}
static inline void ar7_device_reset(u32 bit)
{
ar7_device_disable(bit);
ar7_device_enable(bit);
}
static inline void ar7_device_on(u32 bit)
{
void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
writel(readl(power_reg) | (1 << bit), power_reg);
msleep(20);
}
static inline void ar7_device_off(u32 bit)
{
void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
writel(readl(power_reg) & ~(1 << bit), power_reg);
msleep(20);
}
int __init ar7_gpio_init(void);
void __init ar7_init_clocks(void);
/* Board specific GPIO functions */
int ar7_gpio_enable(unsigned gpio);
int ar7_gpio_disable(unsigned gpio);
#endif /* __AR7_H__ */

View file

@ -1,16 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Shamelessly copied from asm-mips/mach-emma2rh/
* Copyright (C) 2003 by Ralf Baechle
*/
#ifndef __ASM_AR7_IRQ_H
#define __ASM_AR7_IRQ_H
#define NR_IRQS 256
#include <asm/mach-generic/irq.h>
#endif /* __ASM_AR7_IRQ_H */

View file

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2006, 2007 Florian Fainelli <florian@openwrt.org>
*/
#ifndef __PROM_H__
#define __PROM_H__
extern char *prom_getenv(const char *name);
extern void prom_meminit(void);
#endif /* __PROM_H__ */

View file

@ -1,22 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef _ASM_AR7_SPACES_H
#define _ASM_AR7_SPACES_H
/*
* This handles the memory map.
* We handle pages at KSEG0 for kernels with 32 bit address space.
*/
#define PAGE_OFFSET _AC(0x94000000, UL)
#define PHYS_OFFSET _AC(0x14000000, UL)
#include <asm/mach-generic/spaces.h>
#endif /* __ASM_AR7_SPACES_H */

View file

@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
*
* Loongson 1 NAND platform support.
*/
#ifndef __ASM_MACH_LOONGSON32_DMA_H
#define __ASM_MACH_LOONGSON32_DMA_H
#define LS1X_DMA_CHANNEL0 0
#define LS1X_DMA_CHANNEL1 1
#define LS1X_DMA_CHANNEL2 2
struct plat_ls1x_dma {
int nr_channels;
};
extern struct plat_ls1x_dma ls1b_dma_pdata;
#endif /* __ASM_MACH_LOONGSON32_DMA_H */

View file

@ -1,26 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
*
* Loongson 1 NAND platform support.
*/
#ifndef __ASM_MACH_LOONGSON32_NAND_H
#define __ASM_MACH_LOONGSON32_NAND_H
#include <linux/dmaengine.h>
#include <linux/mtd/partitions.h>
struct plat_ls1x_nand {
struct mtd_partition *parts;
unsigned int nr_parts;
int hold_cycle;
int wait_cycle;
};
extern struct plat_ls1x_nand ls1b_nand_pdata;
bool ls1x_dma_filter_fn(struct dma_chan *chan, void *param);
#endif /* __ASM_MACH_LOONGSON32_NAND_H */

View file

@ -8,9 +8,6 @@
#include <linux/platform_device.h>
#include <dma.h>
#include <nand.h>
extern struct platform_device ls1x_uart_pdev;
extern struct platform_device ls1x_eth0_pdev;
extern struct platform_device ls1x_eth1_pdev;

View file

@ -66,7 +66,6 @@ copy_word:
LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
b process_entry
done:
#ifdef CONFIG_SMP

View file

@ -15,8 +15,6 @@
#include <platform.h>
#include <loongson1.h>
#include <dma.h>
#include <nand.h>
/* 8250/16550 compatible UART */
#define LS1X_UART(_id) \

View file

@ -8,8 +8,6 @@
#include <linux/sizes.h>
#include <loongson1.h>
#include <dma.h>
#include <nand.h>
#include <platform.h>
static const struct gpio_led ls1x_gpio_leds[] __initconst = {

View file

@ -4,8 +4,8 @@
* Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;

View file

@ -475,13 +475,13 @@
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
#define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
.macro convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
copy \pte,\tmp
extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
@ -489,8 +489,7 @@
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
#endif

View file

@ -70,9 +70,8 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
* and halt kernel if we detect a PA1.x CPU. */
#if defined(CONFIG_PA20)
/* check for 64-bit capable CPU as required by current kernel */
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0

View file

@ -8,12 +8,7 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
unsigned long vm_start, unsigned long vm_end,
unsigned long offset)
{
/*
* PowerPC's implementation of phys_mem_access_prot() does
* not use the file argument. Set it to NULL in preparation
* of later updates to the interface.
*/
return phys_mem_access_prot(NULL, PHYS_PFN(offset), vm_end - vm_start, prot);
return __phys_mem_access_prot(PHYS_PFN(offset), vm_end - vm_start, prot);
}
#define pgprot_framebuffer pgprot_framebuffer

View file

@ -84,8 +84,6 @@ struct arch_optimized_insn {
kprobe_opcode_t *insn;
};
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);

View file

@ -10,7 +10,7 @@
#include <linux/export.h>
struct pt_regs;
struct pci_bus;
struct pci_bus;
struct device_node;
struct iommu_table;
struct rtc_time;
@ -78,8 +78,8 @@ struct machdep_calls {
unsigned char (*nvram_read_val)(int addr);
void (*nvram_write_val)(int addr, unsigned char val);
ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
ssize_t (*nvram_size)(void);
ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
ssize_t (*nvram_size)(void);
void (*nvram_sync)(void);
/* Exception handlers */
@ -102,12 +102,11 @@ struct machdep_calls {
*/
long (*feature_call)(unsigned int feature, ...);
/* Get legacy PCI/IDE interrupt mapping */
/* Get legacy PCI/IDE interrupt mapping */
int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
/* Get access protection for /dev/mem */
pgprot_t (*phys_mem_access_prot)(struct file *file,
unsigned long pfn,
pgprot_t (*phys_mem_access_prot)(unsigned long pfn,
unsigned long size,
pgprot_t vma_prot);

View file

@ -105,9 +105,7 @@ extern void of_scan_pci_bridge(struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
struct file;
extern pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
extern pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
unsigned long size,
pgprot_t prot);

View file

@ -120,9 +120,15 @@ static inline void mark_initmem_nx(void) { }
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, pte_t entry, int dirty);
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

View file

@ -521,8 +521,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
* PCI device, it tries to find the PCI device first and calls the
* above routine
*/
pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
unsigned long size,
pgprot_t prot)
{

View file

@ -752,6 +752,8 @@ static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
/**
* ppc_rtas_rmo_buf_show() - Describe RTAS-addressable region for user space.
* @m: seq_file output target.
* @v: Unused.
*
* Base + size description of a range of RTAS-addressable memory set
* aside for user space to use as work area(s) for certain RTAS

View file

@ -35,18 +35,18 @@ unsigned long long memory_limit;
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
pgprot_t vma_prot)
{
if (ppc_md.phys_mem_access_prot)
return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);
if (!page_is_ram(pfn))
vma_prot = pgprot_noncached(vma_prot);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
EXPORT_SYMBOL(__phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

View file

@ -184,6 +184,7 @@ machine_arch_initcall(pseries, rtas_work_area_allocator_init);
/**
* rtas_work_area_reserve_arena() - Reserve memory suitable for RTAS work areas.
* @limit: Upper limit for memblock allocation.
*/
void __init rtas_work_area_reserve_arena(const phys_addr_t limit)
{

View file

@ -642,6 +642,15 @@ config THREAD_SIZE_ORDER
Specify the Pages of thread stack size (from 4KB to 64KB), which also
affects irq stack size, which is equal to thread stack size.
config RISCV_MISALIGNED
bool "Support misaligned load/store traps for kernel and userspace"
select SYSCTL_ARCH_UNALIGN_ALLOW
default y
help
Say Y here if you want the kernel to embed support for misaligned
load/store for both kernel and userspace. When disabled, misaligned
accesses will generate SIGBUS in userspace and panic in the kernel.
endmenu # "Platform type"
menu "Kernel features"
@ -909,6 +918,9 @@ config PORTABLE
select MMU
select OF
config ARCH_PROC_KCORE_TEXT
def_bool y
menu "Power management options"
source "kernel/power/Kconfig"

View file

@ -0,0 +1 @@
source "arch/riscv/kernel/tests/Kconfig.debug"

View file

@ -17,6 +17,7 @@
KCOV_INSTRUMENT := n
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
OBJCOPYFLAGS_loader.bin :=-O binary
OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
targets := Image Image.* loader loader.o loader.lds loader.bin

View file

@ -215,6 +215,8 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SPI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_STARFIVE=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_SUNXI=y
CONFIG_RTC_CLASS=y

View file

@ -15,13 +15,261 @@
#include <asm/barrier.h>
#include <asm/bitsperlong.h>
#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE)
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#else
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>
#if (BITS_PER_LONG == 64)
#define CTZW "ctzw "
#define CLZW "clzw "
#elif (BITS_PER_LONG == 32)
#define CTZW "ctz "
#define CLZW "clz "
#else
#error "Unexpected BITS_PER_LONG"
#endif
static __always_inline unsigned long variable__ffs(unsigned long word)
{
int num;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
asm volatile (".option push\n"
".option arch,+zbb\n"
"ctz %0, %1\n"
".option pop\n"
: "=r" (word) : "r" (word) :);
return word;
legacy:
num = 0;
#if BITS_PER_LONG == 64
if ((word & 0xffffffff) == 0) {
num += 32;
word >>= 32;
}
#endif
if ((word & 0xffff) == 0) {
num += 16;
word >>= 16;
}
if ((word & 0xff) == 0) {
num += 8;
word >>= 8;
}
if ((word & 0xf) == 0) {
num += 4;
word >>= 4;
}
if ((word & 0x3) == 0) {
num += 2;
word >>= 2;
}
if ((word & 0x1) == 0)
num += 1;
return num;
}
/**
* __ffs - find first set bit in a long word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
#define __ffs(word) \
(__builtin_constant_p(word) ? \
(unsigned long)__builtin_ctzl(word) : \
variable__ffs(word))
static __always_inline unsigned long variable__fls(unsigned long word)
{
int num;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
asm volatile (".option push\n"
".option arch,+zbb\n"
"clz %0, %1\n"
".option pop\n"
: "=r" (word) : "r" (word) :);
return BITS_PER_LONG - 1 - word;
legacy:
num = BITS_PER_LONG - 1;
#if BITS_PER_LONG == 64
if (!(word & (~0ul << 32))) {
num -= 32;
word <<= 32;
}
#endif
if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
num -= 16;
word <<= 16;
}
if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
num -= 8;
word <<= 8;
}
if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
num -= 4;
word <<= 4;
}
if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
num -= 2;
word <<= 2;
}
if (!(word & (~0ul << (BITS_PER_LONG - 1))))
num -= 1;
return num;
}
/**
* __fls - find last set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
#define __fls(word) \
(__builtin_constant_p(word) ? \
(unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \
variable__fls(word))
static __always_inline int variable_ffs(int x)
{
int r;
if (!x)
return 0;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
asm volatile (".option push\n"
".option arch,+zbb\n"
CTZW "%0, %1\n"
".option pop\n"
: "=r" (r) : "r" (x) :);
return r + 1;
legacy:
r = 1;
if (!(x & 0xffff)) {
x >>= 16;
r += 16;
}
if (!(x & 0xff)) {
x >>= 8;
r += 8;
}
if (!(x & 0xf)) {
x >>= 4;
r += 4;
}
if (!(x & 3)) {
x >>= 2;
r += 2;
}
if (!(x & 1)) {
x >>= 1;
r += 1;
}
return r;
}
/**
* ffs - find first set bit in a word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs routines.
*
* ffs(value) returns 0 if value is 0 or the position of the first set bit if
* value is nonzero. The first (least significant) bit is at position 1.
*/
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
static __always_inline int variable_fls(unsigned int x)
{
int r;
if (!x)
return 0;
asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
asm volatile (".option push\n"
".option arch,+zbb\n"
CLZW "%0, %1\n"
".option pop\n"
: "=r" (r) : "r" (x) :);
return 32 - r;
legacy:
r = 32;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
/**
* fls - find last set bit in a word
* @x: the word to search
*
* This is defined in a similar way as ffs, but returns the position of the most
* significant set bit.
*
* fls(value) returns 0 if value is 0 or the position of the last set bit if
* value is nonzero. The last (most significant) bit is at position 32.
*/
#define fls(x) \
({ \
typeof(x) x_ = (x); \
__builtin_constant_p(x_) ? \
(int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \
: \
variable_fls(x_); \
})
#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
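
The legacy: paths above are the usual shift-and-mask binary search; the ctz/clz forms only take over once the boot-time ALTERNATIVE patches the branch away on Zbb hardware. Below is a hedged user-space rendition of that fallback for __ffs, checked against the compiler builtin that the constant-folding macro uses; the test harness is illustrative and not kernel code:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

static unsigned long generic_ffs_fallback(unsigned long word)
{
	unsigned long num = 0;

#if ULONG_MAX > 0xffffffffUL
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}

int main(void)
{
	for (unsigned int bit = 0; bit < 8 * sizeof(unsigned long); bit++) {
		unsigned long word = 1UL << bit;

		assert(generic_ffs_fallback(word) == (unsigned long)__builtin_ctzl(word));
	}
	puts("fallback matches __builtin_ctzl for every single-bit word");
	return 0;
}

The same halving pattern, run from the top bit down, gives the __fls and fls fallbacks shown above.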

View file

@ -7,7 +7,10 @@
#define _ASM_CPUFEATURE_H
#include <linux/bitmap.h>
#include <linux/jump_label.h>
#include <asm/hwcap.h>
#include <asm/alternative-macros.h>
#include <asm/errno.h>
/*
* These are probed via a device_initcall(), via either the SBI or directly
@ -30,7 +33,104 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
void check_unaligned_access(int cpu);
void riscv_user_isa_enable(void);
#ifdef CONFIG_RISCV_MISALIGNED
bool unaligned_ctl_available(void);
bool check_unaligned_access_emulated(int cpu);
void unaligned_emulation_finish(void);
#else
static inline bool unaligned_ctl_available(void)
{
return false;
}
static inline bool check_unaligned_access_emulated(int cpu)
{
return false;
}
static inline void unaligned_emulation_finish(void) {}
#endif
unsigned long riscv_get_elf_hwcap(void);
struct riscv_isa_ext_data {
const unsigned int id;
const char *name;
const char *property;
};
extern const struct riscv_isa_ext_data riscv_isa_ext[];
extern const size_t riscv_isa_ext_count;
extern bool riscv_isa_fallback;
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
#define riscv_isa_extension_available(isa_bitmap, ext) \
__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
static __always_inline bool
riscv_has_extension_likely(const unsigned long ext)
{
compiletime_assert(ext < RISCV_ISA_EXT_MAX,
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
asm_volatile_goto(
ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
:
: [ext] "i" (ext)
:
: l_no);
} else {
if (!__riscv_isa_extension_available(NULL, ext))
goto l_no;
}
return true;
l_no:
return false;
}
static __always_inline bool
riscv_has_extension_unlikely(const unsigned long ext)
{
compiletime_assert(ext < RISCV_ISA_EXT_MAX,
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
asm_volatile_goto(
ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
:
: [ext] "i" (ext)
:
: l_yes);
} else {
if (__riscv_isa_extension_available(NULL, ext))
goto l_yes;
}
return false;
l_yes:
return true;
}
static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
{
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
{
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
#endif

View file

@ -14,7 +14,7 @@
#include <asm/auxvec.h>
#include <asm/byteorder.h>
#include <asm/cacheinfo.h>
#include <asm/hwcap.h>
#include <asm/cpufeature.h>
/*
* These are used to set parameters in the core dumps.

View file

@ -8,4 +8,18 @@
void handle_page_fault(struct pt_regs *regs);
void handle_break(struct pt_regs *regs);
#ifdef CONFIG_RISCV_MISALIGNED
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
#else
static inline int handle_misaligned_load(struct pt_regs *regs)
{
return -1;
}
static inline int handle_misaligned_store(struct pt_regs *regs)
{
return -1;
}
#endif
#endif /* _ASM_RISCV_ENTRY_COMMON_H */

View file

@ -117,9 +117,9 @@ asm volatile(ALTERNATIVE( \
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000000 11001 00000 000 00000 0001011
*/
#define THEAD_inval_A0 ".long 0x0265000b"
#define THEAD_clean_A0 ".long 0x0255000b"
#define THEAD_flush_A0 ".long 0x0275000b"
#define THEAD_INVAL_A0 ".long 0x0265000b"
#define THEAD_CLEAN_A0 ".long 0x0255000b"
#define THEAD_FLUSH_A0 ".long 0x0275000b"
#define THEAD_SYNC_S ".long 0x0190000b"
#define ALT_CMO_OP(_op, _start, _size, _cachesize) \

View file

@ -8,9 +8,6 @@
#ifndef _ASM_RISCV_HWCAP_H
#define _ASM_RISCV_HWCAP_H
#include <asm/alternative-macros.h>
#include <asm/errno.h>
#include <linux/bits.h>
#include <uapi/asm/hwcap.h>
#define RISCV_ISA_EXT_a ('a' - 'a')
@ -69,92 +66,4 @@
#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA
#endif
#ifndef __ASSEMBLY__
#include <linux/jump_label.h>
#include <asm/cpufeature.h>
unsigned long riscv_get_elf_hwcap(void);
struct riscv_isa_ext_data {
const unsigned int id;
const char *name;
const char *property;
};
extern const struct riscv_isa_ext_data riscv_isa_ext[];
extern const size_t riscv_isa_ext_count;
extern bool riscv_isa_fallback;
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
#define riscv_isa_extension_available(isa_bitmap, ext) \
__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
static __always_inline bool
riscv_has_extension_likely(const unsigned long ext)
{
compiletime_assert(ext < RISCV_ISA_EXT_MAX,
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
asm_volatile_goto(
ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
:
: [ext] "i" (ext)
:
: l_no);
} else {
if (!__riscv_isa_extension_available(NULL, ext))
goto l_no;
}
return true;
l_no:
return false;
}
static __always_inline bool
riscv_has_extension_unlikely(const unsigned long ext)
{
compiletime_assert(ext < RISCV_ISA_EXT_MAX,
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
asm_volatile_goto(
ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
:
: [ext] "i" (ext)
:
: l_yes);
} else {
if (__riscv_isa_extension_available(NULL, ext))
goto l_yes;
}
return false;
l_yes:
return true;
}
static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
{
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
{
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
return true;
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
#endif
#endif /* _ASM_RISCV_HWCAP_H */

View file

@ -180,19 +180,19 @@
INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \
__RD(0), RS1(gaddr), RS2(vmid))
#define CBO_inval(base) \
#define CBO_INVAL(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(0))
#define CBO_clean(base) \
#define CBO_CLEAN(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(1))
#define CBO_flush(base) \
#define CBO_FLUSH(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(2))
#define CBO_zero(base) \
#define CBO_ZERO(base) \
INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
RS1(base), SIMM12(4))

View file

@ -126,14 +126,18 @@ enum napot_cont_order {
/*
* [63:59] T-Head Memory Type definitions:
*
* 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
* bit[63] SO - Strong Order
* bit[62] C - Cacheable
* bit[61] B - Bufferable
* bit[60] SH - Shareable
* bit[59] Sec - Trustable
* 00110 - NC Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable
* 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
* 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
* 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
*/
#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
#define _PAGE_NOCACHE_THEAD 0UL
#define _PAGE_IO_THEAD (1UL << 63)
#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60))
#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60))
#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
static inline u64 riscv_page_mtmask(void)
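
The three encodings quoted in the comment follow directly from the per-bit meanings: PMA sets C, B and SH (01110), the new non-cacheable type sets B and SH (00110), and IO sets SO and SH (10010). A small user-space check that prints bits [63:59] of the replacement defines; 1UL is widened to 1ULL here so the sketch also builds on 32-bit hosts, and everything outside the three defines is illustrative:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_PMA_THEAD     ((1ULL << 62) | (1ULL << 61) | (1ULL << 60))
#define _PAGE_NOCACHE_THEAD ((1ULL << 61) | (1ULL << 60))
#define _PAGE_IO_THEAD      ((1ULL << 63) | (1ULL << 60))

/* print bits 63..59 of a PTE value, i.e. SO C B SH Sec */
static void show(const char *name, uint64_t pte)
{
	printf("%-20s ", name);
	for (int bit = 63; bit >= 59; bit--)
		putchar((pte >> bit) & 1 ? '1' : '0');
	putchar('\n');
}

int main(void)
{
	show("_PAGE_PMA_THEAD", _PAGE_PMA_THEAD);         /* 01110 */
	show("_PAGE_NOCACHE_THEAD", _PAGE_NOCACHE_THEAD); /* 00110 */
	show("_PAGE_IO_THEAD", _PAGE_IO_THEAD);           /* 10010 */
	return 0;
}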

View file

@ -16,9 +16,9 @@
#define _PAGE_GLOBAL (1 << 5) /* Global */
#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
#define _PAGE_SOFT (1 << 8) /* Reserved for software */
#define _PAGE_SOFT (3 << 8) /* Reserved for software */
#define _PAGE_SPECIAL _PAGE_SOFT
#define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */
#define _PAGE_TABLE _PAGE_PRESENT
/*

View file

@ -291,6 +291,7 @@ static inline pte_t pud_pte(pud_t pud)
}
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <asm/cpufeature.h>
static __always_inline bool has_svnapot(void)
{

View file

@ -8,6 +8,7 @@
#include <linux/const.h>
#include <linux/cache.h>
#include <linux/prctl.h>
#include <vdso/processor.h>
@ -82,6 +83,7 @@ struct thread_struct {
unsigned long bad_cause;
unsigned long vstate_ctrl;
struct __riscv_v_ext_state vstate;
unsigned long align_ctl;
};
/* Whitelist the fstate from the task_struct for hardened usercopy */
@ -94,6 +96,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
#define INIT_THREAD { \
.sp = sizeof(init_stack) + (long)&init_stack, \
.align_ctl = PR_UNALIGN_NOPRINT, \
}
#define task_pt_regs(tsk) \
@ -136,6 +139,12 @@ extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
extern long riscv_v_vstate_ctrl_get_current(void);
#endif /* CONFIG_RISCV_ISA_V */
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PROCESSOR_H */
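
These hooks back the generic prctl(2) unaligned-access controls; a hedged userspace sketch (error handling trimmed, not part of this diff) of how a task might switch itself to SIGBUS-on-misalignment and read the policy back::

	#include <sys/prctl.h>
	#include <linux/prctl.h>

	/* Illustrative only: request SIGBUS for misaligned accesses, then
	 * query the current setting via PR_GET_UNALIGN. */
	static int unalign_sigbus_and_query(void)
	{
		unsigned int ctl = 0;

		if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0)
			return -1;
		if (prctl(PR_GET_UNALIGN, &ctl) != 0)
			return -1;
		return (int)ctl;
	}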

View file

@ -280,9 +280,6 @@ void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_send_ipi(unsigned int cpu);
int sbi_remote_fence_i(const struct cpumask *cpu_mask);
int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size);
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long start,

View file

@ -9,7 +9,7 @@
#include <linux/jump_label.h>
#include <linux/sched/task_stack.h>
#include <asm/vector.h>
#include <asm/hwcap.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>

View file

@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
static inline void tlb_flush(struct mmu_gather *tlb)
{
flush_tlb_mm(tlb->mm);
#ifdef CONFIG_MMU
if (tlb->fullmm || tlb->need_flush_all)
flush_tlb_mm(tlb->mm);
else
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
tlb_get_unmap_size(tlb));
#endif
}
#endif /* _ASM_RISCV_TLB_H */

View file

@ -11,6 +11,9 @@
#include <asm/smp.h>
#include <asm/errata_list.h>
#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1)
#define FLUSH_TLB_NO_ASID ((unsigned long)-1)
#ifdef CONFIG_MMU
extern unsigned long asid_mask;
@ -32,9 +35,12 @@ static inline void local_flush_tlb_page(unsigned long addr)
#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int page_size);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@ -51,14 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
local_flush_tlb_all();
}
#define flush_tlb_mm(mm) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */
/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
flush_tlb_all();
local_flush_tlb_all();
}
#define flush_tlb_mm(mm) flush_tlb_all()
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */
#endif /* _ASM_RISCV_TLBFLUSH_H */
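
A minimal sketch of a caller for the new flush_tlb_mm_range() (the helper and its arguments are ours, not part of this diff)::

	/* Illustrative only: flush the translation for one base page of a
	 * task's address space; 'mm' and 'addr' come from the caller. */
	static void flush_one_user_page(struct mm_struct *mm, unsigned long addr)
	{
		flush_tlb_mm_range(mm, addr, addr + PAGE_SIZE, PAGE_SIZE);
	}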

View file

@ -15,7 +15,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/ptrace.h>
#include <asm/hwcap.h>
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/asm.h>

View file

@ -49,6 +49,7 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_TLS_DTPREL64 9
#define R_RISCV_TLS_TPREL32 10
#define R_RISCV_TLS_TPREL64 11
#define R_RISCV_IRELATIVE 58
/* Relocation types not used by the dynamic linker */
#define R_RISCV_BRANCH 16
@ -81,7 +82,6 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_ALIGN 43
#define R_RISCV_RVC_BRANCH 44
#define R_RISCV_RVC_JUMP 45
#define R_RISCV_RVC_LUI 46
#define R_RISCV_GPREL_I 47
#define R_RISCV_GPREL_S 48
#define R_RISCV_TPREL_I 49
@ -93,6 +93,9 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_SET16 55
#define R_RISCV_SET32 56
#define R_RISCV_32_PCREL 57
#define R_RISCV_PLT32 59
#define R_RISCV_SET_ULEB128 60
#define R_RISCV_SUB_ULEB128 61
#endif /* _UAPI_ASM_RISCV_ELF_H */

View file

@ -57,9 +57,10 @@ obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
obj-y += probes/
obj-y += tests/
obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_RISCV_ISA_V) += vector.o
obj-$(CONFIG_SMP) += smpboot.o

View file

@ -9,7 +9,7 @@
/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
/* Note: The size is truncated to a multiple of 8 * SZREG */
ENTRY(__riscv_copy_words_unaligned)
SYM_FUNC_START(__riscv_copy_words_unaligned)
andi a4, a2, ~((8*SZREG)-1)
beqz a4, 2f
add a3, a1, a4
@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)
2:
ret
END(__riscv_copy_words_unaligned)
SYM_FUNC_END(__riscv_copy_words_unaligned)
/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using only byte accesses. */
/* Note: The size is truncated to a multiple of 8 */
ENTRY(__riscv_copy_bytes_unaligned)
SYM_FUNC_START(__riscv_copy_bytes_unaligned)
andi a4, a2, ~(8-1)
beqz a4, 2f
add a3, a1, a4
@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)
2:
ret
END(__riscv_copy_bytes_unaligned)
SYM_FUNC_END(__riscv_copy_bytes_unaligned)

View file

@ -125,13 +125,14 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
*/
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
{
int rc;
for (; node; node = node->parent) {
if (of_device_is_compatible(node, "riscv")) {
rc = riscv_of_processor_hartid(node, hartid);
if (!rc)
return 0;
*hartid = (unsigned long)of_get_cpu_hwid(node, 0);
if (*hartid == ~0UL) {
pr_warn("Found CPU without hart ID\n");
return -ENODEV;
}
return 0;
}
}
@ -202,9 +203,8 @@ arch_initcall(riscv_cpuinfo_init);
#ifdef CONFIG_PROC_FS
static void print_isa(struct seq_file *f)
static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap)
{
seq_puts(f, "isa\t\t: ");
if (IS_ENABLED(CONFIG_32BIT))
seq_write(f, "rv32", 4);
@ -212,7 +212,7 @@ static void print_isa(struct seq_file *f)
seq_write(f, "rv64", 4);
for (int i = 0; i < riscv_isa_ext_count; i++) {
if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id))
if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id))
continue;
/* Only multi-letter extensions are split by underscores */
@ -276,7 +276,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
print_isa(m);
/*
* For historical raisins, the isa: line is limited to the lowest common
* denominator of extensions supported across all harts. A true list of
* extensions supported on this hart is printed later in the hart isa:
* line.
*/
seq_puts(m, "isa\t\t: ");
print_isa(m, NULL);
print_mmu(m);
if (acpi_disabled) {
@ -292,6 +300,13 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);
/*
* Print the ISA extensions specific to this hart, which may show
* additional extensions not present across all harts.
*/
seq_puts(m, "hart isa\t: ");
print_isa(m, hart_isa[cpu_id].isa);
seq_puts(m, "\n");
return 0;
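
Taken together, a hypothetical hart that implements an extension not present on every other hart would be reported along these lines (field spacing and extension list illustrative only, other fields omitted)::

	processor	: 1
	hart		: 1
	isa		: rv64imafdc_zicbom_zicsr_zifencei
	hart isa	: rv64imafdc_zicbom_zicboz_zicsr_zifencei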

View file

@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/memory.h>
@ -29,6 +30,7 @@
#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
unsigned long elf_hwcap __read_mostly;
@ -559,23 +561,21 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}
void check_unaligned_access(int cpu)
static int check_unaligned_access(void *param)
{
int cpu = smp_processor_id();
u64 start_cycles, end_cycles;
u64 word_cycles;
u64 byte_cycles;
int ratio;
unsigned long start_jiffies, now;
struct page *page;
struct page *page = param;
void *dst;
void *src;
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
if (!page) {
pr_warn("Can't alloc pages to measure memcpy performance");
return;
}
if (check_unaligned_access_emulated(cpu))
return 0;
/* Make an unaligned destination buffer. */
dst = (void *)((unsigned long)page_address(page) | 0x1);
@ -629,7 +629,7 @@ void check_unaligned_access(int cpu)
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
cpu);
goto out;
return 0;
}
if (word_cycles < byte_cycles)
@ -643,18 +643,84 @@ void check_unaligned_access(int cpu)
(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
per_cpu(misaligned_access_speed, cpu) = speed;
out:
__free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
}
static int check_unaligned_access_boot_cpu(void)
{
check_unaligned_access(0);
return 0;
}
arch_initcall(check_unaligned_access_boot_cpu);
static void check_unaligned_access_nonboot_cpu(void *param)
{
unsigned int cpu = smp_processor_id();
struct page **pages = param;
if (smp_processor_id() != 0)
check_unaligned_access(pages[cpu]);
}
static int riscv_online_cpu(unsigned int cpu)
{
static struct page *buf;
/* We are already set since the last check */
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
return 0;
buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!buf) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
return -ENOMEM;
}
check_unaligned_access(buf);
__free_pages(buf, MISALIGNED_BUFFER_ORDER);
return 0;
}
/* Measure unaligned access on all CPUs present at boot in parallel. */
static int check_unaligned_access_all_cpus(void)
{
unsigned int cpu;
unsigned int cpu_count = num_possible_cpus();
struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
GFP_KERNEL);
if (!bufs) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
return 0;
}
/*
* Allocate separate buffers for each CPU so there's no fighting over
* cache lines.
*/
for_each_cpu(cpu, cpu_online_mask) {
bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!bufs[cpu]) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
goto out;
}
}
/* Check everybody except 0, who stays behind to tend jiffies. */
on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
/* Check core 0. */
smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
/* Setup hotplug callback for any new CPUs that come online. */
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
riscv_online_cpu, NULL);
out:
unaligned_emulation_finish();
for_each_cpu(cpu, cpu_online_mask) {
if (bufs[cpu])
__free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
}
kfree(bufs);
return 0;
}
arch_initcall(check_unaligned_access_all_cpus);
void riscv_user_isa_enable(void)
{

View file

@ -26,9 +26,9 @@ SYM_CODE_START(handle_exception)
* register will contain 0, and we should continue on the current TP.
*/
csrrw tp, CSR_SCRATCH, tp
bnez tp, _save_context
bnez tp, .Lsave_context
_restore_kernel_tpsp:
.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
@ -40,7 +40,7 @@ _restore_kernel_tpsp:
REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif
_save_context:
.Lsave_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
@ -322,7 +322,7 @@ SYM_FUNC_END(__switch_to)
.section ".rodata"
.align LGREG
/* Exception vector table */
SYM_CODE_START(excp_vect_table)
SYM_DATA_START_LOCAL(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
RISCV_PTR do_trap_insn_illegal
@ -340,12 +340,11 @@ SYM_CODE_START(excp_vect_table)
RISCV_PTR do_page_fault /* load page fault */
RISCV_PTR do_trap_unknown
RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
SYM_CODE_END(excp_vect_table)
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)
#ifndef CONFIG_MMU
SYM_CODE_START(__user_rt_sigreturn)
SYM_DATA_START(__user_rt_sigreturn)
li a7, __NR_rt_sigreturn
ecall
SYM_CODE_END(__user_rt_sigreturn)
SYM_DATA_END(__user_rt_sigreturn)
#endif

View file

@ -19,7 +19,7 @@
#include <asm/csr.h>
#include <asm/asm-offsets.h>
ENTRY(__fstate_save)
SYM_FUNC_START(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@ -60,9 +60,9 @@ ENTRY(__fstate_save)
sw t0, TASK_THREAD_FCSR_F0(a0)
csrc CSR_STATUS, t1
ret
ENDPROC(__fstate_save)
SYM_FUNC_END(__fstate_save)
ENTRY(__fstate_restore)
SYM_FUNC_START(__fstate_restore)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@ -103,4 +103,125 @@ ENTRY(__fstate_restore)
fscsr t0
csrc CSR_STATUS, t1
ret
ENDPROC(__fstate_restore)
SYM_FUNC_END(__fstate_restore)
#define get_f32(which) fmv.x.s a0, which; j 2f
#define put_f32(which) fmv.s.x which, a1; j 2f
#if __riscv_xlen == 64
# define get_f64(which) fmv.x.d a0, which; j 2f
# define put_f64(which) fmv.d.x which, a1; j 2f
#else
# define get_f64(which) fsd which, 0(a1); j 2f
# define put_f64(which) fld which, 0(a1); j 2f
#endif
.macro fp_access_prologue
/*
* Compute jump offset to store the correct FP register since we don't
* have indirect FP register access
*/
sll t0, a0, 3
la t2, 1f
add t0, t0, t2
li t1, SR_FS
csrs CSR_STATUS, t1
jr t0
1:
.endm
.macro fp_access_epilogue
2:
csrc CSR_STATUS, t1
ret
.endm
#define fp_access_body(__access_func) \
__access_func(f0); \
__access_func(f1); \
__access_func(f2); \
__access_func(f3); \
__access_func(f4); \
__access_func(f5); \
__access_func(f6); \
__access_func(f7); \
__access_func(f8); \
__access_func(f9); \
__access_func(f10); \
__access_func(f11); \
__access_func(f12); \
__access_func(f13); \
__access_func(f14); \
__access_func(f15); \
__access_func(f16); \
__access_func(f17); \
__access_func(f18); \
__access_func(f19); \
__access_func(f20); \
__access_func(f21); \
__access_func(f22); \
__access_func(f23); \
__access_func(f24); \
__access_func(f25); \
__access_func(f26); \
__access_func(f27); \
__access_func(f28); \
__access_func(f29); \
__access_func(f30); \
__access_func(f31)
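
The prologue's `sll t0, a0, 3` relies on each fp_access_body entry being a fixed size: with the compressed ISA disabled below, every entry is two 4-byte instructions (the FP access plus the `j 2f`), so entry i lives at `1f + i * 8`. A hedged C model of that fixed-stride dispatch (names are ours, not part of this diff)::

	/* Illustrative model only: with 8-byte jump-table entries, the
	 * branch target for register index 'idx' is base + (idx << 3). */
	static inline unsigned long fp_dispatch_target(unsigned long base,
						       unsigned long idx)
	{
		return base + (idx << 3);
	}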
#ifdef CONFIG_RISCV_MISALIGNED
/*
* Disable compressed instructions set to keep a constant offset between FP
* load/store/move instructions
*/
.option norvc
/*
* put_f32_reg - Set a FP register from a register containing the value
* a0 = FP register index to be set
* a1 = value to be loaded in the FP register
*/
SYM_FUNC_START(put_f32_reg)
fp_access_prologue
fp_access_body(put_f32)
fp_access_epilogue
SYM_FUNC_END(put_f32_reg)
/*
* get_f32_reg - Get a FP register value and return it
* a0 = FP register index to be retrieved
*/
SYM_FUNC_START(get_f32_reg)
fp_access_prologue
fp_access_body(get_f32)
fp_access_epilogue
SYM_FUNC_END(get_f32_reg)
/*
* put_f64_reg - Set a 64 bits FP register from a value or a pointer.
* a0 = FP register index to be set
* a1 = value/pointer to be loaded in the FP register (when xlen == 32 bits, we
* load the value to a pointer).
*/
SYM_FUNC_START(put_f64_reg)
fp_access_prologue
fp_access_body(put_f64)
fp_access_epilogue
SYM_FUNC_END(put_f64_reg)
/*
* get_f64_reg - Get a 64 bits FP register value and return it or store it to
* a pointer.
* a0 = FP register index to be retrieved
* a1 = If xlen == 32, pointer which should be loaded with the FP register value
* or unused if xlen == 64. In which case the FP register value is returned
* through a0
*/
SYM_FUNC_START(get_f64_reg)
fp_access_prologue
fp_access_body(get_f64)
fp_access_epilogue
SYM_FUNC_END(get_f64_reg)
#endif /* CONFIG_RISCV_MISALIGNED */

View file

@ -19,7 +19,7 @@
#include "efi-header.S"
__HEAD
ENTRY(_start)
SYM_CODE_START(_start)
/*
* Image header expected by Linux boot-loaders. The image header data
* structure is described in asm/image.h.
@ -164,12 +164,12 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
#endif
call setup_trap_vector
call .Lsetup_trap_vector
tail smp_callin
#endif /* CONFIG_SMP */
.align 2
setup_trap_vector:
.Lsetup_trap_vector:
/* Set trap vector to exception handler */
la a0, handle_exception
csrw CSR_TVEC, a0
@ -187,9 +187,9 @@ setup_trap_vector:
wfi
j .Lsecondary_park
END(_start)
SYM_CODE_END(_start)
ENTRY(_start_kernel)
SYM_CODE_START(_start_kernel)
/* Mask all interrupts */
csrw CSR_IE, zero
csrw CSR_IP, zero
@ -206,7 +206,7 @@ ENTRY(_start_kernel)
* not implement PMPs, so we set up a quick trap handler to just skip
* touching the PMPs on any trap.
*/
la a0, pmp_done
la a0, .Lpmp_done
csrw CSR_TVEC, a0
li a0, -1
@ -214,7 +214,7 @@ ENTRY(_start_kernel)
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
.align 2
pmp_done:
.Lpmp_done:
/*
* The hartid in a0 is expected later on, and we have no firmware
@ -275,12 +275,12 @@ pmp_done:
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
ble a4, a3, clear_bss_done
clear_bss:
ble a4, a3, .Lclear_bss_done
.Lclear_bss:
REG_S zero, (a3)
add a3, a3, RISCV_SZPTR
blt a3, a4, clear_bss
clear_bss_done:
blt a3, a4, .Lclear_bss
.Lclear_bss_done:
#endif
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
@ -305,7 +305,7 @@ clear_bss_done:
call relocate_enable_mmu
#endif /* CONFIG_MMU */
call setup_trap_vector
call .Lsetup_trap_vector
/* Restore C environment */
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
@ -348,10 +348,10 @@ clear_bss_done:
tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
END(_start_kernel)
SYM_CODE_END(_start_kernel)
#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
SYM_CODE_START_LOCAL(reset_regs)
li sp, 0
li gp, 0
li tp, 0
@ -449,5 +449,5 @@ ENTRY(reset_regs)
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
ret
END(reset_regs)
SYM_CODE_END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */

Some files were not shown because too many files have changed in this diff