From 61e84f99877fa8caaf1be86d51d825406e8d8bc1 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Tue, 12 Dec 2006 11:52:34 +0000
Subject: [PATCH 1/7] [MIPS] Malta: Add missing MTD file.

Signed-off-by: Ralf Baechle
---
 arch/mips/mips-boards/malta/malta_mtd.c | 63 +++++++++++++++++++++++++
 1 file changed, 63 insertions(+)
 create mode 100644 arch/mips/mips-boards/malta/malta_mtd.c

diff --git a/arch/mips/mips-boards/malta/malta_mtd.c b/arch/mips/mips-boards/malta/malta_mtd.c
new file mode 100644
index 000000000000..8ad9bdf25dce
--- /dev/null
+++ b/arch/mips/mips-boards/malta/malta_mtd.c
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 MIPS Technologies, Inc.
+ *    written by Ralf Baechle
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+static struct mtd_partition malta_mtd_partitions[] = {
+	{
+		.name =		"YAMON",
+		.offset =	0x0,
+		.size =		0x100000,
+		.mask_flags =	MTD_WRITEABLE
+	}, {
+		.name =		"User FS",
+		.offset =	0x100000,
+		.size =		0x2e0000
+	}, {
+		.name =		"Board Config",
+		.offset =	0x3e0000,
+		.size =		0x020000,
+		.mask_flags =	MTD_WRITEABLE
+	}
+};
+
+static struct physmap_flash_data malta_flash_data = {
+	.width		= 4,
+	.nr_parts	= ARRAY_SIZE(malta_mtd_partitions),
+	.parts		= malta_mtd_partitions
+};
+
+static struct resource malta_flash_resource = {
+	.start		= 0x1e000000,
+	.end		= 0x1e3fffff,
+	.flags		= IORESOURCE_MEM
+};
+
+static struct platform_device malta_flash = {
+	.name		= "physmap-flash",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &malta_flash_data,
+	},
+	.num_resources	= 1,
+	.resource	= &malta_flash_resource,
+};
+
+static int __init malta_mtd_init(void)
+{
+	platform_device_register(&malta_flash);
+
+	return 0;
+}
+
+module_init(malta_mtd_init)

From f860c90bd6ce22c6a0a352cc16acc74fba3d628e Mon Sep 17 00:00:00 2001
From: Atsushi Nemoto
Date: Wed, 13 Dec 2006 01:22:06 +0900
Subject: [PATCH 2/7] [MIPS] csum_partial and copy in parallel

Implement optimized asm versions of csum_partial_copy_nocheck,
csum_partial_copy_from_user and csum_and_copy_to_user, which
calculate and copy in parallel, based on memcpy.S.

Signed-off-by: Atsushi Nemoto
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/mips_ksyms.c     |   2 +
 arch/mips/lib/Makefile            |   2 +-
 arch/mips/lib/csum_partial.S      | 442 ++++++++++++++++++++++++++++++
 arch/mips/lib/csum_partial_copy.c |  52 ----
 include/asm-mips/checksum.h       |  31 ++-
 5 files changed, 464 insertions(+), 65 deletions(-)
 delete mode 100644 arch/mips/lib/csum_partial_copy.c

diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index f44a01357ada..2ef857c3ee53 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -46,5 +46,7 @@ EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
 EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_user);
 
 EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 888b61ea12fe..989c900b8b14 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
-lib-y	+= csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+lib-y	+= csum_partial.o memcpy.o promlib.o \
	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
 
 obj-y	+= iomap.o
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 9db357294be1..c0a77fe038be 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -8,7 +8,9 @@
  * Copyright (C) 1998, 1999 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
+#include
 #include
+#include
 #include
 
 #ifdef CONFIG_64BIT
@@ -271,3 +273,443 @@ small_csumcpy:
 	jr	ra
 	.set	noreorder
 	END(csum_partial)
+
+
+/*
+ * checksum and copy routines based on memcpy.S
+ *
+ *	csum_partial_copy_nocheck(src, dst, len, sum)
+ *	__csum_partial_copy_user(src, dst, len, sum, errp)
+ *
+ * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
+ * functions in this file use the standard calling convention.
+ */
+
+#define src a0
+#define dst a1
+#define len a2
+#define psum a3
+#define sum v0
+#define odd t8
+#define errptr t9
+
+/*
+ * The exception handler for loads requires that:
+ *  1- AT contain the address of the byte just past the end of the source
+ *     of the copy,
+ *  2- src_entry <= src < AT, and
+ *  3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by __csum_partial_copy_from_user and maintained by
+ *	not writing AT in __csum_partial_copy
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores store -EFAULT to errptr and return.
+ * These handlers do not need to overwrite any data.
+ */
+
+#define EXC(inst_reg,addr,handler)		\
+9:	inst_reg, addr;				\
+	.section __ex_table,"a";		\
+	PTR	9b, handler;			\
+	.previous
+
+#ifdef USE_DOUBLE
+
+#define LOAD	ld
+#define LOADL	ldl
+#define LOADR	ldr
+#define STOREL	sdl
+#define STORER	sdr
+#define STORE	sd
+#define ADD	daddu
+#define SUB	dsubu
+#define SRL	dsrl
+#define SLL	dsll
+#define SLLV	dsllv
+#define SRLV	dsrlv
+#define NBYTES	8
+#define LOG_NBYTES 3
+
+#else
+
+#define LOAD	lw
+#define LOADL	lwl
+#define LOADR	lwr
+#define STOREL	swl
+#define STORER	swr
+#define STORE	sw
+#define ADD	addu
+#define SUB	subu
+#define SRL	srl
+#define SLL	sll
+#define SLLV	sllv
+#define SRLV	srlv
+#define NBYTES	4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST	LOADR
+#define LDREST	LOADL
+#define STFIRST	STORER
+#define STREST	STOREL
+#define SHIFT_DISCARD SLLV
+#define SHIFT_DISCARD_REVERT SRLV
+#else
+#define LDFIRST	LOADL
+#define LDREST	LOADR
+#define STFIRST	STOREL
+#define STREST	STORER
+#define SHIFT_DISCARD SRLV
+#define SHIFT_DISCARD_REVERT SLLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit)  (FIRST(unit)+NBYTES-1)
+
+#define ADDRMASK (NBYTES-1)
+
+	.set	noat
+
+LEAF(__csum_partial_copy_user)
+	PTR_ADDU	AT, src, len	/* See (1) above. */
+#ifdef CONFIG_64BIT
+	move	errptr, a4
+#else
+	lw	errptr, 16(sp)
+#endif
+FEXPORT(csum_partial_copy_nocheck)
+	move	sum, zero
+	move	odd, zero
+	/*
+	 * Note: dst & src may be unaligned, len may be 0
+	 * Temps
+	 */
+	/*
+	 * The "issue break"s below are very approximate.
+	 * Issue delays for dcache fills will perturb the schedule, as will
+	 * load queue full replay traps, etc.
+	 *
+	 * If len < NBYTES use byte operations.
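+	 *
+	 * ADDC (defined earlier in this file) is an add that folds the
+	 * carry bit back into the sum, so the running total stays a
+	 * valid one's-complement accumulator while the copy proceeds.
+	 *
+	 * Dispatch below: the byte loop for short copies, the
+	 * dst-aligning path for an unaligned dst, LDFIRST/LDREST pairs
+	 * for an unaligned src, and the unrolled word loop when both
+	 * are aligned.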
+ */ + sltu t2, len, NBYTES + and t1, dst, ADDRMASK + bnez t2, copy_bytes_checklen + and t0, src, ADDRMASK + andi odd, dst, 0x1 /* odd buffer? */ + bnez t1, dst_unaligned + nop + bnez t0, src_unaligned_dst_aligned + /* + * use delay slot for fall-through + * src and dst are aligned; need to compute rem + */ +both_aligned: + SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter + beqz t0, cleanup_both_aligned # len < 8*NBYTES + nop + SUB len, 8*NBYTES # subtract here for bgez loop + .align 4 +1: +EXC( LOAD t0, UNIT(0)(src), l_exc) +EXC( LOAD t1, UNIT(1)(src), l_exc_copy) +EXC( LOAD t2, UNIT(2)(src), l_exc_copy) +EXC( LOAD t3, UNIT(3)(src), l_exc_copy) +EXC( LOAD t4, UNIT(4)(src), l_exc_copy) +EXC( LOAD t5, UNIT(5)(src), l_exc_copy) +EXC( LOAD t6, UNIT(6)(src), l_exc_copy) +EXC( LOAD t7, UNIT(7)(src), l_exc_copy) + SUB len, len, 8*NBYTES + ADD src, src, 8*NBYTES +EXC( STORE t0, UNIT(0)(dst), s_exc) + ADDC(sum, t0) +EXC( STORE t1, UNIT(1)(dst), s_exc) + ADDC(sum, t1) +EXC( STORE t2, UNIT(2)(dst), s_exc) + ADDC(sum, t2) +EXC( STORE t3, UNIT(3)(dst), s_exc) + ADDC(sum, t3) +EXC( STORE t4, UNIT(4)(dst), s_exc) + ADDC(sum, t4) +EXC( STORE t5, UNIT(5)(dst), s_exc) + ADDC(sum, t5) +EXC( STORE t6, UNIT(6)(dst), s_exc) + ADDC(sum, t6) +EXC( STORE t7, UNIT(7)(dst), s_exc) + ADDC(sum, t7) + bgez len, 1b + ADD dst, dst, 8*NBYTES + ADD len, 8*NBYTES # revert len (see above) + + /* + * len == the number of bytes left to copy < 8*NBYTES + */ +cleanup_both_aligned: +#define rem t7 + beqz len, done + sltu t0, len, 4*NBYTES + bnez t0, less_than_4units + and rem, len, (NBYTES-1) # rem = len % NBYTES + /* + * len >= 4*NBYTES + */ +EXC( LOAD t0, UNIT(0)(src), l_exc) +EXC( LOAD t1, UNIT(1)(src), l_exc_copy) +EXC( LOAD t2, UNIT(2)(src), l_exc_copy) +EXC( LOAD t3, UNIT(3)(src), l_exc_copy) + SUB len, len, 4*NBYTES + ADD src, src, 4*NBYTES +EXC( STORE t0, UNIT(0)(dst), s_exc) + ADDC(sum, t0) +EXC( STORE t1, UNIT(1)(dst), s_exc) + ADDC(sum, t1) +EXC( STORE t2, UNIT(2)(dst), s_exc) + ADDC(sum, t2) +EXC( STORE t3, UNIT(3)(dst), s_exc) + ADDC(sum, t3) + beqz len, done + ADD dst, dst, 4*NBYTES +less_than_4units: + /* + * rem = len % NBYTES + */ + beq rem, len, copy_bytes + nop +1: +EXC( LOAD t0, 0(src), l_exc) + ADD src, src, NBYTES + SUB len, len, NBYTES +EXC( STORE t0, 0(dst), s_exc) + ADDC(sum, t0) + bne rem, len, 1b + ADD dst, dst, NBYTES + + /* + * src and dst are aligned, need to copy rem bytes (rem < NBYTES) + * A loop would do only a byte at a time with possible branch + * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE + * because can't assume read-access to dst. Instead, use + * STREST dst, which doesn't require read access to dst. + * + * This code should perform better than a simple loop on modern, + * wide-issue mips processors because the code has fewer branches and + * more instruction-level parallelism. 
+ */ +#define bits t2 + beqz len, done + ADD t1, dst, len # t1 is just past last byte of dst + li bits, 8*NBYTES + SLL rem, len, 3 # rem = number of bits to keep +EXC( LOAD t0, 0(src), l_exc) + SUB bits, bits, rem # bits = number of bits to discard + SHIFT_DISCARD t0, t0, bits +EXC( STREST t0, -1(t1), s_exc) + SHIFT_DISCARD_REVERT t0, t0, bits + .set reorder + ADDC(sum, t0) + b done + .set noreorder +dst_unaligned: + /* + * dst is unaligned + * t0 = src & ADDRMASK + * t1 = dst & ADDRMASK; T1 > 0 + * len >= NBYTES + * + * Copy enough bytes to align dst + * Set match = (src and dst have same alignment) + */ +#define match rem +EXC( LDFIRST t3, FIRST(0)(src), l_exc) + ADD t2, zero, NBYTES +EXC( LDREST t3, REST(0)(src), l_exc_copy) + SUB t2, t2, t1 # t2 = number of bytes copied + xor match, t0, t1 +EXC( STFIRST t3, FIRST(0)(dst), s_exc) + SLL t4, t1, 3 # t4 = number of bits to discard + SHIFT_DISCARD t3, t3, t4 + /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ + ADDC(sum, t3) + beq len, t2, done + SUB len, len, t2 + ADD dst, dst, t2 + beqz match, both_aligned + ADD src, src, t2 + +src_unaligned_dst_aligned: + SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter + beqz t0, cleanup_src_unaligned + and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES +1: +/* + * Avoid consecutive LD*'s to the same register since some mips + * implementations can't issue them in the same cycle. + * It's OK to load FIRST(N+1) before REST(N) because the two addresses + * are to the same unit (unless src is aligned, but it's not). + */ +EXC( LDFIRST t0, FIRST(0)(src), l_exc) +EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) + SUB len, len, 4*NBYTES +EXC( LDREST t0, REST(0)(src), l_exc_copy) +EXC( LDREST t1, REST(1)(src), l_exc_copy) +EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) +EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) +EXC( LDREST t2, REST(2)(src), l_exc_copy) +EXC( LDREST t3, REST(3)(src), l_exc_copy) + ADD src, src, 4*NBYTES +#ifdef CONFIG_CPU_SB1 + nop # improves slotting +#endif +EXC( STORE t0, UNIT(0)(dst), s_exc) + ADDC(sum, t0) +EXC( STORE t1, UNIT(1)(dst), s_exc) + ADDC(sum, t1) +EXC( STORE t2, UNIT(2)(dst), s_exc) + ADDC(sum, t2) +EXC( STORE t3, UNIT(3)(dst), s_exc) + ADDC(sum, t3) + bne len, rem, 1b + ADD dst, dst, 4*NBYTES + +cleanup_src_unaligned: + beqz len, done + and rem, len, NBYTES-1 # rem = len % NBYTES + beq rem, len, copy_bytes + nop +1: +EXC( LDFIRST t0, FIRST(0)(src), l_exc) +EXC( LDREST t0, REST(0)(src), l_exc_copy) + ADD src, src, NBYTES + SUB len, len, NBYTES +EXC( STORE t0, 0(dst), s_exc) + ADDC(sum, t0) + bne len, rem, 1b + ADD dst, dst, NBYTES + +copy_bytes_checklen: + beqz len, done + nop +copy_bytes: + /* 0 < len < NBYTES */ +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define SHIFT_START 0 +#define SHIFT_INC 8 +#else +#define SHIFT_START 8*(NBYTES-1) +#define SHIFT_INC -8 +#endif + move t2, zero # partial word + li t3, SHIFT_START # shift +/* use l_exc_copy here to return correct sum on fault */ +#define COPY_BYTE(N) \ +EXC( lbu t0, N(src), l_exc_copy); \ + SUB len, len, 1; \ +EXC( sb t0, N(dst), s_exc); \ + SLLV t0, t0, t3; \ + addu t3, SHIFT_INC; \ + beqz len, copy_bytes_done; \ + or t2, t0 + + COPY_BYTE(0) + COPY_BYTE(1) +#ifdef USE_DOUBLE + COPY_BYTE(2) + COPY_BYTE(3) + COPY_BYTE(4) + COPY_BYTE(5) +#endif +EXC( lbu t0, NBYTES-2(src), l_exc_copy) + SUB len, len, 1 +EXC( sb t0, NBYTES-2(dst), s_exc) + SLLV t0, t0, t3 + or t2, t0 +copy_bytes_done: + ADDC(sum, t2) +done: + /* fold checksum */ +#ifdef USE_DOUBLE + dsll32 v1, sum, 0 + daddu sum, v1 + sltu v1, sum, v1 + dsra32 sum, sum, 0 + addu 
sum, v1 +#endif + sll v1, sum, 16 + addu sum, v1 + sltu v1, sum, v1 + srl sum, sum, 16 + addu sum, v1 + + /* odd buffer alignment? */ + beqz odd, 1f + nop + sll v1, sum, 8 + srl sum, sum, 8 + or sum, v1 + andi sum, 0xffff +1: + .set reorder + ADDC(sum, psum) + jr ra + .set noreorder + +l_exc_copy: + /* + * Copy bytes from src until faulting load address (or until a + * lb faults) + * + * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28) + * may be more than a byte beyond the last address. + * Hence, the lb below may get an exception. + * + * Assumes src < THREAD_BUADDR($28) + */ + LOAD t0, TI_TASK($28) + li t2, SHIFT_START + LOAD t0, THREAD_BUADDR(t0) +1: +EXC( lbu t1, 0(src), l_exc) + ADD src, src, 1 + sb t1, 0(dst) # can't fault -- we're copy_from_user + SLLV t1, t1, t2 + addu t2, SHIFT_INC + ADDC(sum, t1) + bne src, t0, 1b + ADD dst, dst, 1 +l_exc: + LOAD t0, TI_TASK($28) + nop + LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address + nop + SUB len, AT, t0 # len number of uncopied bytes + /* + * Here's where we rely on src and dst being incremented in tandem, + * See (3) above. + * dst += (fault addr - src) to put dst at first byte to clear + */ + ADD dst, t0 # compute start address in a1 + SUB dst, src + /* + * Clear len bytes starting at dst. Can't call __bzero because it + * might modify len. An inefficient loop for these rare times... + */ + beqz len, done + SUB src, len, 1 +1: sb zero, 0(dst) + ADD dst, dst, 1 + bnez src, 1b + SUB src, src, 1 + li v1, -EFAULT + b done + sw v1, (errptr) + +s_exc: + li v0, -1 /* invalid checksum */ + li v1, -EFAULT + jr ra + sw v1, (errptr) + END(__csum_partial_copy_user) diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c deleted file mode 100644 index 06771040a267..000000000000 --- a/arch/mips/lib/csum_partial_copy.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1994, 1995 Waldorf Electronics GmbH - * Copyright (C) 1998, 1999 Ralf Baechle - */ -#include -#include -#include -#include -#include -#include -#include - -/* - * copy while checksumming, otherwise like csum_partial - */ -__wsum csum_partial_copy_nocheck(const void *src, - void *dst, int len, __wsum sum) -{ - /* - * It's 2:30 am and I don't feel like doing it real ... - * This is lots slower than the real thing (tm) - */ - sum = csum_partial(src, len, sum); - memcpy(dst, src, len); - - return sum; -} - -EXPORT_SYMBOL(csum_partial_copy_nocheck); - -/* - * Copy from userspace and compute checksum. If we catch an exception - * then zero the rest of the buffer. - */ -__wsum csum_partial_copy_from_user (const void __user *src, - void *dst, int len, __wsum sum, int *err_ptr) -{ - int missing; - - might_sleep(); - missing = copy_from_user(dst, src, len); - if (missing) { - memset(dst + len - missing, 0, missing); - *err_ptr = -EFAULT; - } - - return csum_partial(dst, len, sum); -} diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h index 9b768c3b96b3..24cdcc6eaab8 100644 --- a/include/asm-mips/checksum.h +++ b/include/asm-mips/checksum.h @@ -29,31 +29,38 @@ */ __wsum csum_partial(const void *buff, int len, __wsum sum); +__wsum __csum_partial_copy_user(const void *src, void *dst, + int len, __wsum sum, int *err_ptr); + /* * this is a new version of the above that records errors it finds in *errp, * but continues and zeros the rest of the buffer. 
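 * (With this series the zeroing on fault is done by the exception
 * handlers in csum_partial.S rather than by a C wrapper.)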
*/ -__wsum csum_partial_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum, int *errp); +static inline +__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, + __wsum sum, int *err_ptr) +{ + might_sleep(); + return __csum_partial_copy_user((__force void *)src, dst, + len, sum, err_ptr); +} /* * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, - int len, __wsum sum, - int *err_ptr) +static inline +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, + __wsum sum, int *err_ptr) { might_sleep(); - sum = csum_partial(src, len, sum); - - if (copy_to_user(dst, src, len)) { + if (access_ok(VERIFY_WRITE, dst, len)) + return __csum_partial_copy_user(src, (__force void *)dst, + len, sum, err_ptr); + if (len) *err_ptr = -EFAULT; - return (__force __wsum)-1; - } - return sum; + return (__force __wsum)-1; /* invalid checksum */ } /* From f9bba75e378776ee4e97adc0555db16695d341e1 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Mon, 8 Jan 2007 00:50:34 +0900 Subject: [PATCH 3/7] [MIPS] SMTC build fix Pass "irq" to __DO_IRQ_SMTC_HOOK() macro. Signed-off-by: Atsushi Nemoto Signed-off-by: Ralf Baechle --- include/asm-mips/irq.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h index 67657089efa7..386da82e5774 100644 --- a/include/asm-mips/irq.h +++ b/include/asm-mips/irq.h @@ -31,14 +31,14 @@ static inline int irq_canonicalize(int irq) * functions will take over re-enabling the low-level mask. * Otherwise it will be done on return from exception. */ -#define __DO_IRQ_SMTC_HOOK() \ +#define __DO_IRQ_SMTC_HOOK(irq) \ do { \ if (irq_hwmask[irq] & 0x0000ff00) \ write_c0_tccontext(read_c0_tccontext() & \ ~(irq_hwmask[irq] & 0x0000ff00)); \ } while (0) #else -#define __DO_IRQ_SMTC_HOOK() do { } while (0) +#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0) #endif /* @@ -52,7 +52,7 @@ do { \ #define do_IRQ(irq) \ do { \ irq_enter(); \ - __DO_IRQ_SMTC_HOOK(); \ + __DO_IRQ_SMTC_HOOK(irq); \ generic_handle_irq(irq); \ irq_exit(); \ } while (0) From f75f369fd783d194cb45632617561ca4d7045849 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Mon, 8 Jan 2007 01:27:40 +0900 Subject: [PATCH 4/7] [MIPS] Fix build errors on SEAD Quick and dirty fix for build errors on SEAD. 
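The #ifdef works because MSC01E_INT_BASE is a preprocessor macro that only
exists where the MSC01 interrupt-controller header was pulled in; on SEAD,
which lacks that header, the whole veic branch compiles away.  A minimal
standalone sketch of the pattern (every name in it is invented for
illustration, not taken from the tree):

#include <stdio.h>

/* Stand-in for a board header that only some platforms provide. */
#ifdef HAVE_EXT_IC_HEADER
#define EXT_INT_BASE 64
#endif

static int ext_ic_present;	/* runtime probe result, faked here */

static int timer_irq(void)
{
#ifdef EXT_INT_BASE	/* branch drops out on boards without the header */
	if (ext_ic_present)
		return EXT_INT_BASE + 7;
	else
#endif
		return 2 + 7;	/* fall back to CPU-internal controller */
}

int main(void)
{
	printf("timer irq = %d\n", timer_irq());
	return 0;
}

Building the sketch with and without -DHAVE_EXT_IC_HEADER exercises both
shapes of the function.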
Signed-off-by: Atsushi Nemoto
Signed-off-by: Ralf Baechle
---
 arch/mips/mips-boards/generic/time.c  | 9 +++++++--
 arch/mips/mips-boards/sead/sead_int.c | 4 ++--
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index e4604c73f02e..a3c3a1d462b2 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -47,6 +47,9 @@
 #ifdef CONFIG_MIPS_MALTA
 #include
 #endif
+#ifdef CONFIG_MIPS_SEAD
+#include
+#endif
 
 unsigned long cpu_khz;
 
@@ -263,11 +266,13 @@ void __init mips_time_init(void)
 
 void __init plat_timer_setup(struct irqaction *irq)
 {
+#ifdef MSC01E_INT_BASE
 	if (cpu_has_veic) {
 		set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
 		mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
-	}
-	else {
+	} else
+#endif
+	{
 		if (cpu_has_vint)
 			set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
 		mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c
index f445fcddfdfd..874ccb0066b8 100644
--- a/arch/mips/mips-boards/sead/sead_int.c
+++ b/arch/mips/mips-boards/sead/sead_int.c
@@ -21,7 +21,7 @@
  * Sead board.
  */
 #include
-#include
+#include
 
 #include
 #include
@@ -108,7 +108,7 @@ asmlinkage void plat_irq_dispatch(void)
 	if (irq >= 0)
 		do_IRQ(MIPSCPU_INT_BASE + irq);
 	else
-		spurious_interrupt(regs);
+		spurious_interrupt();
 }
 
 void __init arch_init_irq(void)

From d98f92371461c5c8cc24e12a212c59b3f437b581 Mon Sep 17 00:00:00 2001
From: Davy Chan
Date: Fri, 5 Jan 2007 13:56:46 +0800
Subject: [PATCH 5/7] [MIPS] pnx8550: Fix write_config_byte() PCI config space
 accessor

There's a serious typo in arch/mips/pci/ops-pnx8550.c:write_config_byte():
the parameter passed to config_access() is PCI_CMD_CONFIG_READ instead of
PCI_CMD_CONFIG_WRITE.  This renders any attempt to write a single byte to
the PCI configuration registers useless.

This problem does not exist for write_config_word() or
write_config_dword().

This problem has been there since kernel v2.6.17 and is still there as of
kernel v2.6.19.1.

Signed-off-by: Ralf Baechle
---
 arch/mips/pci/ops-pnx8550.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/mips/pci/ops-pnx8550.c b/arch/mips/pci/ops-pnx8550.c
index 454b65cc3354..f556b7a8dccd 100644
--- a/arch/mips/pci/ops-pnx8550.c
+++ b/arch/mips/pci/ops-pnx8550.c
@@ -202,7 +202,7 @@ write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
 		break;
 	}
 
-	err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data);
+	err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data);
 
 	return err;
 }

From 33b06b513e804ae64ebd5105fb703ec90bd7e173 Mon Sep 17 00:00:00 2001
From: Atsushi Nemoto
Date: Mon, 18 Dec 2006 00:38:21 +0900
Subject: [PATCH 6/7] [MIPS] TX49: Fix use of CDEX build_store_reg()

The commit a923660d786a53e78834b19062f7af2535f7f8ad accidentally prevents
TX49 from using CDEX.  Use build_dst_pref() only if a prefetch for store
is actually available.
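The breakage is easiest to see side by side.  A standalone sketch, under
the assumption that on TX49 cpu_has_prefetch is true while the
store-prefetch offsets are 0 (the cpu_has_* flags and build_* emitters
below are stubs, not the kernel's):

#include <stdio.h>

static int cpu_has_prefetch = 1;	/* TX49: prefetch exists... */
static int pref_offset_copy = 0;	/* ...but not for stores */
static int cpu_has_cache_cdex_s = 1;

static void build_dst_pref(int off)	{ printf("pref %d\n", off); }
static void build_cdex_s(void)		{ printf("cdex\n"); }

static void store_reg_old(void)		/* broken shape from a923660d */
{
	if (cpu_has_prefetch)
		build_dst_pref(pref_offset_copy);	/* wins: "pref 0" */
	else if (cpu_has_cache_cdex_s)
		build_cdex_s();				/* never reached */
}

static void store_reg_new(void)		/* shape after this patch */
{
	int pref_off = cpu_has_prefetch ? pref_offset_copy : 0;

	if (pref_off)			/* 0 means "no store prefetch" */
		build_dst_pref(pref_off);
	else if (cpu_has_cache_cdex_s)
		build_cdex_s();		/* TX49 uses CDEX again */
}

int main(void)
{
	store_reg_old();	/* prints "pref 0" - wrong path */
	store_reg_new();	/* prints "cdex"   - fixed path */
	return 0;
}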
Signed-off-by: Atsushi Nemoto
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/pg-r4k.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index d41fc5885e87..dc795be62807 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -243,11 +243,10 @@ static void __init __build_store_reg(int reg)
 
 static inline void build_store_reg(int reg)
 {
-	if (cpu_has_prefetch)
-		if (reg)
-			build_dst_pref(pref_offset_copy);
-		else
-			build_dst_pref(pref_offset_clear);
+	int pref_off = cpu_has_prefetch ?
+		(reg ? pref_offset_copy : pref_offset_clear) : 0;
+	if (pref_off)
+		build_dst_pref(pref_off);
 	else if (cpu_has_cache_cdex_s)
 		build_cdex_s();
 	else if (cpu_has_cache_cdex_p)

From 2dbda7dceca81adfe57c8884be5c66e70822d89a Mon Sep 17 00:00:00 2001
From: Vitaly Wool
Date: Thu, 28 Dec 2006 17:14:05 +0300
Subject: [PATCH 7/7] [MIPS] PNX8550: Fix system timer support

The patch inlined below restores proper time accounting for
PNX8550-based boards.  It also gets rid of an #ifdef in the generic
code that this change makes unnecessary.

It is functionally identical to the previous patch of the same name,
but takes minor review comments from Atsushi and Sergei into account.

Signed-off-by: Vitaly Wool
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/time.c                 |  2 --
 arch/mips/philips/pnx8550/common/time.c | 44 +++++++++++++++++++++----
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 11aab6d6bfe5..8aa544f73a5e 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -94,10 +94,8 @@ static void c0_timer_ack(void)
 {
 	unsigned int count;
 
-#ifndef CONFIG_SOC_PNX8550	/* pnx8550 resets to zero */
 	/* Ack this timer interrupt and set the next one.  */
 	expirelo += cycles_per_jiffy;
-#endif
 	write_c0_compare(expirelo);
 
 	/* Check to see if we have missed any timer interrupts.  */
diff --git a/arch/mips/philips/pnx8550/common/time.c b/arch/mips/philips/pnx8550/common/time.c
index 65c440e8480b..f80acae07cee 100644
--- a/arch/mips/philips/pnx8550/common/time.c
+++ b/arch/mips/philips/pnx8550/common/time.c
@@ -33,7 +33,17 @@
 #include
 #include
 
-extern unsigned int mips_hpt_frequency;
+static unsigned long cpj;
+
+static cycle_t hpt_read(void)
+{
+	return read_c0_count2();
+}
+
+static void timer_ack(void)
+{
+	write_c0_compare(cpj);
+}
 
 /*
  * pnx8550_time_init() - it does the following things:
@@ -68,27 +78,47 @@ void pnx8550_time_init(void)
	 * HZ timer interrupts per second.
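	 * cpj below is this frequency divided down to timer ticks per
	 * jiffy, rounded to the nearest integer by adding HZ/2 before
	 * the division.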
*/ mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p)); + cpj = (mips_hpt_frequency + HZ / 2) / HZ; + timer_ack(); + + /* Setup Timer 2 */ + write_c0_count2(0); + write_c0_compare2(0xffffffff); + + clocksource_mips.read = hpt_read; + mips_timer_ack = timer_ack; } +static irqreturn_t monotonic_interrupt(int irq, void *dev_id) +{ + /* Timer 2 clear interrupt */ + write_c0_compare2(-1); + return IRQ_HANDLED; +} + +static struct irqaction monotonic_irqaction = { + .handler = monotonic_interrupt, + .flags = IRQF_DISABLED, + .name = "Monotonic timer", +}; + void __init plat_timer_setup(struct irqaction *irq) { int configPR; setup_irq(PNX8550_INT_TIMER1, irq); + setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction); - /* Start timer1 */ + /* Timer 1 start */ configPR = read_c0_config7(); configPR &= ~0x00000008; write_c0_config7(configPR); - /* Timer 2 stop */ + /* Timer 2 start */ configPR = read_c0_config7(); - configPR |= 0x00000010; + configPR &= ~0x00000010; write_c0_config7(configPR); - write_c0_count2(0); - write_c0_compare2(0xffffffff); - /* Timer 3 stop */ configPR = read_c0_config7(); configPR |= 0x00000020;
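The net effect of patch 7 is two distinct compare/ack strategies: the
generic c0_timer_ack() chases a free-running Count register by advancing
expirelo, while the PNX8550 counter resets to zero on every interrupt, so
its ack simply reloads Compare with one jiffy's worth of cycles.  A
standalone sketch of the contrast (the register and the cycle count are
faked for illustration):

#include <stdio.h>
#include <stdint.h>

static uint32_t compare_reg;			/* fake CP0 Compare */
static uint32_t expirelo;			/* next expiry, free-running case */
static const uint32_t cycles_per_jiffy = 1000;	/* stand-in value */

static void ack_free_running(void)	/* generic c0_timer_ack() shape */
{
	expirelo += cycles_per_jiffy;	/* Count keeps running; chase it */
	compare_reg = expirelo;
}

static void ack_reset_to_zero(void)	/* PNX8550 timer_ack() shape */
{
	compare_reg = cycles_per_jiffy;	/* hardware restarts Count at 0 */
}

int main(void)
{
	ack_free_running();
	ack_free_running();
	printf("free-running compare = %u\n", compare_reg);	/* 2000 */
	ack_reset_to_zero();
	printf("reset-to-zero compare = %u\n", compare_reg);	/* 1000 */
	return 0;
}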