mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-15 15:15:47 +00:00
500c2e1fdb
The current locking mechanism uses a ll/sc sequence to release a spinlock. This is slower than a wmb() followed by a store to unlock. The branching forward to .subsection 2 on sc failure slows down the contended case. So we get rid of that part too. Since we are now working on naturally aligned u16 values, we can get rid of a masking operation as the LHU already does the right thing. The ANDI are reversed for better scheduling on multi-issue CPUs. On a 12 CPU 750MHz Octeon cn5750 this patch improves ipv4 UDP packet forwarding rates from 3.58*10^6 PPS to 3.99*10^6 PPS, or about 11%. Signed-off-by: David Daney <ddaney@caviumnetworks.com> To: linux-mips@linux-mips.org Patchwork: http://patchwork.linux-mips.org/patch/937/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
37 lines
592 B
C
#ifndef _ASM_SPINLOCK_TYPES_H
|
|
#define _ASM_SPINLOCK_TYPES_H
|
|
|
|
#ifndef __LINUX_SPINLOCK_TYPES_H
|
|
# error "please don't include this file directly"
|
|
#endif
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
typedef union {
|
|
/*
|
|
* bits 0..15 : serving_now
|
|
* bits 16..31 : ticket
|
|
*/
|
|
u32 lock;
|
|
struct {
|
|
#ifdef __BIG_ENDIAN
|
|
u16 ticket;
|
|
u16 serving_now;
|
|
#else
|
|
u16 serving_now;
|
|
u16 ticket;
|
|
#endif
|
|
} h;
|
|
} arch_spinlock_t;
|
|
|
|
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0 }
|
|
|
|
/*
 * Reader/writer lock word.  The value encoding is defined by the
 * arch rwlock implementation (not visible in this header); this type
 * only reserves the storage.
 */
typedef struct {
	volatile unsigned int lock;
} arch_rwlock_t;

/* Unlocked rwlock: zero-initialized lock word. */
#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
#endif