CRIS v32: Completely rework spinlocks for ETRAX FS and ARTPEC-3
parent 4258fb1932
commit de1c1419f9
1 changed file with 75 additions and 113 deletions
@@ -1,40 +1,47 @@
 #ifndef __ASM_ARCH_SPINLOCK_H
 #define __ASM_ARCH_SPINLOCK_H
 
-#include <asm/system.h>
+#include <linux/spinlock_types.h>
 
 #define RW_LOCK_BIAS 0x01000000
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
 
 extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
-extern int cris_spin_trylock(void* l);
+extern int cris_spin_trylock(void *l);
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+	return *(volatile signed char *)(&(x)->slock) <= 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
-			  : "=m" (lock->lock) \
+			  : "=m" (lock->slock) \
 			  : "r" (1) \
 			  : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-	return cris_spin_trylock((void*)&lock->lock);
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	cris_spin_lock((void*)&lock->lock);
+	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	_raw_spin_lock(lock);
+	cris_spin_lock((void *)&lock->slock);
+}
+
+static inline void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	__raw_spin_lock(lock);
 }
 
 /*
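The spinlock half of this hunk boils down to a one-byte protocol: slock holds 1 while the lock is free and 0 or negative while it is held, which is why __raw_spin_is_locked() tests <= 0 and why __raw_spin_unlock() is a single move.d of the constant 1. The acquire side lives in the out-of-line cris_spin_lock()/cris_spin_trylock() assembly, which this header only declares. Below is a minimal user-space model of that protocol, as a sketch only: C11 atomics stand in for the ETRAX FS/ARTPEC-3 assembly, and the model_* names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_schar slock; } model_spinlock_t;	/* 1 = free */

static bool model_trylock(model_spinlock_t *l)
{
	signed char free_byte = 1;
	/* Take the byte from 1 (free) to 0 (held); fail if already held. */
	return atomic_compare_exchange_strong(&l->slock, &free_byte, 0);
}

static void model_lock(model_spinlock_t *l)
{
	while (!model_trylock(l))
		;	/* spin; the kernel side would use cpu_relax() here */
}

static void model_unlock(model_spinlock_t *l)
{
	/* Like __raw_spin_unlock(): a single store of 1 releases the lock. */
	atomic_store(&l->slock, 1);
}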
@@ -46,119 +53,74 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-	spinlock_t lock;
-	volatile int counter;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { {1}, 0 }
-
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
-
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define read_can_lock(x) ((int)(x)->counter >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define write_can_lock(x) ((x)->counter == 0)
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-
-/* read_lock, read_unlock are pretty straightforward. Of course it somehow
- * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
-
-static __inline__ void _raw_read_lock(rwlock_t *rw)
-{
-	unsigned long flags;
-	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
-
-	rw->counter++;
-
-	_raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
-
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
-{
-	unsigned long flags;
-	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
-
-	rw->counter--;
-
-	_raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
-
-/* write_lock is less trivial. We optimistically grab the lock and check
- * if we surprised any readers. If so we release the lock and wait till
- * they're all gone before trying again
- *
- * Also note that we don't use the _irqsave / _irqrestore suffixes here.
- * If we're called with interrupts enabled and we've got readers (or other
- * writers) in interrupt handlers someone fucked up and we'd dead-lock
- * sooner or later anyway. prumpf */
-
-static __inline__ void _raw_write_lock(rwlock_t *rw)
-{
-retry:
-	_raw_spin_lock(&rw->lock);
-
-	if(rw->counter != 0) {
-		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
-
-		while(rw->counter != 0);
-
-		goto retry;
-	}
-
-	/* got it. now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
-}
-
-/* write_unlock is absolutely trivial - we don't have to wait for anything */
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
-{
-	rw->counter = 0;
-	_raw_spin_unlock(&rw->lock);
-}
-
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
-{
-	_raw_spin_lock(&rw->lock);
-	if (rw->counter != 0) {
-		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
-		return 0;
-	}
-
-	/* got it. now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
-	return 1;
-}
-
-static __inline__ int is_read_locked(rwlock_t *rw)
-{
-	return rw->counter > 0;
-}
-
-static __inline__ int is_write_locked(rwlock_t *rw)
-{
-	return rw->counter < 0;
-}
+
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+	return (int)(x)->lock > 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+	return (x)->lock == RW_LOCK_BIAS;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	__raw_spin_lock(&rw->slock);
+	while (rw->lock == 0);
+	rw->lock--;
+	__raw_spin_unlock(&rw->slock);
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	__raw_spin_lock(&rw->slock);
+	while (rw->lock != RW_LOCK_BIAS);
+	rw->lock = 0;
+	__raw_spin_unlock(&rw->slock);
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	__raw_spin_lock(&rw->slock);
+	rw->lock++;
+	__raw_spin_unlock(&rw->slock);
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	__raw_spin_lock(&rw->slock);
+	rw->lock = RW_LOCK_BIAS;
+	__raw_spin_unlock(&rw->slock);
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+	int ret = 0;
+	__raw_spin_lock(&rw->slock);
+	if (rw->lock != 0) {
+		rw->lock--;
+		ret = 1;
+	}
+	__raw_spin_unlock(&rw->slock);
+	return ret;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+	int ret = 0;
+	__raw_spin_lock(&rw->slock);
+	if (rw->lock == RW_LOCK_BIAS) {
+		rw->lock = 0;
+		ret = 1;
+	}
+	__raw_spin_unlock(&rw->slock);
+	return ret;
+}
 
 #define _raw_spin_relax(lock) cpu_relax()
 #define _raw_read_relax(lock) cpu_relax()
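Taken together, the rwlock rework above replaces the old irq-save/counter scheme with a single bias counter guarded by the inner spinlock: lock starts at RW_LOCK_BIAS (0x01000000) and counts free reader slots. Each reader claims one slot (lock--), and a writer claims all of them at once by waiting for the full bias and dropping the count to 0, which is exactly the condition __raw_write_can_lock() tests. A user-space sketch of those invariants follows; the model_* names are hypothetical, a C11 mutex stands in for rw->slock, and the waiters deliberately release it between polls rather than spinning with it held.

#include <threads.h>

#define MODEL_RW_BIAS 0x01000000	/* mirrors RW_LOCK_BIAS */

typedef struct {
	mtx_t slock;	/* plays the role of rw->slock */
	int lock;	/* free reader slots, as in raw_rwlock_t */
} model_rwlock_t;

static void model_rwlock_init(model_rwlock_t *rw)
{
	mtx_init(&rw->slock, mtx_plain);
	rw->lock = MODEL_RW_BIAS;	/* every reader slot free */
}

static void model_read_lock(model_rwlock_t *rw)
{
	for (;;) {
		mtx_lock(&rw->slock);
		if (rw->lock > 0) {		/* at least one slot free */
			rw->lock--;		/* claim it */
			mtx_unlock(&rw->slock);
			return;
		}
		mtx_unlock(&rw->slock);		/* writer active: retry */
	}
}

static void model_read_unlock(model_rwlock_t *rw)
{
	mtx_lock(&rw->slock);
	rw->lock++;				/* return the slot */
	mtx_unlock(&rw->slock);
}

static void model_write_lock(model_rwlock_t *rw)
{
	for (;;) {
		mtx_lock(&rw->slock);
		if (rw->lock == MODEL_RW_BIAS) {	/* no readers or writer */
			rw->lock = 0;			/* claim every slot */
			mtx_unlock(&rw->slock);
			return;
		}
		mtx_unlock(&rw->slock);		/* readers still out: retry */
	}
}

static void model_write_unlock(model_rwlock_t *rw)
{
	mtx_lock(&rw->slock);
	rw->lock = MODEL_RW_BIAS;		/* release all slots */
	mtx_unlock(&rw->slock);
}

Releasing the inner lock between polls is the sketch's one deliberate deviation from the in-kernel busy-wait: the unlock paths themselves must take slock, so a waiter that kept holding it while spinning could never be satisfied.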