diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 96255f47b1f8..21f0db2c9711 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -140,12 +140,12 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 
 	rwsemtrace(sem, "Entering __down_read");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock(&sem->wait_lock);
+		spin_unlock_irq(&sem->wait_lock);
 		goto out;
 	}
 
@@ -160,7 +160,7 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -181,10 +181,12 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
  */
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
+	unsigned long flags;
 	int ret = 0;
+
 	rwsemtrace(sem, "Entering __down_read_trylock");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -192,7 +194,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
@@ -209,12 +211,12 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 
 	rwsemtrace(sem, "Entering __down_write");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock(&sem->wait_lock);
+		spin_unlock_irq(&sem->wait_lock);
 		goto out;
 	}
 
@@ -229,7 +231,7 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -250,10 +252,12 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
  */
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
+	unsigned long flags;
 	int ret = 0;
+
 	rwsemtrace(sem, "Entering __down_write_trylock");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -261,7 +265,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
@@ -272,14 +276,16 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering __up_read");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	rwsemtrace(sem, "Leaving __up_read");
 }
@@ -289,15 +295,17 @@ void fastcall __up_read(struct rw_semaphore *sem)
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering __up_write");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	rwsemtrace(sem, "Leaving __up_write");
 }
@@ -308,15 +316,17 @@ void fastcall __up_write(struct rw_semaphore *sem)
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering __downgrade_write");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	rwsemtrace(sem, "Leaving __downgrade_write");
 }
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 924c14e0bfde..7644089ec8fa 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -150,7 +150,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	spin_lock(&sem->wait_lock);
+	spin_lock_irq(&sem->wait_lock);
 	waiter->task = tsk;
 	get_task_struct(tsk);
 
@@ -163,7 +163,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	if (!(count & RWSEM_ACTIVE_MASK))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -219,15 +219,17 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
  */
 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
 	rwsemtrace(sem, "Entering rwsem_wake");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	rwsemtrace(sem, "Leaving rwsem_wake");
 
@@ -241,15 +243,17 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
  */
 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
+	unsigned long flags;
+
	rwsemtrace(sem, "Entering rwsem_downgrade_wake");
 
-	spin_lock(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
 	return sem;
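
For illustration only (this is not part of the patch, and my_sem/my_isr are invented names): the reason sem->wait_lock needs the IRQ-disabling lock variants is that an interrupt handler which operates on the same rwsem also takes wait_lock; if that interrupt fires on a CPU that is already inside one of the paths above holding wait_lock with plain spin_lock(), the handler spins on a lock its own CPU owns and the machine deadlocks. A minimal sketch of such a caller, assuming a 2.6-era interrupt handler signature:

#include <linux/rwsem.h>
#include <linux/interrupt.h>

/* read-locked in process context before the I/O is started */
static DECLARE_RWSEM(my_sem);

static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	/*
	 * up_read() reaches __up_read() (or rwsem_wake() in the contended
	 * case of the atomic implementation), which now take sem->wait_lock
	 * with spin_lock_irqsave().  With the old plain spin_lock() this
	 * call could deadlock if the interrupt arrived while this CPU was
	 * already holding wait_lock.
	 */
	up_read(&my_sem);
	return IRQ_HANDLED;
}

Note the split in the patch itself: the sleeping slow paths (__down_read, __down_write, rwsem_down_failed_common) use spin_lock_irq()/spin_unlock_irq(), since a path that may schedule is only ever entered with interrupts enabled, while the trylock, release and wake paths use spin_lock_irqsave()/spin_unlock_irqrestore(), because their callers' interrupt state is not known.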