irq_work: Use llist in the struct irq_work logic

Use the generic llist primitives in irq_work instead of its private
lock-less linked-list implementation, to avoid code duplication.
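
For reference, the llist primitives this patch switches to form a
cmpxchg-based lock-less stack. A minimal userspace sketch of the same
pattern, using C11 atomics in place of the kernel's cmpxchg (the ll_*
names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct llnode {
	struct llnode *next;
};

struct llhead {
	_Atomic(struct llnode *) first;
};

/* Push one node onto the list; returns true if the list was empty
 * beforehand, mirroring llist_add()'s return value, which the patch
 * uses to decide whether to raise the self-interrupt. */
static bool ll_add(struct llnode *node, struct llhead *head)
{
	struct llnode *first = atomic_load(&head->first);

	do {
		node->next = first;
	} while (!atomic_compare_exchange_weak(&head->first, &first, node));

	return first == NULL;
}

/* Detach the entire list in one atomic exchange, like llist_del_all(). */
static struct llnode *ll_del_all(struct llhead *head)
{
	return atomic_exchange(&head->first, NULL);
}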

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1315461646-1379-6-git-send-email-ying.huang@intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 38aaf8090d
parent 781f7fd916
Author:    Huang Ying <ying.huang@intel.com>
Date:      2011-09-08 14:00:46 +08:00
Committer: Ingo Molnar <mingo@elte.hu>

2 changed files with 42 additions and 64 deletions

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h

@@ -1,20 +1,23 @@
 #ifndef _LINUX_IRQ_WORK_H
 #define _LINUX_IRQ_WORK_H
 
+#include <linux/llist.h>
+
 struct irq_work {
-	struct irq_work *next;
+	unsigned long flags;
+	struct llist_node llnode;
 	void (*func)(struct irq_work *);
 };
 
 static inline
-void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	entry->next = NULL;
-	entry->func = func;
+	work->flags = 0;
+	work->func = func;
 }
 
-bool irq_work_queue(struct irq_work *entry);
+bool irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
-void irq_work_sync(struct irq_work *entry);
+void irq_work_sync(struct irq_work *work);
 
 #endif /* _LINUX_IRQ_WORK_H */
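
With the reworked header, a caller initializes the work item once and
can queue it from any context. A hedged usage sketch (my_irq_work_func
and my_setup are hypothetical names, not part of the patch):

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Hypothetical callback; it runs from the self-interrupt with IRQs off. */
static void my_irq_work_func(struct irq_work *work)
{
	pr_info("irq_work callback ran\n");
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_irq_work_func);

	/* irq_work_queue() returns false if the work is already pending. */
	if (!irq_work_queue(&my_work))
		pr_debug("my_work was already queued\n");
}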

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c

@@ -17,54 +17,34 @@
  * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
  * pending   next, 3 -> {busy}          : queued, pending callback
  * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- *
- * We use the lower two bits of the next pointer to keep PENDING and BUSY
- * flags.
  */
 
 #define IRQ_WORK_PENDING	1UL
 #define IRQ_WORK_BUSY		2UL
 #define IRQ_WORK_FLAGS		3UL
 
-static inline bool irq_work_is_set(struct irq_work *entry, int flags)
-{
-	return (unsigned long)entry->next & flags;
-}
-
-static inline struct irq_work *irq_work_next(struct irq_work *entry)
-{
-	unsigned long next = (unsigned long)entry->next;
-	next &= ~IRQ_WORK_FLAGS;
-	return (struct irq_work *)next;
-}
-
-static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
-{
-	unsigned long next = (unsigned long)entry;
-	next |= flags;
-	return (struct irq_work *)next;
-}
-
-static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+static DEFINE_PER_CPU(struct llist_head, irq_work_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
-static bool irq_work_claim(struct irq_work *entry)
+static bool irq_work_claim(struct irq_work *work)
 {
-	struct irq_work *next, *nflags;
+	unsigned long flags, nflags;
 
-	do {
-		next = entry->next;
-		if ((unsigned long)next & IRQ_WORK_PENDING)
+	for (;;) {
+		flags = work->flags;
+		if (flags & IRQ_WORK_PENDING)
 			return false;
-		nflags = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(&entry->next, next, nflags) != next);
+		nflags = flags | IRQ_WORK_FLAGS;
+		if (cmpxchg(&work->flags, flags, nflags) == flags)
+			break;
+		cpu_relax();
+	}
 
 	return true;
 }
 
 void __weak arch_irq_work_raise(void)
 {
 	/*
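
The claim path above sets PENDING and BUSY together in one cmpxchg and
bails out if PENDING is already set. A userspace analogue of that state
transition, again sketched with C11 atomics (illustrative, not kernel
code):

#include <stdatomic.h>
#include <stdbool.h>

#define PENDING	1UL	/* mirrors IRQ_WORK_PENDING */
#define BUSY	2UL	/* mirrors IRQ_WORK_BUSY */

/* Claim a work item: atomically set both flag bits, failing if the
 * item is already pending. On CAS failure, 'old' is refreshed by the
 * failed compare-exchange and the loop retries, as in the patched
 * irq_work_claim(). */
static bool claim(_Atomic unsigned long *flags)
{
	unsigned long old = atomic_load(flags);

	for (;;) {
		if (old & PENDING)
			return false;
		if (atomic_compare_exchange_weak(flags, &old, old | PENDING | BUSY))
			return true;
	}
}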
@@ -75,20 +55,15 @@ void __weak arch_irq_work_raise(void)
 /*
  * Queue the entry and raise the IPI if needed.
  */
-static void __irq_work_queue(struct irq_work *entry)
+static void __irq_work_queue(struct irq_work *work)
 {
-	struct irq_work *next;
+	bool empty;
 
 	preempt_disable();
 
-	do {
-		next = __this_cpu_read(irq_work_list);
-		/* Can assign non-atomic because we keep the flags set. */
-		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
-
+	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
 	/* The list was empty, raise self-interrupt to start processing. */
-	if (!irq_work_next(entry))
+	if (empty)
 		arch_irq_work_raise();
 
 	preempt_enable();
@@ -100,16 +75,16 @@ static void __irq_work_queue(struct irq_work *entry)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-bool irq_work_queue(struct irq_work *entry)
+bool irq_work_queue(struct irq_work *work)
 {
-	if (!irq_work_claim(entry)) {
+	if (!irq_work_claim(work)) {
 		/*
 		 * Already enqueued, can't do!
 		 */
 		return false;
 	}
 
-	__irq_work_queue(entry);
+	__irq_work_queue(work);
 	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
@@ -120,34 +95,34 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-	struct irq_work *list;
+	struct irq_work *work;
+	struct llist_head *this_list;
+	struct llist_node *llnode;
 
-	if (this_cpu_read(irq_work_list) == NULL)
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = this_cpu_xchg(irq_work_list, NULL);
-
-	while (list != NULL) {
-		struct irq_work *entry = list;
+	llnode = llist_del_all(this_list);
+	while (llnode != NULL) {
+		work = llist_entry(llnode, struct irq_work, llnode);
 
-		list = irq_work_next(list);
+		llnode = llnode->next;
 
 		/*
-		 * Clear the PENDING bit, after this point the @entry
+		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
 		 */
-		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
-		entry->func(entry);
+		work->flags = IRQ_WORK_BUSY;
+		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&entry->next,
-			      next_flags(NULL, IRQ_WORK_BUSY),
-			      NULL);
+		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
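
One subtlety in the new irq_work_run(): llnode->next is read before the
PENDING bit is cleared, because the moment the callback can re-queue the
item, its llnode may be reused. Reusing the ll_* helpers from the earlier
sketch, the drain loop follows the same pattern (illustrative only):

static void run_all(struct llhead *head, void (*func)(struct llnode *))
{
	struct llnode *node = ll_del_all(head);

	while (node != NULL) {
		/* Save the successor before the node can be recycled. */
		struct llnode *next = node->next;

		func(node);
		node = next;
	}
}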
@@ -156,11 +131,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
  * Synchronize against the irq_work @entry, ensures the entry is not
  * currently in use.
  */
-void irq_work_sync(struct irq_work *entry)
+void irq_work_sync(struct irq_work *work)
 {
 	WARN_ON_ONCE(irqs_disabled());
 
-	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+	while (work->flags & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
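
irq_work_sync() simply spins until the BUSY bit clears; the closing
cmpxchg in irq_work_run() is what releases it. A userspace analogue
using the flag bits from the claim sketch above (illustrative):

/* Busy-wait until the runner's final cmpxchg drops the BUSY bit. */
static void sync_wait(_Atomic unsigned long *flags)
{
	while (atomic_load(flags) & BUSY)
		;	/* userspace stand-in for cpu_relax() */
}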