kernfs: Change kernfs_notify_list to llist.

At present kernfs_notify_list is implemented as a singly linked
list of kernfs_node(s), where the last element points to itself and
the value of ->attr.notify_next tells whether a node is on the list
or not. Both addition to and removal from the list happen under
kernfs_notify_lock.

Change kernfs_notify_list to an llist so that additions to the list
can happen locklessly.
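
For illustration, below is a minimal sketch of the llist producer/consumer
pattern this change moves to (the struct and function names are made up for
the example; only the <linux/llist.h> API is real): producers can call
llist_add() concurrently without taking a lock, while a single consumer
drains entries with llist_del_first().

#include <linux/llist.h>
#include <linux/printk.h>

struct my_item {
	int payload;
	struct llist_node node;		/* plays the role of attr.notify_next */
};

static LLIST_HEAD(pending_list);	/* plays the role of kernfs_notify_list */

/* Producer side: lockless, may be called concurrently. */
static void my_queue(struct my_item *item)
{
	llist_add(&item->node, &pending_list);
}

/* Consumer side: llist_del_first() assumes only one consumer runs at a time. */
static void my_drain(void)
{
	struct llist_node *n;

	while ((n = llist_del_first(&pending_list)) != NULL) {
		struct my_item *item = llist_entry(n, struct my_item, node);

		pr_info("draining item %d\n", item->payload);
	}
}

Two consequences are visible in the diff below: llist_add() queues
unconditionally (the old code skipped nodes already on the list), so
kernfs_get() and schedule_work() now run on every kernfs_notify() call, and
because llist is built on cmpxchg it is not usable from NMI context on all
architectures, which is what the new WARN_ON_ONCE(in_nmi()) guards against.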

Suggested-by: Al Viro <viro@zeniv.linux.org.uk>

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Link: https://lore.kernel.org/r/20220615021059.862643-3-imran.f.khan@oracle.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2 changed files with 21 additions and 28 deletions

--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c

@@ -38,18 +38,16 @@ struct kernfs_open_node {
 	struct list_head	files; /* goes through kernfs_open_file.list */
 };
 
-/*
- * kernfs_notify() may be called from any context and bounces notifications
- * through a work item. To minimize space overhead in kernfs_node, the
- * pending queue is implemented as a singly linked list of kernfs_nodes.
- * The list is terminated with the self pointer so that whether a
- * kernfs_node is on the list or not can be determined by testing the next
- * pointer for NULL.
+/**
+ * attribute_to_node - get kernfs_node object corresponding to a kernfs attribute
+ * @ptr:	&struct kernfs_elem_attr
+ * @type:	struct kernfs_node
+ * @member:	name of member (i.e attr)
  */
-#define KERNFS_NOTIFY_EOL			((void *)&kernfs_notify_list)
+#define attribute_to_node(ptr, type, member)	\
+	container_of(ptr, type, member)
 
-static DEFINE_SPINLOCK(kernfs_notify_lock);
-static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
+static LLIST_HEAD(kernfs_notify_list);
 
 /**
  * kernfs_deref_open_node - Get kernfs_open_node corresponding to @kn.
@@ -902,18 +900,16 @@ static void kernfs_notify_workfn(struct work_struct *work)
 	struct kernfs_node *kn;
 	struct kernfs_super_info *info;
 	struct kernfs_root *root;
+	struct llist_node *free;
+	struct kernfs_elem_attr *attr;
 repeat:
 	/* pop one off the notify_list */
-	spin_lock_irq(&kernfs_notify_lock);
-	kn = kernfs_notify_list;
-	if (kn == KERNFS_NOTIFY_EOL) {
-		spin_unlock_irq(&kernfs_notify_lock);
+	free = llist_del_first(&kernfs_notify_list);
+	if (free == NULL)
 		return;
-	}
-	kernfs_notify_list = kn->attr.notify_next;
-	kn->attr.notify_next = NULL;
-	spin_unlock_irq(&kernfs_notify_lock);
 
+	attr = llist_entry(free, struct kernfs_elem_attr, notify_next);
+	kn = attribute_to_node(attr, struct kernfs_node, attr);
 	root = kernfs_root(kn);
 	/* kick fsnotify */
 	down_write(&root->kernfs_rwsem);
@@ -969,12 +965,14 @@ static void kernfs_notify_workfn(struct work_struct *work)
 void kernfs_notify(struct kernfs_node *kn)
 {
 	static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
-	unsigned long flags;
 	struct kernfs_open_node *on;
 
 	if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
 		return;
 
+	/* Because we are using llist for kernfs_notify_list */
+	WARN_ON_ONCE(in_nmi());
+
 	/* kick poll immediately */
 	rcu_read_lock();
 	on = rcu_dereference(kn->attr.open);
@@ -985,14 +983,9 @@ void kernfs_notify(struct kernfs_node *kn)
 	rcu_read_unlock();
 
 	/* schedule work to kick fsnotify */
-	spin_lock_irqsave(&kernfs_notify_lock, flags);
-	if (!kn->attr.notify_next) {
-		kernfs_get(kn);
-		kn->attr.notify_next = kernfs_notify_list;
-		kernfs_notify_list = kn;
-		schedule_work(&kernfs_notify_work);
-	}
-	spin_unlock_irqrestore(&kernfs_notify_lock, flags);
+	kernfs_get(kn);
+	llist_add(&kn->attr.notify_next, &kernfs_notify_list);
+	schedule_work(&kernfs_notify_work);
 }
 EXPORT_SYMBOL_GPL(kernfs_notify);
 

--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h

@@ -116,7 +116,7 @@ struct kernfs_elem_attr {
 	const struct kernfs_ops	*ops;
 	struct kernfs_open_node __rcu	*open;
 	loff_t			size;
-	struct kernfs_node	*notify_next;	/* for kernfs_notify() */
+	struct llist_node	notify_next;	/* for kernfs_notify() */
 };
 
 /*