Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'net-Get-rid-of-net_mutex-and-simplify-cleanup_list-queueing'
Kirill Tkhai says:

====================
net: Get rid of net_mutex and simplify cleanup_list queueing

[1/3] kills net_mutex and makes net_sem be taken for write instead. This
means taking fewer locks (one instead of two) for the time before all
pernet_operations are converted.

[2-3/3] simplify dead net cleanup queueing and switch it to the llist API.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b99fe0e28f
3 changed files with 47 additions and 41 deletions
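The core of the [1/3] change, visible in the hunks below, is one rw_semaphore doing double duty: when every registered pernet_operations is async, namespace setup and teardown take net_sem only for read and can run in parallel; when synchronous ops remain, net_sem is taken for write, which replaces the old read-lock-plus-net_mutex pair. The following is a minimal userspace sketch of that decision (not the kernel code); the pthread rwlock and the names nr_sync_ops, create_ns and setup are illustrative stand-ins.

/* Sketch only: pick read vs. write acquisition of one rwsem based on
 * whether any synchronous operations are registered, and recheck after
 * a read acquisition. Not the kernel implementation.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t net_sem = PTHREAD_RWLOCK_INITIALIZER;
static atomic_uint nr_sync_ops;          /* count of non-async ops (illustrative) */

static void setup(void)                  /* stands in for setup_net() */
{
	puts("setup_net()");
}

static void create_ns(void)
{
	unsigned int write;

again:
	write = atomic_load(&nr_sync_ops);
	if (write)
		pthread_rwlock_wrlock(&net_sem);   /* serialize with everything */
	else
		pthread_rwlock_rdlock(&net_sem);   /* run in parallel */

	/* A synchronous op may have been registered between the test and the
	 * read acquisition; if so, drop the lock and retry with the write lock.
	 */
	if (!write && atomic_load(&nr_sync_ops)) {
		pthread_rwlock_unlock(&net_sem);
		goto again;
	}

	setup();
	pthread_rwlock_unlock(&net_sem);
}

int main(void)
{
	create_ns();                          /* read-locked path  */
	atomic_store(&nr_sync_ops, 1);
	create_ns();                          /* write-locked path */
	return 0;
}

The recheck-and-retry covers only the window between the test and the read acquisition; in the kernel diff, registering a pernet_operations needs net_sem for write, so the count cannot change once the read lock is held.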
include/linux/rtnetlink.h

@@ -35,7 +35,6 @@ extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
 
 extern wait_queue_head_t netdev_unregistering_wq;
-extern struct mutex net_mutex;
 extern struct rw_semaphore net_sem;
 
 #ifdef CONFIG_PROVE_LOCKING
include/net/net_namespace.h

@@ -59,8 +59,12 @@ struct net {
 	atomic64_t		cookie_gen;
 
 	struct list_head	list;		/* list of network namespaces */
-	struct list_head	cleanup_list;	/* namespaces on death row */
-	struct list_head	exit_list;	/* Use only net_mutex */
+	struct list_head	exit_list;	/* To linked to call pernet exit
+						 * methods on dead net (net_sem
+						 * read locked), or to unregister
+						 * pernet ops (net_sem wr locked).
+						 */
+	struct llist_node	cleanup_list;	/* namespaces on death row */
 
 	struct user_namespace	*user_ns;	/* Owning user namespace */
 	struct ucounts		*ucounts;
@@ -89,7 +93,7 @@ struct net {
 	/* core fib_rules */
 	struct list_head	rules_ops;
 
-	struct list_head	fib_notifier_ops;	/* protected by net_mutex */
+	struct list_head	fib_notifier_ops;	/* protected by net_sem */
 
 	struct net_device	*loopback_dev;	/* The loopback */
 	struct netns_core	core;
@@ -316,7 +320,7 @@ struct pernet_operations {
 	/*
 	 * Indicates above methods are allowed to be executed in parallel
 	 * with methods of any other pernet_operations, i.e. they are not
-	 * need synchronization via net_mutex.
+	 * need write locked net_sem.
 	 */
 	bool async;
 };
net/core/net_namespace.c

@@ -29,8 +29,6 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-/* Used only if there are !async pernet_operations registered */
-DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
@@ -407,6 +405,7 @@ struct net *copy_net_ns(unsigned long flags,
 {
 	struct ucounts *ucounts;
 	struct net *net;
+	unsigned write;
 	int rv;
 
 	if (!(flags & CLONE_NEWNET))
@@ -424,20 +423,26 @@ struct net *copy_net_ns(unsigned long flags,
 	refcount_set(&net->passive, 1);
 	net->ucounts = ucounts;
 	get_user_ns(user_ns);
 
-	rv = down_read_killable(&net_sem);
+again:
+	write = READ_ONCE(nr_sync_pernet_ops);
+	if (write)
+		rv = down_write_killable(&net_sem);
+	else
+		rv = down_read_killable(&net_sem);
 	if (rv < 0)
 		goto put_userns;
-	if (nr_sync_pernet_ops) {
-		rv = mutex_lock_killable(&net_mutex);
-		if (rv < 0)
-			goto up_read;
+
+	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
+		up_read(&net_sem);
+		goto again;
 	}
 	rv = setup_net(net, user_ns);
-	if (nr_sync_pernet_ops)
-		mutex_unlock(&net_mutex);
-up_read:
-	up_read(&net_sem);
+
+	if (write)
+		up_write(&net_sem);
+	else
+		up_read(&net_sem);
+
 	if (rv < 0) {
 put_userns:
 		put_user_ns(user_ns);
@@ -476,28 +481,33 @@ static void unhash_nsid(struct net *net, struct net *last)
 	spin_unlock_bh(&net->nsid_lock);
 }
 
-static DEFINE_SPINLOCK(cleanup_list_lock);
-static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
+static LLIST_HEAD(cleanup_list);
 
 static void cleanup_net(struct work_struct *work)
 {
 	const struct pernet_operations *ops;
 	struct net *net, *tmp, *last;
-	struct list_head net_kill_list;
+	struct llist_node *net_kill_list;
 	LIST_HEAD(net_exit_list);
+	unsigned write;
 
 	/* Atomically snapshot the list of namespaces to cleanup */
-	spin_lock_irq(&cleanup_list_lock);
-	list_replace_init(&cleanup_list, &net_kill_list);
-	spin_unlock_irq(&cleanup_list_lock);
+	net_kill_list = llist_del_all(&cleanup_list);
+again:
+	write = READ_ONCE(nr_sync_pernet_ops);
+	if (write)
+		down_write(&net_sem);
+	else
+		down_read(&net_sem);
 
-	down_read(&net_sem);
-	if (nr_sync_pernet_ops)
-		mutex_lock(&net_mutex);
+	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
+		up_read(&net_sem);
+		goto again;
+	}
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
-	list_for_each_entry(net, &net_kill_list, cleanup_list)
+	llist_for_each_entry(net, net_kill_list, cleanup_list)
 		list_del_rcu(&net->list);
 	/* Cache last net. After we unlock rtnl, no one new net
 	 * added to net_namespace_list can assign nsid pointer
@@ -512,7 +522,7 @@ static void cleanup_net(struct work_struct *work)
 	last = list_last_entry(&net_namespace_list, struct net, list);
 	rtnl_unlock();
 
-	list_for_each_entry(net, &net_kill_list, cleanup_list) {
+	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 		unhash_nsid(net, last);
 		list_add_tail(&net->exit_list, &net_exit_list);
 	}
@@ -528,14 +538,14 @@ static void cleanup_net(struct work_struct *work)
 	list_for_each_entry_reverse(ops, &pernet_list, list)
 		ops_exit_list(ops, &net_exit_list);
 
-	if (nr_sync_pernet_ops)
-		mutex_unlock(&net_mutex);
-
 	/* Free the net generic variables */
 	list_for_each_entry_reverse(ops, &pernet_list, list)
 		ops_free_list(ops, &net_exit_list);
 
-	up_read(&net_sem);
+	if (write)
+		up_write(&net_sem);
+	else
+		up_read(&net_sem);
 
 	/* Ensure there are no outstanding rcu callbacks using this
 	 * network namespace.
@@ -563,8 +573,6 @@ static void cleanup_net(struct work_struct *work)
 void net_ns_barrier(void)
 {
 	down_write(&net_sem);
-	mutex_lock(&net_mutex);
-	mutex_unlock(&net_mutex);
 	up_write(&net_sem);
 }
 EXPORT_SYMBOL(net_ns_barrier);
@@ -574,13 +582,8 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
 void __put_net(struct net *net)
 {
 	/* Cleanup the network namespace in process context */
-	unsigned long flags;
-
-	spin_lock_irqsave(&cleanup_list_lock, flags);
-	list_add(&net->cleanup_list, &cleanup_list);
-	spin_unlock_irqrestore(&cleanup_list_lock, flags);
-
-	queue_work(netns_wq, &net_cleanup_work);
+	if (llist_add(&net->cleanup_list, &cleanup_list))
+		queue_work(netns_wq, &net_cleanup_work);
 }
 EXPORT_SYMBOL_GPL(__put_net);
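The [2-3/3] part, shown in the last hunks, replaces the spinlock-protected cleanup_list with a lock-free llist: __put_net() queues the cleanup work only when llist_add() reports that the list was previously empty, and cleanup_net() detaches the whole batch with a single llist_del_all(). Below is a small userspace sketch of the same pattern built on C11 atomics; dead_list, dead_list_add and schedule_cleanup are illustrative names, not kernel interfaces.

/* Sketch only: lock-free "push and kick the worker once per batch" queueing,
 * mimicking llist_add()/llist_del_all(). Not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;                              /* stands in for a struct net */
};

static _Atomic(struct node *) dead_list;     /* plays the role of cleanup_list */

/* Like llist_add(): push a node, return true iff the list was empty before,
 * so exactly one caller is told to schedule the worker.
 */
static bool dead_list_add(struct node *n)
{
	struct node *first = atomic_load(&dead_list);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&dead_list, &first, n));

	return first == NULL;
}

/* Like llist_del_all(): atomically detach every queued node. */
static struct node *dead_list_del_all(void)
{
	return atomic_exchange(&dead_list, NULL);
}

static void schedule_cleanup(void)           /* stands in for the work handler */
{
	struct node *n;

	for (n = dead_list_del_all(); n; n = n->next)
		printf("cleaning up net %d\n", n->id);
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	if (dead_list_add(&a))               /* true: list was empty, kick worker */
		puts("queue_work()");
	if (dead_list_add(&b))               /* false: a worker is already pending */
		puts("queue_work()");

	schedule_cleanup();                  /* one pass drains both nodes */
	return 0;
}

Because the push reports "was empty" for only one node per batch, at most one work item is queued, and the single atomic exchange in the consumer removes any need for the old cleanup_list_lock.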