workqueue: make freezing/thawing per-pool

Instead of holding locks from both pools and then processing the pools
together, make freezing/thawing per-pool - grab locks of one pool,
process it, release it and then proceed to the next pool.

While this patch changes processing order across pools, order within
each pool remains the same.  As each pool is independent, this
shouldn't break anything.

This is part of an effort to remove global_cwq and make worker_pool
the top level abstraction, which in turn will help implementing worker
pools with user-specified attributes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
This commit is contained in:
Tejun Heo 2013-01-24 11:01:33 -08:00
parent 94cf58bb29
commit a1056305fa
1 changed file with 22 additions and 28 deletions

View File

@ -3686,25 +3686,22 @@ void freeze_workqueues_begin(void)
struct worker_pool *pool; struct worker_pool *pool;
struct workqueue_struct *wq; struct workqueue_struct *wq;
local_irq_disable();
for_each_worker_pool(pool, gcwq) { for_each_worker_pool(pool, gcwq) {
spin_lock_nested(&pool->lock, pool - gcwq->pools); spin_lock_irq(&pool->lock);
WARN_ON_ONCE(pool->flags & POOL_FREEZING); WARN_ON_ONCE(pool->flags & POOL_FREEZING);
pool->flags |= POOL_FREEZING; pool->flags |= POOL_FREEZING;
list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
if (cwq && cwq->pool == pool &&
(wq->flags & WQ_FREEZABLE))
cwq->max_active = 0;
}
spin_unlock_irq(&pool->lock);
} }
list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
if (cwq && wq->flags & WQ_FREEZABLE)
cwq->max_active = 0;
}
for_each_worker_pool(pool, gcwq)
spin_unlock(&pool->lock);
local_irq_enable();
} }
spin_unlock(&workqueue_lock); spin_unlock(&workqueue_lock);
@ -3779,30 +3776,27 @@ void thaw_workqueues(void)
struct worker_pool *pool; struct worker_pool *pool;
struct workqueue_struct *wq; struct workqueue_struct *wq;
local_irq_disable();
for_each_worker_pool(pool, gcwq) { for_each_worker_pool(pool, gcwq) {
spin_lock_nested(&pool->lock, pool - gcwq->pools); spin_lock_irq(&pool->lock);
WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
pool->flags &= ~POOL_FREEZING; pool->flags &= ~POOL_FREEZING;
}
list_for_each_entry(wq, &workqueues, list) { list_for_each_entry(wq, &workqueues, list) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
if (!cwq || !(wq->flags & WQ_FREEZABLE)) if (!cwq || cwq->pool != pool ||
continue; !(wq->flags & WQ_FREEZABLE))
continue;
/* restore max_active and repopulate worklist */ /* restore max_active and repopulate worklist */
cwq_set_max_active(cwq, wq->saved_max_active); cwq_set_max_active(cwq, wq->saved_max_active);
} }
for_each_worker_pool(pool, gcwq) {
wake_up_worker(pool); wake_up_worker(pool);
spin_unlock(&pool->lock);
spin_unlock_irq(&pool->lock);
} }
local_irq_enable();
} }
workqueue_freezing = false; workqueue_freezing = false;