From 31c89007285d365aa36f71d8fb0701581c770a27 Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Mon, 15 Jan 2024 12:08:22 -0500 Subject: [PATCH 01/54] workqueue.c: Increase workqueue name length Currently we limit the size of the workqueue name to 24 characters due to commit ecf6881ff349 ("workqueue: make workqueue->name[] fixed len"). Increase the size to 32 characters and print a warning when the requested name is longer than the new limit. Signed-off-by: Audra Mitchell Signed-off-by: Tejun Heo --- kernel/workqueue.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 76e60faed892..8d9dec14b9bb 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -108,7 +108,7 @@ enum { RESCUER_NICE_LEVEL = MIN_NICE, HIGHPRI_NICE_LEVEL = MIN_NICE, - WQ_NAME_LEN = 24, + WQ_NAME_LEN = 32, }; /* @@ -4666,6 +4666,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, va_list args; struct workqueue_struct *wq; struct pool_workqueue *pwq; + int len; /* * Unbound && max_active == 1 used to imply ordered, which is no longer @@ -4692,9 +4693,12 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, } va_start(args, max_active); - vsnprintf(wq->name, sizeof(wq->name), fmt, args); + len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); va_end(args); + if (len >= WQ_NAME_LEN) + pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); + max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, flags, wq->name); From ab5e5b99a949b9f282c605d00557b2c727856485 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Tue, 16 Jan 2024 17:19:26 +0100 Subject: [PATCH 02/54] tools/workqueue: Add rescuers printing to wq_dump.py Retrieving rescuer information (e.g., affinity and name) is quite useful when debugging workqueue configurations. Add printing of such information to the existing wq_dump.py script.
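Only workqueues created with WQ_MEM_RECLAIM have a rescuer, so those are the entries the new output covers. As a rough, hypothetical sketch (the workqueue name below is made up and not taken from this series), such a workqueue is created like this:

	/*
	 * Illustrative only: a workqueue allocated with WQ_MEM_RECLAIM gets a
	 * rescuer kthread, which is what the new wq_dump.py section reports.
	 */
	struct workqueue_struct *my_wq;

	my_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;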
Signed-off-by: Juri Lelli Signed-off-by: Tejun Heo --- tools/workqueue/wq_dump.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tools/workqueue/wq_dump.py b/tools/workqueue/wq_dump.py index d0df5833f2c1..6da621989e21 100644 --- a/tools/workqueue/wq_dump.py +++ b/tools/workqueue/wq_dump.py @@ -175,3 +175,32 @@ for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_( if wq.flags & WQ_UNBOUND: print(f' {wq.dfl_pwq.pool.id.value_():{max_pool_id_len}}', end='') print('') + +print('') +print('Workqueue -> rescuer') +print('=====================') +print(f'wq_unbound_cpumask={cpumask_str(wq_unbound_cpumask)}') +print('') +print('[ workqueue \ type unbound_cpumask rescuer pid cpumask]') + +for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'): + print(f'{wq.name.string_().decode()[-24:]:24}', end='') + if wq.flags & WQ_UNBOUND: + if wq.flags & WQ_ORDERED: + print(' ordered ', end='') + else: + print(' unbound', end='') + if wq.unbound_attrs.affn_strict: + print(',S ', end='') + else: + print(' ', end='') + print(f' {cpumask_str(wq.unbound_attrs.cpumask):24}', end='') + else: + print(' percpu ', end='') + print(' ', end='') + + if wq.flags & WQ_MEM_RECLAIM: + print(f' {wq.rescuer.task.comm.string_().decode()[-24:]:24}', end='') + print(f' {wq.rescuer.task.pid.value_():5}', end='') + print(f' {cpumask_str(wq.rescuer.task.cpus_ptr)}', end='') + print('') From 85f0ab43f9de62a4b9c1b503b07f1c33e5a6d2ab Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Tue, 16 Jan 2024 17:19:27 +0100 Subject: [PATCH 03/54] kernel/workqueue: Bind rescuer to unbound cpumask for WQ_UNBOUND At the time they are created, rescuers of unbound workqueues use cpu_possible_mask as their affinity, but this can be too wide if the workqueue's unbound cpumask has been set to a subset of cpu_possible_mask. Make new rescuers use their associated workqueue's unbound cpumask from the start. Signed-off-by: Juri Lelli Signed-off-by: Tejun Heo --- kernel/workqueue.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8d9dec14b9bb..ed442cefea7c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4652,7 +4652,10 @@ static int init_rescuer(struct workqueue_struct *wq) } wq->rescuer = rescuer; - kthread_bind_mask(rescuer->task, cpu_possible_mask); + if (wq->flags & WQ_UNBOUND) + kthread_bind_mask(rescuer->task, wq->unbound_attrs->cpumask); + else + kthread_bind_mask(rescuer->task, cpu_possible_mask); wake_up_process(rescuer->task); return 0; From 1a65a6d17cbc58e1aeffb2be962acce49efbef9c Mon Sep 17 00:00:00 2001 From: Xuewen Yan Date: Wed, 10 Jan 2024 11:27:24 +0800 Subject: [PATCH 04/54] workqueue: Add rcu lock check at the end of work item execution Currently the workqueue just checks the atomic and locking states after work execution ends. However, sometimes a work item may not release the RCU read lock after acquiring it with rcu_read_lock(). As a result, this eventually causes an RCU stall, but the RCU stall warning cannot dump the work function because the work has already finished. In order to quickly discover work items that do not call rcu_read_unlock() after rcu_read_lock(), add an RCU lock check. Use rcu_preempt_depth() to check the work's RCU status. Normally, this value is 0. If it is bigger than 0, the work is still holding an RCU read lock. If so, print an error message and the work function. tj: Reworded the description for clarity. Minor formatting tweak.
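As a rough illustration (a hypothetical work function, not taken from an actual report), the class of bug the new check reports looks like this:

	/*
	 * Hypothetical example: a work function that returns while still
	 * holding the RCU read lock. Once it finishes, rcu_preempt_depth()
	 * is non-zero and the leak is reported together with the work
	 * function's address. some_error_condition() is a made-up helper.
	 */
	static void buggy_work_fn(struct work_struct *work)
	{
		rcu_read_lock();
		if (some_error_condition())
			return;		/* leaks the RCU read lock */
		rcu_read_unlock();
	}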
Signed-off-by: Xuewen Yan Reviewed-by: Lai Jiangshan Reviewed-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ed442cefea7c..aec3efbaaf93 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2640,11 +2640,12 @@ __acquires(&pool->lock) lock_map_release(&lockdep_map); lock_map_release(&pwq->wq->lockdep_map); - if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { - pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" + if (unlikely(in_atomic() || lockdep_depth(current) > 0 || + rcu_preempt_depth() > 0)) { + pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n" " last function: %ps\n", - current->comm, preempt_count(), task_pid_nr(current), - worker->current_func); + current->comm, preempt_count(), rcu_preempt_depth(), + task_pid_nr(current), worker->current_func); debug_show_held_locks(current); dump_stack(); } From 7bd20b6b87183db2ebf789bcf9d0aa6d06a0defb Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Fri, 19 Jan 2024 12:54:39 -0300 Subject: [PATCH 05/54] workqueue: mark power efficient workqueue as unbound if nohz_full is enabled A customer using nohz_full has experienced the following interruption: oslat-1004510 [018] timer_cancel: timer=0xffff90a7ca663cf8 oslat-1004510 [018] timer_expire_entry: timer=0xffff90a7ca663cf8 function=delayed_work_timer_fn now=4709188240 baseclk=4709188240 oslat-1004510 [018] workqueue_queue_work: work struct=0xffff90a7ca663cd8 function=fb_flashcursor workqueue=events_power_efficient req_cpu=8192 cpu=18 oslat-1004510 [018] workqueue_activate_work: work struct 0xffff90a7ca663cd8 oslat-1004510 [018] sched_wakeup: kworker/18:1:326 [120] CPU:018 oslat-1004510 [018] timer_expire_exit: timer=0xffff90a7ca663cf8 oslat-1004510 [018] irq_work_entry: vector=246 oslat-1004510 [018] irq_work_exit: vector=246 oslat-1004510 [018] tick_stop: success=0 dependency=SCHED oslat-1004510 [018] hrtimer_start: hrtimer=0xffff90a70009cb00 function=tick_sched_timer/0x0 ... oslat-1004510 [018] softirq_exit: vec=1 [action=TIMER] oslat-1004510 [018] softirq_entry: vec=7 [action=SCHED] oslat-1004510 [018] softirq_exit: vec=7 [action=SCHED] oslat-1004510 [018] tick_stop: success=0 dependency=SCHED oslat-1004510 [018] sched_switch: oslat:1004510 [120] R ==> kworker/18:1:326 [120] kworker/18:1-326 [018] workqueue_execute_start: work struct 0xffff90a7ca663cd8: function fb_flashcursor kworker/18:1-326 [018] workqueue_queue_work: work struct=0xffff9078f119eed0 function=drm_fb_helper_damage_work workqueue=events req_cpu=8192 cpu=18 kworker/18:1-326 [018] workqueue_activate_work: work struct 0xffff9078f119eed0 kworker/18:1-326 [018] timer_start: timer=0xffff90a7ca663cf8 function=delayed_work_timer_fn ... Set wq_power_efficient to true if nohz_full is enabled. This makes the power efficient workqueue unbound, which allows its work items to be moved to housekeeping (HK) CPUs. Signed-off-by: Marcelo Tosatti Signed-off-by: Tejun Heo --- kernel/workqueue.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index aec3efbaaf93..8ca65665efe9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6638,6 +6638,13 @@ void __init workqueue_init_early(void) wq_update_pod_attrs_buf = alloc_workqueue_attrs(); BUG_ON(!wq_update_pod_attrs_buf); + /* + * If nohz_full is enabled, set power efficient workqueue as unbound. + * This allows workqueue items to be moved to HK CPUs.
+ */ + if (housekeeping_enabled(HK_TYPE_TICK)) + wq_power_efficient = true; + /* initialize WQ_AFFN_SYSTEM pods */ pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); From a6b48c83d28e21ddcd6a080128bb73f9e3d130ac Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 25 Jan 2024 06:21:56 -1000 Subject: [PATCH 06/54] tools/workqueue/wq_dump.py: Clean up code and drop duplicate information - Factor out wq_type_str() - Improve formatting so that it adapts to actual field widths. - Drop duplicate information from "Workqueue -> rescuer" section. If anything, we should add more rescuer-specific info - e.g. the number of work items rescued. Signed-off-by: Tejun Heo Cc: Juri Lelli --- tools/workqueue/wq_dump.py | 71 +++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/tools/workqueue/wq_dump.py b/tools/workqueue/wq_dump.py index 6da621989e21..333b2fc00b82 100644 --- a/tools/workqueue/wq_dump.py +++ b/tools/workqueue/wq_dump.py @@ -75,6 +75,20 @@ def cpumask_str(cpumask): output += f'{v:08x}' return output.strip() +wq_type_len = 9 + +def wq_type_str(wq): + if wq.flags & WQ_UNBOUND: + if wq.flags & WQ_ORDERED: + return f'{"ordered":{wq_type_len}}' + else: + if wq.unbound_attrs.affn_strict: + return f'{"unbound,S":{wq_type_len}}' + else: + return f'{"unbound":{wq_type_len}}' + else: + return f'{"percpu":{wq_type_len}}' + worker_pool_idr = prog['worker_pool_idr'] workqueues = prog['workqueues'] wq_unbound_cpumask = prog['wq_unbound_cpumask'] @@ -92,6 +106,10 @@ WQ_AFFN_CACHE = prog['WQ_AFFN_CACHE'] WQ_AFFN_NUMA = prog['WQ_AFFN_NUMA'] WQ_AFFN_SYSTEM = prog['WQ_AFFN_SYSTEM'] +WQ_NAME_LEN = prog['WQ_NAME_LEN'].value_() + +cpumask_str_len = len(cpumask_str(wq_unbound_cpumask)) + print('Affinity Scopes') print('===============') @@ -148,24 +166,13 @@ print('') print('Workqueue CPU -> pool') print('=====================') -print('[ workqueue \ type CPU', end='') +print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"type CPU":{wq_type_len}}', end='') for cpu in for_each_possible_cpu(prog): print(f' {cpu:{max_pool_id_len}}', end='') print(' dfl]') for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'): - print(f'{wq.name.string_().decode()[-24:]:24}', end='') - if wq.flags & WQ_UNBOUND: - if wq.flags & WQ_ORDERED: - print(' ordered ', end='') - else: - print(' unbound', end='') - if wq.unbound_attrs.affn_strict: - print(',S ', end='') - else: - print(' ', end='') - else: - print(' percpu ', end='') + print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} {wq_type_str(wq):10}', end='') for cpu in for_each_possible_cpu(prog): pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_() @@ -178,29 +185,23 @@ for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_( print('') print('Workqueue -> rescuer') -print('=====================') -print(f'wq_unbound_cpumask={cpumask_str(wq_unbound_cpumask)}') -print('') -print('[ workqueue \ type unbound_cpumask rescuer pid cpumask]') +print('====================') + +ucpus_len = max(cpumask_str_len, len("unbound_cpus")) +rcpus_len = max(cpumask_str_len, len("rescuer_cpus")) + +print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ {"unbound_cpus":{ucpus_len}} pid {"rescuer_cpus":{rcpus_len}} ]') for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'): - print(f'{wq.name.string_().decode()[-24:]:24}', end='') - if wq.flags & WQ_UNBOUND: - if wq.flags & WQ_ORDERED: - print(' ordered ', 
end='') - else: - print(' unbound', end='') - if wq.unbound_attrs.affn_strict: - print(',S ', end='') - else: - print(' ', end='') - print(f' {cpumask_str(wq.unbound_attrs.cpumask):24}', end='') - else: - print(' percpu ', end='') - print(' ', end='') + if not (wq.flags & WQ_MEM_RECLAIM): + continue - if wq.flags & WQ_MEM_RECLAIM: - print(f' {wq.rescuer.task.comm.string_().decode()[-24:]:24}', end='') - print(f' {wq.rescuer.task.pid.value_():5}', end='') - print(f' {cpumask_str(wq.rescuer.task.cpus_ptr)}', end='') + print(f'{wq.name.string_().decode():{WQ_NAME_LEN}}', end='') + if wq.unbound_attrs.value_() != 0: + print(f' {cpumask_str(wq.unbound_attrs.cpumask):{ucpus_len}}', end='') + else: + print(f' {"":{ucpus_len}}', end='') + + print(f' {wq.rescuer.task.pid.value_():6}', end='') + print(f' {cpumask_str(wq.rescuer.task.cpus_ptr):{rcpus_len}}', end='') print('') From 8318d6a6362f5903edb4c904a8dd447e59be4ad1 Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Thu, 25 Jan 2024 14:05:32 -0500 Subject: [PATCH 07/54] workqueue: Shorten events_freezable_power_efficient name Since we have set the WQ_NAME_LEN to 32, decrease the name of events_freezable_power_efficient so that it does not trip the name length warning when the workqueue is created. Signed-off-by: Audra Mitchell Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8ca65665efe9..ee6aa1b897e0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6706,7 +6706,7 @@ void __init workqueue_init_early(void) WQ_FREEZABLE, 0); system_power_efficient_wq = alloc_workqueue("events_power_efficient", WQ_POWER_EFFICIENT, 0); - system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", + system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient", WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0); BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || From 6a229b0e2ff6143b65ba4ef42bd71e29ffc2c16d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 26 Jan 2024 11:55:46 -1000 Subject: [PATCH 08/54] workqueue: Drop unnecessary kick_pool() in create_worker() After creating a new worker, create_worker() is calling kick_pool() to wake up the new worker task. However, as kick_pool() doesn't do anything if there is no work pending, it also calls wake_up_process() explicitly. There's no reason to call kick_pool() at all. wake_up_process() is enough by itself. Drop the unnecessary kick_pool() call. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ee6aa1b897e0..b6b690a17f7c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2217,12 +2217,11 @@ static struct worker *create_worker(struct worker_pool *pool) worker->pool->nr_workers++; worker_enter_idle(worker); - kick_pool(pool); /* * @worker is waiting on a completion in kthread() and will trigger hung - * check if not woken up soon. As kick_pool() might not have waken it - * up, wake it up explicitly once more. + * check if not woken up soon. As kick_pool() is noop if @pool is empty, + * wake it up explicitly. 
*/ wake_up_process(worker->task); From e563d0a7cdc1890ff36bb177b5c8c2854d881e4d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 26 Jan 2024 11:55:50 -1000 Subject: [PATCH 09/54] workqueue: Break up enum definitions and give names to the types workqueue is collecting different sorts of enums into a single unnamed enum type which can increase confusion around enum width. Also, unnamed enums can't be accessed from BPF. Let's break up enum definitions according to their purposes and give them type names. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 41 +++++++++++++++++++++++---------------- kernel/workqueue.c | 6 +++++- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 2cc0a9606175..78047d0d9882 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -22,7 +22,7 @@ */ #define work_data_bits(work) ((unsigned long *)(&(work)->data)) -enum { +enum work_bits { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */ WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ @@ -36,21 +36,6 @@ enum { WORK_STRUCT_COLOR_BITS = 4, - WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, - WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, - WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, - WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, -#ifdef CONFIG_DEBUG_OBJECTS_WORK - WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, -#else - WORK_STRUCT_STATIC = 0, -#endif - - WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), - - /* not bound to any CPU, prefer the local CPU */ - WORK_CPU_UNBOUND = NR_CPUS, - /* * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. * This makes pwqs aligned to 256 bytes and allows 16 workqueue @@ -74,6 +59,26 @@ enum { WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, +}; + +enum work_flags { + WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, + WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, + WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, + WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, +#ifdef CONFIG_DEBUG_OBJECTS_WORK + WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, +#else + WORK_STRUCT_STATIC = 0, +#endif +}; + +enum wq_misc_consts { + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), + + /* not bound to any CPU, prefer the local CPU */ + WORK_CPU_UNBOUND = NR_CPUS, + /* bit mask for work_busy() return values */ WORK_BUSY_PENDING = 1 << 0, WORK_BUSY_RUNNING = 1 << 1, @@ -347,7 +352,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } * Workqueue flags and constants. For details, please refer to * Documentation/core-api/workqueue.rst. */ -enum { +enum wq_flags { WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ @@ -387,7 +392,9 @@ enum { __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */ +}; +enum wq_consts { WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? 
*/ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE, WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b6b690a17f7c..45d0a784ba4f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -56,7 +56,7 @@ #include "workqueue_internal.h" -enum { +enum worker_pool_flags { /* * worker_pool flags * @@ -75,7 +75,9 @@ enum { */ POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ +}; +enum worker_flags { /* worker flags */ WORKER_DIE = 1 << 1, /* die die die */ WORKER_IDLE = 1 << 2, /* is idle */ @@ -86,7 +88,9 @@ enum { WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND | WORKER_REBOUND, +}; +enum wq_internal_consts { NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */ From a045a272d887575da17ad86d6573e82871b50c27 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 10/54] workqueue: Move pwq->max_active to wq->max_active max_active is a workqueue-wide setting and the configured value is stored in wq->saved_max_active; however, the effective value was stored in pwq->max_active. While this is harmless, it makes max_active update process more complicated and gets in the way of the planned max_active semantic updates for unbound workqueues. This patches moves pwq->max_active to wq->max_active. This simplifies the code and makes freezing and noop max_active updates cheaper too. No user-visible behavior change is intended. As wq->max_active is updated while holding wq mutex but read without any locking, it now uses WRITE/READ_ONCE(). A new locking locking rule WO is added for it. v2: wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 133 ++++++++++++++++++++++----------------------- 1 file changed, 66 insertions(+), 67 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 45d0a784ba4f..a23a35dbcd74 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -147,6 +147,9 @@ enum wq_internal_consts { * * WR: wq->mutex protected for writes. RCU protected for reads. * + * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read + * with READ_ONCE() without locking. + * * MD: wq_mayday_lock protected. * * WD: Used internally by the watchdog. @@ -254,7 +257,6 @@ struct pool_workqueue { * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. 
*/ int nr_active; /* L: nr of active works */ - int max_active; /* L: max active works */ struct list_head inactive_works; /* L: inactive works */ struct list_head pwqs_node; /* WR: node on wq->pwqs */ struct list_head mayday_node; /* MD: node on wq->maydays */ @@ -302,7 +304,8 @@ struct workqueue_struct { struct worker *rescuer; /* MD: rescue worker */ int nr_drainers; /* WQ: drain in progress */ - int saved_max_active; /* WQ: saved pwq max_active */ + int max_active; /* WO: max active works */ + int saved_max_active; /* WQ: saved max_active */ struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */ @@ -1496,7 +1499,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_ pwq->nr_active--; if (!list_empty(&pwq->inactive_works)) { /* one down, submit an inactive one */ - if (pwq->nr_active < pwq->max_active) + if (pwq->nr_active < READ_ONCE(pwq->wq->max_active)) pwq_activate_first_inactive(pwq); } } @@ -1797,7 +1800,13 @@ retry: pwq->nr_in_flight[pwq->work_color]++; work_flags = work_color_to_flags(pwq->work_color); - if (likely(pwq->nr_active < pwq->max_active)) { + /* + * Limit the number of concurrently active work items to max_active. + * @work must also queue behind existing inactive work items to maintain + * ordering when max_active changes. See wq_adjust_max_active(). + */ + if (list_empty(&pwq->inactive_works) && + pwq->nr_active < READ_ONCE(pwq->wq->max_active)) { if (list_empty(&pool->worklist)) pool->watchdog_ts = jiffies; @@ -4146,50 +4155,6 @@ static void pwq_release_workfn(struct kthread_work *work) } } -/** - * pwq_adjust_max_active - update a pwq's max_active to the current setting - * @pwq: target pool_workqueue - * - * If @pwq isn't freezing, set @pwq->max_active to the associated - * workqueue's saved_max_active and activate inactive work items - * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. - */ -static void pwq_adjust_max_active(struct pool_workqueue *pwq) -{ - struct workqueue_struct *wq = pwq->wq; - bool freezable = wq->flags & WQ_FREEZABLE; - unsigned long flags; - - /* for @wq->saved_max_active */ - lockdep_assert_held(&wq->mutex); - - /* fast exit for non-freezable wqs */ - if (!freezable && pwq->max_active == wq->saved_max_active) - return; - - /* this function can be called during early boot w/ irq disabled */ - raw_spin_lock_irqsave(&pwq->pool->lock, flags); - - /* - * During [un]freezing, the caller is responsible for ensuring that - * this function is called at least once after @workqueue_freezing - * is updated and visible. 
- */ - if (!freezable || !workqueue_freezing) { - pwq->max_active = wq->saved_max_active; - - while (!list_empty(&pwq->inactive_works) && - pwq->nr_active < pwq->max_active) - pwq_activate_first_inactive(pwq); - - kick_pool(pwq->pool); - } else { - pwq->max_active = 0; - } - - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); -} - /* initialize newly allocated @pwq which is associated with @wq and @pool */ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) @@ -4222,9 +4187,6 @@ static void link_pwq(struct pool_workqueue *pwq) /* set the matching work_color */ pwq->work_color = wq->work_color; - /* sync max_active to the current setting */ - pwq_adjust_max_active(pwq); - /* link in @pwq */ list_add_rcu(&pwq->pwqs_node, &wq->pwqs); } @@ -4665,6 +4627,52 @@ static int init_rescuer(struct workqueue_struct *wq) return 0; } +/** + * wq_adjust_max_active - update a wq's max_active to the current setting + * @wq: target workqueue + * + * If @wq isn't freezing, set @wq->max_active to the saved_max_active and + * activate inactive work items accordingly. If @wq is freezing, clear + * @wq->max_active to zero. + */ +static void wq_adjust_max_active(struct workqueue_struct *wq) +{ + struct pool_workqueue *pwq; + + lockdep_assert_held(&wq->mutex); + + if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { + WRITE_ONCE(wq->max_active, 0); + return; + } + + if (wq->max_active == wq->saved_max_active) + return; + + /* + * Update @wq->max_active and then kick inactive work items if more + * active work items are allowed. This doesn't break work item ordering + * because new work items are always queued behind existing inactive + * work items if there are any. + */ + WRITE_ONCE(wq->max_active, wq->saved_max_active); + + for_each_pwq(pwq, wq) { + unsigned long flags; + + /* this function can be called during early boot w/ irq disabled */ + raw_spin_lock_irqsave(&pwq->pool->lock, flags); + + while (!list_empty(&pwq->inactive_works) && + pwq->nr_active < wq->max_active) + pwq_activate_first_inactive(pwq); + + kick_pool(pwq->pool); + + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + } +} + __printf(1, 4) struct workqueue_struct *alloc_workqueue(const char *fmt, unsigned int flags, @@ -4672,7 +4680,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, { va_list args; struct workqueue_struct *wq; - struct pool_workqueue *pwq; int len; /* @@ -4711,6 +4718,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, /* init wq */ wq->flags = flags; + wq->max_active = max_active; wq->saved_max_active = max_active; mutex_init(&wq->mutex); atomic_set(&wq->nr_pwqs_to_flush, 0); @@ -4739,8 +4747,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, mutex_lock(&wq_pool_mutex); mutex_lock(&wq->mutex); - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); list_add_tail_rcu(&wq->list, &workqueues); @@ -4878,8 +4885,6 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); */ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) { - struct pool_workqueue *pwq; - /* disallow meddling with max_active for ordered workqueues */ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return; @@ -4890,9 +4895,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; - - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); } @@ -5139,8 +5142,8 @@ static void 
show_pwq(struct pool_workqueue *pwq) pr_info(" pwq %d:", pool->id); pr_cont_pool_info(pool); - pr_cont(" active=%d/%d refcnt=%d%s\n", - pwq->nr_active, pwq->max_active, pwq->refcnt, + pr_cont(" active=%d refcnt=%d%s\n", + pwq->nr_active, pwq->refcnt, !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); hash_for_each(pool->busy_hash, bkt, worker, hentry) { @@ -5688,7 +5691,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); void freeze_workqueues_begin(void) { struct workqueue_struct *wq; - struct pool_workqueue *pwq; mutex_lock(&wq_pool_mutex); @@ -5697,8 +5699,7 @@ void freeze_workqueues_begin(void) list_for_each_entry(wq, &workqueues, list) { mutex_lock(&wq->mutex); - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); } @@ -5763,7 +5764,6 @@ out_unlock: void thaw_workqueues(void) { struct workqueue_struct *wq; - struct pool_workqueue *pwq; mutex_lock(&wq_pool_mutex); @@ -5775,8 +5775,7 @@ void thaw_workqueues(void) /* restore max_active and repopulate worklist */ list_for_each_entry(wq, &workqueues, list) { mutex_lock(&wq->mutex); - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); } From afa87ce85379e2d93863fce595afdb5771a84004 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 11/54] workqueue: Factor out pwq_is_empty() "!pwq->nr_active && list_empty(&pwq->inactive_works)" test is repeated multiple times. Let's factor it out into pwq_is_empty(). Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a23a35dbcd74..171d0e6d29f6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1460,6 +1460,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) } } +static bool pwq_is_empty(struct pool_workqueue *pwq) +{ + return !pwq->nr_active && list_empty(&pwq->inactive_works); +} + static void pwq_activate_inactive_work(struct work_struct *work) { struct pool_workqueue *pwq = get_work_pwq(work); @@ -3329,7 +3334,7 @@ reflush: bool drained; raw_spin_lock_irq(&pwq->pool->lock); - drained = !pwq->nr_active && list_empty(&pwq->inactive_works); + drained = pwq_is_empty(pwq); raw_spin_unlock_irq(&pwq->pool->lock); if (drained) @@ -4779,7 +4784,7 @@ static bool pwq_busy(struct pool_workqueue *pwq) if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) return true; - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) + if (!pwq_is_empty(pwq)) return true; return false; @@ -5217,7 +5222,7 @@ void show_one_workqueue(struct workqueue_struct *wq) unsigned long flags; for_each_pwq(pwq, wq) { - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { + if (!pwq_is_empty(pwq)) { idle = false; break; } @@ -5229,7 +5234,7 @@ void show_one_workqueue(struct workqueue_struct *wq) for_each_pwq(pwq, wq) { raw_spin_lock_irqsave(&pwq->pool->lock, flags); - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { + if (!pwq_is_empty(pwq)) { /* * Defer printing to avoid deadlocks in console * drivers that queue work while holding locks From 4c6380305d21e36581b451f7337a36c93b64e050 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 12/54] workqueue: Replace pwq_activate_inactive_work() with [__]pwq_activate_work() To prepare for unbound nr_active handling improvements, move work activation part of pwq_activate_inactive_work() into __pwq_activate_work() and add 
pwq_activate_work() which tests WORK_STRUCT_INACTIVE and updates nr_active. pwq_activate_first_inactive() and try_to_grab_pending() are updated to use pwq_activate_work(). The latter conversion is functionally identical. For the former, this conversion adds an unnecessary WORK_STRUCT_INACTIVE testing. This is temporary and will be removed by the next patch. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 171d0e6d29f6..403f3a14166d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1465,16 +1465,36 @@ static bool pwq_is_empty(struct pool_workqueue *pwq) return !pwq->nr_active && list_empty(&pwq->inactive_works); } -static void pwq_activate_inactive_work(struct work_struct *work) +static void __pwq_activate_work(struct pool_workqueue *pwq, + struct work_struct *work) { - struct pool_workqueue *pwq = get_work_pwq(work); - trace_workqueue_activate_work(work); if (list_empty(&pwq->pool->worklist)) pwq->pool->watchdog_ts = jiffies; move_linked_works(work, &pwq->pool->worklist, NULL); __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); +} + +/** + * pwq_activate_work - Activate a work item if inactive + * @pwq: pool_workqueue @work belongs to + * @work: work item to activate + * + * Returns %true if activated. %false if already active. + */ +static bool pwq_activate_work(struct pool_workqueue *pwq, + struct work_struct *work) +{ + struct worker_pool *pool = pwq->pool; + + lockdep_assert_held(&pool->lock); + + if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE)) + return false; + pwq->nr_active++; + __pwq_activate_work(pwq, work); + return true; } static void pwq_activate_first_inactive(struct pool_workqueue *pwq) @@ -1482,7 +1502,7 @@ static void pwq_activate_first_inactive(struct pool_workqueue *pwq) struct work_struct *work = list_first_entry(&pwq->inactive_works, struct work_struct, entry); - pwq_activate_inactive_work(work); + pwq_activate_work(pwq, work); } /** @@ -1620,8 +1640,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, * management later on and cause stall. Make sure the work * item is activated before grabbing. */ - if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) - pwq_activate_inactive_work(work); + pwq_activate_work(pwq, work); list_del_init(&work->entry); pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); From 1c270b79ce0b8290f146255ea9057243f6dd3c17 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 13/54] workqueue: Move nr_active handling into helpers __queue_work(), pwq_dec_nr_in_flight() and wq_adjust_max_active() were open-coding nr_active handling, which is fine given that the operations are trivial. However, the planned unbound nr_active update will make them more complicated, so let's move them into helpers. - pwq_tryinc_nr_active() is added. It increments nr_active if under max_active limit and return a boolean indicating whether inc was successful. Note that the function is structured to accommodate future changes. __queue_work() is updated to use the new helper. - pwq_activate_first_inactive() is updated to use pwq_tryinc_nr_active() and thus no longer assumes that nr_active is under max_active and returns a boolean to indicate whether a work item has been activated. - wq_adjust_max_active() no longer tests directly whether a work item can be activated. 
Instead, it's updated to use the return value of pwq_activate_first_inactive() to tell whether a work item has been activated. - nr_active decrement and activating the first inactive work item is factored into pwq_dec_nr_active(). v3: - WARN_ON_ONCE(!WORK_STRUCT_INACTIVE) added to __pwq_activate_work() as now we're calling the function unconditionally from pwq_activate_first_inactive(). v2: - wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 86 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 67 insertions(+), 19 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 403f3a14166d..b3d818d46e99 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1468,11 +1468,14 @@ static bool pwq_is_empty(struct pool_workqueue *pwq) static void __pwq_activate_work(struct pool_workqueue *pwq, struct work_struct *work) { + unsigned long *wdb = work_data_bits(work); + + WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); trace_workqueue_activate_work(work); if (list_empty(&pwq->pool->worklist)) pwq->pool->watchdog_ts = jiffies; move_linked_works(work, &pwq->pool->worklist, NULL); - __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); + __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); } /** @@ -1497,12 +1500,66 @@ static bool pwq_activate_work(struct pool_workqueue *pwq, return true; } -static void pwq_activate_first_inactive(struct pool_workqueue *pwq) +/** + * pwq_tryinc_nr_active - Try to increment nr_active for a pwq + * @pwq: pool_workqueue of interest + * + * Try to increment nr_active for @pwq. Returns %true if an nr_active count is + * successfully obtained. %false otherwise. + */ +static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) { - struct work_struct *work = list_first_entry(&pwq->inactive_works, - struct work_struct, entry); + struct workqueue_struct *wq = pwq->wq; + struct worker_pool *pool = pwq->pool; + bool obtained; - pwq_activate_work(pwq, work); + lockdep_assert_held(&pool->lock); + + obtained = pwq->nr_active < READ_ONCE(wq->max_active); + + if (obtained) + pwq->nr_active++; + return obtained; +} + +/** + * pwq_activate_first_inactive - Activate the first inactive work item on a pwq + * @pwq: pool_workqueue of interest + * + * Activate the first inactive work item of @pwq if available and allowed by + * max_active limit. + * + * Returns %true if an inactive work item has been activated. %false if no + * inactive work item is found or max_active limit is reached. + */ +static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) +{ + struct work_struct *work = + list_first_entry_or_null(&pwq->inactive_works, + struct work_struct, entry); + + if (work && pwq_tryinc_nr_active(pwq)) { + __pwq_activate_work(pwq, work); + return true; + } else { + return false; + } +} + +/** + * pwq_dec_nr_active - Retire an active count + * @pwq: pool_workqueue of interest + * + * Decrement @pwq's nr_active and try to activate the first inactive work item. 
+ */ +static void pwq_dec_nr_active(struct pool_workqueue *pwq) +{ + struct worker_pool *pool = pwq->pool; + + lockdep_assert_held(&pool->lock); + + pwq->nr_active--; + pwq_activate_first_inactive(pwq); } /** @@ -1520,14 +1577,8 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_ { int color = get_work_color(work_data); - if (!(work_data & WORK_STRUCT_INACTIVE)) { - pwq->nr_active--; - if (!list_empty(&pwq->inactive_works)) { - /* one down, submit an inactive one */ - if (pwq->nr_active < READ_ONCE(pwq->wq->max_active)) - pwq_activate_first_inactive(pwq); - } - } + if (!(work_data & WORK_STRUCT_INACTIVE)) + pwq_dec_nr_active(pwq); pwq->nr_in_flight[color]--; @@ -1829,13 +1880,11 @@ retry: * @work must also queue behind existing inactive work items to maintain * ordering when max_active changes. See wq_adjust_max_active(). */ - if (list_empty(&pwq->inactive_works) && - pwq->nr_active < READ_ONCE(pwq->wq->max_active)) { + if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) { if (list_empty(&pool->worklist)) pool->watchdog_ts = jiffies; trace_workqueue_activate_work(work); - pwq->nr_active++; insert_work(pwq, work, &pool->worklist, work_flags); kick_pool(pool); } else { @@ -4687,9 +4736,8 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) /* this function can be called during early boot w/ irq disabled */ raw_spin_lock_irqsave(&pwq->pool->lock, flags); - while (!list_empty(&pwq->inactive_works) && - pwq->nr_active < wq->max_active) - pwq_activate_first_inactive(pwq); + while (pwq_activate_first_inactive(pwq)) + ; kick_pool(pwq->pool); From c5404d4e6df6faba1007544b5f4e62c7c14416dd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 14/54] workqueue: Make wq_adjust_max_active() round-robin pwqs while activating wq_adjust_max_active() needs to activate work items after max_active is increased. Previously, it did that by visiting each pwq once activating all that could be activated. While this makes sense with per-pwq nr_active, nr_active will be shared across multiple pwqs for unbound wqs. Then, we'd want to round-robin through pwqs to be fairer. In preparation, this patch makes wq_adjust_max_active() round-robin pwqs while activating. While the activation ordering changes, this shouldn't cause user-noticeable behavior changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b3d818d46e99..489f70846ac9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4710,7 +4710,7 @@ static int init_rescuer(struct workqueue_struct *wq) */ static void wq_adjust_max_active(struct workqueue_struct *wq) { - struct pool_workqueue *pwq; + bool activated; lockdep_assert_held(&wq->mutex); @@ -4730,19 +4730,26 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) */ WRITE_ONCE(wq->max_active, wq->saved_max_active); - for_each_pwq(pwq, wq) { - unsigned long flags; + /* + * Round-robin through pwq's activating the first inactive work item + * until max_active is filled. 
+ */ + do { + struct pool_workqueue *pwq; - /* this function can be called during early boot w/ irq disabled */ - raw_spin_lock_irqsave(&pwq->pool->lock, flags); + activated = false; + for_each_pwq(pwq, wq) { + unsigned long flags; - while (pwq_activate_first_inactive(pwq)) - ; - - kick_pool(pwq->pool); - - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); - } + /* can be called during early boot w/ irq disabled */ + raw_spin_lock_irqsave(&pwq->pool->lock, flags); + if (pwq_activate_first_inactive(pwq)) { + activated = true; + kick_pool(pwq->pool); + } + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + } + } while (activated); } __printf(1, 4) From 9f66cff212bb3c1cd25996aaa0dfd0c9e9d8baab Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 15/54] workqueue: RCU protect wq->dfl_pwq and implement accessors for it wq->cpu_pwq is RCU protected but wq->dfl_pwq isn't. This is okay because currently wq->dfl_pwq is used only accessed to install it into wq->cpu_pwq which doesn't require RCU access. However, we want to be able to access wq->dfl_pwq under RCU in the future to access its __pod_cpumask and the code can be made easier to read by making the two pwq fields behave in the same way. - Make wq->dfl_pwq RCU protected. - Add unbound_pwq_slot() and unbound_pwq() which can access both ->dfl_pwq and ->cpu_pwq. The former returns the double pointer that can be used access and update the pwqs. The latter performs locking check and dereferences the double pointer. - pwq accesses and updates are converted to use unbound_pwq[_slot](). Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 64 +++++++++++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 489f70846ac9..1579b8c9a579 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -308,7 +308,7 @@ struct workqueue_struct { int saved_max_active; /* WQ: saved max_active */ struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ - struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */ + struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */ #ifdef CONFIG_SYSFS struct wq_device *wq_dev; /* I: for sysfs interface */ @@ -639,6 +639,23 @@ static int worker_pool_assign_id(struct worker_pool *pool) return ret; } +static struct pool_workqueue __rcu ** +unbound_pwq_slot(struct workqueue_struct *wq, int cpu) +{ + if (cpu >= 0) + return per_cpu_ptr(wq->cpu_pwq, cpu); + else + return &wq->dfl_pwq; +} + +/* @cpu < 0 for dfl_pwq */ +static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu) +{ + return rcu_dereference_check(*unbound_pwq_slot(wq, cpu), + lockdep_is_held(&wq_pool_mutex) || + lockdep_is_held(&wq->mutex)); +} + static unsigned int work_color_to_flags(int color) { return color << WORK_STRUCT_COLOR_SHIFT; @@ -4328,10 +4345,11 @@ static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, "possible intersect\n"); } -/* install @pwq into @wq's cpu_pwq and return the old pwq */ +/* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */ static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, int cpu, struct pool_workqueue *pwq) { + struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu); struct pool_workqueue *old_pwq; lockdep_assert_held(&wq_pool_mutex); @@ -4340,8 +4358,8 @@ static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, /* link_pwq() can 
handle duplicate calls */ link_pwq(pwq); - old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); - rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq); + old_pwq = rcu_access_pointer(*slot); + rcu_assign_pointer(*slot, pwq); return old_pwq; } @@ -4441,14 +4459,11 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); - /* save the previous pwq and install the new one */ + /* save the previous pwqs and install the new ones */ for_each_possible_cpu(cpu) ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, ctx->pwq_tbl[cpu]); - - /* @dfl_pwq might not have been used, ensure it's linked */ - link_pwq(ctx->dfl_pwq); - swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); + ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); mutex_unlock(&ctx->wq->mutex); } @@ -4558,9 +4573,7 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu, /* nothing to do if the target cpumask matches the current pwq */ wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); - pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu), - lockdep_is_held(&wq_pool_mutex)); - if (wqattrs_equal(target_attrs, pwq->pool->attrs)) + if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) return; /* create a new pwq */ @@ -4578,10 +4591,11 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu, use_dfl_pwq: mutex_lock(&wq->mutex); - raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); - get_pwq(wq->dfl_pwq); - raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); - old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq); + pwq = unbound_pwq(wq, -1); + raw_spin_lock_irq(&pwq->pool->lock); + get_pwq(pwq); + raw_spin_unlock_irq(&pwq->pool->lock); + old_pwq = install_unbound_pwq(wq, cpu, pwq); out_unlock: mutex_unlock(&wq->mutex); put_pwq_unlocked(old_pwq); @@ -4619,10 +4633,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq) cpus_read_lock(); if (wq->flags & __WQ_ORDERED) { + struct pool_workqueue *dfl_pwq; + ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); /* there should only be single pwq for ordering guarantee */ - WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || - wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), + dfl_pwq = rcu_access_pointer(wq->dfl_pwq); + WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node || + wq->pwqs.prev != &dfl_pwq->pwqs_node), "ordering guarantee broken for workqueue %s\n", wq->name); } else { ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); @@ -4856,7 +4873,7 @@ static bool pwq_busy(struct pool_workqueue *pwq) if (pwq->nr_in_flight[i]) return true; - if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) + if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1)) return true; if (!pwq_is_empty(pwq)) return true; @@ -4940,13 +4957,12 @@ void destroy_workqueue(struct workqueue_struct *wq) rcu_read_lock(); for_each_possible_cpu(cpu) { - pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); - RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL); - put_pwq_unlocked(pwq); + put_pwq_unlocked(unbound_pwq(wq, cpu)); + RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL); } - put_pwq_unlocked(wq->dfl_pwq); - wq->dfl_pwq = NULL; + put_pwq_unlocked(unbound_pwq(wq, -1)); + RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL); rcu_read_unlock(); } From dd6c3c5441263723305a9c52c5ccc899a4653000 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 16/54] workqueue: Move pwq_dec_nr_in_flight() to the end of work item handling The 
planned shared nr_active handling for unbound workqueues will make pwq_dec_nr_active() sometimes drop the pool lock temporarily to acquire other pool locks, which is necessary as retirement of an nr_active count from one pool may need to kick off an inactive work item in another pool. This patch moves the pwq_dec_nr_in_flight() call in try_to_grab_pending() to the end of work item handling so that work item state changes stay atomic. process_one_work() which is the other user of pwq_dec_nr_in_flight() already calls it at the end of work item handling. Comments are added to both call sites and pwq_dec_nr_in_flight(). This shouldn't cause any behavior changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1579b8c9a579..b5aba0e5a699 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1587,6 +1587,11 @@ static void pwq_dec_nr_active(struct pool_workqueue *pwq) * A work either has completed or is removed from pending queue, * decrement nr_in_flight of its pwq and handle workqueue flushing. * + * NOTE: + * For unbound workqueues, this function may temporarily drop @pwq->pool->lock + * and thus should be called after all other state updates for the in-flight + * work item is complete. + * * CONTEXT: * raw_spin_lock_irq(pool->lock). */ @@ -1711,11 +1716,13 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, pwq_activate_work(pwq, work); list_del_init(&work->entry); - pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); /* work->data points to pwq iff queued, point to pool */ set_work_pool_and_keep_pending(work, pool->id); + /* must be the last step, see the function comment */ + pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); + raw_spin_unlock(&pool->lock); rcu_read_unlock(); return 1; @@ -2780,6 +2787,8 @@ __acquires(&pool->lock) worker->current_func = NULL; worker->current_pwq = NULL; worker->current_color = INT_MAX; + + /* must be the last step, see the function comment */ + pwq_dec_nr_in_flight(pwq, work_data); } From 91ccc6e7233bb10a9c176aa4cc70d6f432a441a5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: [PATCH 17/54] workqueue: Introduce struct wq_node_nr_active Currently, for both percpu and unbound workqueues, max_active applies per-cpu, which is a recent change for unbound workqueues. The change for unbound workqueues was a significant departure from the previous behavior of per-node application. It made some use cases create an undesirable number of concurrent work items and left no good way of fixing them. To address the problem, workqueue is implementing a NUMA node segmented global nr_active mechanism, which will be explained further in the next patch. As a preparation, this patch introduces struct wq_node_nr_active. It's a data structure allocated for each workqueue and NUMA node pair and currently only tracks the workqueue's number of active work items on the node. This is split out from the next patch to make it easier to understand and review. Note that there is an extra wq_node_nr_active allocated for the invalid node nr_node_ids which is used to track nr_active for pools which don't have a NUMA node associated, such as the default fallback system-wide pool. This doesn't cause any behavior changes visible to userland yet. The next patch will build the control mechanism on top. v4: - Fixed out-of-bound access when freeing per-cpu workqueues.
v3: - Use flexible array for wq->node_nr_active as suggested by Lai. v2: - wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai. - Lai pointed out that pwq_tryinc_nr_active() incorrectly dropped pwq->max_active check. Restored. As the next patch replaces the max_active enforcement mechanism, this doesn't change the end result. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 142 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 135 insertions(+), 7 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b5aba0e5a699..8d465478adb9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -284,6 +284,16 @@ struct wq_flusher { struct wq_device; +/* + * Unlike in a per-cpu workqueue where max_active limits its concurrency level + * on each CPU, in an unbound workqueue, max_active applies to the whole system. + * As sharing a single nr_active across multiple sockets can be very expensive, + * the counting and enforcement is per NUMA node. + */ +struct wq_node_nr_active { + atomic_t nr; /* per-node nr_active count */ +}; + /* * The externally visible workqueue. It relays the issued work items to * the appropriate worker_pool through its pool_workqueues. @@ -330,6 +340,7 @@ struct workqueue_struct { /* hot fields used during command issue, aligned to cacheline */ unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ + struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */ }; static struct kmem_cache *pwq_cache; @@ -1425,6 +1436,31 @@ work_func_t wq_worker_last_func(struct task_struct *task) return worker->last_func; } +/** + * wq_node_nr_active - Determine wq_node_nr_active to use + * @wq: workqueue of interest + * @node: NUMA node, can be %NUMA_NO_NODE + * + * Determine wq_node_nr_active to use for @wq on @node. Returns: + * + * - %NULL for per-cpu workqueues as they don't need to use shared nr_active. + * + * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE. + * + * - Otherwise, node_nr_active[@node]. 
+ */ +static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq, + int node) +{ + if (!(wq->flags & WQ_UNBOUND)) + return NULL; + + if (node == NUMA_NO_NODE) + node = nr_node_ids; + + return wq->node_nr_active[node]; +} + /** * get_pwq - get an extra reference on the specified pool_workqueue * @pwq: pool_workqueue to get @@ -1506,12 +1542,17 @@ static bool pwq_activate_work(struct pool_workqueue *pwq, struct work_struct *work) { struct worker_pool *pool = pwq->pool; + struct wq_node_nr_active *nna; lockdep_assert_held(&pool->lock); if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE)) return false; + nna = wq_node_nr_active(pwq->wq, pool->node); + if (nna) + atomic_inc(&nna->nr); + pwq->nr_active++; __pwq_activate_work(pwq, work); return true; @@ -1528,14 +1569,18 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) { struct workqueue_struct *wq = pwq->wq; struct worker_pool *pool = pwq->pool; + struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); bool obtained; lockdep_assert_held(&pool->lock); obtained = pwq->nr_active < READ_ONCE(wq->max_active); - if (obtained) + if (obtained) { pwq->nr_active++; + if (nna) + atomic_inc(&nna->nr); + } return obtained; } @@ -1572,10 +1617,26 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) static void pwq_dec_nr_active(struct pool_workqueue *pwq) { struct worker_pool *pool = pwq->pool; + struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node); lockdep_assert_held(&pool->lock); + /* + * @pwq->nr_active should be decremented for both percpu and unbound + * workqueues. + */ pwq->nr_active--; + + /* + * For a percpu workqueue, it's simple. Just need to kick the first + * inactive work item on @pwq itself. + */ + if (!nna) { + pwq_activate_first_inactive(pwq); + return; + } + + atomic_dec(&nna->nr); pwq_activate_first_inactive(pwq); } @@ -4039,11 +4100,63 @@ static void wq_free_lockdep(struct workqueue_struct *wq) } #endif +static void free_node_nr_active(struct wq_node_nr_active **nna_ar) +{ + int node; + + for_each_node(node) { + kfree(nna_ar[node]); + nna_ar[node] = NULL; + } + + kfree(nna_ar[nr_node_ids]); + nna_ar[nr_node_ids] = NULL; +} + +static void init_node_nr_active(struct wq_node_nr_active *nna) +{ + atomic_set(&nna->nr, 0); +} + +/* + * Each node's nr_active counter will be accessed mostly from its own node and + * should be allocated in the node. 
+ */ +static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar) +{ + struct wq_node_nr_active *nna; + int node; + + for_each_node(node) { + nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node); + if (!nna) + goto err_free; + init_node_nr_active(nna); + nna_ar[node] = nna; + } + + /* [nr_node_ids] is used as the fallback */ + nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE); + if (!nna) + goto err_free; + init_node_nr_active(nna); + nna_ar[nr_node_ids] = nna; + + return 0; + +err_free: + free_node_nr_active(nna_ar); + return -ENOMEM; +} + static void rcu_free_wq(struct rcu_head *rcu) { struct workqueue_struct *wq = container_of(rcu, struct workqueue_struct, rcu); + if (wq->flags & WQ_UNBOUND) + free_node_nr_active(wq->node_nr_active); + wq_free_lockdep(wq); free_percpu(wq->cpu_pwq); free_workqueue_attrs(wq->unbound_attrs); @@ -4785,7 +4898,8 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, { va_list args; struct workqueue_struct *wq; - int len; + size_t wq_size; + int name_len; /* * Unbound && max_active == 1 used to imply ordered, which is no longer @@ -4801,7 +4915,12 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, flags |= WQ_UNBOUND; /* allocate wq and format name */ - wq = kzalloc(sizeof(*wq), GFP_KERNEL); + if (flags & WQ_UNBOUND) + wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1); + else + wq_size = sizeof(*wq); + + wq = kzalloc(wq_size, GFP_KERNEL); if (!wq) return NULL; @@ -4812,11 +4931,12 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, } va_start(args, max_active); - len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); + name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); va_end(args); - if (len >= WQ_NAME_LEN) - pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); + if (name_len >= WQ_NAME_LEN) + pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", + wq->name); max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, flags, wq->name); @@ -4835,8 +4955,13 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, wq_init_lockdep(wq); INIT_LIST_HEAD(&wq->list); + if (flags & WQ_UNBOUND) { + if (alloc_node_nr_active(wq->node_nr_active) < 0) + goto err_unreg_lockdep; + } + if (alloc_and_link_pwqs(wq) < 0) - goto err_unreg_lockdep; + goto err_free_node_nr_active; if (wq_online && init_rescuer(wq) < 0) goto err_destroy; @@ -4861,6 +4986,9 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, return wq; +err_free_node_nr_active: + if (wq->flags & WQ_UNBOUND) + free_node_nr_active(wq->node_nr_active); err_unreg_lockdep: wq_unregister_lockdep(wq); wq_free_lockdep(wq); From 5797b1c18919cd9c289ded7954383e499f729ce0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:25 -1000 Subject: [PATCH 18/54] workqueue: Implement system-wide nr_active enforcement for unbound workqueues A pool_workqueue (pwq) represents the connection between a workqueue and a worker_pool. One of the roles that a pwq plays is enforcement of the max_active concurrency limit. Before 636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues"), there was one pwq per each CPU for per-cpu workqueues and per each NUMA node for unbound workqueues, which was a natural result of per-cpu workqueues being served by per-cpu pools and unbound by per-NUMA pools. In terms of max_active enforcement, this was, while not perfect, workable. For per-cpu workqueues, it was fine. 
For unbound workqueues, it wasn't great in that NUMA machines would get max_active multiplied by the number of nodes, but it didn't cause huge problems because NUMA machines are relatively rare and the node count is usually pretty low.

However, cache layouts are more complex now and sharing a worker pool across a whole node didn't really work well for unbound workqueues. Thus, a series of commits culminating in 8639ecebc9b1 ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues") implemented a more flexible affinity mechanism for unbound workqueues which enables using e.g. last-level-cache aligned pools. In the process, 636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues") made unbound workqueues use per-cpu pwqs like per-cpu workqueues.

While the change was necessary to enable more flexible affinity scopes, it came with the side effect of blowing up the effective max_active for unbound workqueues. Before, the effective max_active for unbound workqueues was multiplied by the number of nodes; after, by the number of CPUs.

636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues") claims that this should generally be okay. It is okay for users which self-regulate their concurrency level, which are the vast majority; however, there are enough use cases which actually depend on max_active to prevent the level of concurrency from going bonkers, including several IO handling workqueues that can issue a work item for each in-flight IO. With targeted benchmarks, the misbehavior can easily be exposed as reported in http://lkml.kernel.org/r/dbu6wiwu3sdhmhikb2w6lns7b27gbobfavhjj57kwi2quafgwl@htjcc5oikcr3.

Unfortunately, there is no way to express what these use cases need using per-cpu max_active. A single CPU may issue most of the in-flight IOs, so we don't want to set max_active too low, but as soon as we increase max_active a bit, we can end up with an unreasonable number of in-flight work items when many CPUs issue IOs at the same time. i.e. the acceptable lowest max_active is higher than the acceptable highest max_active.

Ideally, max_active for an unbound workqueue should be system-wide so that the users can regulate the total level of concurrency regardless of node and cache layout. The reasons workqueue hasn't implemented that yet are:

- Once max_active enforcement is decoupled from pool boundaries, chaining execution after a work item finishes requires inter-pool operations which would require lock dancing, which is nasty.

- Sharing a single nr_active count across the whole system can be pretty expensive on NUMA machines.

- Per-pwq enforcement had been more or less okay while we were using per-node pools.

It looks like we can no longer avoid decoupling max_active enforcement from pool boundaries. This patch implements a system-wide nr_active mechanism with the following design characteristics:

- To avoid sharing a single counter across multiple nodes, the configured max_active is split across nodes according to the proportion of each workqueue's online effective CPUs per node. e.g. a node with twice as many online effective CPUs will get twice as high a portion of max_active.

- Workqueue used to be able to process a chain of interdependent work items which is as long as max_active. We can't do this anymore as max_active is distributed across the nodes. Instead, a new parameter, min_active, is introduced which determines the minimum level of concurrency within a node regardless of how the max_active distribution comes out to be.
It is set to the smaller of max_active and WQ_DFL_MIN_ACTIVE which is 8. This can lead to higher effective max_weight than configured and also deadlocks if a workqueue was depending on being able to handle chains of interdependent work items that are longer than 8. I believe these should be fine given that the number of CPUs in each NUMA node is usually higher than 8 and work item chain longer than 8 is pretty unlikely. However, if these assumptions turn out to be wrong, we'll need to add an interface to adjust min_active. - Each unbound wq has an array of struct wq_node_nr_active which tracks per-node nr_active. When its pwq wants to run a work item, it has to obtain the matching node's nr_active. If over the node's max_active, the pwq is queued on wq_node_nr_active->pending_pwqs. As work items finish, the completion path round-robins the pending pwqs activating the first inactive work item of each, which involves some pool lock dancing and kicking other pools. It's not the simplest code but doesn't look too bad. v4: - wq_adjust_max_active() updated to invoke wq_update_node_max_active(). - wq_adjust_max_active() is now protected by wq->mutex instead of wq_pool_mutex. v3: - wq_node_max_active() used to calculate per-node max_active on the fly based on system-wide CPU online states. Lai pointed out that this can lead to skewed distributions for workqueues with restricted cpumasks. Update the max_active distribution to use per-workqueue effective online CPU counts instead of system-wide and cache the calculation results in node_nr_active->max. v2: - wq->min/max_active now uses WRITE/READ_ONCE() as suggested by Lai. Signed-off-by: Tejun Heo Reported-by: Naohiro Aota Link: http://lkml.kernel.org/r/dbu6wiwu3sdhmhikb2w6lns7b27gbobfavhjj57kwi2quafgwl@htjcc5oikcr3 Fixes: 636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues") Reviewed-by: Lai Jiangshan --- include/linux/workqueue.h | 35 +++- kernel/workqueue.c | 343 ++++++++++++++++++++++++++++++++++---- 2 files changed, 342 insertions(+), 36 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 78047d0d9882..232baea90a1d 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -398,6 +398,13 @@ enum wq_consts { WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE, WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, + + /* + * Per-node default cap on min_active. Unless explicitly set, min_active + * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see + * workqueue_struct->min_active definition. + */ + WQ_DFL_MIN_ACTIVE = 8, }; /* @@ -440,11 +447,33 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq; * alloc_workqueue - allocate a workqueue * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags - * @max_active: max in-flight work items per CPU, 0 for default + * @max_active: max in-flight work items, 0 for default * remaining args: args for @fmt * - * Allocate a workqueue with the specified parameters. For detailed - * information on WQ_* flags, please refer to + * For a per-cpu workqueue, @max_active limits the number of in-flight work + * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be + * executing at most one work item for the workqueue. + * + * For unbound workqueues, @max_active limits the number of in-flight work items + * for the whole system. e.g. 
@max_active of 16 indicates that that there can be + * at most 16 work items executing for the workqueue in the whole system. + * + * As sharing the same active counter for an unbound workqueue across multiple + * NUMA nodes can be expensive, @max_active is distributed to each NUMA node + * according to the proportion of the number of online CPUs and enforced + * independently. + * + * Depending on online CPU distribution, a node may end up with per-node + * max_active which is significantly lower than @max_active, which can lead to + * deadlocks if the per-node concurrency limit is lower than the maximum number + * of interdependent work items for the workqueue. + * + * To guarantee forward progress regardless of online CPU distribution, the + * concurrency limit on every node is guaranteed to be equal to or greater than + * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means + * that the sum of per-node max_active's may be larger than @max_active. + * + * For detailed information on %WQ_* flags, please refer to * Documentation/core-api/workqueue.rst. * * RETURNS: diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8d465478adb9..903be39bd2d1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -126,6 +126,9 @@ enum wq_internal_consts { * * L: pool->lock protected. Access with pool->lock held. * + * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for + * reads. + * * K: Only modified by worker while holding pool->lock. Can be safely read by * self, while holding pool->lock or from IRQ context if %current is the * kworker. @@ -247,17 +250,18 @@ struct pool_workqueue { * pwq->inactive_works instead of pool->worklist and marked with * WORK_STRUCT_INACTIVE. * - * All work items marked with WORK_STRUCT_INACTIVE do not participate - * in pwq->nr_active and all work items in pwq->inactive_works are - * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE - * work items are in pwq->inactive_works. Some of them are ready to - * run in pool->worklist or worker->scheduled. Those work itmes are - * only struct wq_barrier which is used for flush_work() and should - * not participate in pwq->nr_active. For non-barrier work item, it - * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. + * All work items marked with WORK_STRUCT_INACTIVE do not participate in + * nr_active and all work items in pwq->inactive_works are marked with + * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are + * in pwq->inactive_works. Some of them are ready to run in + * pool->worklist or worker->scheduled. Those work itmes are only struct + * wq_barrier which is used for flush_work() and should not participate + * in nr_active. For non-barrier work item, it is marked with + * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. */ int nr_active; /* L: nr of active works */ struct list_head inactive_works; /* L: inactive works */ + struct list_head pending_node; /* LN: node on wq_node_nr_active->pending_pwqs */ struct list_head pwqs_node; /* WR: node on wq->pwqs */ struct list_head mayday_node; /* MD: node on wq->maydays */ @@ -289,9 +293,19 @@ struct wq_device; * on each CPU, in an unbound workqueue, max_active applies to the whole system. * As sharing a single nr_active across multiple sockets can be very expensive, * the counting and enforcement is per NUMA node. + * + * The following struct is used to enforce per-node max_active. 
When a pwq wants + * to start executing a work item, it should increment ->nr using + * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over + * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish + * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in + * round-robin order. */ struct wq_node_nr_active { - atomic_t nr; /* per-node nr_active count */ + int max; /* per-node max_active */ + atomic_t nr; /* per-node nr_active */ + raw_spinlock_t lock; /* nests inside pool locks */ + struct list_head pending_pwqs; /* LN: pwqs with inactive works */ }; /* @@ -314,8 +328,12 @@ struct workqueue_struct { struct worker *rescuer; /* MD: rescue worker */ int nr_drainers; /* WQ: drain in progress */ + + /* See alloc_workqueue() function comment for info on min/max_active */ int max_active; /* WO: max active works */ + int min_active; /* WO: min active works */ int saved_max_active; /* WQ: saved max_active */ + int saved_min_active; /* WQ: saved min_active */ struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */ @@ -667,6 +685,19 @@ static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu) lockdep_is_held(&wq->mutex)); } +/** + * unbound_effective_cpumask - effective cpumask of an unbound workqueue + * @wq: workqueue of interest + * + * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which + * is masked with wq_unbound_cpumask to determine the effective cpumask. The + * default pwq is always mapped to the pool with the current effective cpumask. + */ +static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq) +{ + return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask; +} + static unsigned int work_color_to_flags(int color) { return color << WORK_STRUCT_COLOR_SHIFT; @@ -1461,6 +1492,46 @@ static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq, return wq->node_nr_active[node]; } +/** + * wq_update_node_max_active - Update per-node max_actives to use + * @wq: workqueue to update + * @off_cpu: CPU that's going down, -1 if a CPU is not going down + * + * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is + * distributed among nodes according to the proportions of numbers of online + * cpus. The result is always between @wq->min_active and max_active. 
+ */ +static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) +{ + struct cpumask *effective = unbound_effective_cpumask(wq); + int min_active = READ_ONCE(wq->min_active); + int max_active = READ_ONCE(wq->max_active); + int total_cpus, node; + + lockdep_assert_held(&wq->mutex); + + if (!cpumask_test_cpu(off_cpu, effective)) + off_cpu = -1; + + total_cpus = cpumask_weight_and(effective, cpu_online_mask); + if (off_cpu >= 0) + total_cpus--; + + for_each_node(node) { + int node_cpus; + + node_cpus = cpumask_weight_and(effective, cpumask_of_node(node)); + if (off_cpu >= 0 && cpu_to_node(off_cpu) == node) + node_cpus--; + + wq_node_nr_active(wq, node)->max = + clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus), + min_active, max_active); + } + + wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active; +} + /** * get_pwq - get an extra reference on the specified pool_workqueue * @pwq: pool_workqueue to get @@ -1558,35 +1629,98 @@ static bool pwq_activate_work(struct pool_workqueue *pwq, return true; } +static bool tryinc_node_nr_active(struct wq_node_nr_active *nna) +{ + int max = READ_ONCE(nna->max); + + while (true) { + int old, tmp; + + old = atomic_read(&nna->nr); + if (old >= max) + return false; + tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1); + if (tmp == old) + return true; + } +} + /** * pwq_tryinc_nr_active - Try to increment nr_active for a pwq * @pwq: pool_workqueue of interest + * @fill: max_active may have increased, try to increase concurrency level * * Try to increment nr_active for @pwq. Returns %true if an nr_active count is * successfully obtained. %false otherwise. */ -static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) +static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) { struct workqueue_struct *wq = pwq->wq; struct worker_pool *pool = pwq->pool; struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); - bool obtained; + bool obtained = false; lockdep_assert_held(&pool->lock); - obtained = pwq->nr_active < READ_ONCE(wq->max_active); - - if (obtained) { - pwq->nr_active++; - if (nna) - atomic_inc(&nna->nr); + if (!nna) { + /* per-cpu workqueue, pwq->nr_active is sufficient */ + obtained = pwq->nr_active < READ_ONCE(wq->max_active); + goto out; } + + /* + * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is + * already waiting on $nna, pwq_dec_nr_active() will maintain the + * concurrency level. Don't jump the line. + * + * We need to ignore the pending test after max_active has increased as + * pwq_dec_nr_active() can only maintain the concurrency level but not + * increase it. This is indicated by @fill. + */ + if (!list_empty(&pwq->pending_node) && likely(!fill)) + goto out; + + obtained = tryinc_node_nr_active(nna); + if (obtained) + goto out; + + /* + * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs + * and try again. The smp_mb() is paired with the implied memory barrier + * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either + * we see the decremented $nna->nr or they see non-empty + * $nna->pending_pwqs. + */ + raw_spin_lock(&nna->lock); + + if (list_empty(&pwq->pending_node)) + list_add_tail(&pwq->pending_node, &nna->pending_pwqs); + else if (likely(!fill)) + goto out_unlock; + + smp_mb(); + + obtained = tryinc_node_nr_active(nna); + + /* + * If @fill, @pwq might have already been pending. Being spuriously + * pending in cold paths doesn't affect anything. Let's leave it be. 
+ */ + if (obtained && likely(!fill)) + list_del_init(&pwq->pending_node); + +out_unlock: + raw_spin_unlock(&nna->lock); +out: + if (obtained) + pwq->nr_active++; return obtained; } /** * pwq_activate_first_inactive - Activate the first inactive work item on a pwq * @pwq: pool_workqueue of interest + * @fill: max_active may have increased, try to increase concurrency level * * Activate the first inactive work item of @pwq if available and allowed by * max_active limit. @@ -1594,13 +1728,13 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) * Returns %true if an inactive work item has been activated. %false if no * inactive work item is found or max_active limit is reached. */ -static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) +static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) { struct work_struct *work = list_first_entry_or_null(&pwq->inactive_works, struct work_struct, entry); - if (work && pwq_tryinc_nr_active(pwq)) { + if (work && pwq_tryinc_nr_active(pwq, fill)) { __pwq_activate_work(pwq, work); return true; } else { @@ -1608,11 +1742,93 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) } } +/** + * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active + * @nna: wq_node_nr_active to activate a pending pwq for + * @caller_pool: worker_pool the caller is locking + * + * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked. + * @caller_pool may be unlocked and relocked to lock other worker_pools. + */ +static void node_activate_pending_pwq(struct wq_node_nr_active *nna, + struct worker_pool *caller_pool) +{ + struct worker_pool *locked_pool = caller_pool; + struct pool_workqueue *pwq; + struct work_struct *work; + + lockdep_assert_held(&caller_pool->lock); + + raw_spin_lock(&nna->lock); +retry: + pwq = list_first_entry_or_null(&nna->pending_pwqs, + struct pool_workqueue, pending_node); + if (!pwq) + goto out_unlock; + + /* + * If @pwq is for a different pool than @locked_pool, we need to lock + * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock + * / lock dance. For that, we also need to release @nna->lock as it's + * nested inside pool locks. + */ + if (pwq->pool != locked_pool) { + raw_spin_unlock(&locked_pool->lock); + locked_pool = pwq->pool; + if (!raw_spin_trylock(&locked_pool->lock)) { + raw_spin_unlock(&nna->lock); + raw_spin_lock(&locked_pool->lock); + raw_spin_lock(&nna->lock); + goto retry; + } + } + + /* + * $pwq may not have any inactive work items due to e.g. cancellations. + * Drop it from pending_pwqs and see if there's another one. + */ + work = list_first_entry_or_null(&pwq->inactive_works, + struct work_struct, entry); + if (!work) { + list_del_init(&pwq->pending_node); + goto retry; + } + + /* + * Acquire an nr_active count and activate the inactive work item. If + * $pwq still has inactive work items, rotate it to the end of the + * pending_pwqs so that we round-robin through them. This means that + * inactive work items are not activated in queueing order which is fine + * given that there has never been any ordering across different pwqs. 
+ */ + if (likely(tryinc_node_nr_active(nna))) { + pwq->nr_active++; + __pwq_activate_work(pwq, work); + + if (list_empty(&pwq->inactive_works)) + list_del_init(&pwq->pending_node); + else + list_move_tail(&pwq->pending_node, &nna->pending_pwqs); + + /* if activating a foreign pool, make sure it's running */ + if (pwq->pool != caller_pool) + kick_pool(pwq->pool); + } + +out_unlock: + raw_spin_unlock(&nna->lock); + if (locked_pool != caller_pool) { + raw_spin_unlock(&locked_pool->lock); + raw_spin_lock(&caller_pool->lock); + } +} + /** * pwq_dec_nr_active - Retire an active count * @pwq: pool_workqueue of interest * * Decrement @pwq's nr_active and try to activate the first inactive work item. + * For unbound workqueues, this function may temporarily drop @pwq->pool->lock. */ static void pwq_dec_nr_active(struct pool_workqueue *pwq) { @@ -1632,12 +1848,29 @@ static void pwq_dec_nr_active(struct pool_workqueue *pwq) * inactive work item on @pwq itself. */ if (!nna) { - pwq_activate_first_inactive(pwq); + pwq_activate_first_inactive(pwq, false); return; } - atomic_dec(&nna->nr); - pwq_activate_first_inactive(pwq); + /* + * If @pwq is for an unbound workqueue, it's more complicated because + * multiple pwqs and pools may be sharing the nr_active count. When a + * pwq needs to wait for an nr_active count, it puts itself on + * $nna->pending_pwqs. The following atomic_dec_return()'s implied + * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to + * guarantee that either we see non-empty pending_pwqs or they see + * decremented $nna->nr. + * + * $nna->max may change as CPUs come online/offline and @pwq->wq's + * max_active gets updated. However, it is guaranteed to be equal to or + * larger than @pwq->wq->min_active which is above zero unless freezing. + * This maintains the forward progress guarantee. + */ + if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max)) + return; + + if (!list_empty(&nna->pending_pwqs)) + node_activate_pending_pwq(nna, pool); } /** @@ -1965,7 +2198,7 @@ retry: * @work must also queue behind existing inactive work items to maintain * ordering when max_active changes. See wq_adjust_max_active(). */ - if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) { + if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) { if (list_empty(&pool->worklist)) pool->watchdog_ts = jiffies; @@ -3200,7 +3433,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, barr->task = current; - /* The barrier work item does not participate in pwq->nr_active. */ + /* The barrier work item does not participate in nr_active. 
*/ work_flags |= WORK_STRUCT_INACTIVE; /* @@ -4116,6 +4349,8 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar) static void init_node_nr_active(struct wq_node_nr_active *nna) { atomic_set(&nna->nr, 0); + raw_spin_lock_init(&nna->lock); + INIT_LIST_HEAD(&nna->pending_pwqs); } /* @@ -4355,6 +4590,15 @@ static void pwq_release_workfn(struct kthread_work *work) mutex_unlock(&wq_pool_mutex); } + if (!list_empty(&pwq->pending_node)) { + struct wq_node_nr_active *nna = + wq_node_nr_active(pwq->wq, pwq->pool->node); + + raw_spin_lock_irq(&nna->lock); + list_del_init(&pwq->pending_node); + raw_spin_unlock_irq(&nna->lock); + } + call_rcu(&pwq->rcu, rcu_free_pwq); /* @@ -4380,6 +4624,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, pwq->flush_color = -1; pwq->refcnt = 1; INIT_LIST_HEAD(&pwq->inactive_works); + INIT_LIST_HEAD(&pwq->pending_node); INIT_LIST_HEAD(&pwq->pwqs_node); INIT_LIST_HEAD(&pwq->mayday_node); kthread_init_work(&pwq->release_work, pwq_release_workfn); @@ -4587,6 +4832,9 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) ctx->pwq_tbl[cpu]); ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); + /* update node_nr_active->max */ + wq_update_node_max_active(ctx->wq, -1); + mutex_unlock(&ctx->wq->mutex); } @@ -4850,24 +5098,35 @@ static int init_rescuer(struct workqueue_struct *wq) static void wq_adjust_max_active(struct workqueue_struct *wq) { bool activated; + int new_max, new_min; lockdep_assert_held(&wq->mutex); if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { - WRITE_ONCE(wq->max_active, 0); - return; + new_max = 0; + new_min = 0; + } else { + new_max = wq->saved_max_active; + new_min = wq->saved_min_active; } - if (wq->max_active == wq->saved_max_active) + if (wq->max_active == new_max && wq->min_active == new_min) return; /* - * Update @wq->max_active and then kick inactive work items if more + * Update @wq->max/min_active and then kick inactive work items if more * active work items are allowed. This doesn't break work item ordering * because new work items are always queued behind existing inactive * work items if there are any. */ - WRITE_ONCE(wq->max_active, wq->saved_max_active); + WRITE_ONCE(wq->max_active, new_max); + WRITE_ONCE(wq->min_active, new_min); + + if (wq->flags & WQ_UNBOUND) + wq_update_node_max_active(wq, -1); + + if (new_max == 0) + return; /* * Round-robin through pwq's activating the first inactive work item @@ -4882,7 +5141,7 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) /* can be called during early boot w/ irq disabled */ raw_spin_lock_irqsave(&pwq->pool->lock, flags); - if (pwq_activate_first_inactive(pwq)) { + if (pwq_activate_first_inactive(pwq, true)) { activated = true; kick_pool(pwq->pool); } @@ -4944,7 +5203,9 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, /* init wq */ wq->flags = flags; wq->max_active = max_active; - wq->saved_max_active = max_active; + wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE); + wq->saved_max_active = wq->max_active; + wq->saved_min_active = wq->min_active; mutex_init(&wq->mutex); atomic_set(&wq->nr_pwqs_to_flush, 0); INIT_LIST_HEAD(&wq->pwqs); @@ -5110,7 +5371,8 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); * @wq: target workqueue * @max_active: new max_active value. * - * Set max_active of @wq to @max_active. + * Set max_active of @wq to @max_active. See the alloc_workqueue() function + * comment. * * CONTEXT: * Don't call from IRQ context. 
@@ -5127,6 +5389,9 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; + if (wq->flags & WQ_UNBOUND) + wq->saved_min_active = min(wq->saved_min_active, max_active); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); @@ -5808,6 +6073,10 @@ int workqueue_online_cpu(unsigned int cpu) for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) wq_update_pod(wq, tcpu, cpu, true); + + mutex_lock(&wq->mutex); + wq_update_node_max_active(wq, -1); + mutex_unlock(&wq->mutex); } } @@ -5836,6 +6105,10 @@ int workqueue_offline_cpu(unsigned int cpu) for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) wq_update_pod(wq, tcpu, cpu, false); + + mutex_lock(&wq->mutex); + wq_update_node_max_active(wq, cpu); + mutex_unlock(&wq->mutex); } } mutex_unlock(&wq_pool_mutex); @@ -7127,8 +7400,12 @@ void __init workqueue_init_topology(void) * combinations to apply per-pod sharing. */ list_for_each_entry(wq, &workqueues, list) { - for_each_online_cpu(cpu) { + for_each_online_cpu(cpu) wq_update_pod(wq, cpu, cpu, true); + if (wq->flags & WQ_UNBOUND) { + mutex_lock(&wq->mutex); + wq_update_node_max_active(wq, -1); + mutex_unlock(&wq->mutex); } } From 07daa99b7fd7adfffa22180184e39ec124e73013 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:25 -1000 Subject: [PATCH 19/54] tools/workqueue/wq_dump.py: Add node_nr/max_active dump Print out per-node nr/max_active numbers to improve visibility into node_nr_active operations. Signed-off-by: Tejun Heo --- tools/workqueue/wq_dump.py | 41 +++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/tools/workqueue/wq_dump.py b/tools/workqueue/wq_dump.py index 333b2fc00b82..bd381511bd9a 100644 --- a/tools/workqueue/wq_dump.py +++ b/tools/workqueue/wq_dump.py @@ -50,6 +50,7 @@ import drgn from drgn.helpers.linux.list import list_for_each_entry,list_empty from drgn.helpers.linux.percpu import per_cpu_ptr from drgn.helpers.linux.cpumask import for_each_cpu,for_each_possible_cpu +from drgn.helpers.linux.nodemask import for_each_node from drgn.helpers.linux.idr import idr_for_each import argparse @@ -107,7 +108,6 @@ WQ_AFFN_NUMA = prog['WQ_AFFN_NUMA'] WQ_AFFN_SYSTEM = prog['WQ_AFFN_SYSTEM'] WQ_NAME_LEN = prog['WQ_NAME_LEN'].value_() - cpumask_str_len = len(cpumask_str(wq_unbound_cpumask)) print('Affinity Scopes') @@ -205,3 +205,42 @@ for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_( print(f' {wq.rescuer.task.pid.value_():6}', end='') print(f' {cpumask_str(wq.rescuer.task.cpus_ptr):{rcpus_len}}', end='') print('') + +print('') +print('Unbound workqueue -> node_nr/max_active') +print('=======================================') + +if 'node_to_cpumask_map' in prog: + __cpu_online_mask = prog['__cpu_online_mask'] + node_to_cpumask_map = prog['node_to_cpumask_map'] + nr_node_ids = prog['nr_node_ids'].value_() + + print(f'online_cpus={cpumask_str(__cpu_online_mask.address_of_())}') + for node in for_each_node(): + print(f'NODE[{node:02}]={cpumask_str(node_to_cpumask_map[node])}') + print('') + + print(f'[{"workqueue":^{WQ_NAME_LEN-2}}\\ min max', end='') + first = True + for node in for_each_node(): + if first: + print(f' NODE {node}', end='') + first = False + else: + print(f' {node:7}', end='') + print(f' {"dfl":>7} ]') + print('') + + for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'): + if not (wq.flags & WQ_UNBOUND): + continue + + print(f'{wq.name.string_().decode():{WQ_NAME_LEN}} 
', end='') + print(f'{wq.min_active.value_():3} {wq.max_active.value_():3}', end='') + for node in for_each_node(): + nna = wq.node_nr_active[node] + print(f' {nna.nr.counter.value_():3}/{nna.max.value_():3}', end='') + nna = wq.node_nr_active[nr_node_ids] + print(f' {nna.nr.counter.value_():3}/{nna.max.value_():3}') +else: + printf(f'node_to_cpumask_map not present, is NUMA enabled?') From aae17ebb53cd3da37f5dfbde937acd091eb4340c Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Mon, 29 Jan 2024 22:00:46 -0300 Subject: [PATCH 20/54] workqueue: Avoid using isolated cpus' timers on queue_delayed_work When __queue_delayed_work() is called, it chooses a cpu for handling the timer interrupt. As of today, it will pick either the cpu passed as parameter or the last cpu used for this. This is not good if a system does use CPU isolation, because it can take away some valuable cpu time to: 1 - deal with the timer interrupt, 2 - schedule-out the desired task, 3 - queue work on a random workqueue, and 4 - schedule the desired task back to the cpu. So to fix this, during __queue_delayed_work(), if cpu isolation is in place, pick a random non-isolated cpu to handle the timer interrupt. As an optimization, if the current cpu is not isolated, use it instead of looking for another candidate. Signed-off-by: Leonardo Bras Signed-off-by: Tejun Heo --- kernel/workqueue.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 903be39bd2d1..9221a4c57ae1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2362,10 +2362,18 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, dwork->cpu = cpu; timer->expires = jiffies + delay; - if (unlikely(cpu != WORK_CPU_UNBOUND)) + if (housekeeping_enabled(HK_TYPE_TIMER)) { + /* If the current cpu is a housekeeping cpu, use it. */ + cpu = smp_processor_id(); + if (!housekeeping_test_cpu(cpu, HK_TYPE_TIMER)) + cpu = housekeeping_any_cpu(HK_TYPE_TIMER); add_timer_on(timer, cpu); - else - add_timer(timer); + } else { + if (likely(cpu == WORK_CPU_UNBOUND)) + add_timer(timer); + else + add_timer_on(timer, cpu); + } } /** From 15930da42f8981dc42c19038042947b475b19f47 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 30 Jan 2024 18:55:55 -1000 Subject: [PATCH 21/54] workqueue: Don't call cpumask_test_cpu() with -1 CPU in wq_update_node_max_active() For wq_update_node_max_active(), @off_cpu of -1 indicates that no CPU is going down. The function was incorrectly calling cpumask_test_cpu() with -1 CPU leading to oopses like the following on some archs: Unable to handle kernel paging request at virtual address ffff0002100296e0 .. pc : wq_update_node_max_active+0x50/0x1fc lr : wq_update_node_max_active+0x1f0/0x1fc ... Call trace: wq_update_node_max_active+0x50/0x1fc apply_wqattrs_commit+0xf0/0x114 apply_workqueue_attrs_locked+0x58/0xa0 alloc_workqueue+0x5ac/0x774 workqueue_init_early+0x460/0x540 start_kernel+0x258/0x684 __primary_switched+0xb8/0xc0 Code: 9100a273 35000d01 53067f00 d0016dc1 (f8607a60) ---[ end trace 0000000000000000 ]--- Kernel panic - not syncing: Attempted to kill the idle task! ---[ end Kernel panic - not syncing: Attempted to kill the idle task! ]--- Fix it. 
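For reference, cpumask_test_cpu() simply indexes the cpumask bitmap with the CPU number it is given, so a negative @off_cpu becomes an out-of-range bit lookup, which is what produces the paging fault above. The guard added below amounts to checking the CPU number before consulting the mask:

  if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
  	off_cpu = -1;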
Signed-off-by: Tejun Heo Reported-by: Marek Szyprowski Reported-by: Nathan Chancellor Tested-by: Nathan Chancellor Link: http://lkml.kernel.org/r/91eacde0-df99-4d5c-a980-91046f66e612@samsung.com Fixes: 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues") --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9221a4c57ae1..31c1373505d8 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1510,7 +1510,7 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) lockdep_assert_held(&wq->mutex); - if (!cpumask_test_cpu(off_cpu, effective)) + if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective)) off_cpu = -1; total_cpus = cpumask_weight_and(effective, cpu_online_mask); From c5f8cd6c62ce02205ced15e9a998103f21ec5455 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 30 Jan 2024 19:06:43 -1000 Subject: [PATCH 22/54] workqueue: Avoid premature init of wq->node_nr_active[].max System workqueues are allocated early during boot from workqueue_init_early(). While allocating unbound workqueues, wq_update_node_max_active() is invoked from apply_workqueue_attrs() and accesses NUMA topology to initialize wq->node_nr_active[].max. However, topology information may not be set up at this point. wq_update_node_max_active() is explicitly invoked from workqueue_init_topology() later when topology information is known to be available. This doesn't seem to crash anything but it's doing useless work with dubious data. Let's skip the premature and duplicate node_max_active updates by initializing the field to WQ_DFL_MIN_ACTIVE on allocation and making wq_update_node_max_active() noop until workqueue_init_topology(). Signed-off-by: Tejun Heo --- kernel/workqueue.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9221a4c57ae1..a65081ec6780 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -386,6 +386,8 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { [WQ_AFFN_SYSTEM] = "system", }; +static bool wq_topo_initialized = false; + /* * Per-cpu work items which run for longer than the following threshold are * automatically considered CPU intensive and excluded from concurrency @@ -1510,6 +1512,9 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) lockdep_assert_held(&wq->mutex); + if (!wq_topo_initialized) + return; + if (!cpumask_test_cpu(off_cpu, effective)) off_cpu = -1; @@ -4356,6 +4361,7 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar) static void init_node_nr_active(struct wq_node_nr_active *nna) { + nna->max = WQ_DFL_MIN_ACTIVE; atomic_set(&nna->nr, 0); raw_spin_lock_init(&nna->lock); INIT_LIST_HEAD(&nna->pending_pwqs); @@ -7400,6 +7406,8 @@ void __init workqueue_init_topology(void) init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); + wq_topo_initialized = true; + mutex_lock(&wq_pool_mutex); /* --- kernel/workqueue.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 31c1373505d8..ffb625db9771 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -386,6 +386,8 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { [WQ_AFFN_SYSTEM] = "system", }; +static bool wq_topo_initialized __read_mostly = false; + /* * Per-cpu work items which run for longer than the following threshold are * automatically 
considered CPU intensive and excluded from concurrency @@ -1510,6 +1512,9 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) lockdep_assert_held(&wq->mutex); + if (!wq_topo_initialized) + return; + if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective)) off_cpu = -1; @@ -4356,6 +4361,7 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar) static void init_node_nr_active(struct wq_node_nr_active *nna) { + nna->max = WQ_DFL_MIN_ACTIVE; atomic_set(&nna->nr, 0); raw_spin_lock_init(&nna->lock); INIT_LIST_HEAD(&nna->pending_pwqs); @@ -7400,6 +7406,8 @@ void __init workqueue_init_topology(void) init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); + wq_topo_initialized = true; + mutex_lock(&wq_pool_mutex); /* From 3e0bc2855b573bcffa2a52955a878f537f5ac0cd Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Thu, 1 Feb 2024 20:06:20 +0100 Subject: [PATCH 23/54] workqueue: rust: sync with `WORK_CPU_UNBOUND` change Commit e563d0a7cdc1 ("workqueue: Break up enum definitions and give names to the types") gives a name to the `enum` where `WORK_CPU_UNBOUND` was defined, so `bindgen` changes its output from e.g.: pub type _bindgen_ty_10 = core::ffi::c_uint; pub const WORK_CPU_UNBOUND: _bindgen_ty_10 = 64; to e.g.: pub type wq_misc_consts = core::ffi::c_uint; pub const wq_misc_consts_WORK_CPU_UNBOUND: wq_misc_consts = 64; Thus update Rust's side to match the change (which requires a slight reformat of the code), fixing the build error. Closes: https://lore.kernel.org/rust-for-linux/CANiq72=9PZ89bCAVX0ZV4cqrYSLoZWyn-d_K4KpBMHjwUMdC3A@mail.gmail.com/ Fixes: e563d0a7cdc1 ("workqueue: Break up enum definitions and give names to the types") Signed-off-by: Miguel Ojeda Reviewed-by: Boqun Feng Signed-off-by: Tejun Heo --- rust/kernel/workqueue.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs index 498397877376..d00231e18007 100644 --- a/rust/kernel/workqueue.rs +++ b/rust/kernel/workqueue.rs @@ -199,7 +199,11 @@ impl Queue { // stay valid until we call the function pointer in the `work_struct`, so the access is ok. unsafe { w.__enqueue(move |work_ptr| { - bindings::queue_work_on(bindings::WORK_CPU_UNBOUND as _, queue_ptr, work_ptr) + bindings::queue_work_on( + bindings::wq_misc_consts_WORK_CPU_UNBOUND as _, + queue_ptr, + work_ptr, + ) }) } } From c70e1779b73a39f7648b26bdc835304c60100ce3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:14:21 -1000 Subject: [PATCH 24/54] workqueue: Fix pwq->nr_in_flight corruption in try_to_grab_pending() dd6c3c544126 ("workqueue: Move pwq_dec_nr_in_flight() to the end of work item handling") relocated pwq_dec_nr_in_flight() after set_work_pool_and_keep_pending(). However, the latter destroys information contained in work->data that's needed by pwq_dec_nr_in_flight() including the flush color. With flush color destroyed, flush_workqueue() can stall easily when mixed with cancel_work*() usages. This is easily triggered by running xfstests generic/001 test on xfs: INFO: task umount:6305 blocked for more than 122 seconds. ... 
task:umount state:D stack:13008 pid:6305 tgid:6305 ppid:6301 flags:0x00004000 Call Trace: __schedule+0x2f6/0xa20 schedule+0x36/0xb0 schedule_timeout+0x20b/0x280 wait_for_completion+0x8a/0x140 __flush_workqueue+0x11a/0x3b0 xfs_inodegc_flush+0x24/0xf0 xfs_unmountfs+0x14/0x180 xfs_fs_put_super+0x3d/0x90 generic_shutdown_super+0x7c/0x160 kill_block_super+0x1b/0x40 xfs_kill_sb+0x12/0x30 deactivate_locked_super+0x35/0x90 deactivate_super+0x42/0x50 cleanup_mnt+0x109/0x170 __cleanup_mnt+0x12/0x20 task_work_run+0x60/0x90 syscall_exit_to_user_mode+0x146/0x150 do_syscall_64+0x5d/0x110 entry_SYSCALL_64_after_hwframe+0x6c/0x74 Fix it by stashing work_data before calling set_work_pool_and_keep_pending() and using the stashed value for pwq_dec_nr_in_flight(). Signed-off-by: Tejun Heo Reported-by: Chandan Babu R Link: http://lkml.kernel.org/r/87o7cxeehy.fsf@debian-BULLSEYE-live-builder-AMD64 Fixes: dd6c3c544126 ("workqueue: Move pwq_dec_nr_in_flight() to the end of work item handling") --- kernel/workqueue.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ffb625db9771..55c9816506b0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1999,6 +1999,8 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, */ pwq = get_work_pwq(work); if (pwq && pwq->pool == pool) { + unsigned long work_data; + debug_work_deactivate(work); /* @@ -2016,11 +2018,15 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, list_del_init(&work->entry); - /* work->data points to pwq iff queued, point to pool */ + /* + * work->data points to pwq iff queued. Let's point to pool. As + * this destroys work->data needed by the next step, stash it. + */ + work_data = *work_data_bits(work); set_work_pool_and_keep_pending(work, pool->id); /* must be the last step, see the function comment */ - pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); + pwq_dec_nr_in_flight(pwq, work_data); raw_spin_unlock(&pool->lock); rcu_read_unlock(); From d412ace11144aa2bf692c7cf9778351efc15c827 Mon Sep 17 00:00:00 2001 From: "Ricardo B. Marliere" Date: Sun, 4 Feb 2024 10:47:05 -0300 Subject: [PATCH 25/54] workqueue: make wq_subsys const Now that the driver core can properly handle constant struct bus_type, move the wq_subsys variable to be a constant structure as well, placing it into read-only memory which can not be modified at runtime. Cc: Greg Kroah-Hartman Suggested-and-reviewed-by: Greg Kroah-Hartman Signed-off-by: Ricardo B. Marliere Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 55c9816506b0..695f6f5ad038 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6692,7 +6692,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = { __ATTR_NULL, }; -static struct bus_type wq_subsys = { +static const struct bus_type wq_subsys = { .name = "workqueue", .dev_groups = wq_sysfs_groups, }; From c35aea39d1e106f61fd2130f0d32a3bac8bd4570 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:28:06 -1000 Subject: [PATCH 26/54] workqueue: Update lock debugging code These changes are in preparation of BH workqueue which will execute work items from BH context. - Update lock and RCU depth checks in process_one_work() so that it remembers and checks against the starting depths and prints out the depth changes. - Factor out lockdep annotations in the flush paths into touch_{wq|work}_lockdep_map(). 
The work->lockdep_map touching is moved from __flush_work() to its callee - start_flush_work(). This brings it closer to the wq counterpart and will allow testing the associated wq's flags which will be needed to support BH workqueues. This is not expected to cause any functional changes. Signed-off-by: Tejun Heo Tested-by: Allen Pais --- kernel/workqueue.c | 51 ++++++++++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 17 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 695f6f5ad038..ac42c005b00b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2965,6 +2965,7 @@ __acquires(&pool->lock) struct pool_workqueue *pwq = get_work_pwq(work); struct worker_pool *pool = worker->pool; unsigned long work_data; + int lockdep_start_depth, rcu_start_depth; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -3027,6 +3028,8 @@ __acquires(&pool->lock) pwq->stats[PWQ_STAT_STARTED]++; raw_spin_unlock_irq(&pool->lock); + rcu_start_depth = rcu_preempt_depth(); + lockdep_start_depth = lockdep_depth(current); lock_map_acquire(&pwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); /* @@ -3062,12 +3065,15 @@ __acquires(&pool->lock) lock_map_release(&lockdep_map); lock_map_release(&pwq->wq->lockdep_map); - if (unlikely(in_atomic() || lockdep_depth(current) > 0 || - rcu_preempt_depth() > 0)) { - pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n" - " last function: %ps\n", - current->comm, preempt_count(), rcu_preempt_depth(), - task_pid_nr(current), worker->current_func); + if (unlikely((worker->task && in_atomic()) || + lockdep_depth(current) != lockdep_start_depth || + rcu_preempt_depth() != rcu_start_depth)) { + pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n" + " preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n", + current->comm, task_pid_nr(current), preempt_count(), + lockdep_start_depth, lockdep_depth(current), + rcu_start_depth, rcu_preempt_depth(), + worker->current_func); debug_show_held_locks(current); dump_stack(); } @@ -3549,6 +3555,19 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, return wait; } +static void touch_wq_lockdep_map(struct workqueue_struct *wq) +{ + lock_map_acquire(&wq->lockdep_map); + lock_map_release(&wq->lockdep_map); +} + +static void touch_work_lockdep_map(struct work_struct *work, + struct workqueue_struct *wq) +{ + lock_map_acquire(&work->lockdep_map); + lock_map_release(&work->lockdep_map); +} + /** * __flush_workqueue - ensure that any scheduled work has run to completion. * @wq: workqueue to flush @@ -3568,8 +3587,7 @@ void __flush_workqueue(struct workqueue_struct *wq) if (WARN_ON(!wq_online)) return; - lock_map_acquire(&wq->lockdep_map); - lock_map_release(&wq->lockdep_map); + touch_wq_lockdep_map(wq); mutex_lock(&wq->mutex); @@ -3768,6 +3786,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, struct worker *worker = NULL; struct worker_pool *pool; struct pool_workqueue *pwq; + struct workqueue_struct *wq; might_sleep(); @@ -3791,11 +3810,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, pwq = worker->current_pwq; } - check_flush_dependency(pwq->wq, work); + wq = pwq->wq; + check_flush_dependency(wq, work); insert_wq_barrier(pwq, barr, work, worker); raw_spin_unlock_irq(&pool->lock); + touch_work_lockdep_map(work, wq); + /* * Force a lock recursion deadlock when using flush_work() inside a * single-threaded or rescuer equipped workqueue. 
@@ -3805,11 +3827,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, * workqueues the deadlock happens when the rescuer stalls, blocking * forward progress. */ - if (!from_cancel && - (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { - lock_map_acquire(&pwq->wq->lockdep_map); - lock_map_release(&pwq->wq->lockdep_map); - } + if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer)) + touch_wq_lockdep_map(wq); + rcu_read_unlock(); return true; already_gone: @@ -3828,9 +3848,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) if (WARN_ON(!work->func)) return false; - lock_map_acquire(&work->lockdep_map); - lock_map_release(&work->lockdep_map); - if (start_flush_work(work, &barr, from_cancel)) { wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); From 2fcdb1b44491e08f5334a92c50e8f362e0d46f91 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:28:06 -1000 Subject: [PATCH 27/54] workqueue: Factor out init_cpu_worker_pool() Factor out init_cpu_worker_pool() from workqueue_init_early(). This is pure reorganization in preparation of BH workqueue support. Signed-off-by: Tejun Heo Tested-by: Allen Pais --- kernel/workqueue.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ac42c005b00b..767971a29c7a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7147,6 +7147,22 @@ static void __init restrict_unbound_cpumask(const char *name, const struct cpuma cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); } +static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice) +{ + BUG_ON(init_worker_pool(pool)); + pool->cpu = cpu; + cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); + cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); + pool->attrs->nice = nice; + pool->attrs->affn_strict = true; + pool->node = cpu_to_node(cpu); + + /* alloc pool ID */ + mutex_lock(&wq_pool_mutex); + BUG_ON(worker_pool_assign_id(pool)); + mutex_unlock(&wq_pool_mutex); +} + /** * workqueue_init_early - early init for workqueue subsystem * @@ -7207,20 +7223,8 @@ void __init workqueue_init_early(void) struct worker_pool *pool; i = 0; - for_each_cpu_worker_pool(pool, cpu) { - BUG_ON(init_worker_pool(pool)); - pool->cpu = cpu; - cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); - cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); - pool->attrs->nice = std_nice[i++]; - pool->attrs->affn_strict = true; - pool->node = cpu_to_node(cpu); - - /* alloc pool ID */ - mutex_lock(&wq_pool_mutex); - BUG_ON(worker_pool_assign_id(pool)); - mutex_unlock(&wq_pool_mutex); - } + for_each_cpu_worker_pool(pool, cpu) + init_cpu_worker_pool(pool, cpu, std_nice[i++]); } /* create default unbound and ordered wq attrs */ From 4cb1ef64609f9b0254184b2947824f4b46ccab22 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:28:06 -1000 Subject: [PATCH 28/54] workqueue: Implement BH workqueues to eventually replace tasklets The only generic interface to execute asynchronously in the BH context is tasklet; however, it's marked deprecated and has some design flaws such as the execution code accessing the tasklet item after the execution is complete which can lead to subtle use-after-free in certain usage scenarios and less-developed flush and cancel mechanisms. 
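As a sketch of what the conversion away from tasklets could look like with the interface added below (a minimal illustration; the my_dev names are made up and error handling is omitted), an interrupt handler that used to schedule a tasklet would queue a regular work item on the system BH workqueue instead and keep its non-sleeping callback body unchanged:

  #include <linux/interrupt.h>
  #include <linux/printk.h>
  #include <linux/workqueue.h>

  struct my_dev {
  	struct work_struct bh_work;	/* replaces a struct tasklet_struct */
  };

  /* Executes in softirq context on the queueing CPU; must not sleep. */
  static void my_dev_bh_workfn(struct work_struct *work)
  {
  	struct my_dev *dev = container_of(work, struct my_dev, bh_work);

  	/* former tasklet callback body, e.g. completing I/O for @dev */
  	pr_debug("bh work for %p\n", dev);
  }

  static irqreturn_t my_dev_irq(int irq, void *data)
  {
  	struct my_dev *dev = data;

  	/* was: tasklet_schedule(&dev->tasklet); */
  	queue_work(system_bh_wq, &dev->bh_work);
  	return IRQ_HANDLED;
  }

INIT_WORK(&dev->bh_work, my_dev_bh_workfn) at setup time and cancel_work_sync() or flush_work() at teardown take the place of tasklet_setup() and tasklet_kill(), with the regular workqueue flushing and canceling semantics.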
This patch implements BH workqueues which share the same semantics and features of regular workqueues but execute their work items in the softirq context. As there is always only one BH execution context per CPU, none of the concurrency management mechanisms applies and a BH workqueue can be thought of as a convenience wrapper around softirq. Except for the inability to sleep while executing and lack of max_active adjustments, BH workqueues and work items should behave the same as regular workqueues and work items. Currently, the execution is hooked to tasklet[_hi]. However, the goal is to convert all tasklet users over to BH workqueues. Once the conversion is complete, tasklet can be removed and BH workqueues can directly take over the tasklet softirqs. system_bh[_highpri]_wq are added. As queue-wide flushing doesn't exist in tasklet, all existing tasklet users should be able to use the system BH workqueues without creating their own workqueues. v3: - Add missing interrupt.h include. v2: - Instead of using tasklets, hook directly into its softirq action functions - tasklet[_hi]_action(). This is slightly cheaper and closer to the eventual code structure we want to arrive at. Suggested by Lai. - Lai also pointed out several places which need NULL worker->task handling or can use clarification. Updated. Signed-off-by: Tejun Heo Suggested-by: Linus Torvalds Link: http://lkml.kernel.org/r/CAHk-=wjDW53w4-YcSmgKC5RruiRLHmJ1sXeYdp_ZgVoBw=5byA@mail.gmail.com Tested-by: Allen Pais Reviewed-by: Lai Jiangshan --- Documentation/core-api/workqueue.rst | 29 ++- include/linux/workqueue.h | 11 + kernel/softirq.c | 3 + kernel/workqueue.c | 289 ++++++++++++++++++++++----- tools/workqueue/wq_dump.py | 11 +- 5 files changed, 284 insertions(+), 59 deletions(-) diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst index 33c4539155d9..2d6af6c4665c 100644 --- a/Documentation/core-api/workqueue.rst +++ b/Documentation/core-api/workqueue.rst @@ -77,10 +77,12 @@ wants a function to be executed asynchronously it has to set up a work item pointing to that function and queue that work item on a workqueue. -Special purpose threads, called worker threads, execute the functions -off of the queue, one after the other. If no work is queued, the -worker threads become idle. These worker threads are managed in so -called worker-pools. +A work item can be executed in either a thread or the BH (softirq) context. + +For threaded workqueues, special purpose threads, called [k]workers, execute +the functions off of the queue, one after the other. If no work is queued, +the worker threads become idle. These worker threads are managed in +worker-pools. The cmwq design differentiates between the user-facing workqueues that subsystems and drivers queue work items on and the backend mechanism @@ -91,6 +93,12 @@ for high priority ones, for each possible CPU and some extra worker-pools to serve work items queued on unbound workqueues - the number of these backing pools is dynamic. +BH workqueues use the same framework. However, as there can only be one +concurrent execution context, there's no need to worry about concurrency. +Each per-CPU BH worker pool contains only one pseudo worker which represents +the BH execution context. A BH workqueue can be considered a convenience +interface to softirq. + Subsystems and drivers can create and queue work items through special workqueue API functions as they see fit. 
They can influence some aspects of the way the work items are executed by setting flags on the @@ -106,7 +114,7 @@ unless specifically overridden, a work item of a bound workqueue will be queued on the worklist of either normal or highpri worker-pool that is associated to the CPU the issuer is running on. -For any worker pool implementation, managing the concurrency level +For any thread pool implementation, managing the concurrency level (how many execution contexts are active) is an important issue. cmwq tries to keep the concurrency at a minimal but sufficient level. Minimal to save resources and sufficient in that the system is used at @@ -164,6 +172,17 @@ resources, scheduled and executed. ``flags`` --------- +``WQ_BH`` + BH workqueues can be considered a convenience interface to softirq. BH + workqueues are always per-CPU and all BH work items are executed in the + queueing CPU's softirq context in the queueing order. + + All BH workqueues must have 0 ``max_active`` and ``WQ_HIGHPRI`` is the + only allowed additional flag. + + BH work items cannot sleep. All other features such as delayed queueing, + flushing and canceling are supported. + ``WQ_UNBOUND`` Work items queued to an unbound wq are served by the special worker-pools which host workers which are not bound to any diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 232baea90a1d..283d7891b4c4 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -353,6 +353,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } * Documentation/core-api/workqueue.rst. */ enum wq_flags { + WQ_BH = 1 << 0, /* execute in bottom half (softirq) context */ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ @@ -392,6 +393,9 @@ enum wq_flags { __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */ + + /* BH wq only allows the following flags */ + __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI, }; enum wq_consts { @@ -434,6 +438,9 @@ enum wq_consts { * they are same as their non-power-efficient counterparts - e.g. * system_power_efficient_wq is identical to system_wq if * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. + * + * system_bh[_highpri]_wq are convenience interface to softirq. BH work items + * are executed in the queueing CPU's BH context in the queueing order. 
*/ extern struct workqueue_struct *system_wq; extern struct workqueue_struct *system_highpri_wq; @@ -442,6 +449,10 @@ extern struct workqueue_struct *system_unbound_wq; extern struct workqueue_struct *system_freezable_wq; extern struct workqueue_struct *system_power_efficient_wq; extern struct workqueue_struct *system_freezable_power_efficient_wq; +extern struct workqueue_struct *system_bh_wq; +extern struct workqueue_struct *system_bh_highpri_wq; + +void workqueue_softirq_action(bool highpri); /** * alloc_workqueue - allocate a workqueue diff --git a/kernel/softirq.c b/kernel/softirq.c index 210cf5f8d92c..547d282548a8 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -802,11 +803,13 @@ static void tasklet_action_common(struct softirq_action *a, static __latent_entropy void tasklet_action(struct softirq_action *a) { + workqueue_softirq_action(false); tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ); } static __latent_entropy void tasklet_hi_action(struct softirq_action *a) { + workqueue_softirq_action(true); tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 767971a29c7a..78b4b992e1a3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -72,8 +73,12 @@ enum worker_pool_flags { * Note that DISASSOCIATED should be flipped only while holding * wq_pool_attach_mutex to avoid changing binding state while * worker_attach_to_pool() is in progress. + * + * As there can only be one concurrent BH execution context per CPU, a + * BH pool is per-CPU and always DISASSOCIATED. */ - POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ + POOL_BH = 1 << 0, /* is a BH pool */ + POOL_MANAGER_ACTIVE = 1 << 1, /* being managed */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ }; @@ -115,6 +120,14 @@ enum wq_internal_consts { WQ_NAME_LEN = 32, }; +/* + * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and + * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because + * msecs_to_jiffies() can't be an initializer. + */ +#define BH_WORKER_JIFFIES msecs_to_jiffies(2) +#define BH_WORKER_RESTARTS 10 + /* * Structure fields follow one of the following exclusion rules. 
* @@ -443,8 +456,13 @@ static bool wq_debug_force_rr_cpu = false; #endif module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); +/* the BH worker pools */ +static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], + bh_worker_pools); + /* the per-cpu worker pools */ -static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools); +static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], + cpu_worker_pools); static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */ @@ -478,6 +496,10 @@ struct workqueue_struct *system_power_efficient_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_power_efficient_wq); struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); +struct workqueue_struct *system_bh_wq; +EXPORT_SYMBOL_GPL(system_bh_wq); +struct workqueue_struct *system_bh_highpri_wq; +EXPORT_SYMBOL_GPL(system_bh_highpri_wq); static int worker_thread(void *__worker); static void workqueue_sysfs_unregister(struct workqueue_struct *wq); @@ -498,6 +520,11 @@ static void show_one_worker_pool(struct worker_pool *pool); !lockdep_is_held(&wq_pool_mutex), \ "RCU, wq->mutex or wq_pool_mutex should be held") +#define for_each_bh_worker_pool(pool, cpu) \ + for ((pool) = &per_cpu(bh_worker_pools, cpu)[0]; \ + (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ + (pool)++) + #define for_each_cpu_worker_pool(pool, cpu) \ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ @@ -1186,6 +1213,14 @@ static bool kick_pool(struct worker_pool *pool) if (!need_more_worker(pool) || !worker) return false; + if (pool->flags & POOL_BH) { + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + raise_softirq_irqoff(HI_SOFTIRQ); + else + raise_softirq_irqoff(TASKLET_SOFTIRQ); + return true; + } + p = worker->task; #ifdef CONFIG_SMP @@ -1668,7 +1703,7 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) lockdep_assert_held(&pool->lock); if (!nna) { - /* per-cpu workqueue, pwq->nr_active is sufficient */ + /* BH or per-cpu workqueue, pwq->nr_active is sufficient */ obtained = pwq->nr_active < READ_ONCE(wq->max_active); goto out; } @@ -2523,19 +2558,21 @@ static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) * cpu-[un]hotplugs. */ static void worker_attach_to_pool(struct worker *worker, - struct worker_pool *pool) + struct worker_pool *pool) { mutex_lock(&wq_pool_attach_mutex); /* - * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains - * stable across this function. See the comments above the flag - * definition for details. + * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains stable + * across this function. See the comments above the flag definition for + * details. BH workers are, while per-CPU, always DISASSOCIATED. 
*/ - if (pool->flags & POOL_DISASSOCIATED) + if (pool->flags & POOL_DISASSOCIATED) { worker->flags |= WORKER_UNBOUND; - else + } else { + WARN_ON_ONCE(pool->flags & POOL_BH); kthread_set_per_cpu(worker->task, pool->cpu); + } if (worker->rescue_wq) set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); @@ -2559,6 +2596,9 @@ static void worker_detach_from_pool(struct worker *worker) struct worker_pool *pool = worker->pool; struct completion *detach_completion = NULL; + /* there is one permanent BH worker per CPU which should never detach */ + WARN_ON_ONCE(pool->flags & POOL_BH); + mutex_lock(&wq_pool_attach_mutex); kthread_set_per_cpu(worker->task, -1); @@ -2610,27 +2650,29 @@ static struct worker *create_worker(struct worker_pool *pool) worker->id = id; - if (pool->cpu >= 0) - snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, - pool->attrs->nice < 0 ? "H" : ""); - else - snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); + if (!(pool->flags & POOL_BH)) { + if (pool->cpu >= 0) + snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, + pool->attrs->nice < 0 ? "H" : ""); + else + snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); - worker->task = kthread_create_on_node(worker_thread, worker, pool->node, - "kworker/%s", id_buf); - if (IS_ERR(worker->task)) { - if (PTR_ERR(worker->task) == -EINTR) { - pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", - id_buf); - } else { - pr_err_once("workqueue: Failed to create a worker thread: %pe", - worker->task); + worker->task = kthread_create_on_node(worker_thread, worker, + pool->node, "kworker/%s", id_buf); + if (IS_ERR(worker->task)) { + if (PTR_ERR(worker->task) == -EINTR) { + pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", + id_buf); + } else { + pr_err_once("workqueue: Failed to create a worker thread: %pe", + worker->task); + } + goto fail; } - goto fail; - } - set_user_nice(worker->task, pool->attrs->nice); - kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); + set_user_nice(worker->task, pool->attrs->nice); + kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); + } /* successful, attach the worker to the pool */ worker_attach_to_pool(worker, pool); @@ -2646,7 +2688,8 @@ static struct worker *create_worker(struct worker_pool *pool) * check if not woken up soon. As kick_pool() is noop if @pool is empty, * wake it up explicitly. */ - wake_up_process(worker->task); + if (worker->task) + wake_up_process(worker->task); raw_spin_unlock_irq(&pool->lock); @@ -2988,7 +3031,8 @@ __acquires(&pool->lock) worker->current_work = work; worker->current_func = work->func; worker->current_pwq = pwq; - worker->current_at = worker->task->se.sum_exec_runtime; + if (worker->task) + worker->current_at = worker->task->se.sum_exec_runtime; work_data = *work_data_bits(work); worker->current_color = get_work_color(work_data); @@ -3086,7 +3130,8 @@ __acquires(&pool->lock) * stop_machine. At the same time, report a quiescent RCU state so * the same condition doesn't freeze RCU. */ - cond_resched(); + if (worker->task) + cond_resched(); raw_spin_lock_irq(&pool->lock); @@ -3369,6 +3414,61 @@ repeat: goto repeat; } +static void bh_worker(struct worker *worker) +{ + struct worker_pool *pool = worker->pool; + int nr_restarts = BH_WORKER_RESTARTS; + unsigned long end = jiffies + BH_WORKER_JIFFIES; + + raw_spin_lock_irq(&pool->lock); + worker_leave_idle(worker); + + /* + * This function follows the structure of worker_thread(). See there for + * explanations on each step. 
+ */ + if (!need_more_worker(pool)) + goto done; + + WARN_ON_ONCE(!list_empty(&worker->scheduled)); + worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); + + do { + struct work_struct *work = + list_first_entry(&pool->worklist, + struct work_struct, entry); + + if (assign_work(work, worker, NULL)) + process_scheduled_works(worker); + } while (keep_working(pool) && + --nr_restarts && time_before(jiffies, end)); + + worker_set_flags(worker, WORKER_PREP); +done: + worker_enter_idle(worker); + kick_pool(pool); + raw_spin_unlock_irq(&pool->lock); +} + +/* + * TODO: Convert all tasklet users to workqueue and use softirq directly. + * + * This is currently called from tasklet[_hi]action() and thus is also called + * whenever there are tasklets to run. Let's do an early exit if there's nothing + * queued. Once conversion from tasklet is complete, the need_more_worker() test + * can be dropped. + * + * After full conversion, we'll add worker->softirq_action, directly use the + * softirq action and obtain the worker pointer from the softirq_action pointer. + */ +void workqueue_softirq_action(bool highpri) +{ + struct worker_pool *pool = + &per_cpu(bh_worker_pools, smp_processor_id())[highpri]; + if (need_more_worker(pool)) + bh_worker(list_first_entry(&pool->workers, struct worker, node)); +} + /** * check_flush_dependency - check for flush dependency sanity * @target_wq: workqueue being flushed @@ -3441,6 +3541,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, struct wq_barrier *barr, struct work_struct *target, struct worker *worker) { + static __maybe_unused struct lock_class_key bh_key, thr_key; unsigned int work_flags = 0; unsigned int work_color; struct list_head *head; @@ -3450,8 +3551,13 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, * as we know for sure that this will not trigger any of the * checks and call back into the fixup functions where we * might deadlock. + * + * BH and threaded workqueues need separate lockdep keys to avoid + * spuriously triggering "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} + * usage". */ - INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); + INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func, + (pwq->wq->flags & WQ_BH) ? 
&bh_key : &thr_key); __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion_map(&barr->done, &target->lockdep_map); @@ -3557,15 +3663,31 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, static void touch_wq_lockdep_map(struct workqueue_struct *wq) { +#ifdef CONFIG_LOCKDEP + if (wq->flags & WQ_BH) + local_bh_disable(); + lock_map_acquire(&wq->lockdep_map); lock_map_release(&wq->lockdep_map); + + if (wq->flags & WQ_BH) + local_bh_enable(); +#endif } static void touch_work_lockdep_map(struct work_struct *work, struct workqueue_struct *wq) { +#ifdef CONFIG_LOCKDEP + if (wq->flags & WQ_BH) + local_bh_disable(); + lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); + + if (wq->flags & WQ_BH) + local_bh_enable(); +#endif } /** @@ -5019,10 +5141,17 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq) if (!(wq->flags & WQ_UNBOUND)) { for_each_possible_cpu(cpu) { - struct pool_workqueue **pwq_p = - per_cpu_ptr(wq->cpu_pwq, cpu); - struct worker_pool *pool = - &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); + struct pool_workqueue **pwq_p; + struct worker_pool __percpu *pools; + struct worker_pool *pool; + + if (wq->flags & WQ_BH) + pools = bh_worker_pools; + else + pools = cpu_worker_pools; + + pool = &(per_cpu_ptr(pools, cpu)[highpri]); + pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu); *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); @@ -5197,6 +5326,13 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, size_t wq_size; int name_len; + if (flags & WQ_BH) { + if (WARN_ON_ONCE(flags & ~__WQ_BH_ALLOWS)) + return NULL; + if (WARN_ON_ONCE(max_active)) + return NULL; + } + /* * Unbound && max_active == 1 used to imply ordered, which is no longer * the case on many machines due to per-pod pools. While @@ -5234,8 +5370,16 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); - max_active = max_active ?: WQ_DFL_ACTIVE; - max_active = wq_clamp_max_active(max_active, flags, wq->name); + if (flags & WQ_BH) { + /* + * BH workqueues always share a single execution context per CPU + * and don't impose any max_active limit. + */ + max_active = INT_MAX; + } else { + max_active = max_active ?: WQ_DFL_ACTIVE; + max_active = wq_clamp_max_active(max_active, flags, wq->name); + } /* init wq */ wq->flags = flags; @@ -5416,6 +5560,9 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); */ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) { + /* max_active doesn't mean anything for BH workqueues */ + if (WARN_ON(wq->flags & WQ_BH)) + return; /* disallow meddling with max_active for ordered workqueues */ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return; @@ -5617,7 +5764,24 @@ static void pr_cont_pool_info(struct worker_pool *pool) pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); if (pool->node != NUMA_NO_NODE) pr_cont(" node=%d", pool->node); - pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); + pr_cont(" flags=0x%x", pool->flags); + if (pool->flags & POOL_BH) + pr_cont(" bh%s", + pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); + else + pr_cont(" nice=%d", pool->attrs->nice); +} + +static void pr_cont_worker_id(struct worker *worker) +{ + struct worker_pool *pool = worker->pool; + + if (pool->flags & WQ_BH) + pr_cont("bh%s", + pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); + else + pr_cont("%d%s", task_pid_nr(worker->task), + worker->rescue_wq ? 
"(RESCUER)" : ""); } struct pr_cont_work_struct { @@ -5694,10 +5858,9 @@ static void show_pwq(struct pool_workqueue *pwq) if (worker->current_pwq != pwq) continue; - pr_cont("%s %d%s:%ps", comma ? "," : "", - task_pid_nr(worker->task), - worker->rescue_wq ? "(RESCUER)" : "", - worker->current_func); + pr_cont(" %s", comma ? "," : ""); + pr_cont_worker_id(worker); + pr_cont(":%ps", worker->current_func); list_for_each_entry(work, &worker->scheduled, entry) pr_cont_work(false, work, &pcws); pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); @@ -5816,8 +5979,8 @@ static void show_one_worker_pool(struct worker_pool *pool) pr_cont(" manager: %d", task_pid_nr(pool->manager->task)); list_for_each_entry(worker, &pool->idle_list, entry) { - pr_cont(" %s%d", first ? "idle: " : "", - task_pid_nr(worker->task)); + pr_cont(" %s", first ? "idle: " : ""); + pr_cont_worker_id(worker); first = false; } pr_cont("\n"); @@ -6090,13 +6253,15 @@ int workqueue_online_cpu(unsigned int cpu) mutex_lock(&wq_pool_mutex); for_each_pool(pool, pi) { - mutex_lock(&wq_pool_attach_mutex); + /* BH pools aren't affected by hotplug */ + if (pool->flags & POOL_BH) + continue; + mutex_lock(&wq_pool_attach_mutex); if (pool->cpu == cpu) rebind_workers(pool); else if (pool->cpu < 0) restore_unbound_workers_cpumask(pool, cpu); - mutex_unlock(&wq_pool_attach_mutex); } @@ -7053,7 +7218,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) /* did we stall? */ if (time_after(now, ts + thresh)) { lockup_detected = true; - if (pool->cpu >= 0) { + if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) { pool->cpu_stall = true; cpu_pool_stall = true; } @@ -7218,10 +7383,16 @@ void __init workqueue_init_early(void) pt->pod_node[0] = NUMA_NO_NODE; pt->cpu_pod[0] = 0; - /* initialize CPU pools */ + /* initialize BH and CPU pools */ for_each_possible_cpu(cpu) { struct worker_pool *pool; + i = 0; + for_each_bh_worker_pool(pool, cpu) { + init_cpu_worker_pool(pool, cpu, std_nice[i++]); + pool->flags |= POOL_BH; + } + i = 0; for_each_cpu_worker_pool(pool, cpu) init_cpu_worker_pool(pool, cpu, std_nice[i++]); @@ -7257,10 +7428,14 @@ void __init workqueue_init_early(void) system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient", WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0); + system_bh_wq = alloc_workqueue("events_bh", WQ_BH, 0); + system_bh_highpri_wq = alloc_workqueue("events_bh_highpri", + WQ_BH | WQ_HIGHPRI, 0); BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || !system_unbound_wq || !system_freezable_wq || !system_power_efficient_wq || - !system_freezable_power_efficient_wq); + !system_freezable_power_efficient_wq || + !system_bh_wq || !system_bh_highpri_wq); } static void __init wq_cpu_intensive_thresh_init(void) @@ -7326,9 +7501,10 @@ void __init workqueue_init(void) * up. Also, create a rescuer for workqueues that requested it. */ for_each_possible_cpu(cpu) { - for_each_cpu_worker_pool(pool, cpu) { + for_each_bh_worker_pool(pool, cpu) + pool->node = cpu_to_node(cpu); + for_each_cpu_worker_pool(pool, cpu) pool->node = cpu_to_node(cpu); - } } list_for_each_entry(wq, &workqueues, list) { @@ -7339,7 +7515,16 @@ void __init workqueue_init(void) mutex_unlock(&wq_pool_mutex); - /* create the initial workers */ + /* + * Create the initial workers. A BH pool has one pseudo worker that + * represents the shared BH execution context and thus doesn't get + * affected by hotplug events. Create the BH pseudo workers for all + * possible CPUs here. 
+ */ + for_each_possible_cpu(cpu) + for_each_bh_worker_pool(pool, cpu) + BUG_ON(!create_worker(pool)); + for_each_online_cpu(cpu) { for_each_cpu_worker_pool(pool, cpu) { pool->flags &= ~POOL_DISASSOCIATED; diff --git a/tools/workqueue/wq_dump.py b/tools/workqueue/wq_dump.py index bd381511bd9a..d29b918306b4 100644 --- a/tools/workqueue/wq_dump.py +++ b/tools/workqueue/wq_dump.py @@ -79,7 +79,9 @@ def cpumask_str(cpumask): wq_type_len = 9 def wq_type_str(wq): - if wq.flags & WQ_UNBOUND: + if wq.flags & WQ_BH: + return f'{"bh":{wq_type_len}}' + elif wq.flags & WQ_UNBOUND: if wq.flags & WQ_ORDERED: return f'{"ordered":{wq_type_len}}' else: @@ -97,6 +99,7 @@ wq_pod_types = prog['wq_pod_types'] wq_affn_dfl = prog['wq_affn_dfl'] wq_affn_names = prog['wq_affn_names'] +WQ_BH = prog['WQ_BH'] WQ_UNBOUND = prog['WQ_UNBOUND'] WQ_ORDERED = prog['__WQ_ORDERED'] WQ_MEM_RECLAIM = prog['WQ_MEM_RECLAIM'] @@ -107,6 +110,8 @@ WQ_AFFN_CACHE = prog['WQ_AFFN_CACHE'] WQ_AFFN_NUMA = prog['WQ_AFFN_NUMA'] WQ_AFFN_SYSTEM = prog['WQ_AFFN_SYSTEM'] +POOL_BH = prog['POOL_BH'] + WQ_NAME_LEN = prog['WQ_NAME_LEN'].value_() cpumask_str_len = len(cpumask_str(wq_unbound_cpumask)) @@ -151,10 +156,12 @@ for pi, pool in idr_for_each(worker_pool_idr): for pi, pool in idr_for_each(worker_pool_idr): pool = drgn.Object(prog, 'struct worker_pool', address=pool) - print(f'pool[{pi:0{max_pool_id_len}}] ref={pool.refcnt.value_():{max_ref_len}} nice={pool.attrs.nice.value_():3} ', end='') + print(f'pool[{pi:0{max_pool_id_len}}] flags=0x{pool.flags.value_():02x} ref={pool.refcnt.value_():{max_ref_len}} nice={pool.attrs.nice.value_():3} ', end='') print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='') if pool.cpu >= 0: print(f'cpu={pool.cpu.value_():3}', end='') + if pool.flags & POOL_BH: + print(' bh', end='') else: print(f'cpus={cpumask_str(pool.attrs.cpumask)}', end='') print(f' pod_cpus={cpumask_str(pool.attrs.__pod_cpumask)}', end='') From 4f19b8e01e2fb6c97d4307abb7bde4d34a1e601e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 5 Feb 2024 07:18:08 -1000 Subject: [PATCH 29/54] Revert "workqueue: make wq_subsys const" This reverts commit d412ace11144aa2bf692c7cf9778351efc15c827. This leads to build failures as it depends on a driver-core commit 32f78abe59c7 ("driver core: bus: constantify subsys_register() calls"). Let's drop it from wq tree and route it through driver-core tree. Signed-off-by: Tejun Heo Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202402051505.kM9Rr3CJ-lkp@intel.com/ --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 78b4b992e1a3..524a7fff52af 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6874,7 +6874,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = { __ATTR_NULL, }; -static const struct bus_type wq_subsys = { +static struct bus_type wq_subsys = { .name = "workqueue", .dev_groups = wq_sysfs_groups, }; From 96068b6030391082bf0cd97af525d731afa5ad63 Mon Sep 17 00:00:00 2001 From: Wang Jinchao Date: Mon, 5 Feb 2024 08:31:52 +0800 Subject: [PATCH 30/54] workqueue: fix a typo in comment There should be three, fix it. 
Signed-off-by: Wang Jinchao Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 524a7fff52af..d7fdb631ecf7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7604,7 +7604,7 @@ static bool __init cpus_share_numa(int cpu0, int cpu1) /** * workqueue_init_topology - initialize CPU pods for unbound workqueues * - * This is the third step of there-staged workqueue subsystem initialization and + * This is the third step of three-staged workqueue subsystem initialization and * invoked after SMP and topology information are fully initialized. It * initializes the unbound CPU pods accordingly. */ From 8eb17dc1a6b5db7e89681f59285242af8d182f95 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Sat, 3 Feb 2024 10:43:30 -0500 Subject: [PATCH 31/54] workqueue: Skip __WQ_DESTROYING workqueues when updating global unbound cpumask Skip updating workqueues with __WQ_DESTROYING bit set when updating global unbound cpumask to avoid unnecessary work and other complications. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d7fdb631ecf7..68c48489eab3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6501,7 +6501,7 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) lockdep_assert_held(&wq_pool_mutex); list_for_each_entry(wq, &workqueues, list) { - if (!(wq->flags & WQ_UNBOUND)) + if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING)) continue; /* creating multiple pwqs breaks ordering guarantee */ From 3bc1e711c26bff01d41ad71145ecb8dcb4412576 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 5 Feb 2024 14:19:10 -1000 Subject: [PATCH 32/54] workqueue: Don't implicitly make UNBOUND workqueues w/ @max_active==1 ordered 5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered") automatically promoted UNBOUND workqueues w/ @max_active==1 to ordered workqueues because UNBOUND workqueues w/ @max_active==1 used to be the way to create ordered workqueues and the new NUMA support broke it. These problems can be subtle and the fact that they can only trigger on NUMA machines made them even more difficult to debug. However, overloading the UNBOUND allocation interface this way creates other issues. It's difficult to tell whether a given workqueue actually needs to be ordered and users that legitimately want a min concurrency level wq unexpectedly get an ordered one instead. With planned UNBOUND workqueue updates to improve execution locality and more prevalence of chiplet designs which can benefit from such improvements, this isn't a state we want to be in forever. There aren't that many UNBOUND w/ @max_active==1 users in the tree and the preceding patches audited all and converted them to alloc_ordered_workqueue() as appropriate. This patch removes the implicit promotion of UNBOUND w/ @max_active==1 workqueues to ordered ones. v2: v1 patch incorrectly dropped !list_empty(&wq->pwqs) condition in apply_workqueue_attrs_locked() which spuriously triggers WARNING and fails workqueue creation. Fix it.
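For illustration only (not part of this patch; the workqueue names and variables below are invented), a caller that used to rely on the implicit promotion now has to choose explicitly between a minimum concurrency level and strict ordering:

	/*
	 * Unbound with max_active == 1: after this change it only caps
	 * concurrency at one work item; it no longer implies execution in
	 * strict queueing order.
	 */
	struct workqueue_struct *min_conc_wq =
		alloc_workqueue("example_wq", WQ_UNBOUND, 1);

	/* Strict FIFO, one item in flight at a time: ask for it explicitly. */
	struct workqueue_struct *ordered_wq =
		alloc_ordered_workqueue("example_ordered_wq", 0);
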
Signed-off-by: Tejun Heo Reported-by: kernel test robot Link: https://lore.kernel.org/oe-lkp/202304251050.45a5df1f-oliver.sang@intel.com --- Documentation/core-api/workqueue.rst | 14 +++++--------- include/linux/workqueue.h | 4 +--- kernel/workqueue.c | 22 ++++------------------ 3 files changed, 10 insertions(+), 30 deletions(-) diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst index 2d6af6c4665c..9572609b5263 100644 --- a/Documentation/core-api/workqueue.rst +++ b/Documentation/core-api/workqueue.rst @@ -256,15 +256,11 @@ may queue at the same time. Unless there is a specific need for throttling the number of active work items, specifying '0' is recommended. -Some users depend on the strict execution ordering of ST wq. The -combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` used to -achieve this behavior. Work items on such wq were always queued to the -unbound worker-pools and only one work item could be active at any given -time thus achieving the same ordering property as ST wq. - -In the current implementation the above configuration only guarantees -ST behavior within a given NUMA node. Instead ``alloc_ordered_workqueue()`` should -be used to achieve system-wide ST behavior. +Some users depend on strict execution ordering where only one work item +is in flight at any given time and the work items are processed in +queueing order. While the combination of ``@max_active`` of 1 and +``WQ_UNBOUND`` used to achieve this behavior, this is no longer the +case. Use ``alloc_ordered_queue()`` instead. Example Execution Scenarios diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 283d7891b4c4..4ba33cf07f11 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -392,7 +392,6 @@ enum wq_flags { __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ - __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */ /* BH wq only allows the following flags */ __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI, @@ -507,8 +506,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); * Pointer to the allocated workqueue on success, %NULL on failure. */ #define alloc_ordered_workqueue(fmt, flags, args...) \ - alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ - __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) + alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) #define create_workqueue(name) \ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 68c48489eab3..ecc775843bfa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5007,12 +5007,8 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, return -EINVAL; /* creating multiple pwqs breaks ordering guarantee */ - if (!list_empty(&wq->pwqs)) { - if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) - return -EINVAL; - - wq->flags &= ~__WQ_ORDERED; - } + if (!list_empty(&wq->pwqs) && WARN_ON(wq->flags & __WQ_ORDERED)) + return -EINVAL; ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); if (IS_ERR(ctx)) @@ -5333,15 +5329,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, return NULL; } - /* - * Unbound && max_active == 1 used to imply ordered, which is no longer - * the case on many machines due to per-pod pools. 
While - * alloc_ordered_workqueue() is the right way to create an ordered - * workqueue, keep the previous behavior to avoid subtle breakages. - */ - if ((flags & WQ_UNBOUND) && max_active == 1) - flags |= __WQ_ORDERED; - /* see the comment above the definition of WQ_POWER_EFFICIENT */ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) flags |= WQ_UNBOUND; @@ -5564,14 +5551,13 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) if (WARN_ON(wq->flags & WQ_BH)) return; /* disallow meddling with max_active for ordered workqueues */ - if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + if (WARN_ON(wq->flags & __WQ_ORDERED)) return; max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); mutex_lock(&wq->mutex); - wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; if (wq->flags & WQ_UNBOUND) wq->saved_min_active = min(wq->saved_min_active, max_active); @@ -7028,7 +7014,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) * attributes breaks ordering guarantee. Disallow exposing ordered * workqueues. */ - if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + if (WARN_ON(wq->flags & __WQ_ORDERED)) return -EINVAL; wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); From 26fb7e3dda4c16e2cfe2164a1e7315a9386602db Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 8 Feb 2024 11:10:11 -0500 Subject: [PATCH 33/54] workqueue: Link pwq's into wq->pwqs from oldest to newest Add a new pwq into the tail of wq->pwqs so that pwq iteration will start from the oldest pwq to the newest. This ordering will facilitate the inclusion of ordered workqueues in a wq_unbound_cpumask update. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cf514ba0dfc3..fa7bd3b34f52 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4804,7 +4804,7 @@ static void link_pwq(struct pool_workqueue *pwq) pwq->work_color = wq->work_color; /* link in @pwq */ - list_add_rcu(&pwq->pwqs_node, &wq->pwqs); + list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs); } /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ From 4c065dbce1e8639546ef3612acffb062dd084cfe Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 8 Feb 2024 14:12:20 -0500 Subject: [PATCH 34/54] workqueue: Enable unbound cpumask update on ordered workqueues Ordered workqueues do not currently follow changes made to the global unbound cpumask because per-pool workqueue changes may break the ordering guarantee. IOW, a work function in an ordered workqueue may run on an isolated CPU. This patch enables ordered workqueues to follow changes made to the global unbound cpumask by temporarily plugging (suspending) the newly allocated pool_workqueue so that it does not execute newly queued work items until the old pwq has been properly drained. For ordered workqueues, there should only be one pwq that is unplugged; the rest should be plugged. This enables ordered workqueues to follow the unbound cpumask changes like other unbound workqueues at the expense of some delay in execution of work functions during the transition period.
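As an illustration of the resulting behavior (the workqueue, work items and functions below are invented for the example, not taken from this patch), an ordered workqueue keeps its one-at-a-time, queueing-order guarantee across a cpumask update:

	static void example_fn(struct work_struct *work)
	{
		pr_info("executed %ps\n", work->func);
	}

	static struct workqueue_struct *ordered_wq;
	static DECLARE_WORK(item1, example_fn);
	static DECLARE_WORK(item2, example_fn);
	static DECLARE_WORK(item3, example_fn);

	static int __init example_init(void)
	{
		ordered_wq = alloc_ordered_workqueue("example_ordered", 0);
		if (!ordered_wq)
			return -ENOMEM;

		queue_work(ordered_wq, &item1);
		queue_work(ordered_wq, &item2);
		/*
		 * If the global unbound cpumask is changed at this point
		 * (e.g. via /sys/devices/virtual/workqueue/cpumask), a new
		 * pwq is created for ordered_wq but stays plugged; item3 is
		 * queued on it and only starts executing once item1 and
		 * item2 have drained from the old pwq.
		 */
		queue_work(ordered_wq, &item3);
		return 0;
	}
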
Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 69 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 59 insertions(+), 10 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index fa7bd3b34f52..da124859a691 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -255,6 +255,7 @@ struct pool_workqueue { int refcnt; /* L: reference count */ int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ + bool plugged; /* L: execution suspended */ /* * nr_active management and WORK_STRUCT_INACTIVE: @@ -1708,6 +1709,9 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) goto out; } + if (unlikely(pwq->plugged)) + return false; + /* * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is * already waiting on $nna, pwq_dec_nr_active() will maintain the @@ -1782,6 +1786,43 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) } } +/** + * unplug_oldest_pwq - restart an oldest plugged pool_workqueue + * @wq: workqueue_struct to be restarted + * + * pwq's are linked into wq->pwqs with the oldest first. For ordered + * workqueues, only the oldest pwq is unplugged, the others are plugged to + * suspend execution until the oldest one is drained. When this happens, the + * next oldest one (first plugged pwq in iteration) will be unplugged to + * restart work item execution to ensure proper work item ordering. + * + * dfl_pwq --------------+ [P] - plugged + * | + * v + * pwqs -> A -> B [P] -> C [P] (newest) + * | | | + * 1 3 5 + * | | | + * 2 4 6 + */ +static void unplug_oldest_pwq(struct workqueue_struct *wq) +{ + struct pool_workqueue *pwq; + + lockdep_assert_held(&wq->mutex); + + /* Caller should make sure that pwqs isn't empty before calling */ + pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue, + pwqs_node); + raw_spin_lock_irq(&pwq->pool->lock); + if (pwq->plugged) { + pwq->plugged = false; + if (pwq_activate_first_inactive(pwq, true)) + kick_pool(pwq->pool); + } + raw_spin_unlock_irq(&pwq->pool->lock); +} + /** * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active * @nna: wq_node_nr_active to activate a pending pwq for @@ -4740,6 +4781,13 @@ static void pwq_release_workfn(struct kthread_work *work) mutex_lock(&wq->mutex); list_del_rcu(&pwq->pwqs_node); is_last = list_empty(&wq->pwqs); + + /* + * For ordered workqueue with a plugged dfl_pwq, restart it now. + */ + if (!is_last && (wq->flags & __WQ_ORDERED)) + unplug_oldest_pwq(wq); + mutex_unlock(&wq->mutex); } @@ -4966,6 +5014,15 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); ctx->attrs = new_attrs; + /* + * For initialized ordered workqueues, there should only be one pwq + * (dfl_pwq). Set the plugged flag of ctx->dfl_pwq to suspend execution + * of newly queued work items until execution of older work items in + * the old pwq's have completed. 
+ */ + if ((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)) + ctx->dfl_pwq->plugged = true; + ctx->wq = wq; return ctx; @@ -5006,10 +5063,6 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, if (WARN_ON(!(wq->flags & WQ_UNBOUND))) return -EINVAL; - /* creating multiple pwqs breaks ordering guarantee */ - if (!list_empty(&wq->pwqs) && WARN_ON(wq->flags & __WQ_ORDERED)) - return -EINVAL; - ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -6489,9 +6542,6 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) list_for_each_entry(wq, &workqueues, list) { if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING)) continue; - /* creating multiple pwqs breaks ordering guarantee */ - if (wq->flags & __WQ_ORDERED) - continue; ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); if (IS_ERR(ctx)) { @@ -7006,9 +7056,8 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) int ret; /* - * Adjusting max_active or creating new pwqs by applying - * attributes breaks ordering guarantee. Disallow exposing ordered - * workqueues. + * Adjusting max_active breaks ordering guarantee. Disallow exposing + * ordered workqueues. */ if (WARN_ON(wq->flags & __WQ_ORDERED)) return -EINVAL; From d64f2fa064f8866802e23c8ec95d9d1f601480ee Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Thu, 8 Feb 2024 11:10:13 -0500 Subject: [PATCH 35/54] kernel/workqueue: Let rescuers follow unbound wq cpumask changes When workqueue cpumask changes are committed the associated rescuer (if one exists) affinity is not touched and this might be a problem down the line for isolated setups. Make sure rescuers affinity is updated every time a workqueue cpumask changes, so that rescuers can't break isolation. [longman: set_cpus_allowed_ptr() will block until the designated task is enqueued on an allowed CPU, no wake_up_process() needed. Also use the unbound_effective_cpumask() helper as suggested by Tejun.] Signed-off-by: Juri Lelli Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index da124859a691..a24c7cfb80b4 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5051,6 +5051,11 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) /* update node_nr_active->max */ wq_update_node_max_active(ctx->wq, -1); + /* rescuer needs to respect wq cpumask changes */ + if (ctx->wq->rescuer) + set_cpus_allowed_ptr(ctx->wq->rescuer->task, + unbound_effective_cpumask(ctx->wq)); + mutex_unlock(&ctx->wq->mutex); } From 49584bb8ddbe8bcfc276c2d7dd3c8890f45f5970 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 8 Feb 2024 11:10:14 -0500 Subject: [PATCH 36/54] workqueue: Bind unbound workqueue rescuer to wq_unbound_cpumask Commit 85f0ab43f9de ("kernel/workqueue: Bind rescuer to unbound cpumask for WQ_UNBOUND") modified init_rescuer() to bind rescuer of an unbound workqueue to the cpumask in wq->unbound_attrs. However unbound_attrs->cpumask's of all workqueues are initialized to cpu_possible_mask and will only be changed if it has the WQ_SYSFS flag to expose a cpumask sysfs file to be written by users. So this patch doesn't achieve what it is intended to do. 
If an unbound workqueue is created after wq_unbound_cpumask is modified and there is no more unbound cpumask update after that, the unbound rescuer will be bound to all CPUs unless the workqueue is created with the WQ_SYSFS flag and a user explicitly modified its cpumask sysfs file. Fix this problem by binding directly to wq_unbound_cpumask in init_rescuer(). Fixes: 85f0ab43f9de ("kernel/workqueue: Bind rescuer to unbound cpumask for WQ_UNBOUND") Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a24c7cfb80b4..cd2c6edc5c66 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5299,7 +5299,7 @@ static int init_rescuer(struct workqueue_struct *wq) wq->rescuer = rescuer; if (wq->flags & WQ_UNBOUND) - kthread_bind_mask(rescuer->task, wq->unbound_attrs->cpumask); + kthread_bind_mask(rescuer->task, wq_unbound_cpumask); else kthread_bind_mask(rescuer->task, cpu_possible_mask); wake_up_process(rescuer->task); From 516d3dc99f4f2ab856d879696cd3a5d7f6db7796 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Fri, 9 Feb 2024 12:06:11 -0500 Subject: [PATCH 37/54] workqueue: Fix kernel-doc comment of unplug_oldest_pwq() Fix the kernel-doc comment of the unplug_oldest_pwq() function to enable proper processing and formatting of the embedded ASCII diagram. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cd2c6edc5c66..ddcdeb7b9f26 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1787,14 +1787,12 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) } /** - * unplug_oldest_pwq - restart an oldest plugged pool_workqueue - * @wq: workqueue_struct to be restarted + * unplug_oldest_pwq - unplug the oldest pool_workqueue + * @wq: workqueue_struct where its oldest pwq is to be unplugged * - * pwq's are linked into wq->pwqs with the oldest first. For ordered - * workqueues, only the oldest pwq is unplugged, the others are plugged to - * suspend execution until the oldest one is drained. When this happens, the - * next oldest one (first plugged pwq in iteration) will be unplugged to - * restart work item execution to ensure proper work item ordering. + * This function should only be called for ordered workqueues where only the + * oldest pwq is unplugged, the others are plugged to suspend execution to + * ensure proper work item ordering:: * * dfl_pwq --------------+ [P] - plugged * | @@ -1804,6 +1802,11 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) * 1 3 5 * | | | * 2 4 6 + * + * When the oldest pwq is drained and removed, this function should be called + * to unplug the next oldest one to start its work item execution. Note that + * pwq's are linked into wq->pwqs with the oldest first, so the first one in + * the list is the oldest. */ static void unplug_oldest_pwq(struct workqueue_struct *wq) { From 8f172181f24bb5df7675225d9b5b66d059613f50 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 8 Feb 2024 14:11:56 -1000 Subject: [PATCH 38/54] workqueue: Implement workqueue_set_min_active() Since 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues"), unbound workqueues have separate min_active which sets the number of interdependent work items that can be handled. 
This value is currently initialized to WQ_DFL_MIN_ACTIVE which is 8. This isn't high enough for some users, let's add an interface to adjust the setting. Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 2 ++ kernel/workqueue.c | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4ba33cf07f11..1565bab9edc8 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -553,6 +553,8 @@ extern bool flush_rcu_work(struct rcu_work *rwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, int max_active); +extern void workqueue_set_min_active(struct workqueue_struct *wq, + int min_active); extern struct work_struct *current_work(void); extern bool current_is_workqueue_rescuer(void); extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ddcdeb7b9f26..4950bfc2cdcc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5629,6 +5629,33 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) } EXPORT_SYMBOL_GPL(workqueue_set_max_active); +/** + * workqueue_set_min_active - adjust min_active of an unbound workqueue + * @wq: target unbound workqueue + * @min_active: new min_active value + * + * Set min_active of an unbound workqueue. Unlike other types of workqueues, an + * unbound workqueue is not guaranteed to be able to process max_active + * interdependent work items. Instead, an unbound workqueue is guaranteed to be + * able to process min_active number of interdependent work items which is + * %WQ_DFL_MIN_ACTIVE by default. + * + * Use this function to adjust the min_active value between 0 and the current + * max_active. + */ +void workqueue_set_min_active(struct workqueue_struct *wq, int min_active) +{ + /* min_active is only meaningful for non-ordered unbound workqueues */ + if (WARN_ON((wq->flags & (WQ_BH | WQ_UNBOUND | __WQ_ORDERED)) != + WQ_UNBOUND)) + return; + + mutex_lock(&wq->mutex); + wq->saved_min_active = clamp(min_active, 0, wq->saved_max_active); + wq_adjust_max_active(wq); + mutex_unlock(&wq->mutex); +} + /** * current_work - retrieve %current task's work struct * From bf52b1ac6ab41a060511d56d0f2da12f3a2486db Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 8 Feb 2024 14:14:16 -1000 Subject: [PATCH 39/54] async: Use a dedicated unbound workqueue with raised min_active Async can schedule a number of interdependent work items. However, since 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues"), unbound workqueues have separate min_active which sets the number of interdependent work items that can be handled. This default value is 8 which isn't sufficient for async and can lead to stalls during resume from suspend in some cases. Let's use a dedicated unbound workqueue with raised min_active. Link: http://lkml.kernel.org/r/708a65cc-79ec-44a6-8454-a93d0f3114c3@samsung.com Reported-by: Marek Szyprowski Cc: Rafael J. 
Wysocki Tested-by: Marek Szyprowski Signed-off-by: Tejun Heo --- include/linux/async.h | 1 + init/main.c | 1 + kernel/async.c | 17 ++++++++++++++++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/include/linux/async.h b/include/linux/async.h index 33c9ff4afb49..19b778d08600 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -120,4 +120,5 @@ extern void async_synchronize_cookie(async_cookie_t cookie); extern void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain); extern bool current_is_async(void); +extern void async_init(void); #endif diff --git a/init/main.c b/init/main.c index e24b0780fdff..685db36c999f 100644 --- a/init/main.c +++ b/init/main.c @@ -1545,6 +1545,7 @@ static noinline void __init kernel_init_freeable(void) sched_init_smp(); workqueue_init_topology(); + async_init(); padata_init(); page_alloc_init_late(); diff --git a/kernel/async.c b/kernel/async.c index 97f224a5257b..4c3e6a44595f 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -64,6 +64,7 @@ static async_cookie_t next_cookie = 1; static LIST_HEAD(async_global_pending); /* pending from all registered doms */ static ASYNC_DOMAIN(async_dfl_domain); static DEFINE_SPINLOCK(async_lock); +static struct workqueue_struct *async_wq; struct async_entry { struct list_head domain_list; @@ -174,7 +175,7 @@ static async_cookie_t __async_schedule_node_domain(async_func_t func, spin_unlock_irqrestore(&async_lock, flags); /* schedule for execution */ - queue_work_node(node, system_unbound_wq, &entry->work); + queue_work_node(node, async_wq, &entry->work); return newcookie; } @@ -345,3 +346,17 @@ bool current_is_async(void) return worker && worker->current_func == async_run_entry_fn; } EXPORT_SYMBOL_GPL(current_is_async); + +void __init async_init(void) +{ + /* + * Async can schedule a number of interdependent work items. However, + * unbound workqueues can handle only upto min_active interdependent + * work items. The default min_active of 8 isn't sufficient for async + * and can lead to stalls. Let's use a dedicated workqueue with raised + * min_active. + */ + async_wq = alloc_workqueue("async", WQ_UNBOUND, 0); + BUG_ON(!async_wq); + workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE); +} From 2f34d7337d98f3eae7bd3d1270efaf9d8a17cfc6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Feb 2024 08:33:55 -1000 Subject: [PATCH 40/54] workqueue: Fix queue_work_on() with BH workqueues When queue_work_on() is used to queue a BH work item on a remote CPU, the work item is queued on that CPU but kick_pool() raises softirq on the local CPU. This leads to stalls as the work item won't be executed until something else on the remote CPU schedules a BH work item or tasklet locally. Fix it by bouncing raising softirq to the target CPU using per-cpu irq_work. 
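For illustration (the function and work item names are invented, not part of this fix), the affected case is queueing a BH work item on a specific remote CPU:

	/* Runs in softirq context on the CPU it was queued on; must not sleep. */
	static void example_bh_fn(struct work_struct *work)
	{
	}

	static DECLARE_WORK(example_bh_work, example_bh_fn);

	static void example_kick_cpu1(void)
	{
		/*
		 * Before the fix, the work item landed on CPU 1's BH pool but
		 * the softirq was raised on the queueing CPU, so the item
		 * could sit until CPU 1 ran BH work for some other reason.
		 * With the fix, kick_pool() queues an irq_work on CPU 1 which
		 * raises the softirq there.
		 */
		queue_work_on(1, system_bh_wq, &example_bh_work);
	}
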
Signed-off-by: Tejun Heo Fixes: 4cb1ef64609f ("workqueue: Implement BH workqueues to eventually replace tasklets") --- kernel/workqueue.c | 41 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4950bfc2cdcc..04e35dbe6799 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -54,6 +54,7 @@ #include #include #include +#include #include "workqueue_internal.h" @@ -457,6 +458,10 @@ static bool wq_debug_force_rr_cpu = false; #endif module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); +/* to raise softirq for the BH worker pools on other CPUs */ +static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], + bh_pool_irq_works); + /* the BH worker pools */ static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools); @@ -1197,6 +1202,13 @@ static bool assign_work(struct work_struct *work, struct worker *worker, return true; } +static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) +{ + int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0; + + return &per_cpu(bh_pool_irq_works, pool->cpu)[high]; +} + /** * kick_pool - wake up an idle worker if necessary * @pool: pool to kick @@ -1215,10 +1227,15 @@ static bool kick_pool(struct worker_pool *pool) return false; if (pool->flags & POOL_BH) { - if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) - raise_softirq_irqoff(HI_SOFTIRQ); - else - raise_softirq_irqoff(TASKLET_SOFTIRQ); + if (likely(pool->cpu == smp_processor_id())) { + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + raise_softirq_irqoff(HI_SOFTIRQ); + else + raise_softirq_irqoff(TASKLET_SOFTIRQ); + } else { + irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); + } + return true; } @@ -7367,6 +7384,16 @@ static inline void wq_watchdog_init(void) { } #endif /* CONFIG_WQ_WATCHDOG */ +static void bh_pool_kick_normal(struct irq_work *irq_work) +{ + raise_softirq_irqoff(TASKLET_SOFTIRQ); +} + +static void bh_pool_kick_highpri(struct irq_work *irq_work) +{ + raise_softirq_irqoff(HI_SOFTIRQ); +} + static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) { if (!cpumask_intersects(wq_unbound_cpumask, mask)) { @@ -7408,6 +7435,8 @@ void __init workqueue_init_early(void) { struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; + void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal, + bh_pool_kick_highpri }; int i, cpu; BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); @@ -7455,8 +7484,10 @@ void __init workqueue_init_early(void) i = 0; for_each_bh_worker_pool(pool, cpu) { - init_cpu_worker_pool(pool, cpu, std_nice[i++]); + init_cpu_worker_pool(pool, cpu, std_nice[i]); pool->flags |= POOL_BH; + init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]); + i++; } i = 0; From fd0a68a2337b79a7bd4dad5e7d9dc726828527af Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 15 Feb 2024 19:10:01 -1000 Subject: [PATCH 41/54] workqueue, irq_work: Build fix for !CONFIG_IRQ_WORK 2f34d7337d98 ("workqueue: Fix queue_work_on() with BH workqueues") added irq_work usage to workqueue; however, it turns out irq_work is actually optional and the change breaks build on configuration which doesn't have CONFIG_IRQ_WORK enabled. Fix build by making workqueue use irq_work only when CONFIG_SMP and enabling CONFIG_IRQ_WORK when CONFIG_SMP is set. It's reasonable to argue that it may be better to just always enable it. 
However, this still saves a small bit of memory for tiny UP configs and also the least amount of change, so, for now, let's keep it conditional. Verified to do the right thing for x86_64 allnoconfig and defconfig, and aarch64 allnoconfig, allnoconfig + prink disable (SMP but nothing selects IRQ_WORK) and a modified aarch64 Kconfig where !SMP and nothing selects IRQ_WORK. v2: `depends on SMP` leads to Kconfig warnings when CONFIG_IRQ_WORK is selected by something else when !CONFIG_SMP. Use `def_bool y if SMP` instead. Signed-off-by: Tejun Heo Reported-by: Naresh Kamboju Tested-by: Anders Roxell Fixes: 2f34d7337d98 ("workqueue: Fix queue_work_on() with BH workqueues") Cc: Stephen Rothwell --- init/Kconfig | 2 +- kernel/workqueue.c | 24 +++++++++++++++--------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 8df18f3a9748..0d21c9e0398f 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -106,7 +106,7 @@ config CONSTRUCTORS bool config IRQ_WORK - bool + def_bool y if SMP config BUILDTIME_TABLE_SORT bool diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 04e35dbe6799..6ae441e13804 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1209,6 +1209,20 @@ static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) return &per_cpu(bh_pool_irq_works, pool->cpu)[high]; } +static void kick_bh_pool(struct worker_pool *pool) +{ +#ifdef CONFIG_SMP + if (unlikely(pool->cpu != smp_processor_id())) { + irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); + return; + } +#endif + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + raise_softirq_irqoff(HI_SOFTIRQ); + else + raise_softirq_irqoff(TASKLET_SOFTIRQ); +} + /** * kick_pool - wake up an idle worker if necessary * @pool: pool to kick @@ -1227,15 +1241,7 @@ static bool kick_pool(struct worker_pool *pool) return false; if (pool->flags & POOL_BH) { - if (likely(pool->cpu == smp_processor_id())) { - if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) - raise_softirq_irqoff(HI_SOFTIRQ); - else - raise_softirq_irqoff(TASKLET_SOFTIRQ); - } else { - irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); - } - + kick_bh_pool(pool); return true; } From c7a40c49af920fbad2ab6795b6587308ad69de9f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:13 -1000 Subject: [PATCH 42/54] workqueue: Cosmetic changes Reorder some global declarations and adjust comments and whitespaces for clarity and consistency. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6ae441e13804..b280caf81fb2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -376,8 +376,6 @@ struct workqueue_struct { struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */ }; -static struct kmem_cache *pwq_cache; - /* * Each pod type describes how CPUs should be grouped for unbound workqueues. * See the comment above workqueue_attrs->affn_scope. 
@@ -389,20 +387,15 @@ struct wq_pod_type { int *cpu_pod; /* cpu -> pod */ }; -static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; -static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; - static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { - [WQ_AFFN_DFL] = "default", - [WQ_AFFN_CPU] = "cpu", - [WQ_AFFN_SMT] = "smt", - [WQ_AFFN_CACHE] = "cache", - [WQ_AFFN_NUMA] = "numa", - [WQ_AFFN_SYSTEM] = "system", + [WQ_AFFN_DFL] = "default", + [WQ_AFFN_CPU] = "cpu", + [WQ_AFFN_SMT] = "smt", + [WQ_AFFN_CACHE] = "cache", + [WQ_AFFN_NUMA] = "numa", + [WQ_AFFN_SYSTEM] = "system", }; -static bool wq_topo_initialized __read_mostly = false; - /* * Per-cpu work items which run for longer than the following threshold are * automatically considered CPU intensive and excluded from concurrency @@ -418,6 +411,12 @@ static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); module_param_named(power_efficient, wq_power_efficient, bool, 0444); static bool wq_online; /* can kworkers be created yet? */ +static bool wq_topo_initialized __read_mostly = false; + +static struct kmem_cache *pwq_cache; + +static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; +static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */ static struct workqueue_attrs *wq_update_pod_attrs_buf; @@ -2231,7 +2230,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, */ lockdep_assert_irqs_disabled(); - /* * For a draining wq, only works from the same workqueue are * allowed. The __WQ_DESTROYING helps to spot the issue that @@ -4121,8 +4119,8 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) local_irq_restore(flags); /* - * This allows canceling during early boot. We know that @work - * isn't executing. + * Skip __flush_work() during early boot when we know that @work isn't + * executing. This allows canceling during early boot. */ if (wq_online) __flush_work(work, true); From d355001fa9370df8fdd6fca0e9ed77063615c7da Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:13 -1000 Subject: [PATCH 43/54] workqueue: Use rcu_read_lock_any_held() instead of rcu_read_lock_held() The different flavors of RCU read critical sections have been unified. Let's update the locking assertion macros accordingly to avoid requiring unnecessary explicit rcu_read_[un]lock() calls. 
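A sketch of why this matters (illustrative only, not from the patch): with lockdep enabled, the two checks differ for callers that are in a non-plain RCU read-side critical section:

	static void example_reader(void)
	{
		rcu_read_lock_bh();
		/*
		 * Under CONFIG_PROVE_RCU:
		 *   rcu_read_lock_held()     -> false (tracks only the plain
		 *                               rcu_read_lock() flavor)
		 *   rcu_read_lock_any_held() -> true (any flavor, including
		 *                               bh/sched, or !preemptible())
		 * so assert_rcu_or_pool_mutex() is now satisfied here without
		 * an extra rcu_read_lock()/rcu_read_unlock() pair.
		 */
		rcu_read_unlock_bh();
	}
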
Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b280caf81fb2..87750e70b638 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -515,12 +515,12 @@ static void show_one_worker_pool(struct worker_pool *pool); #include #define assert_rcu_or_pool_mutex() \ - RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() && \ !lockdep_is_held(&wq_pool_mutex), \ "RCU or wq_pool_mutex should be held") #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ - RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() && \ !lockdep_is_held(&wq->mutex) && \ !lockdep_is_held(&wq_pool_mutex), \ "RCU, wq->mutex or wq_pool_mutex should be held") From c5140688d19a4579f7b01e6ca4b6e5f5d23d3d4d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: [PATCH 44/54] workqueue: Rename __cancel_work_timer() to __cancel_work_sync() __cancel_work_timer() is used to implement cancel_work_sync() and cancel_delayed_work_sync(), similarly to how __cancel_work() is used to implement cancel_work() and cancel_delayed_work(). i.e. the _timer part of the name is a complete misnomer. The difference from __cancel_work() is the fact that it syncs against work item execution, not whether it handles timers or not. Let's rename it to the less confusing __cancel_work_sync(). No functional change. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 87750e70b638..7e2af79bfa62 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4075,7 +4075,7 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k return autoremove_wake_function(wait, mode, sync, key); } -static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) +static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) { static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); unsigned long flags; @@ -4159,7 +4159,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) */ bool cancel_work_sync(struct work_struct *work) { - return __cancel_work_timer(work, false); + return __cancel_work_sync(work, false); } EXPORT_SYMBOL_GPL(cancel_work_sync); @@ -4264,7 +4264,7 @@ EXPORT_SYMBOL(cancel_delayed_work); */ bool cancel_delayed_work_sync(struct delayed_work *dwork) { - return __cancel_work_timer(&dwork->work, true); + return __cancel_work_sync(&dwork->work, true); } EXPORT_SYMBOL(cancel_delayed_work_sync); From cdc6e4b329bc82676886a758a940b2b6987c2109 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: [PATCH 45/54] workqueue: Reorganize flush and cancel[_sync] functions They are currently a bit disorganized with flush and cancel functions mixed. Reorganize them so that flush functions come first, cancel next and cancel_sync last. This way, we won't have to add prototypes for internal functions for the planned disable/enable support. This is pure code reorganization. No functional changes.
Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 136 ++++++++++++++++++++++----------------------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7e2af79bfa62..962061dca05c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4061,6 +4061,65 @@ bool flush_work(struct work_struct *work) } EXPORT_SYMBOL_GPL(flush_work); +/** + * flush_delayed_work - wait for a dwork to finish executing the last queueing + * @dwork: the delayed work to flush + * + * Delayed timer is cancelled and the pending work is queued for + * immediate execution. Like flush_work(), this function only + * considers the last queueing instance of @dwork. + * + * Return: + * %true if flush_work() waited for the work to finish execution, + * %false if it was already idle. + */ +bool flush_delayed_work(struct delayed_work *dwork) +{ + local_irq_disable(); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, dwork->wq, &dwork->work); + local_irq_enable(); + return flush_work(&dwork->work); +} +EXPORT_SYMBOL(flush_delayed_work); + +/** + * flush_rcu_work - wait for a rwork to finish executing the last queueing + * @rwork: the rcu work to flush + * + * Return: + * %true if flush_rcu_work() waited for the work to finish execution, + * %false if it was already idle. + */ +bool flush_rcu_work(struct rcu_work *rwork) +{ + if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { + rcu_barrier(); + flush_work(&rwork->work); + return true; + } else { + return flush_work(&rwork->work); + } +} +EXPORT_SYMBOL(flush_rcu_work); + +static bool __cancel_work(struct work_struct *work, bool is_dwork) +{ + unsigned long flags; + int ret; + + do { + ret = try_to_grab_pending(work, is_dwork, &flags); + } while (unlikely(ret == -EAGAIN)); + + if (unlikely(ret < 0)) + return false; + + set_work_pool_and_clear_pending(work, get_work_pool_id(work)); + local_irq_restore(flags); + return ret; +} + struct cwt_wait { wait_queue_entry_t wait; struct work_struct *work; @@ -4139,6 +4198,15 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) return ret; } +/* + * See cancel_delayed_work() + */ +bool cancel_work(struct work_struct *work) +{ + return __cancel_work(work, false); +} +EXPORT_SYMBOL(cancel_work); + /** * cancel_work_sync - cancel a work and wait for it to finish * @work: the work to cancel @@ -4163,74 +4231,6 @@ bool cancel_work_sync(struct work_struct *work) } EXPORT_SYMBOL_GPL(cancel_work_sync); -/** - * flush_delayed_work - wait for a dwork to finish executing the last queueing - * @dwork: the delayed work to flush - * - * Delayed timer is cancelled and the pending work is queued for - * immediate execution. Like flush_work(), this function only - * considers the last queueing instance of @dwork. - * - * Return: - * %true if flush_work() waited for the work to finish execution, - * %false if it was already idle. - */ -bool flush_delayed_work(struct delayed_work *dwork) -{ - local_irq_disable(); - if (del_timer_sync(&dwork->timer)) - __queue_work(dwork->cpu, dwork->wq, &dwork->work); - local_irq_enable(); - return flush_work(&dwork->work); -} -EXPORT_SYMBOL(flush_delayed_work); - -/** - * flush_rcu_work - wait for a rwork to finish executing the last queueing - * @rwork: the rcu work to flush - * - * Return: - * %true if flush_rcu_work() waited for the work to finish execution, - * %false if it was already idle. 
- */ -bool flush_rcu_work(struct rcu_work *rwork) -{ - if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { - rcu_barrier(); - flush_work(&rwork->work); - return true; - } else { - return flush_work(&rwork->work); - } -} -EXPORT_SYMBOL(flush_rcu_work); - -static bool __cancel_work(struct work_struct *work, bool is_dwork) -{ - unsigned long flags; - int ret; - - do { - ret = try_to_grab_pending(work, is_dwork, &flags); - } while (unlikely(ret == -EAGAIN)); - - if (unlikely(ret < 0)) - return false; - - set_work_pool_and_clear_pending(work, get_work_pool_id(work)); - local_irq_restore(flags); - return ret; -} - -/* - * See cancel_delayed_work() - */ -bool cancel_work(struct work_struct *work) -{ - return __cancel_work(work, false); -} -EXPORT_SYMBOL(cancel_work); - /** * cancel_delayed_work - cancel a delayed work * @dwork: delayed_work to cancel From c26e2f2e2fcfb73996fa025a0d3b5695017d65b5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: [PATCH 46/54] workqueue: Use variable name irq_flags for saving local irq flags Using the generic term `flags` for irq flags is conventional but can be confusing as there's quite a bit of code dealing with work flags which involves some subtleties. Let's use a more explicit name `irq_flags` for local irq flags. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 76 +++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 962061dca05c..b590d93d054b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2029,7 +2029,7 @@ out_put: * try_to_grab_pending - steal work item from worklist and disable irq * @work: work item to steal * @is_dwork: @work is a delayed_work - * @flags: place to store irq state + * @irq_flags: place to store irq state * * Try to grab PENDING bit of @work. This function can handle @work in any * stable state - idle, on timer or on worklist. @@ -2051,17 +2051,17 @@ out_put: * irqsafe, ensures that we return -EAGAIN for finite short period of time. * * On successful return, >= 0, irq is disabled and the caller is - * responsible for releasing it using local_irq_restore(*@flags). + * responsible for releasing it using local_irq_restore(*@irq_flags). * * This function is safe to call from any context including IRQ handler. 
*/ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - unsigned long *flags) + unsigned long *irq_flags) { struct worker_pool *pool; struct pool_workqueue *pwq; - local_irq_save(*flags); + local_irq_save(*irq_flags); /* try to steal the timer if it exists */ if (is_dwork) { @@ -2136,7 +2136,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, raw_spin_unlock(&pool->lock); fail: rcu_read_unlock(); - local_irq_restore(*flags); + local_irq_restore(*irq_flags); if (work_is_canceling(work)) return -ENOENT; cpu_relax(); @@ -2344,16 +2344,16 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) { bool ret = false; - unsigned long flags; + unsigned long irq_flags; - local_irq_save(flags); + local_irq_save(irq_flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_work(cpu, wq, work); ret = true; } - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } EXPORT_SYMBOL(queue_work_on); @@ -2410,7 +2410,7 @@ static int select_numa_node_cpu(int node) bool queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work) { - unsigned long flags; + unsigned long irq_flags; bool ret = false; /* @@ -2424,7 +2424,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq, */ WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); - local_irq_save(flags); + local_irq_save(irq_flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { int cpu = select_numa_node_cpu(node); @@ -2433,7 +2433,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq, ret = true; } - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } EXPORT_SYMBOL_GPL(queue_work_node); @@ -2503,17 +2503,17 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, { struct work_struct *work = &dwork->work; bool ret = false; - unsigned long flags; + unsigned long irq_flags; /* read the comment in __queue_work() */ - local_irq_save(flags); + local_irq_save(irq_flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_delayed_work(cpu, wq, dwork, delay); ret = true; } - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } EXPORT_SYMBOL(queue_delayed_work_on); @@ -2539,16 +2539,16 @@ EXPORT_SYMBOL(queue_delayed_work_on); bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { - unsigned long flags; + unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(&dwork->work, true, &flags); + ret = try_to_grab_pending(&dwork->work, true, &irq_flags); } while (unlikely(ret == -EAGAIN)); if (likely(ret >= 0)) { __queue_delayed_work(cpu, wq, dwork, delay); - local_irq_restore(flags); + local_irq_restore(irq_flags); } /* -ENOENT from try_to_grab_pending() becomes %true */ @@ -4105,18 +4105,18 @@ EXPORT_SYMBOL(flush_rcu_work); static bool __cancel_work(struct work_struct *work, bool is_dwork) { - unsigned long flags; + unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &flags); + ret = try_to_grab_pending(work, is_dwork, &irq_flags); } while (unlikely(ret == -EAGAIN)); if (unlikely(ret < 0)) return false; set_work_pool_and_clear_pending(work, get_work_pool_id(work)); - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } @@ -4137,11 +4137,11 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) { static 
DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); - unsigned long flags; + unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &flags); + ret = try_to_grab_pending(work, is_dwork, &irq_flags); /* * If someone else is already canceling, wait for it to * finish. flush_work() doesn't work for PREEMPT_NONE @@ -4175,7 +4175,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) /* tell other tasks trying to grab @work to back off */ mark_work_canceling(work); - local_irq_restore(flags); + local_irq_restore(irq_flags); /* * Skip __flush_work() during early boot when we know that @work isn't @@ -5381,15 +5381,15 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) activated = false; for_each_pwq(pwq, wq) { - unsigned long flags; + unsigned long irq_flags; /* can be called during early boot w/ irq disabled */ - raw_spin_lock_irqsave(&pwq->pool->lock, flags); + raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); if (pwq_activate_first_inactive(pwq, true)) { activated = true; kick_pool(pwq->pool); } - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); } } while (activated); } @@ -5762,7 +5762,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested); unsigned int work_busy(struct work_struct *work) { struct worker_pool *pool; - unsigned long flags; + unsigned long irq_flags; unsigned int ret = 0; if (work_pending(work)) @@ -5771,10 +5771,10 @@ unsigned int work_busy(struct work_struct *work) rcu_read_lock(); pool = get_work_pool(work); if (pool) { - raw_spin_lock_irqsave(&pool->lock, flags); + raw_spin_lock_irqsave(&pool->lock, irq_flags); if (find_worker_executing_work(pool, work)) ret |= WORK_BUSY_RUNNING; - raw_spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pool->lock, irq_flags); } rcu_read_unlock(); @@ -6006,7 +6006,7 @@ void show_one_workqueue(struct workqueue_struct *wq) { struct pool_workqueue *pwq; bool idle = true; - unsigned long flags; + unsigned long irq_flags; for_each_pwq(pwq, wq) { if (!pwq_is_empty(pwq)) { @@ -6020,7 +6020,7 @@ void show_one_workqueue(struct workqueue_struct *wq) pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); for_each_pwq(pwq, wq) { - raw_spin_lock_irqsave(&pwq->pool->lock, flags); + raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); if (!pwq_is_empty(pwq)) { /* * Defer printing to avoid deadlocks in console @@ -6031,7 +6031,7 @@ void show_one_workqueue(struct workqueue_struct *wq) show_pwq(pwq); printk_deferred_exit(); } - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); /* * We could be printing a lot from atomic context, e.g. * sysrq-t -> show_all_workqueues(). Avoid triggering @@ -6050,10 +6050,10 @@ static void show_one_worker_pool(struct worker_pool *pool) { struct worker *worker; bool first = true; - unsigned long flags; + unsigned long irq_flags; unsigned long hung = 0; - raw_spin_lock_irqsave(&pool->lock, flags); + raw_spin_lock_irqsave(&pool->lock, irq_flags); if (pool->nr_workers == pool->nr_idle) goto next_pool; @@ -6081,7 +6081,7 @@ static void show_one_worker_pool(struct worker_pool *pool) pr_cont("\n"); printk_deferred_exit(); next_pool: - raw_spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pool->lock, irq_flags); /* * We could be printing a lot from atomic context, e.g. * sysrq-t -> show_all_workqueues(). 
Avoid triggering @@ -7212,10 +7212,10 @@ static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; static void show_cpu_pool_hog(struct worker_pool *pool) { struct worker *worker; - unsigned long flags; + unsigned long irq_flags; int bkt; - raw_spin_lock_irqsave(&pool->lock, flags); + raw_spin_lock_irqsave(&pool->lock, irq_flags); hash_for_each(pool->busy_hash, bkt, worker, hentry) { if (task_is_running(worker->task)) { @@ -7233,7 +7233,7 @@ static void show_cpu_pool_hog(struct worker_pool *pool) } } - raw_spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pool->lock, irq_flags); } static void show_cpu_pools_hogs(void) From c5f5b9422a49e9bc1c2f992135592ed921ac18e5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: [PATCH 47/54] workqueue: Introduce work_cancel_flags The cancel path used bool @is_dwork to distinguish canceling a regular work and a delayed one. The planned disable/enable support will need passing around another flag in the code path. As passing them around with bools will be confusing, let's introduce named flags to pass around in the cancel path. WORK_CANCEL_DELAYED replaces @is_dwork. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b590d93d054b..317c85f051b0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -96,6 +96,10 @@ enum worker_flags { WORKER_UNBOUND | WORKER_REBOUND, }; +enum work_cancel_flags { + WORK_CANCEL_DELAYED = 1 << 0, /* canceling a delayed_work */ +}; + enum wq_internal_consts { NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ @@ -2028,7 +2032,7 @@ out_put: /** * try_to_grab_pending - steal work item from worklist and disable irq * @work: work item to steal - * @is_dwork: @work is a delayed_work + * @cflags: %WORK_CANCEL_ flags * @irq_flags: place to store irq state * * Try to grab PENDING bit of @work. This function can handle @work in any @@ -2055,7 +2059,7 @@ out_put: * * This function is safe to call from any context including IRQ handler. 
*/ -static int try_to_grab_pending(struct work_struct *work, bool is_dwork, +static int try_to_grab_pending(struct work_struct *work, u32 cflags, unsigned long *irq_flags) { struct worker_pool *pool; @@ -2064,7 +2068,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, local_irq_save(*irq_flags); /* try to steal the timer if it exists */ - if (is_dwork) { + if (cflags & WORK_CANCEL_DELAYED) { struct delayed_work *dwork = to_delayed_work(work); /* @@ -2543,7 +2547,8 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, int ret; do { - ret = try_to_grab_pending(&dwork->work, true, &irq_flags); + ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, + &irq_flags); } while (unlikely(ret == -EAGAIN)); if (likely(ret >= 0)) { @@ -4103,13 +4108,13 @@ bool flush_rcu_work(struct rcu_work *rwork) } EXPORT_SYMBOL(flush_rcu_work); -static bool __cancel_work(struct work_struct *work, bool is_dwork) +static bool __cancel_work(struct work_struct *work, u32 cflags) { unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &irq_flags); + ret = try_to_grab_pending(work, cflags, &irq_flags); } while (unlikely(ret == -EAGAIN)); if (unlikely(ret < 0)) @@ -4134,14 +4139,14 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k return autoremove_wake_function(wait, mode, sync, key); } -static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) +static bool __cancel_work_sync(struct work_struct *work, u32 cflags) { static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &irq_flags); + ret = try_to_grab_pending(work, cflags, &irq_flags); /* * If someone else is already canceling, wait for it to * finish. flush_work() doesn't work for PREEMPT_NONE @@ -4203,7 +4208,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) */ bool cancel_work(struct work_struct *work) { - return __cancel_work(work, false); + return __cancel_work(work, 0); } EXPORT_SYMBOL(cancel_work); @@ -4227,7 +4232,7 @@ EXPORT_SYMBOL(cancel_work); */ bool cancel_work_sync(struct work_struct *work) { - return __cancel_work_sync(work, false); + return __cancel_work_sync(work, 0); } EXPORT_SYMBOL_GPL(cancel_work_sync); @@ -4249,7 +4254,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); */ bool cancel_delayed_work(struct delayed_work *dwork) { - return __cancel_work(&dwork->work, true); + return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED); } EXPORT_SYMBOL(cancel_delayed_work); @@ -4264,7 +4269,7 @@ EXPORT_SYMBOL(cancel_delayed_work); */ bool cancel_delayed_work_sync(struct delayed_work *dwork) { - return __cancel_work_sync(&dwork->work, true); + return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED); } EXPORT_SYMBOL(cancel_delayed_work_sync); From e9a8e01f9b133c145dd125021ec47c006d108af4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: [PATCH 48/54] workqueue: Clean up enum work_bits and related constants The bits of work->data are used for a few different purposes. How the bits are used is determined by enum work_bits. The planned disable/enable support will add another use, so let's clean it up a bit in preparation. - Let WORK_STRUCT_*_BIT's values be determined by enum definition order. - Deliminate different bit sections the same way using SHIFT and BITS values. - Rename __WORK_OFFQ_CANCELING to WORK_OFFQ_CANCELING_BIT for consistency. 
- Introduce WORK_STRUCT_PWQ_SHIFT and replace WORK_STRUCT_FLAG_MASK and WORK_STRUCT_WQ_DATA_MASK with WQ_STRUCT_PWQ_MASK for clarity. - Improve documentation. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- include/linux/workqueue.h | 60 +++++++++++++++++++++------------------ kernel/workqueue.c | 8 +++--- 2 files changed, 37 insertions(+), 31 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1565bab9edc8..0ad534fe6673 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -24,41 +24,49 @@ enum work_bits { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ - WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */ - WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ - WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ + WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */ + WORK_STRUCT_PWQ_BIT, /* data points to pwq */ + WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK - WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ - WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ -#else - WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ + WORK_STRUCT_STATIC_BIT, /* static initializer (debugobjects) */ #endif + WORK_STRUCT_FLAG_BITS, + /* color for workqueue flushing */ + WORK_STRUCT_COLOR_SHIFT = WORK_STRUCT_FLAG_BITS, WORK_STRUCT_COLOR_BITS = 4, /* - * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. - * This makes pwqs aligned to 256 bytes and allows 16 workqueue - * flush colors. + * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/ + * debugobjects turned off. This makes pwqs aligned to 256 bytes (512 + * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors. + * + * MSB + * [ pwq pointer ] [ flush color ] [ STRUCT flags ] + * 4 bits 4 or 5 bits */ - WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + - WORK_STRUCT_COLOR_BITS, - - /* data contains off-queue information when !WORK_STRUCT_PWQ */ - WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, - - __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, + WORK_STRUCT_PWQ_SHIFT = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS, /* - * When a work item is off queue, its high bits point to the last - * pool it was on. Cap at 31 bits and use the highest number to - * indicate that no pool is associated. + * data contains off-queue information when !WORK_STRUCT_PWQ. + * + * MSB + * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ] + * 1 bit 4 or 5 bits */ - WORK_OFFQ_FLAG_BITS = 1, - WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS, + WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT, + WORK_OFFQ_FLAG_END, + WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT, + + /* + * When a work item is off queue, the high bits encode off-queue flags + * and the last pool it was on. Cap pool ID to 31 bits and use the + * highest number to indicate that no pool is associated. + */ + WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS, WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, - }; enum work_flags { @@ -88,12 +96,10 @@ enum wq_misc_consts { }; /* Convenience constants - of type 'unsigned long', not 'enum'! 
*/ -#define WORK_OFFQ_CANCELING (1ul << __WORK_OFFQ_CANCELING) +#define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT) #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) - -#define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1) -#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) +#define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1)) #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 317c85f051b0..7c6915e23c5c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -247,7 +247,7 @@ enum pool_workqueue_stats { }; /* - * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS + * The per-pool workqueue. While queued, bits below WORK_PWQ_SHIFT * of work_struct->data are used for flags and the remaining high bits * point to the pwq; thus, pwqs need to be aligned at two's power of the * number of flag bits. @@ -294,7 +294,7 @@ struct pool_workqueue { */ struct kthread_work release_work; struct rcu_head rcu; -} __aligned(1 << WORK_STRUCT_FLAG_BITS); +} __aligned(1 << WORK_STRUCT_PWQ_SHIFT); /* * Structure used to wait for workqueue flush. @@ -843,7 +843,7 @@ static void clear_work_data(struct work_struct *work) static inline struct pool_workqueue *work_struct_pwq(unsigned long data) { - return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK); + return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK); } static struct pool_workqueue *get_work_pwq(struct work_struct *work) @@ -4851,7 +4851,7 @@ static void pwq_release_workfn(struct kthread_work *work) static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) { - BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); + BUG_ON((unsigned long)pwq & ~WORK_STRUCT_PWQ_MASK); memset(pwq, 0, sizeof(*pwq)); From 978b8409eab15aa733ae3a79c9b5158d34cd3fb7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: [PATCH 49/54] workqueue: Factor out work_grab_pending() from __cancel_work_sync() The planned disable/enable support will need the same logic. Let's factor it out. No functional changes. v2: Update function comment to include @irq_flags. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 132 +++++++++++++++++++++++++++------------------ 1 file changed, 80 insertions(+), 52 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7c6915e23c5c..3b606eb5d6e3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -484,6 +484,12 @@ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; /* I: attributes used when instantiating ordered pools on demand */ static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; +/* + * Used to synchronize multiple cancel_sync attempts on the same work item. See + * work_grab_pending() and __cancel_work_sync(). + */ +static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq); + /* * I: kthread_worker to release pwq's. pwq release needs to be bounced to a * process context while holding a pool lock. 
Bounce to a dedicated kthread @@ -2147,6 +2153,75 @@ fail: return -EAGAIN; } +struct cwt_wait { + wait_queue_entry_t wait; + struct work_struct *work; +}; + +static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) +{ + struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); + + if (cwait->work != key) + return 0; + return autoremove_wake_function(wait, mode, sync, key); +} + +/** + * work_grab_pending - steal work item from worklist and disable irq + * @work: work item to steal + * @cflags: %WORK_CANCEL_ flags + * @irq_flags: place to store IRQ state + * + * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer + * or on worklist. + * + * Must be called in process context. IRQ is disabled on return with IRQ state + * stored in *@irq_flags. The caller is responsible for re-enabling it using + * local_irq_restore(). + * + * Returns %true if @work was pending. %false if idle. + */ +static bool work_grab_pending(struct work_struct *work, u32 cflags, + unsigned long *irq_flags) +{ + struct cwt_wait cwait; + int ret; + + might_sleep(); +repeat: + ret = try_to_grab_pending(work, cflags, irq_flags); + if (likely(ret >= 0)) + return ret; + if (ret != -ENOENT) + goto repeat; + + /* + * Someone is already canceling. Wait for it to finish. flush_work() + * doesn't work for PREEMPT_NONE because we may get woken up between + * @work's completion and the other canceling task resuming and clearing + * CANCELING - flush_work() will return false immediately as @work is no + * longer busy, try_to_grab_pending() will return -ENOENT as @work is + * still being canceled and the other canceling task won't be able to + * clear CANCELING as we're hogging the CPU. + * + * Let's wait for completion using a waitqueue. As this may lead to the + * thundering herd problem, use a custom wake function which matches + * @work along with exclusive wait and wakeup. + */ + init_wait(&cwait.wait); + cwait.wait.func = cwt_wakefn; + cwait.work = work; + + prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait, + TASK_UNINTERRUPTIBLE); + if (work_is_canceling(work)) + schedule(); + finish_wait(&wq_cancel_waitq, &cwait.wait); + + goto repeat; +} + /** * insert_work - insert a work into a pool * @pwq: pwq @work belongs to @@ -4125,60 +4200,13 @@ static bool __cancel_work(struct work_struct *work, u32 cflags) return ret; } -struct cwt_wait { - wait_queue_entry_t wait; - struct work_struct *work; -}; - -static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) -{ - struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); - - if (cwait->work != key) - return 0; - return autoremove_wake_function(wait, mode, sync, key); -} - static bool __cancel_work_sync(struct work_struct *work, u32 cflags) { - static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); unsigned long irq_flags; - int ret; + bool ret; - do { - ret = try_to_grab_pending(work, cflags, &irq_flags); - /* - * If someone else is already canceling, wait for it to - * finish. flush_work() doesn't work for PREEMPT_NONE - * because we may get scheduled between @work's completion - * and the other canceling task resuming and clearing - * CANCELING - flush_work() will return false immediately - * as @work is no longer busy, try_to_grab_pending() will - * return -ENOENT as @work is still being canceled and the - * other canceling task won't be able to clear CANCELING as - * we're hogging the CPU. - * - * Let's wait for completion using a waitqueue. 
As this - * may lead to the thundering herd problem, use a custom - * wake function which matches @work along with exclusive - * wait and wakeup. - */ - if (unlikely(ret == -ENOENT)) { - struct cwt_wait cwait; - - init_wait(&cwait.wait); - cwait.wait.func = cwt_wakefn; - cwait.work = work; - - prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, - TASK_UNINTERRUPTIBLE); - if (work_is_canceling(work)) - schedule(); - finish_wait(&cancel_waitq, &cwait.wait); - } - } while (unlikely(ret < 0)); - - /* tell other tasks trying to grab @work to back off */ + /* claim @work and tell other tasks trying to grab @work to back off */ + ret = work_grab_pending(work, cflags, &irq_flags); mark_work_canceling(work); local_irq_restore(irq_flags); @@ -4197,8 +4225,8 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) * visible there. */ smp_mb(); - if (waitqueue_active(&cancel_waitq)) - __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); + if (waitqueue_active(&wq_cancel_waitq)) + __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work); return ret; } From afe928c1dc611bec155d834020e0631e026aeb8a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: [PATCH 50/54] workqueue: Remove clear_work_data() clear_work_data() is only used in one place and immediately followed by smp_mb(), making it equivalent to set_work_pool_and_clear_pending() w/ WORK_OFFQ_POOL_NONE for @pool_id. Drop it. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 3b606eb5d6e3..4f9c85f7c57a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -763,10 +763,9 @@ static int work_next_color(int color) * contain the pointer to the queued pwq. Once execution starts, the flag * is cleared and the high bits contain OFFQ flags and pool ID. * - * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() - * and clear_work_data() can be used to set the pwq, pool or clear - * work->data. These functions should only be called while the work is - * owned - ie. while the PENDING bit is set. + * set_work_pwq(), set_work_pool_and_clear_pending() and mark_work_canceling() + * can be used to set the pwq, pool or clear work->data. These functions should + * only be called while the work is owned - ie. while the PENDING bit is set. * * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq * corresponding to a work. Pool is available once the work has been @@ -841,12 +840,6 @@ static void set_work_pool_and_clear_pending(struct work_struct *work, smp_mb(); } -static void clear_work_data(struct work_struct *work) -{ - smp_wmb(); /* see set_work_pool_and_clear_pending() */ - set_work_data(work, WORK_STRUCT_NO_POOL, 0); -} - static inline struct pool_workqueue *work_struct_pwq(unsigned long data) { return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK); @@ -4217,14 +4210,13 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) if (wq_online) __flush_work(work, true); - clear_work_data(work); - /* - * Paired with prepare_to_wait() above so that either - * waitqueue_active() is visible here or !work_is_canceling() is - * visible there. + * smp_mb() at the end of set_work_pool_and_clear_pending() is paired + * with prepare_to_wait() above so that either waitqueue_active() is + * visible here or !work_is_canceling() is visible there. 
*/ - smp_mb(); + set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE); + if (waitqueue_active(&wq_cancel_waitq)) __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work); From bccdc1faafaf32e00d6e4dddca1ded64e3272189 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:15 -1000 Subject: [PATCH 51/54] workqueue: Make @flags handling consistent across set_work_data() and friends - set_work_data() takes a separate @flags argument but just ORs it to @data. This is more confusing than helpful. Just take @data. - Use the name @flags consistently and add the parameter to set_work_pool_and_{keep|clear}_pending(). This will be used by the planned disable/enable support. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4f9c85f7c57a..65a27be81452 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -777,29 +777,28 @@ static int work_next_color(int color) * but stay off timer and worklist for arbitrarily long and nobody should * try to steal the PENDING bit. */ -static inline void set_work_data(struct work_struct *work, unsigned long data, - unsigned long flags) +static inline void set_work_data(struct work_struct *work, unsigned long data) { WARN_ON_ONCE(!work_pending(work)); - atomic_long_set(&work->data, data | flags | work_static(work)); + atomic_long_set(&work->data, data | work_static(work)); } static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, - unsigned long extra_flags) + unsigned long flags) { - set_work_data(work, (unsigned long)pwq, - WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); + set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING | + WORK_STRUCT_PWQ | flags); } static void set_work_pool_and_keep_pending(struct work_struct *work, - int pool_id) + int pool_id, unsigned long flags) { - set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, - WORK_STRUCT_PENDING); + set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | + WORK_STRUCT_PENDING | flags); } static void set_work_pool_and_clear_pending(struct work_struct *work, - int pool_id) + int pool_id, unsigned long flags) { /* * The following wmb is paired with the implied mb in @@ -808,7 +807,8 @@ static void set_work_pool_and_clear_pending(struct work_struct *work, * owner. */ smp_wmb(); - set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); + set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | + flags); /* * The following mb guarantees that previous clear of a PENDING bit * will not be reordered with any speculative LOADS or STORES from @@ -909,7 +909,7 @@ static void mark_work_canceling(struct work_struct *work) unsigned long pool_id = get_work_pool_id(work); pool_id <<= WORK_OFFQ_POOL_SHIFT; - set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); + set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING); } static bool work_is_canceling(struct work_struct *work) @@ -2127,7 +2127,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags, * this destroys work->data needed by the next step, stash it. 
*/ work_data = *work_data_bits(work); - set_work_pool_and_keep_pending(work, pool->id); + set_work_pool_and_keep_pending(work, pool->id, 0); /* must be the last step, see the function comment */ pwq_dec_nr_in_flight(pwq, work_data); @@ -3205,7 +3205,7 @@ __acquires(&pool->lock) * PENDING and queued state changes happen together while IRQ is * disabled. */ - set_work_pool_and_clear_pending(work, pool->id); + set_work_pool_and_clear_pending(work, pool->id, 0); pwq->stats[PWQ_STAT_STARTED]++; raw_spin_unlock_irq(&pool->lock); @@ -4188,7 +4188,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags) if (unlikely(ret < 0)) return false; - set_work_pool_and_clear_pending(work, get_work_pool_id(work)); + set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0); local_irq_restore(irq_flags); return ret; } @@ -4215,7 +4215,7 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) * with prepare_to_wait() above so that either waitqueue_active() is * visible here or !work_is_canceling() is visible there. */ - set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE); + set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0); if (waitqueue_active(&wq_cancel_waitq)) __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
From ccdec92198df0c91f45a68f971771b6b0c1ba02d Mon Sep 17 00:00:00 2001 From: Xuewen Yan Date: Thu, 22 Feb 2024 15:28:08 +0800 Subject: [PATCH 52/54] workqueue: Control intensive warning threshold through cmdline When CONFIG_WQ_CPU_INTENSIVE_REPORT is set, the kernel reports the work functions which violate the intensive_threshold_us repeatedly. Currently, the warning is only triggered once the violation count reaches 4 and is a power of 2. However, even a single overly long work execution can delay other work items for a long time and cause problems. To let users control the warning threshold freely, add a boot argument that determines when the warning is printed, while keeping the exponential backoff to avoid excessive reporting. By default, the warning threshold is 4. tj: Updated kernel-parameters.txt description. Signed-off-by: Xuewen Yan Signed-off-by: Tejun Heo --- Documentation/admin-guide/kernel-parameters.txt | 9 +++++++++ kernel/workqueue.c | 14 +++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 6ee0f9a5da70..c0e9abbf1d2f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -7219,6 +7219,15 @@ threshold repeatedly. They are likely good candidates for using WQ_UNBOUND workqueues instead. + workqueue.cpu_intensive_warning_thresh= + If CONFIG_WQ_CPU_INTENSIVE_REPORT is set, the kernel + will report the work functions which violate the + intensive_threshold_us repeatedly. In order to prevent + spurious warnings, start printing only after a work + function has violated this threshold number of times. + + The default is 4 times. 0 disables the warning.
+ workqueue.power_efficient Per-cpu workqueues are generally preferred because they show better performance thanks to cache diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 65a27be81452..38783e3a60bb 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -409,6 +409,10 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { */ static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX; module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644); +#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT +static unsigned int wq_cpu_intensive_warning_thresh = 4; +module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644); +#endif /* see the comment above the definition of WQ_POWER_EFFICIENT */ static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); @@ -1327,11 +1331,13 @@ restart: u64 cnt; /* - * Start reporting from the fourth time and back off + * Start reporting from the warning_thresh and back off * exponentially. */ cnt = atomic64_inc_return_relaxed(&ent->cnt); - if (cnt >= 4 && is_power_of_2(cnt)) + if (wq_cpu_intensive_warning_thresh && + cnt >= wq_cpu_intensive_warning_thresh && + is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh)) printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n", ent->func, wq_cpu_intensive_thresh_us, atomic64_read(&ent->cnt)); @@ -1360,10 +1366,12 @@ restart: ent = &wci_ents[wci_nr_ents++]; ent->func = func; - atomic64_set(&ent->cnt, 1); + atomic64_set(&ent->cnt, 0); hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func); raw_spin_unlock(&wci_lock); + + goto restart; } #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
From 60b2ebf48526567b53e0188dbd1a4df8e646bcc1 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Tue, 27 Feb 2024 19:10:37 +0000 Subject: [PATCH 53/54] workqueue: Introduce from_work() helper for cleaner callback declarations To streamline the transition from tasklets to workqueues, a new helper, from_work(), is introduced. This helper, inspired by existing from_*() patterns, utilizes container_of() and eliminates the redundancy of declaring variable types, leading to more concise and readable code. The modified code snippet demonstrates the enhanced clarity achieved with from_work(): void callback(struct work_struct *w) { - struct some_data_structure *local = container_of(w, struct some_data_structure, work); + struct some_data_structure *local = from_work(local, w, work); This change aims to facilitate a smoother transition and uphold code quality standards.
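For illustration, a slightly fuller sketch of a converted callback; only the from_work() line relies on this patch, while the foo_completion structure and function names are made up for the example:

	#include <linux/workqueue.h>

	struct foo_completion {
		struct work_struct done_work;
		int status;
	};

	static void foo_done_workfn(struct work_struct *w)
	{
		/* expands to container_of(w, struct foo_completion, done_work) */
		struct foo_completion *fc = from_work(fc, w, done_work);

		fc->status = 0;
	}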
Based on: git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git disable_work-v3 Signed-off-by: Allen Pais Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0ad534fe6673..64a60b9232d3 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -522,6 +522,9 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); #define create_singlethread_workqueue(name) \ alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) +#define from_work(var, callback_work, work_fieldname) \ + container_of(callback_work, typeof(*var), work_fieldname) + extern void destroy_workqueue(struct workqueue_struct *wq); struct workqueue_attrs *alloc_workqueue_attrs(void); From 1acd92d95fa24edca8f0292b21870025da93e24f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 26 Feb 2024 15:38:55 -1000 Subject: [PATCH 54/54] workqueue: Drain BH work items on hot-unplugged CPUs Boqun pointed out that workqueues aren't handling BH work items on offlined CPUs. Unlike tasklet which transfers out the pending tasks from CPUHP_SOFTIRQ_DEAD, BH workqueue would just leave them pending which is problematic. Note that this behavior is specific to BH workqueues as the non-BH per-CPU workers just become unbound when the CPU goes offline. This patch fixes the issue by draining the pending BH work items from an offlined CPU from CPUHP_SOFTIRQ_DEAD. Because work items carry more context, it's not as easy to transfer the pending work items from one pool to another. Instead, run BH work items which execute the offlined pools on an online CPU. Note that this assumes that no further BH work items will be queued on the offlined CPUs. This assumption is shared with tasklet and should be fine for conversions. However, this issue also exists for per-CPU workqueues which will just keep executing work items queued after CPU offline on unbound workers and workqueue should reject per-CPU and BH work items queued on offline CPUs. This will be addressed separately later. Signed-off-by: Tejun Heo Reported-and-reviewed-by: Boqun Feng Link: http://lkml.kernel.org/r/Zdvw0HdSXcU3JZ4g@boqun-archlinux --- include/linux/workqueue.h | 1 + kernel/softirq.c | 2 + kernel/workqueue.c | 91 +++++++++++++++++++++++++++++++++++++-- 3 files changed, 91 insertions(+), 3 deletions(-) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 64a60b9232d3..158784dd189a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -458,6 +458,7 @@ extern struct workqueue_struct *system_bh_wq; extern struct workqueue_struct *system_bh_highpri_wq; void workqueue_softirq_action(bool highpri); +void workqueue_softirq_dead(unsigned int cpu); /** * alloc_workqueue - allocate a workqueue diff --git a/kernel/softirq.c b/kernel/softirq.c index 547d282548a8..b315b21fb28c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -932,6 +932,8 @@ static void run_ksoftirqd(unsigned int cpu) #ifdef CONFIG_HOTPLUG_CPU static int takeover_tasklets(unsigned int cpu) { + workqueue_softirq_dead(cpu); + /* CPU is dead, so no lock needed. 
*/ local_irq_disable(); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 38783e3a60bb..a60eb65955e7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -81,6 +81,7 @@ enum worker_pool_flags { POOL_BH = 1 << 0, /* is a BH pool */ POOL_MANAGER_ACTIVE = 1 << 1, /* being managed */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ + POOL_BH_DRAINING = 1 << 3, /* draining after CPU offline */ }; enum worker_flags { @@ -1218,7 +1219,9 @@ static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) static void kick_bh_pool(struct worker_pool *pool) { #ifdef CONFIG_SMP - if (unlikely(pool->cpu != smp_processor_id())) { + /* see drain_dead_softirq_workfn() for BH_DRAINING */ + if (unlikely(pool->cpu != smp_processor_id() && + !(pool->flags & POOL_BH_DRAINING))) { irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); return; } @@ -3155,6 +3158,7 @@ __acquires(&pool->lock) struct worker_pool *pool = worker->pool; unsigned long work_data; int lockdep_start_depth, rcu_start_depth; + bool bh_draining = pool->flags & POOL_BH_DRAINING; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -3220,7 +3224,9 @@ __acquires(&pool->lock) rcu_start_depth = rcu_preempt_depth(); lockdep_start_depth = lockdep_depth(current); - lock_map_acquire(&pwq->wq->lockdep_map); + /* see drain_dead_softirq_workfn() */ + if (!bh_draining) + lock_map_acquire(&pwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); /* * Strictly speaking we should mark the invariant state without holding @@ -3253,7 +3259,8 @@ __acquires(&pool->lock) trace_workqueue_execute_end(work, worker->current_func); pwq->stats[PWQ_STAT_COMPLETED]++; lock_map_release(&lockdep_map); - lock_map_release(&pwq->wq->lockdep_map); + if (!bh_draining) + lock_map_release(&pwq->wq->lockdep_map); if (unlikely((worker->task && in_atomic()) || lockdep_depth(current) != lockdep_start_depth || @@ -3615,6 +3622,84 @@ void workqueue_softirq_action(bool highpri) bh_worker(list_first_entry(&pool->workers, struct worker, node)); } +struct wq_drain_dead_softirq_work { + struct work_struct work; + struct worker_pool *pool; + struct completion done; +}; + +static void drain_dead_softirq_workfn(struct work_struct *work) +{ + struct wq_drain_dead_softirq_work *dead_work = + container_of(work, struct wq_drain_dead_softirq_work, work); + struct worker_pool *pool = dead_work->pool; + bool repeat; + + /* + * @pool's CPU is dead and we want to execute its still pending work + * items from this BH work item which is running on a different CPU. As + * its CPU is dead, @pool can't be kicked and, as work execution path + * will be nested, a lockdep annotation needs to be suppressed. Mark + * @pool with %POOL_BH_DRAINING for the special treatments. + */ + raw_spin_lock_irq(&pool->lock); + pool->flags |= POOL_BH_DRAINING; + raw_spin_unlock_irq(&pool->lock); + + bh_worker(list_first_entry(&pool->workers, struct worker, node)); + + raw_spin_lock_irq(&pool->lock); + pool->flags &= ~POOL_BH_DRAINING; + repeat = need_more_worker(pool); + raw_spin_unlock_irq(&pool->lock); + + /* + * bh_worker() might hit consecutive execution limit and bail. If there + * still are pending work items, reschedule self and return so that we + * don't hog this CPU's BH. + */ + if (repeat) { + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + queue_work(system_bh_highpri_wq, work); + else + queue_work(system_bh_wq, work); + } else { + complete(&dead_work->done); + } +} + +/* + * @cpu is dead. Drain the remaining BH work items on the current CPU. 
It's + * possible to allocate dead_work per CPU and avoid flushing. However, then we + * have to worry about draining overlapping with CPU coming back online or + * nesting (one CPU's dead_work queued on another CPU which is also dead and so + * on). Let's keep it simple and drain them synchronously. These are BH work + * items which shouldn't be requeued on the same pool. Shouldn't take long. + */ +void workqueue_softirq_dead(unsigned int cpu) +{ + int i; + + for (i = 0; i < NR_STD_WORKER_POOLS; i++) { + struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i]; + struct wq_drain_dead_softirq_work dead_work; + + if (!need_more_worker(pool)) + continue; + + INIT_WORK(&dead_work.work, drain_dead_softirq_workfn); + dead_work.pool = pool; + init_completion(&dead_work.done); + + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + queue_work(system_bh_highpri_wq, &dead_work.work); + else + queue_work(system_bh_wq, &dead_work.work); + + wait_for_completion(&dead_work.done); + } +} + /** * check_flush_dependency - check for flush dependency sanity * @target_wq: workqueue being flushed