workqueue: Introduce work_cancel_flags

The cancel path used the bool @is_dwork to distinguish between canceling a
regular work item and a delayed one. The planned disable/enable support will
need to pass another flag through the same code path. As passing multiple
bools around would be confusing, let's introduce named flags to pass around
in the cancel path.

WORK_CANCEL_DELAYED replaces @is_dwork. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>

@@ -96,6 +96,10 @@ enum worker_flags {
 	WORKER_UNBOUND | WORKER_REBOUND,
 };
 
+enum work_cancel_flags {
+	WORK_CANCEL_DELAYED	= 1 << 0,	/* canceling a delayed_work */
+};
+
 enum wq_internal_consts {
 	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */
@@ -2028,7 +2032,7 @@ out_put:
 /**
  * try_to_grab_pending - steal work item from worklist and disable irq
  * @work: work item to steal
- * @is_dwork: @work is a delayed_work
+ * @cflags: %WORK_CANCEL_ flags
  * @irq_flags: place to store irq state
  *
  * Try to grab PENDING bit of @work. This function can handle @work in any
@@ -2055,7 +2059,7 @@ out_put:
  *
  * This function is safe to call from any context including IRQ handler.
  */
-static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 			       unsigned long *irq_flags)
 {
 	struct worker_pool *pool;
@@ -2064,7 +2068,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 	local_irq_save(*irq_flags);
 
 	/* try to steal the timer if it exists */
-	if (is_dwork) {
+	if (cflags & WORK_CANCEL_DELAYED) {
 		struct delayed_work *dwork = to_delayed_work(work);
 
 		/*
@@ -2543,7 +2547,8 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(&dwork->work, true, &irq_flags);
+		ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED,
+					  &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (likely(ret >= 0)) {
@@ -4103,13 +4108,13 @@ bool flush_rcu_work(struct rcu_work *rwork)
 }
 EXPORT_SYMBOL(flush_rcu_work);
 
-static bool __cancel_work(struct work_struct *work, bool is_dwork)
+static bool __cancel_work(struct work_struct *work, u32 cflags)
 {
 	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
+		ret = try_to_grab_pending(work, cflags, &irq_flags);
 	} while (unlikely(ret == -EAGAIN));
 
 	if (unlikely(ret < 0))
@@ -4134,14 +4139,14 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 	return autoremove_wake_function(wait, mode, sync, key);
 }
 
-static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
+static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 {
 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long irq_flags;
 	int ret;
 
 	do {
-		ret = try_to_grab_pending(work, is_dwork, &irq_flags);
+		ret = try_to_grab_pending(work, cflags, &irq_flags);
 		/*
 		 * If someone else is already canceling, wait for it to
 		 * finish. flush_work() doesn't work for PREEMPT_NONE
@@ -4203,7 +4208,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
  */
 bool cancel_work(struct work_struct *work)
 {
-	return __cancel_work(work, false);
+	return __cancel_work(work, 0);
 }
 EXPORT_SYMBOL(cancel_work);
 
@@ -4227,7 +4232,7 @@ EXPORT_SYMBOL(cancel_work);
  */
 bool cancel_work_sync(struct work_struct *work)
 {
-	return __cancel_work_sync(work, false);
+	return __cancel_work_sync(work, 0);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
@@ -4249,7 +4254,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  */
 bool cancel_delayed_work(struct delayed_work *dwork)
 {
-	return __cancel_work(&dwork->work, true);
+	return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED);
 }
 EXPORT_SYMBOL(cancel_delayed_work);
 
@@ -4264,7 +4269,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-	return __cancel_work_sync(&dwork->work, true);
+	return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED);
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);