Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (33 commits)
  usb: don't use flush_scheduled_work()
  speedtch: don't abuse struct delayed_work
  media/video: don't use flush_scheduled_work()
  media/video: explicitly flush request_module work
  ioc4: use static work_struct for ioc4_load_modules()
  init: don't call flush_scheduled_work() from do_initcalls()
  s390: don't use flush_scheduled_work()
  rtc: don't use flush_scheduled_work()
  mmc: update workqueue usages
  mfd: update workqueue usages
  dvb: don't use flush_scheduled_work()
  leds-wm8350: don't use flush_scheduled_work()
  mISDN: don't use flush_scheduled_work()
  macintosh/ams: don't use flush_scheduled_work()
  vmwgfx: don't use flush_scheduled_work()
  tpm: don't use flush_scheduled_work()
  sonypi: don't use flush_scheduled_work()
  hvsi: don't use flush_scheduled_work()
  xen: don't use flush_scheduled_work()
  gdrom: don't use flush_scheduled_work()
  ...

Fixed up trivial conflict in drivers/media/video/bt8xx/bttv-input.c
as per Tejun.
Linus Torvalds 2011-01-07 16:58:04 -08:00
commit 23d69b09b7
80 changed files with 289 additions and 156 deletions
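
Most of the conversions in this merge follow a single pattern: instead of calling flush_scheduled_work() on teardown, which waits for everything on the shared system workqueue (including unrelated work) and can deadlock against locks held by the caller, each driver flushes or cancels exactly the work items it owns. A minimal sketch of that conversion, using a hypothetical driver with one work_struct and one self-rearming delayed_work (foo_dev, irq_work and poll_work are illustrative names, not taken from any driver in the series):

#include <linux/workqueue.h>

struct foo_dev {
	struct work_struct irq_work;	/* queued from the interrupt handler */
	struct delayed_work poll_work;	/* rearms itself for periodic polling */
};

static void foo_teardown(struct foo_dev *foo)
{
	/* old: flush_scheduled_work();  waited on the whole system workqueue */

	/* new: wait only for this driver's own items */
	flush_work_sync(&foo->irq_work);
	cancel_delayed_work_sync(&foo->poll_work);
}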


@ -566,3 +566,13 @@ Why: This field is deprecated. I2C device drivers shouldn't change their
Who: Jean Delvare <khali@linux-fr.org>
----------------------------
What: cancel_rearming_delayed_work[queue]()
When: 2.6.39
Why: The functions have been superseded by cancel_delayed_work_sync()
quite some time ago. The conversion is trivial and there is no
in-kernel user left.
Who: Tejun Heo <tj@kernel.org>
----------------------------
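
As the entry says, the conversion is mechanical; a sketch of the substitution, assuming a delayed_work embedded in some driver state (dev and wq are placeholder names):

	/* old, both deprecated forms */
	cancel_rearming_delayed_work(&dev->poll_work);
	cancel_rearming_delayed_workqueue(wq, &dev->poll_work);

	/* new: one call, no workqueue argument needed */
	cancel_delayed_work_sync(&dev->poll_work);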


@ -579,7 +579,8 @@ static int sharpsl_ac_check(void)
static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
{
sharpsl_pm.flags |= SHARPSL_SUSPENDED;
flush_scheduled_work();
flush_delayed_work_sync(&toggle_charger);
flush_delayed_work_sync(&sharpsl_bat);
if (sharpsl_pm.charge_mode == CHRG_ON)
sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;


@ -107,7 +107,7 @@ static int switch_drv_remove(struct platform_device *pdev)
device_remove_file(&pdev->dev, &dev_attr_switch);
platform_set_drvdata(pdev, NULL);
flush_scheduled_work();
flush_work_sync(&psw->work);
del_timer_sync(&psw->debounce);
free_irq(irq, pdev);


@ -6128,7 +6128,7 @@ static void ata_port_detach(struct ata_port *ap)
/* it better be dead now */
WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
cancel_rearming_delayed_work(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->hotplug_task);
skip_eh:
if (ap->pmp_link) {


@ -1320,7 +1320,7 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
{
DPRINTK("ENTER\n");
cancel_rearming_delayed_work(&ap->sff_pio_task);
cancel_delayed_work_sync(&ap->sff_pio_task);
ap->hsm_task_state = HSM_ST_IDLE;
if (ata_msg_ctl(ap))


@ -4352,7 +4352,7 @@ static int __init floppy_init(void)
out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
out_flush_work:
flush_scheduled_work();
flush_work_sync(&floppy_work);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
@ -4422,7 +4422,7 @@ static int floppy_grab_irq_and_dma(void)
* We might have scheduled a free_irq(), wait it to
* drain first:
*/
flush_scheduled_work();
flush_work_sync(&floppy_work);
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",


@ -547,7 +547,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
spin_unlock_irqrestore(&blkif_io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_scheduled_work();
flush_work_sync(&info->work);
del_gendisk(info->gd);
@ -596,7 +596,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
spin_unlock_irq(&blkif_io_lock);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_scheduled_work();
flush_work_sync(&info->work);
/* Free resources associated with old device channel. */
if (info->ring_ref != GRANT_INVALID_REF) {


@ -837,7 +837,7 @@ probe_fail_no_mem:
static int __devexit remove_gdrom(struct platform_device *devptr)
{
flush_scheduled_work();
flush_work_sync(&work);
blk_cleanup_queue(gd.gdrom_rq);
free_irq(HW_EVENT_GDROM_CMD, &gd);
free_irq(HW_EVENT_GDROM_DMA, &gd);


@ -850,8 +850,8 @@ static void hvsi_flush_output(struct hvsi_struct *hp)
wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);
/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
cancel_delayed_work(&hp->writer);
flush_scheduled_work();
cancel_delayed_work_sync(&hp->writer);
flush_work_sync(&hp->handshaker);
/*
* it's also possible that our timeout expired and hvsi_write_worker


@ -1729,7 +1729,7 @@ void ipwireless_hardware_free(struct ipw_hardware *hw)
ipwireless_stop_interrupts(hw);
flush_scheduled_work();
flush_work_sync(&hw->work_rx);
for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
if (hw->packet_assembler[i] != NULL)


@ -430,7 +430,8 @@ void ipwireless_network_free(struct ipw_network *network)
network->shutting_down = 1;
ipwireless_ppp_close(network);
flush_scheduled_work();
flush_work_sync(&network->work_go_online);
flush_work_sync(&network->work_go_offline);
ipwireless_stop_interrupts(network->hardware);
ipwireless_associate_network(network->hardware, NULL);


@ -577,7 +577,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
mutex_unlock(&ttyj->ipw_tty_mutex);
tty_hangup(ttyj->linux_tty);
/* Wait till the tty_hangup has completed */
flush_scheduled_work();
flush_work_sync(&ttyj->linux_tty->hangup_work);
/* FIXME: Exactly how is the tty object locked here
against a parallel ioctl etc */
mutex_lock(&ttyj->ipw_tty_mutex);


@ -1434,7 +1434,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
sonypi_disable();
synchronize_irq(sonypi_device.irq);
flush_scheduled_work();
flush_work_sync(&sonypi_device.input_work);
if (useinput) {
input_unregister_device(sonypi_device.input_key_dev);


@ -986,7 +986,7 @@ int tpm_release(struct inode *inode, struct file *file)
struct tpm_chip *chip = file->private_data;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_scheduled_work();
flush_work_sync(&chip->work);
file->private_data = NULL;
atomic_set(&chip->data_pending, 0);
kfree(chip->data_buffer);
@ -1038,7 +1038,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
ssize_t ret_size;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_scheduled_work();
flush_work_sync(&chip->work);
ret_size = atomic_read(&chip->data_pending);
atomic_set(&chip->data_pending, 0);
if (ret_size > 0) { /* relay data */


@ -1472,8 +1472,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
list_del(&bdev->device_list);
mutex_unlock(&glob->device_list_mutex);
if (!cancel_delayed_work(&bdev->wq))
flush_scheduled_work();
cancel_delayed_work_sync(&bdev->wq);
while (ttm_bo_delayed_delete(bdev, true))
;


@ -659,7 +659,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
par->dirty.active = false;
spin_unlock_irqrestore(&par->dirty.lock, flags);
flush_scheduled_work();
flush_delayed_work_sync(&info->deferred_work);
par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);


@ -2297,6 +2297,7 @@ static int __init capidrv_init(void)
errcode = capi20_get_profile(0, &profile);
if (errcode != CAPI_NOERROR) {
unregister_capictr_notifier(&capictr_nb);
capi20_release(&global.ap);
return -EIO;
}


@ -38,6 +38,7 @@
#include <linux/rcupdate.h>
static int showcapimsgs = 0;
static struct workqueue_struct *kcapi_wq;
MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer");
MODULE_AUTHOR("Carsten Paeth");
@ -291,7 +292,7 @@ static int notify_push(unsigned int event_type, u32 controller)
event->type = event_type;
event->controller = controller;
schedule_work(&event->work);
queue_work(kcapi_wq, &event->work);
return 0;
}
@ -408,7 +409,7 @@ void capi_ctr_handle_message(struct capi_ctr *ctr, u16 appl,
goto error;
}
skb_queue_tail(&ap->recv_queue, skb);
schedule_work(&ap->recv_work);
queue_work(kcapi_wq, &ap->recv_work);
rcu_read_unlock();
return;
@ -743,7 +744,7 @@ u16 capi20_release(struct capi20_appl *ap)
mutex_unlock(&capi_controller_lock);
flush_scheduled_work();
flush_workqueue(kcapi_wq);
skb_queue_purge(&ap->recv_queue);
if (showcapimsgs & 1) {
@ -1285,21 +1286,30 @@ static int __init kcapi_init(void)
{
int err;
kcapi_wq = alloc_workqueue("kcapi", 0, 0);
if (!kcapi_wq)
return -ENOMEM;
register_capictr_notifier(&capictr_nb);
err = cdebug_init();
if (!err)
kcapi_proc_init();
return err;
if (err) {
unregister_capictr_notifier(&capictr_nb);
destroy_workqueue(kcapi_wq);
return err;
}
kcapi_proc_init();
return 0;
}
static void __exit kcapi_exit(void)
{
kcapi_proc_exit();
/* make sure all notifiers are finished */
flush_scheduled_work();
unregister_capictr_notifier(&capictr_nb);
cdebug_exit();
destroy_workqueue(kcapi_wq);
}
module_init(kcapi_init);


@ -110,7 +110,7 @@ mISDN_freedchannel(struct dchannel *ch)
}
skb_queue_purge(&ch->squeue);
skb_queue_purge(&ch->rqueue);
flush_scheduled_work();
flush_work_sync(&ch->workq);
return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);
@ -143,7 +143,7 @@ mISDN_freebchannel(struct bchannel *ch)
mISDN_clear_bchannel(ch);
skb_queue_purge(&ch->rqueue);
ch->rcount = 0;
flush_scheduled_work();
flush_work_sync(&ch->workq);
return 0;
}
EXPORT_SYMBOL(mISDN_freebchannel);


@ -1269,6 +1269,8 @@ release_card(struct l1oip *hc)
if (timer_pending(&hc->timeout_tl))
del_timer(&hc->timeout_tl);
cancel_work_sync(&hc->workq);
if (hc->socket_thread)
l1oip_socket_close(hc);


@ -276,7 +276,7 @@ static int wm8350_led_remove(struct platform_device *pdev)
struct wm8350_led *led = platform_get_drvdata(pdev);
led_classdev_unregister(&led->cdev);
flush_scheduled_work();
flush_work_sync(&led->work);
wm8350_led_disable(led);
regulator_put(led->dcdc);
regulator_put(led->isink);


@ -226,7 +226,7 @@ void ams_sensor_detach(void)
* We do this after ams_info.exit(), because an interrupt might
* have arrived before disabling them.
*/
flush_scheduled_work();
flush_work_sync(&ams_info.worker);
/* Remove device */
of_device_unregister(ams_info.of_dev);


@ -285,8 +285,8 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
{
cancel_rearming_delayed_work(&rm->cpu[0].sniffer);
cancel_rearming_delayed_work(&rm->cpu[1].sniffer);
cancel_delayed_work_sync(&rm->cpu[0].sniffer);
cancel_delayed_work_sync(&rm->cpu[1].sniffer);
}
static int __devinit rackmeter_setup(struct rackmeter *rm)


@ -1329,7 +1329,8 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
return -EBUSY;
dvb_net_stop(net);
flush_scheduled_work();
flush_work_sync(&priv->set_multicast_list_wq);
flush_work_sync(&priv->restart_net_feed_wq);
printk("dvb_net: removed network interface %s\n", net->name);
unregister_netdev(net);
dvbnet->state[num]=0;


@ -313,8 +313,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
int dvb_usb_remote_exit(struct dvb_usb_device *d)
{
if (d->state & DVB_USB_STATE_REMOTE) {
cancel_rearming_delayed_work(&d->rc_query_work);
flush_scheduled_work();
cancel_delayed_work_sync(&d->rc_query_work);
if (d->props.rc.mode == DVB_RC_LEGACY)
input_unregister_device(d->input_dev);
else


@ -111,7 +111,7 @@ void mantis_evmgr_exit(struct mantis_ca *ca)
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
flush_scheduled_work();
flush_work_sync(&ca->hif_evm_work);
mantis_hif_exit(ca);
mantis_pcmcia_exit(ca);
}


@ -182,5 +182,6 @@ void mantis_uart_exit(struct mantis_pci *mantis)
{
/* disable interrupt */
mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
flush_work_sync(&mantis->uart_work);
}
EXPORT_SYMBOL_GPL(mantis_uart_exit);


@ -189,8 +189,14 @@ static void request_modules(struct bttv *dev)
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_modules(struct bttv *dev)
{
flush_work_sync(&dev->request_module_wk);
}
#else
#define request_modules(dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
@ -4429,6 +4435,9 @@ static void __devexit bttv_remove(struct pci_dev *pci_dev)
if (bttv_verbose)
printk("bttv%d: unloading\n",btv->c.nr);
if (bttv_tvcards[btv->c.type].has_dvb)
flush_request_modules(btv);
/* shutdown everything (DMA+IRQs) */
btand(~15, BT848_GPIO_DMA_CTL);
btwrite(0, BT848_INT_MASK);


@ -319,16 +319,13 @@ static void bttv_ir_start(struct bttv *btv, struct bttv_ir *ir)
static void bttv_ir_stop(struct bttv *btv)
{
if (btv->remote->polling) {
if (btv->remote->polling)
del_timer_sync(&btv->remote->timer);
flush_scheduled_work();
}
if (btv->remote->rc5_gpio) {
u32 gpio;
del_timer_sync(&btv->remote->timer);
flush_scheduled_work();
gpio = bttv_gpio_read(&btv->c);
bttv_gpio_write(&btv->c, gpio & ~(1 << 4));


@ -267,8 +267,14 @@ static void request_modules(struct cx18 *dev)
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_modules(struct cx18 *dev)
{
flush_work_sync(&dev->request_module_wk);
}
#else
#define request_modules(dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
/* Generic utility functions */
@ -1233,6 +1239,8 @@ static void cx18_remove(struct pci_dev *pci_dev)
CX18_DEBUG_INFO("Removing Card\n");
flush_request_modules(cx);
/* Stop all captures */
CX18_DEBUG_INFO("Stopping all streams\n");
if (atomic_read(&cx->tot_capturing) > 0)


@ -813,8 +813,14 @@ static void request_modules(struct cx231xx *dev)
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_modules(struct cx231xx *dev)
{
flush_work_sync(&dev->request_module_wk);
}
#else
#define request_modules(dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
/*
@ -1147,6 +1153,8 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
if (!dev->udev)
return;
flush_request_modules(dev);
/* delete v4l2 device */
v4l2_device_unregister(&dev->v4l2_dev);


@ -229,8 +229,6 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
}
flush_scheduled_work();
}
static void cx23885_input_ir_close(struct rc_dev *rc)


@ -66,8 +66,14 @@ static void request_modules(struct cx8802_dev *dev)
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_modules(struct cx8802_dev *dev)
{
flush_work_sync(&dev->request_module_wk);
}
#else
#define request_modules(dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
@ -819,6 +825,8 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
dprintk( 1, "%s\n", __func__);
flush_request_modules(dev);
if (!list_empty(&dev->drvlist)) {
struct cx8802_driver *drv, *tmp;
int err;


@ -2690,8 +2690,14 @@ static void request_modules(struct em28xx *dev)
INIT_WORK(&dev->request_module_wk, request_module_async);
schedule_work(&dev->request_module_wk);
}
static void flush_request_modules(struct em28xx *dev)
{
flush_work_sync(&dev->request_module_wk);
}
#else
#define request_modules(dev)
#define flush_request_modules(dev)
#endif /* CONFIG_MODULES */
/*
@ -3118,6 +3124,8 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
em28xx_info("disconnecting %s\n", dev->vdev->name);
flush_request_modules(dev);
/* wait until all current v4l2 io is finished then deallocate
resources */
mutex_lock(&dev->lock);


@ -551,7 +551,7 @@ void em28xx_deregister_snapshot_button(struct em28xx *dev)
{
if (dev->sbutton_input_dev != NULL) {
em28xx_info("Deregistering snapshot button\n");
cancel_rearming_delayed_work(&dev->sbutton_query_work);
cancel_delayed_work_sync(&dev->sbutton_query_work);
input_unregister_device(dev->sbutton_input_dev);
dev->sbutton_input_dev = NULL;
}


@ -1198,7 +1198,7 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
atomic_inc(&cam->reset_disable);
flush_scheduled_work();
flush_work_sync(&cam->sensor_reset_work);
rval = videobuf_streamoff(q);
if (!rval) {
@ -1512,7 +1512,7 @@ static int omap24xxcam_release(struct file *file)
atomic_inc(&cam->reset_disable);
flush_scheduled_work();
flush_work_sync(&cam->sensor_reset_work);
/* stop streaming capture */
videobuf_streamoff(&fh->vbq);
@ -1536,7 +1536,7 @@ static int omap24xxcam_release(struct file *file)
* not be scheduled anymore since streaming is already
* disabled.)
*/
flush_scheduled_work();
flush_work_sync(&cam->sensor_reset_work);
mutex_lock(&cam->mutex);
if (atomic_dec_return(&cam->users) == 0) {


@ -166,8 +166,14 @@ static void request_submodules(struct saa7134_dev *dev)
schedule_work(&dev->request_module_wk);
}
static void flush_request_submodules(struct saa7134_dev *dev)
{
flush_work_sync(&dev->request_module_wk);
}
#else
#define request_submodules(dev)
#define flush_request_submodules(dev)
#endif /* CONFIG_MODULES */
/* ------------------------------------------------------------------ */
@ -1010,8 +1016,6 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
}
}
request_submodules(dev);
v4l2_prio_init(&dev->prio);
mutex_lock(&saa7134_devlist_lock);
@ -1066,6 +1070,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
if (saa7134_dmasound_init && !dev->dmasound.priv_data)
saa7134_dmasound_init(dev);
request_submodules(dev);
return 0;
fail4:
@ -1091,6 +1096,8 @@ static void __devexit saa7134_finidev(struct pci_dev *pci_dev)
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
struct saa7134_mpeg_ops *mops;
flush_request_submodules(dev);
/* Release DMA sound modules if present */
if (saa7134_dmasound_exit && dev->dmasound.priv_data) {
saa7134_dmasound_exit(dev);


@ -553,7 +553,7 @@ static int empress_fini(struct saa7134_dev *dev)
if (NULL == dev->empress_dev)
return 0;
flush_scheduled_work();
flush_work_sync(&dev->empress_workqueue);
video_unregister_device(dev->empress_dev);
dev->empress_dev = NULL;
return 0;


@ -1259,7 +1259,7 @@ static int menelaus_probe(struct i2c_client *client,
return 0;
fail2:
free_irq(client->irq, menelaus);
flush_scheduled_work();
flush_work_sync(&menelaus->work);
fail1:
kfree(menelaus);
return err;
@ -1270,6 +1270,7 @@ static int __exit menelaus_remove(struct i2c_client *client)
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
free_irq(client->irq, menelaus);
flush_work_sync(&menelaus->work);
kfree(menelaus);
the_menelaus = NULL;
return 0;


@ -242,7 +242,7 @@ static int dbg_show(struct seq_file *s, void *_)
seq_printf(s, "mask2 %s\n", buf);
/* ignore ackint2 */
(void) schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
/* VMAIN voltage, enable lowpower, etc */
@ -400,7 +400,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
&& (tps->chgstatus & (TPS_CHG_USB|TPS_CHG_AC)))
poll = 1;
if (poll)
(void) schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
/* also potentially gpio-in rise or fall */
}
@ -410,7 +410,7 @@ static void tps65010_work(struct work_struct *work)
{
struct tps65010 *tps;
tps = container_of(work, struct tps65010, work.work);
tps = container_of(to_delayed_work(work), struct tps65010, work);
mutex_lock(&tps->lock);
tps65010_interrupt(tps);
@ -448,7 +448,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
disable_irq_nosync(irq);
set_bit(FLAG_IRQ_ENABLE, &tps->flags);
(void) schedule_work(&tps->work.work);
schedule_delayed_work(&tps->work, 0);
return IRQ_HANDLED;
}
@ -527,8 +527,7 @@ static int __exit tps65010_remove(struct i2c_client *client)
}
if (client->irq > 0)
free_irq(client->irq, tps);
cancel_delayed_work(&tps->work);
flush_scheduled_work();
cancel_delayed_work_sync(&tps->work);
debugfs_remove(tps->file);
kfree(tps);
the_tps = NULL;
@ -720,7 +719,7 @@ int tps65010_set_vbus_draw(unsigned mA)
&& test_and_set_bit(
FLAG_VBUS_CHANGED, &the_tps->flags)) {
/* gadget drivers call this in_irq() */
(void) schedule_work(&the_tps->work.work);
schedule_delayed_work(&the_tps->work, 0);
}
local_irq_restore(flags);


@ -273,13 +273,11 @@ ioc4_variant(struct ioc4_driver_data *idd)
static void __devinit
ioc4_load_modules(struct work_struct *work)
{
/* arg just has to be freed */
request_module("sgiioc4");
kfree(work);
}
static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
/* Adds a new instance of an IOC4 card */
static int __devinit
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
@ -396,21 +394,12 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
* PCI device.
*/
if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
struct work_struct *work;
work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
if (!work) {
printk(KERN_WARNING
"%s: IOC4 unable to allocate memory for "
"load of sub-modules.\n", __func__);
} else {
/* Request the module from a work procedure as the
* modprobe goes out to a userland helper and that
* will hang if done directly from ioc4_probe().
*/
printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
INIT_WORK(work, ioc4_load_modules);
schedule_work(work);
}
/* Request the module from a work procedure as the modprobe
* goes out to a userland helper and that will hang if done
* directly from ioc4_probe().
*/
printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
schedule_work(&ioc4_load_modules_work);
}
return 0;
@ -498,7 +487,7 @@ static void __exit
ioc4_exit(void)
{
/* Ensure ioc4_load_modules() has completed before exiting */
flush_scheduled_work();
flush_work_sync(&ioc4_load_modules_work);
pci_unregister_driver(&ioc4_driver);
}


@ -1790,7 +1790,7 @@ static int __init mmc_init(void)
{
int ret;
workqueue = create_singlethread_workqueue("kmmcd");
workqueue = alloc_ordered_workqueue("kmmcd", 0);
if (!workqueue)
return -ENOMEM;


@ -173,6 +173,8 @@ struct mmc_omap_host {
struct omap_mmc_platform_data *pdata;
};
static struct workqueue_struct *mmc_omap_wq;
static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
{
unsigned long tick_ns;
@ -289,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
host->next_slot = new_slot;
host->mmc = new_slot->mmc;
spin_unlock_irqrestore(&host->slot_lock, flags);
schedule_work(&host->slot_release_work);
queue_work(mmc_omap_wq, &host->slot_release_work);
return;
}
@ -457,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
}
host->stop_data = data;
schedule_work(&host->send_stop_work);
queue_work(mmc_omap_wq, &host->send_stop_work);
}
static void
@ -637,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
OMAP_MMC_WRITE(host, IE, 0);
disable_irq(host->irq);
host->abort = 1;
schedule_work(&host->cmd_abort_work);
queue_work(mmc_omap_wq, &host->cmd_abort_work);
}
spin_unlock_irqrestore(&host->slot_lock, flags);
}
@ -826,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
host->abort = 1;
OMAP_MMC_WRITE(host, IE, 0);
disable_irq_nosync(host->irq);
schedule_work(&host->cmd_abort_work);
queue_work(mmc_omap_wq, &host->cmd_abort_work);
return IRQ_HANDLED;
}
@ -1387,7 +1389,7 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
tasklet_kill(&slot->cover_tasklet);
del_timer_sync(&slot->cover_timer);
flush_scheduled_work();
flush_workqueue(mmc_omap_wq);
mmc_remove_host(mmc);
mmc_free_host(mmc);
@ -1608,12 +1610,22 @@ static struct platform_driver mmc_omap_driver = {
static int __init mmc_omap_init(void)
{
return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
int ret;
mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
if (!mmc_omap_wq)
return -ENOMEM;
ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
if (ret)
destroy_workqueue(mmc_omap_wq);
return ret;
}
static void __exit mmc_omap_exit(void)
{
platform_driver_unregister(&mmc_omap_driver);
destroy_workqueue(mmc_omap_wq);
}
module_init(mmc_omap_init);


@ -2290,7 +2290,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
free_irq(host->irq, host);
if (mmc_slot(host).card_detect_irq)
free_irq(mmc_slot(host).card_detect_irq, host);
flush_scheduled_work();
flush_work_sync(&host->mmc_carddetect_work);
mmc_host_disable(host->mmc);
clk_disable(host->iclk);


@ -22,7 +22,7 @@ static int my3126_interrupt_enable(struct cphy *cphy)
static int my3126_interrupt_disable(struct cphy *cphy)
{
cancel_rearming_delayed_work(&cphy->phy_update);
cancel_delayed_work_sync(&cphy->phy_update);
return 0;
}


@ -1279,7 +1279,7 @@ static void emac_force_link_update(struct emac_instance *dev)
netif_carrier_off(dev->ndev);
smp_rmb();
if (dev->link_polling) {
cancel_rearming_delayed_work(&dev->link_work);
cancel_delayed_work_sync(&dev->link_work);
if (dev->link_polling)
schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
}
@ -1294,7 +1294,7 @@ static int emac_close(struct net_device *ndev)
if (dev->phy.address >= 0) {
dev->link_polling = 0;
cancel_rearming_delayed_work(&dev->link_work);
cancel_delayed_work_sync(&dev->link_work);
}
mutex_lock(&dev->link_lock);
emac_netif_stop(dev);


@ -1207,7 +1207,6 @@ static void housekeeping_enable(struct zd_mac *mac)
static void housekeeping_disable(struct zd_mac *mac)
{
dev_dbg_f(zd_mac_dev(mac), "\n");
cancel_rearming_delayed_workqueue(zd_workqueue,
&mac->housekeeping.link_led_work);
cancel_delayed_work_sync(&mac->housekeeping.link_led_work);
zd_chip_control_leds(&mac->chip, ZD_LED_OFF);
}


@ -580,10 +580,8 @@ static int ds2760_battery_remove(struct platform_device *pdev)
{
struct ds2760_device_info *di = platform_get_drvdata(pdev);
cancel_rearming_delayed_workqueue(di->monitor_wqueue,
&di->monitor_work);
cancel_rearming_delayed_workqueue(di->monitor_wqueue,
&di->set_charged_work);
cancel_delayed_work_sync(&di->monitor_work);
cancel_delayed_work_sync(&di->set_charged_work);
destroy_workqueue(di->monitor_wqueue);
power_supply_unregister(&di->bat);
kfree(di);


@ -730,8 +730,7 @@ static __devinit int probe(int irq, struct device *dev)
power_reg_failed_1:
power_supply_unregister(&pbi->batt);
power_reg_failed:
cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
&pbi->monitor_battery);
cancel_delayed_work_sync(&pbi->monitor_battery);
requestirq_failed:
destroy_workqueue(pbi->monitor_wqueue);
wqueue_failed:
@ -760,8 +759,7 @@ static int __devexit platform_pmic_battery_remove(struct platform_device *pdev)
struct pmic_power_module_info *pbi = dev_get_drvdata(&pdev->dev);
free_irq(pbi->irq, pbi);
cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
&pbi->monitor_battery);
cancel_delayed_work_sync(&pbi->monitor_battery);
destroy_workqueue(pbi->monitor_wqueue);
power_supply_unregister(&pbi->usb);


@ -104,7 +104,7 @@ static int clear_uie(struct rtc_device *rtc)
}
if (rtc->uie_task_active) {
spin_unlock_irq(&rtc->irq_lock);
flush_scheduled_work();
flush_work_sync(&rtc->uie_task);
spin_lock_irq(&rtc->irq_lock);
}
rtc->uie_irq_active = 0;


@ -813,7 +813,7 @@ static int __devexit ds1305_remove(struct spi_device *spi)
if (spi->irq) {
set_bit(FLAG_EXITING, &ds1305->flags);
free_irq(spi->irq, ds1305);
flush_scheduled_work();
cancel_work_sync(&ds1305->work);
}
rtc_device_unregister(ds1305->rtc);


@ -417,7 +417,7 @@ static int __devexit ds1374_remove(struct i2c_client *client)
mutex_unlock(&ds1374->mutex);
free_irq(client->irq, client);
flush_scheduled_work();
cancel_work_sync(&ds1374->work);
}
rtc_device_unregister(ds1374->rtc);


@ -463,7 +463,7 @@ static int __devexit ds3232_remove(struct i2c_client *client)
mutex_unlock(&ds3232->mutex);
free_irq(client->irq, client);
flush_scheduled_work();
cancel_work_sync(&ds3232->work);
}
rtc_device_unregister(ds3232->rtc);


@ -650,7 +650,7 @@ static int __devexit rx8025_remove(struct i2c_client *client)
mutex_unlock(lock);
free_irq(client->irq, client);
flush_scheduled_work();
cancel_work_sync(&rx8025->work);
}
rx8025_sysfs_unregister(&client->dev);


@ -24,6 +24,8 @@
#include "tape_std.h"
#include "tape_3590.h"
static struct workqueue_struct *tape_3590_wq;
/*
* Pointer to debug area.
*/
@ -613,7 +615,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
p->device = tape_get_device(device);
p->op = op;
schedule_work(&p->work);
queue_work(tape_3590_wq, &p->work);
return 0;
}
@ -1629,7 +1631,7 @@ fail_kmalloc:
static void
tape_3590_cleanup_device(struct tape_device *device)
{
flush_scheduled_work();
flush_workqueue(tape_3590_wq);
tape_std_unassign(device);
kfree(device->discdata);
@ -1733,11 +1735,17 @@ tape_3590_init(void)
#endif
DBF_EVENT(3, "3590 init\n");
tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
if (!tape_3590_wq)
return -ENOMEM;
/* Register driver for 3590 tapes. */
rc = ccw_driver_register(&tape_3590_driver);
if (rc)
if (rc) {
destroy_workqueue(tape_3590_wq);
DBF_EVENT(3, "3590 init failed\n");
else
} else
DBF_EVENT(3, "3590 registered\n");
return rc;
}
@ -1746,7 +1754,7 @@ static void
tape_3590_exit(void)
{
ccw_driver_unregister(&tape_3590_driver);
destroy_workqueue(tape_3590_wq);
debug_unregister(TAPE_DBF_AREA);
}


@ -264,7 +264,7 @@ cleanup_queue:
void
tapeblock_cleanup_device(struct tape_device *device)
{
flush_scheduled_work();
flush_work_sync(&device->blk_data.requeue_task);
tape_put_device(device);
if (!device->blk_data.disk) {


@ -1325,8 +1325,8 @@ static void pohmelfs_put_super(struct super_block *sb)
}
psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
cancel_rearming_delayed_work(&psb->dwork);
cancel_rearming_delayed_work(&psb->drop_dwork);
cancel_delayed_work_sync(&psb->dwork);
cancel_delayed_work_sync(&psb->drop_dwork);
flush_scheduled_work();
dprintk("%s: stopped workqueues.\n", __func__);


@ -1247,7 +1247,7 @@ static void cxacru_unbind(struct usbatm_data *usbatm_instance,
mutex_unlock(&instance->poll_state_serialize);
if (is_polling)
cancel_rearming_delayed_work(&instance->poll_work);
cancel_delayed_work_sync(&instance->poll_work);
usb_kill_urb(instance->snd_urb);
usb_kill_urb(instance->rcv_urb);


@ -139,7 +139,8 @@ struct speedtch_instance_data {
struct speedtch_params params; /* set in probe, constant afterwards */
struct delayed_work status_checker;
struct timer_list status_check_timer;
struct work_struct status_check_work;
unsigned char last_status;
@ -498,7 +499,7 @@ static void speedtch_check_status(struct work_struct *work)
{
struct speedtch_instance_data *instance =
container_of(work, struct speedtch_instance_data,
status_checker.work);
status_check_work);
struct usbatm_data *usbatm = instance->usbatm;
struct atm_dev *atm_dev = usbatm->atm_dev;
unsigned char *buf = instance->scratch_buffer;
@ -575,11 +576,11 @@ static void speedtch_status_poll(unsigned long data)
{
struct speedtch_instance_data *instance = (void *)data;
schedule_delayed_work(&instance->status_checker, 0);
schedule_work(&instance->status_check_work);
/* The following check is racy, but the race is harmless */
if (instance->poll_delay < MAX_POLL_DELAY)
mod_timer(&instance->status_checker.timer, jiffies + msecs_to_jiffies(instance->poll_delay));
mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(instance->poll_delay));
else
atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
}
@ -595,7 +596,7 @@ static void speedtch_resubmit_int(unsigned long data)
if (int_urb) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
if (!ret)
schedule_delayed_work(&instance->status_checker, 0);
schedule_work(&instance->status_check_work);
else {
atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@ -624,7 +625,7 @@ static void speedtch_handle_int(struct urb *int_urb)
}
if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) {
del_timer(&instance->status_checker.timer);
del_timer(&instance->status_check_timer);
atm_info(usbatm, "DSL line goes up\n");
} else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) {
atm_info(usbatm, "DSL line goes down\n");
@ -640,7 +641,7 @@ static void speedtch_handle_int(struct urb *int_urb)
if ((int_urb = instance->int_urb)) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
schedule_delayed_work(&instance->status_checker, 0);
schedule_work(&instance->status_check_work);
if (ret < 0) {
atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
goto fail;
@ -686,7 +687,7 @@ static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_de
}
/* Start status polling */
mod_timer(&instance->status_checker.timer, jiffies + msecs_to_jiffies(1000));
mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(1000));
return 0;
}
@ -698,7 +699,7 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
atm_dbg(usbatm, "%s entered\n", __func__);
del_timer_sync(&instance->status_checker.timer);
del_timer_sync(&instance->status_check_timer);
/*
* Since resubmit_timer and int_urb can schedule themselves and
@ -717,7 +718,7 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
del_timer_sync(&instance->resubmit_timer);
usb_free_urb(int_urb);
flush_scheduled_work();
flush_work_sync(&instance->status_check_work);
}
static int speedtch_pre_reset(struct usb_interface *intf)
@ -869,10 +870,11 @@ static int speedtch_bind(struct usbatm_data *usbatm,
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
init_timer(&instance->status_check_timer);
instance->status_checker.timer.function = speedtch_status_poll;
instance->status_checker.timer.data = (unsigned long)instance;
instance->status_check_timer.function = speedtch_status_poll;
instance->status_check_timer.data = (unsigned long)instance;
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;


@ -839,11 +839,9 @@ void gether_cleanup(void)
return;
unregister_netdev(the_dev->net);
flush_work_sync(&the_dev->work);
free_netdev(the_dev->net);
/* assuming we used keventd, it must quiesce too */
flush_scheduled_work();
the_dev = NULL;
}


@ -901,7 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd)
ohci_dump (ohci, 1);
flush_scheduled_work();
if (quirk_nec(ohci))
flush_work_sync(&ohci->nec_work);
ohci_usb_reset (ohci);
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);


@ -1247,7 +1247,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
isp->timer.data = 0;
set_bit(WORK_STOP, &isp->todo);
del_timer_sync(&isp->timer);
flush_scheduled_work();
flush_work_sync(&isp->work);
put_device(&i2c->dev);
the_transceiver = NULL;


@ -613,9 +613,8 @@ static void oti6858_close(struct usb_serial_port *port)
dbg("%s(): after buf_clear()", __func__);
/* cancel scheduled setup */
cancel_delayed_work(&priv->delayed_setup_work);
cancel_delayed_work(&priv->delayed_write_work);
flush_scheduled_work();
cancel_delayed_work_sync(&priv->delayed_setup_work);
cancel_delayed_work_sync(&priv->delayed_write_work);
/* shutdown our urbs */
dbg("%s(): shutting down urbs", __func__);


@ -75,7 +75,7 @@ int fb_deferred_io_fsync(struct file *file, int datasync)
return 0;
/* Kill off the delayed work */
cancel_rearming_delayed_work(&info->deferred_work);
cancel_delayed_work_sync(&info->deferred_work);
/* Run it immediately */
return schedule_delayed_work(&info->deferred_work, 0);


@ -396,7 +396,7 @@ static void mipid_esd_start_check(struct mipid_device *md)
static void mipid_esd_stop_check(struct mipid_device *md)
{
if (md->esd_check != NULL)
cancel_rearming_delayed_workqueue(md->esd_wq, &md->esd_work);
cancel_delayed_work_sync(&md->esd_work);
}
static void mipid_esd_work(struct work_struct *work)


@ -317,7 +317,12 @@ static void ncp_stop_tasks(struct ncp_server *server) {
sk->sk_write_space = server->write_space;
release_sock(sk);
del_timer_sync(&server->timeout_tm);
flush_scheduled_work();
flush_work_sync(&server->rcv.tq);
if (sk->sk_socket->type == SOCK_STREAM)
flush_work_sync(&server->tx.tq);
else
flush_work_sync(&server->timeout_tq);
}
static int ncp_show_options(struct seq_file *seq, struct vfsmount *mnt)


@ -4336,7 +4336,7 @@ __nfs4_state_shutdown(void)
void
nfs4_state_shutdown(void)
{
cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work);
cancel_delayed_work_sync(&laundromat_work);
destroy_workqueue(laundry_wq);
locks_end_grace(&nfsd4_manager);
nfs4_lock_state();


@ -307,8 +307,7 @@ static void o2hb_arm_write_timeout(struct o2hb_region *reg)
static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
{
cancel_delayed_work(&reg->hr_write_timeout_work);
flush_scheduled_work();
cancel_delayed_work_sync(&reg->hr_write_timeout_work);
}
static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)


@ -325,5 +325,7 @@ void o2quo_init(void)
void o2quo_exit(void)
{
flush_scheduled_work();
struct o2quo_state *qs = &o2quo_state;
flush_work_sync(&qs->qs_work);
}


@ -408,7 +408,7 @@ xfs_mru_cache_flush(
spin_lock(&mru->lock);
if (mru->queued) {
spin_unlock(&mru->lock);
cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
cancel_delayed_work_sync(&mru->work);
spin_lock(&mru->lock);
}


@ -409,7 +409,7 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
}
/* Obsolete. use cancel_delayed_work_sync() */
static inline
static inline __deprecated
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
struct delayed_work *work)
{
@ -417,7 +417,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
}
/* Obsolete. use cancel_delayed_work_sync() */
static inline
static inline __deprecated
void cancel_rearming_delayed_work(struct delayed_work *work)
{
cancel_delayed_work_sync(work);


@ -777,9 +777,6 @@ static void __init do_initcalls(void)
for (fn = __early_initcall_end; fn < __initcall_end; fn++)
do_one_initcall(*fn);
/* Make sure there is no pending stuff from the initcall sequence */
flush_scheduled_work();
}
/*


@ -932,6 +932,38 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
wake_up_worker(gcwq);
}
/*
* Test whether @work is being queued from another work executing on the
* same workqueue. This is rather expensive and should only be used from
* cold paths.
*/
static bool is_chained_work(struct workqueue_struct *wq)
{
unsigned long flags;
unsigned int cpu;
for_each_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker *worker;
struct hlist_node *pos;
int i;
spin_lock_irqsave(&gcwq->lock, flags);
for_each_busy_worker(worker, i, pos, gcwq) {
if (worker->task != current)
continue;
spin_unlock_irqrestore(&gcwq->lock, flags);
/*
* I'm @worker, no locking necessary. See if @work
* is headed to the same workqueue.
*/
return worker->current_cwq->wq == wq;
}
spin_unlock_irqrestore(&gcwq->lock, flags);
}
return false;
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
@ -943,7 +975,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
if (WARN_ON_ONCE(wq->flags & WQ_DYING))
/* if dying, only works from the same workqueue are allowed */
if (unlikely(wq->flags & WQ_DYING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
/* determine gcwq to use */
@ -2936,11 +2970,35 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
unsigned int flush_cnt = 0;
unsigned int cpu;
/*
* Mark @wq dying and drain all pending works. Once WQ_DYING is
* set, only chain queueing is allowed. IOW, only currently
* pending or running work items on @wq can queue further work
* items on it. @wq is flushed repeatedly until it becomes empty.
* The number of flushes is determined by the depth of chaining and
* should be relatively short. Whine if it takes too long.
*/
wq->flags |= WQ_DYING;
reflush:
flush_workqueue(wq);
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
if (!cwq->nr_active && list_empty(&cwq->delayed_works))
continue;
if (++flush_cnt == 10 ||
(flush_cnt % 100 == 0 && flush_cnt <= 1000))
printk(KERN_WARNING "workqueue %s: flush on "
"destruction isn't complete after %u tries\n",
wq->name, flush_cnt);
goto reflush;
}
/*
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
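
To make the new WQ_DYING behaviour above concrete: the only queueing still permitted on a dying workqueue is "chained" work, i.e. a work item queueing further work onto the same workqueue from inside its own callback, which is_chained_work() detects and destroy_workqueue() drains by reflushing. A small illustrative sketch (foo_wq, foo_fn and foo_pending are hypothetical, not part of the patch):

#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;
static atomic_t foo_pending = ATOMIC_INIT(0);

static void foo_fn(struct work_struct *work)
{
	/*
	 * Requeueing from a work item already running on foo_wq is chained
	 * work: still allowed after destroy_workqueue() sets WQ_DYING, and
	 * the destroy path keeps flushing until the chain terminates.
	 */
	if (atomic_dec_return(&foo_pending) > 0)
		queue_work(foo_wq, work);
}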


@ -1293,7 +1293,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
* anything expensive but will only modify reap_work
* and reschedule the timer.
*/
cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
/* Now the cache_reaper is guaranteed to be not running. */
per_cpu(slab_reap_work, cpu).work.func = NULL;
break;


@ -1033,7 +1033,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
per_cpu(vmstat_work, cpu).work.func = NULL;
break;
case CPU_DOWN_FAILED:


@ -1607,7 +1607,7 @@ static void lec_arp_destroy(struct lec_priv *priv)
struct lec_arp_table *entry;
int i;
cancel_rearming_delayed_work(&priv->lec_arp_work);
cancel_delayed_work_sync(&priv->lec_arp_work);
/*
* Remove all entries


@ -923,7 +923,7 @@ void __netpoll_cleanup(struct netpoll *np)
skb_queue_purge(&npinfo->arp_tx);
skb_queue_purge(&npinfo->txq);
cancel_rearming_delayed_work(&npinfo->tx_work);
cancel_delayed_work_sync(&npinfo->tx_work);
/* clean after last, unfinished work */
__skb_queue_purge(&npinfo->txq);


@ -390,7 +390,7 @@ static int dsa_remove(struct platform_device *pdev)
if (dst->link_poll_needed)
del_timer_sync(&dst->link_poll_timer);
flush_scheduled_work();
flush_work_sync(&dst->link_poll_work);
for (i = 0; i < dst->pd->nr_chips; i++) {
struct dsa_switch *ds = dst->ds[i];


@ -3430,7 +3430,7 @@ void ip_vs_control_cleanup(void)
{
EnterFunction(2);
ip_vs_trash_cleanup();
cancel_rearming_delayed_work(&defense_work);
cancel_delayed_work_sync(&defense_work);
cancel_work_sync(&defense_work.work);
ip_vs_kill_estimator(&ip_vs_stats);
unregister_sysctl_table(sysctl_header);


@ -770,7 +770,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
dprintk("RPC: xs_destroy xprt %p\n", xprt);
cancel_rearming_delayed_work(&transport->connect_worker);
cancel_delayed_work_sync(&transport->connect_worker);
xs_close(xprt);
xs_free_peer_addresses(xprt);