Merge branch 'for-3.4/drivers' of git://git.kernel.dk/linux-block

Pull block driver bits from Jens Axboe:

 - A series of fixes for mtip32xx.  Most from Asai at Micron, but also
   one from Greg, getting rid of the dependency on HOTPLUG_PCI_PCIE.

 - A few bug fixes for xen-blkfront and blkback.

 - A virtio-blk fix from Vivek, making resize actually work.

 - Two fixes from Stephen, making larger transfers possible on cciss.
   This is needed for tape drive support.

* 'for-3.4/drivers' of git://git.kernel.dk/linux-block:
  block: mtip32xx: remove HOTPLUG_PCI_PCIE dependancy
  mtip32xx: dump tagmap on failure
  mtip32xx: fix handling of commands in various scenarios
  mtip32xx: Shorten macro names
  mtip32xx: misc changes
  mtip32xx: Add new sysfs entry 'status'
  mtip32xx: make setting comp_time as common
  mtip32xx: Add new bitwise flag 'dd_flag'
  mtip32xx: fix error handling in mtip_init()
  virtio-blk: Call revalidate_disk() upon online disk resize
  xen/blkback: Make optional features be really optional.
  xen/blkback: Squash the discard support for 'file' and 'phy' type.
  mtip32xx: fix incorrect value set for drv_cleanup_done, and re-initialize and start port in mtip_restart_port()
  cciss: Fix scsi tape io with more than 255 scatter gather elements
  cciss: Initialize scsi host max_sectors for tape drive support
  xen-blkfront: make blkif_io_lock spinlock per-device
  xen/blkfront: don't put bdev right after getting it
  xen-blkfront: use bitmap_set() and bitmap_clear()
  xen/blkback: Enable blkback on HVM guests
  xen/blkback: use grant-table.c hypercall wrappers
Linus Torvalds 2012-04-13 18:45:13 -07:00
commit c104f1fa1e
10 changed files with 797 additions and 333 deletions

View file

@@ -0,0 +1,18 @@
+What:           /sys/block/rssd*/registers
+Date:           March 2012
+KernelVersion:  3.3
+Contact:        Asai Thambi S P <asamymuthupa@micron.com>
+Description:    This is a read-only file. Dumps below driver information and
+                hardware registers.
+                    - S ACTive
+                    - Command Issue
+                    - Allocated
+                    - Completed
+                    - PORT IRQ STAT
+                    - HOST IRQ STAT
+
+What:           /sys/block/rssd*/status
+Date:           April 2012
+KernelVersion:  3.4
+Contact:        Asai Thambi S P <asamymuthupa@micron.com>
+Description:    This is a read-only file. Indicates the status of the device.
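
The two entries documented above are ordinary read-only sysfs attributes, so they can be consumed with plain file I/O from userspace. As a minimal sketch (not part of this patch set; the device name rssd0 and the printed wording are assumptions), the new 'status' attribute could be read like this:

/* Hedged example, not from the patches above: read the mtip32xx 'status'
 * sysfs attribute from userspace. The device name rssd0 is an assumption. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[64];
        FILE *f = fopen("/sys/block/rssd0/status", "r");

        if (!f) {
                perror("open /sys/block/rssd0/status");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f)) {
                buf[strcspn(buf, "\n")] = '\0';  /* strip trailing newline */
                printf("mtip32xx device status: %s\n", buf);
        }
        fclose(f);
        return 0;
}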

View file

@@ -866,6 +866,7 @@ cciss_scsi_detect(ctlr_info_t *h)
         sh->can_queue = cciss_tape_cmds;
         sh->sg_tablesize = h->maxsgentries;
         sh->max_cmd_len = MAX_COMMAND_SIZE;
+        sh->max_sectors = h->cciss_max_sectors;

         ((struct cciss_scsi_adapter_data_t *)
                 h->scsi_ctlr)->scsi_host = sh;
@@ -1410,7 +1411,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
         /* track how many SG entries we are using */
         if (request_nsgs > h->maxSG)
                 h->maxSG = request_nsgs;
-        c->Header.SGTotal = (__u8) request_nsgs + chained;
+        c->Header.SGTotal = (u16) request_nsgs + chained;
         if (request_nsgs > h->max_cmd_sgentries)
                 c->Header.SGList = h->max_cmd_sgentries;
         else

View file

@@ -4,6 +4,6 @@

 config BLK_DEV_PCIESSD_MTIP32XX
         tristate "Block Device Driver for Micron PCIe SSDs"
-        depends on HOTPLUG_PCI_PCIE
+        depends on PCI
         help
           This enables the block driver for Micron PCIe SSDs.

File diff suppressed because it is too large

View file

@@ -34,8 +34,8 @@
 /* offset of Device Control register in PCIe extended capabilites space */
 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48

-/* # of times to retry timed out IOs */
-#define MTIP_MAX_RETRIES        5
+/* # of times to retry timed out/failed IOs */
+#define MTIP_MAX_RETRIES        2

 /* Various timeout values in ms */
 #define MTIP_NCQ_COMMAND_TIMEOUT_MS       5000
@@ -114,12 +114,41 @@
 #define __force_bit2int (unsigned int __force)

 /* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_FLAG_IC_ACTIVE_BIT                 0
-#define MTIP_FLAG_EH_ACTIVE_BIT                 1
-#define MTIP_FLAG_SVC_THD_ACTIVE_BIT            2
-#define MTIP_FLAG_ISSUE_CMDS_BIT                4
-#define MTIP_FLAG_REBUILD_BIT                   5
-#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT       8
+#define MTIP_PF_IC_ACTIVE_BIT           0 /* pio/ioctl */
+#define MTIP_PF_EH_ACTIVE_BIT           1 /* error handling */
+#define MTIP_PF_SE_ACTIVE_BIT           2 /* secure erase */
+#define MTIP_PF_DM_ACTIVE_BIT           3 /* download microcde */
+#define MTIP_PF_PAUSE_IO        ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+                                (1 << MTIP_PF_EH_ACTIVE_BIT) | \
+                                (1 << MTIP_PF_SE_ACTIVE_BIT) | \
+                                (1 << MTIP_PF_DM_ACTIVE_BIT))
+
+#define MTIP_PF_SVC_THD_ACTIVE_BIT      4
+#define MTIP_PF_ISSUE_CMDS_BIT          5
+#define MTIP_PF_REBUILD_BIT             6
+#define MTIP_PF_SVC_THD_STOP_BIT        8
+
+/* below are bit numbers in 'dd_flag' defined in driver_data */
+#define MTIP_DDF_REMOVE_PENDING_BIT     1
+#define MTIP_DDF_OVER_TEMP_BIT          2
+#define MTIP_DDF_WRITE_PROTECT_BIT      3
+#define MTIP_DDF_STOP_IO        ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+                                (1 << MTIP_DDF_OVER_TEMP_BIT) | \
+                                (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+
+#define MTIP_DDF_CLEANUP_BIT            5
+#define MTIP_DDF_RESUME_BIT             6
+#define MTIP_DDF_INIT_DONE_BIT          7
+#define MTIP_DDF_REBUILD_FAILED_BIT     8
+
+__packed struct smart_attr{
+        u8 attr_id;
+        u16 flags;
+        u8 cur;
+        u8 worst;
+        u32 data;
+        u8 res[3];
+};

 /* Register Frame Information Structure (FIS), host to device. */
 struct host_to_dev_fis {
@@ -345,6 +374,12 @@ struct mtip_port {
          * when the command slot and all associated data structures
          * are no longer needed.
          */
+        u16 *log_buf;
+        dma_addr_t log_buf_dma;
+
+        u8 *smart_buf;
+        dma_addr_t smart_buf_dma;
+
         unsigned long allocated[SLOTBITS_IN_LONGS];
         /*
          * used to queue commands when an internal command is in progress
@@ -368,6 +403,7 @@ struct mtip_port {
          * Timer used to complete commands that have been active for too long.
          */
         struct timer_list cmd_timer;
+        unsigned long ic_pause_timer;
         /*
          * Semaphore used to block threads if there are no
          * command slots available.
@@ -404,13 +440,9 @@ struct driver_data {

         unsigned slot_groups; /* number of slot groups the product supports */

-        atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
-
         unsigned long index; /* Index to determine the disk name */

-        unsigned int ftlrebuildflag; /* FTL rebuild flag */
-
-        atomic_t resumeflag; /* Atomic variable to track suspend/resume */
+        unsigned long dd_flag; /* NOTE: use atomic bit operations on this */

         struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
 };

View file

@@ -351,6 +351,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
                   cap_str_10, cap_str_2);

         set_capacity(vblk->disk, capacity);
+        revalidate_disk(vblk->disk);
 done:
         mutex_unlock(&vblk->config_lock);
 }

View file

@@ -321,6 +321,7 @@ struct seg_buf {
 static void xen_blkbk_unmap(struct pending_req *req)
 {
         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         unsigned int i, invcount = 0;
         grant_handle_t handle;
         int ret;
@@ -332,25 +333,12 @@ static void xen_blkbk_unmap(struct pending_req *req)
                 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                     GNTMAP_host_map, handle);
                 pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
+                pages[invcount] = virt_to_page(vaddr(req, i));
                 invcount++;
         }

-        ret = HYPERVISOR_grant_table_op(
-                GNTTABOP_unmap_grant_ref, unmap, invcount);
+        ret = gnttab_unmap_refs(unmap, pages, invcount, false);
         BUG_ON(ret);
-        /*
-         * Note, we use invcount, so nr->pages, so we can't index
-         * using vaddr(req, i).
-         */
-        for (i = 0; i < invcount; i++) {
-                ret = m2p_remove_override(
-                        virt_to_page(unmap[i].host_addr), false);
-                if (ret) {
-                        pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
-                                 (unsigned long)unmap[i].host_addr);
-                        continue;
-                }
-        }
 }

 static int xen_blkbk_map(struct blkif_request *req,
@@ -378,7 +366,7 @@ static int xen_blkbk_map(struct blkif_request *req,
                                           pending_req->blkif->domid);
         }

-        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
+        ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
         BUG_ON(ret);

         /*
@@ -398,15 +386,6 @@ static int xen_blkbk_map(struct blkif_request *req,
                 if (ret)
                         continue;

-                ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
-                        blkbk->pending_page(pending_req, i), NULL);
-                if (ret) {
-                        pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
-                                 (unsigned long)map[i].dev_bus_addr, ret);
-                        /* We could switch over to GNTTABOP_copy */
-                        continue;
-                }
-
                 seg[i].buf = map[i].dev_bus_addr |
                         (req->u.rw.seg[i].first_sect << 9);
         }
@@ -419,21 +398,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
         int err = 0;
         int status = BLKIF_RSP_OKAY;
         struct block_device *bdev = blkif->vbd.bdev;
+        unsigned long secure;

         blkif->st_ds_req++;

         xen_blkif_get(blkif);
-        if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
-            blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
-                unsigned long secure = (blkif->vbd.discard_secure &&
-                        (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
-                        BLKDEV_DISCARD_SECURE : 0;
-                err = blkdev_issue_discard(bdev,
-                        req->u.discard.sector_number,
-                        req->u.discard.nr_sectors,
-                        GFP_KERNEL, secure);
-        } else
-                err = -EOPNOTSUPP;
+        secure = (blkif->vbd.discard_secure &&
+                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
+                  BLKDEV_DISCARD_SECURE : 0;
+
+        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
+                                   req->u.discard.nr_sectors,
+                                   GFP_KERNEL, secure);

         if (err == -EOPNOTSUPP) {
                 pr_debug(DRV_PFX "discard op failed, not supported\n");
@@ -830,7 +806,7 @@ static int __init xen_blkif_init(void)
         int i, mmap_pages;
         int rc = 0;

-        if (!xen_pv_domain())
+        if (!xen_domain())
                 return -ENODEV;

         blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);

View file

@@ -146,11 +146,6 @@ enum blkif_protocol {
         BLKIF_PROTOCOL_X86_64 = 3,
 };

-enum blkif_backend_type {
-        BLKIF_BACKEND_PHY = 1,
-        BLKIF_BACKEND_FILE = 2,
-};
-
 struct xen_vbd {
         /* What the domain refers to this vbd as. */
         blkif_vdev_t            handle;
@@ -177,7 +172,6 @@ struct xen_blkif {
         unsigned int            irq;
         /* Comms information. */
         enum blkif_protocol     blk_protocol;
-        enum blkif_backend_type blk_backend_type;
         union blkif_back_rings  blk_rings;
         void                    *blk_ring;
         /* The VBD attached to this interface. */

View file

@@ -381,72 +381,49 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
         err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
                             "%d", state);
         if (err)
-                xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
+                dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

         return err;
 }

-int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
+static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
 {
         struct xenbus_device *dev = be->dev;
         struct xen_blkif *blkif = be->blkif;
-        char *type;
         int err;
         int state = 0;
+        struct block_device *bdev = be->blkif->vbd.bdev;
+        struct request_queue *q = bdev_get_queue(bdev);

-        type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
-        if (!IS_ERR(type)) {
-                if (strncmp(type, "file", 4) == 0) {
-                        state = 1;
-                        blkif->blk_backend_type = BLKIF_BACKEND_FILE;
-                }
-                if (strncmp(type, "phy", 3) == 0) {
-                        struct block_device *bdev = be->blkif->vbd.bdev;
-                        struct request_queue *q = bdev_get_queue(bdev);
-                        if (blk_queue_discard(q)) {
-                                err = xenbus_printf(xbt, dev->nodename,
-                                        "discard-granularity", "%u",
-                                        q->limits.discard_granularity);
-                                if (err) {
-                                        xenbus_dev_fatal(dev, err,
-                                                "writing discard-granularity");
-                                        goto kfree;
-                                }
-                                err = xenbus_printf(xbt, dev->nodename,
-                                        "discard-alignment", "%u",
-                                        q->limits.discard_alignment);
-                                if (err) {
-                                        xenbus_dev_fatal(dev, err,
-                                                "writing discard-alignment");
-                                        goto kfree;
-                                }
-                                state = 1;
-                                blkif->blk_backend_type = BLKIF_BACKEND_PHY;
-                        }
-                        /* Optional. */
-                        err = xenbus_printf(xbt, dev->nodename,
-                                "discard-secure", "%d",
-                                blkif->vbd.discard_secure);
-                        if (err) {
-                                xenbus_dev_fatal(dev, err,
-                                        "writting discard-secure");
-                                goto kfree;
-                        }
-                }
-        } else {
-                err = PTR_ERR(type);
-                xenbus_dev_fatal(dev, err, "reading type");
-                goto out;
-        }
+        if (blk_queue_discard(q)) {
+                err = xenbus_printf(xbt, dev->nodename,
+                        "discard-granularity", "%u",
+                        q->limits.discard_granularity);
+                if (err) {
+                        dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
+                        return;
+                }
+                err = xenbus_printf(xbt, dev->nodename,
+                        "discard-alignment", "%u",
+                        q->limits.discard_alignment);
+                if (err) {
+                        dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
+                        return;
+                }
+                state = 1;
+                /* Optional. */
+                err = xenbus_printf(xbt, dev->nodename,
+                                    "discard-secure", "%d",
+                                    blkif->vbd.discard_secure);
+                if (err) {
+                        dev_warn(&dev->dev, "writing discard-secure (%d)", err);
+                        return;
+                }
+        }
         err = xenbus_printf(xbt, dev->nodename, "feature-discard",
                             "%d", state);
         if (err)
-                xenbus_dev_fatal(dev, err, "writing feature-discard");
-kfree:
-        kfree(type);
-out:
-        return err;
+                dev_warn(&dev->dev, "writing feature-discard (%d)", err);
 }

 int xen_blkbk_barrier(struct xenbus_transaction xbt,
                       struct backend_info *be, int state)
@@ -457,7 +434,7 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
         err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
                             "%d", state);
         if (err)
-                xenbus_dev_fatal(dev, err, "writing feature-barrier");
+                dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

         return err;
 }
@@ -689,14 +666,12 @@ static void connect(struct backend_info *be)
                 return;
         }

-        err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
-        if (err)
-                goto abort;
-
-        err = xen_blkbk_discard(xbt, be);
-
         /* If we can't advertise it is OK. */
-        err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+        xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
+
+        xen_blkbk_discard(xbt, be);
+
+        xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

         err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                             (unsigned long long)vbd_sz(&be->blkif->vbd));

View file

@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
+#include <linux/bitmap.h>

 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -81,6 +82,7 @@ static const struct block_device_operations xlvbd_block_fops;
  */
 struct blkfront_info
 {
+        spinlock_t io_lock;
         struct mutex mutex;
         struct xenbus_device *xbdev;
         struct gendisk *gd;
@@ -105,8 +107,6 @@ struct blkfront_info
         int is_ready;
 };

-static DEFINE_SPINLOCK(blkif_io_lock);
-
 static unsigned int nr_minors;
 static unsigned long *minors;
 static DEFINE_SPINLOCK(minor_lock);
@@ -177,8 +177,7 @@ static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)

         spin_lock(&minor_lock);
         if (find_next_bit(minors, end, minor) >= end) {
-                for (; minor < end; ++minor)
-                        __set_bit(minor, minors);
+                bitmap_set(minors, minor, nr);
                 rc = 0;
         } else
                 rc = -EBUSY;
@@ -193,8 +192,7 @@ static void xlbd_release_minors(unsigned int minor, unsigned int nr)

         BUG_ON(end > nr_minors);
         spin_lock(&minor_lock);
-        for (; minor < end; ++minor)
-                __clear_bit(minor, minors);
+        bitmap_clear(minors, minor, nr);
         spin_unlock(&minor_lock);
 }
@@ -419,7 +417,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
         struct request_queue *rq;
         struct blkfront_info *info = gd->private_data;

-        rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+        rq = blk_init_queue(do_blkif_request, &info->io_lock);
         if (rq == NULL)
                 return -1;

@@ -636,14 +634,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
         if (info->rq == NULL)
                 return;

-        spin_lock_irqsave(&blkif_io_lock, flags);
+        spin_lock_irqsave(&info->io_lock, flags);

         /* No more blkif_request(). */
         blk_stop_queue(info->rq);

         /* No more gnttab callback work. */
         gnttab_cancel_free_callback(&info->callback);
-        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        spin_unlock_irqrestore(&info->io_lock, flags);

         /* Flush gnttab callback work. Must be done with no locks held. */
         flush_work_sync(&info->work);
@@ -675,16 +673,16 @@ static void blkif_restart_queue(struct work_struct *work)
 {
         struct blkfront_info *info = container_of(work, struct blkfront_info, work);

-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);
         if (info->connected == BLKIF_STATE_CONNECTED)
                 kick_pending_request_queues(info);
-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);
 }

 static void blkif_free(struct blkfront_info *info, int suspend)
 {
         /* Prevent new requests being issued until we fix things up. */
-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);
         info->connected = suspend ?
                 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
         /* No more blkif_request(). */
@@ -692,7 +690,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
         blk_stop_queue(info->rq);
         /* No more gnttab callback work. */
         gnttab_cancel_free_callback(&info->callback);
-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);

         /* Flush gnttab callback work. Must be done with no locks held. */
         flush_work_sync(&info->work);
@@ -728,10 +726,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
         struct blkfront_info *info = (struct blkfront_info *)dev_id;
         int error;

-        spin_lock_irqsave(&blkif_io_lock, flags);
+        spin_lock_irqsave(&info->io_lock, flags);

         if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-                spin_unlock_irqrestore(&blkif_io_lock, flags);
+                spin_unlock_irqrestore(&info->io_lock, flags);
                 return IRQ_HANDLED;
         }
@@ -816,7 +814,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)

         kick_pending_request_queues(info);

-        spin_unlock_irqrestore(&blkif_io_lock, flags);
+        spin_unlock_irqrestore(&info->io_lock, flags);

         return IRQ_HANDLED;
 }
@@ -991,6 +989,7 @@ static int blkfront_probe(struct xenbus_device *dev,
         }

         mutex_init(&info->mutex);
+        spin_lock_init(&info->io_lock);
         info->xbdev = dev;
         info->vdevice = vdevice;
         info->connected = BLKIF_STATE_DISCONNECTED;
@@ -1068,7 +1067,7 @@ static int blkif_recover(struct blkfront_info *info)

         xenbus_switch_state(info->xbdev, XenbusStateConnected);

-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);

         /* Now safe for us to use the shared ring */
         info->connected = BLKIF_STATE_CONNECTED;
@@ -1079,7 +1078,7 @@ static int blkif_recover(struct blkfront_info *info)
         /* Kick any other new requests queued since we resumed */
         kick_pending_request_queues(info);

-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);

         return 0;
 }
@@ -1277,10 +1276,10 @@ static void blkfront_connect(struct blkfront_info *info)

         xenbus_switch_state(info->xbdev, XenbusStateConnected);

         /* Kick pending requests. */
-        spin_lock_irq(&blkif_io_lock);
+        spin_lock_irq(&info->io_lock);
         info->connected = BLKIF_STATE_CONNECTED;
         kick_pending_request_queues(info);
-        spin_unlock_irq(&blkif_io_lock);
+        spin_unlock_irq(&info->io_lock);

         add_disk(info->gd);
@@ -1410,7 +1409,6 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
         mutex_lock(&blkfront_mutex);

         bdev = bdget_disk(disk, 0);
-        bdput(bdev);

         if (bdev->bd_openers)
                 goto out;
@@ -1441,6 +1439,7 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
         }

 out:
+        bdput(bdev);
         mutex_unlock(&blkfront_mutex);
         return 0;
 }