Merge branch 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.34' of git://git.kernel.dk/linux-2.6-block: (38 commits)
  block: don't access jiffies when initialising io_context
  cfq: remove 8 bytes of padding from cfq_rb_root on 64 bit builds
  block: fix for "Consolidate phys_segment and hw_segment limits"
  cfq-iosched: quantum check tweak
  blktrace: perform cleanup after setup error
  blkdev: fix merge_bvec_fn return value checks
  cfq-iosched: requests "in flight" vs "in driver" clarification
  cciss: Fix problem with scatter gather elements in the scsi half of the driver
  cciss: eliminate unnecessary pointer use in cciss scsi code
  cciss: do not use void pointer for scsi hba data
  cciss: factor out scatter gather chain block mapping code
  cciss: fix scatter gather chain block dma direction kludge
  cciss: simplify scatter gather code
  cciss: factor out scatter gather chain block allocation and freeing
  cciss: detect bad alignment of scsi commands at build time
  cciss: clarify command list padding calculation
  cfq-iosched: rethink seeky detection for SSDs
  cfq-iosched: rework seeky detection
  block: remove padding from io_context on 64bit builds
  block: Consolidate phys_segment and hw_segment limits
  ...
Linus Torvalds 2010-03-01 09:00:29 -08:00
commit b1bf936840
77 changed files with 702 additions and 901 deletions


@ -128,3 +128,17 @@ Description:
preferred request size for workloads where sustained
throughput is desired. If no optimal I/O size is
reported this file contains 0.
What: /sys/block/<disk>/queue/nomerges
Date: January 2010
Contact:
Description:
Standard I/O elevator operations include attempts to
merge contiguous I/Os. For known random I/O loads these
attempts will always fail and result in extra cycles
being spent in the kernel. This allows one to turn off
this behavior in one of two ways: When set to 1, complex
merge checks are disabled, but the simple one-shot merges
with the previous I/O request are enabled. When set to 2,
all merge tries are disabled. The default value is 0 -
which enables all types of merge tries.
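
For illustration, a minimal user-space sketch of driving this tunable from C (the device name sda is only an example):

#include <stdio.h>

/*
 * Sketch: disable all merge attempts for one disk by writing "2" to
 * its nomerges attribute. Writing "1" would keep only the one-shot
 * merge; "0" restores the default. The device name is hypothetical.
 */
int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/nomerges", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("2\n", f);
	fclose(f);
	return 0;
}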


@ -25,11 +25,11 @@ size allowed by the hardware.
nomerges (RW)
-------------
This enables the user to disable the lookup logic involved with IO merging
requests in the block layer. Merging may still occur through a direct
1-hit cache, since that comes for (almost) free. The IO scheduler will not
waste cycles doing tree/hash lookups for merges if nomerges is 1. Defaults
to 0, enabling all merges.
This enables the user to disable the lookup logic involved with IO
merging requests in the block layer. By default (0) all merges are
enabled. When set to 1 only simple one-hit merges will be tried. When
set to 2 no merge algorithms will be tried (including one-hit or more
complex tree/hash lookups).
nr_requests (RW)
----------------


@ -747,7 +747,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
ubd_dev->fd = fd;
if(ubd_dev->cow.file != NULL){
blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
err = -ENOMEM;
ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
@ -849,7 +849,7 @@ static int ubd_add(int n, char **error_out)
}
ubd_dev->queue->queuedata = ubd_dev;
blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
if(err){
*error_out = "Failed to register device";


@ -23,20 +23,6 @@ static LIST_HEAD(blkio_list);
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
{
if (!css_tryget(&blkcg->css))
return false;
return true;
}
EXPORT_SYMBOL_GPL(blkiocg_css_tryget);
void blkiocg_css_put(struct blkio_cgroup *blkcg)
{
css_put(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_put);
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),


@ -43,9 +43,6 @@ struct blkio_group {
unsigned long sectors;
};
extern bool blkiocg_css_tryget(struct blkio_cgroup *blkcg);
extern void blkiocg_css_put(struct blkio_cgroup *blkcg);
typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
unsigned int weight);


@ -1490,9 +1490,9 @@ static inline void __generic_make_request(struct bio *bio)
/*
* We only want one ->make_request_fn to be active at a time,
* else stack usage with stacked devices could be a problem.
* So use current->bio_{list,tail} to keep a list of requests
* So use current->bio_list to keep a list of requests
* submitted by a make_request_fn function.
* current->bio_tail is also used as a flag to say if
* current->bio_list is also used as a flag to say if
* generic_make_request is currently active in this task or not.
* If it is NULL, then no make_request is active. If it is non-NULL,
* then a make_request is active, and new requests should be added
@ -1500,11 +1500,11 @@ static inline void __generic_make_request(struct bio *bio)
*/
void generic_make_request(struct bio *bio)
{
if (current->bio_tail) {
struct bio_list bio_list_on_stack;
if (current->bio_list) {
/* make_request is active */
*(current->bio_tail) = bio;
bio->bi_next = NULL;
current->bio_tail = &bio->bi_next;
bio_list_add(current->bio_list, bio);
return;
}
/* following loop may be a bit non-obvious, and so deserves some
@ -1512,30 +1512,27 @@ void generic_make_request(struct bio *bio)
* Before entering the loop, bio->bi_next is NULL (as all callers
* ensure that) so we have a list with a single bio.
* We pretend that we have just taken it off a longer list, so
* we assign bio_list to the next (which is NULL) and bio_tail
* to &bio_list, thus initialising the bio_list of new bios to be
* we assign bio_list to a pointer to the bio_list_on_stack,
* thus initialising the bio_list of new bios to be
* added. __generic_make_request may indeed add some more bios
* through a recursive call to generic_make_request. If it
* did, we find a non-NULL value in bio_list and re-enter the loop
* from the top. In this case we really did just take the bio
* of the top of the list (no pretending) and so fixup bio_list and
* bio_tail or bi_next, and call into __generic_make_request again.
* of the top of the list (no pretending) and so remove it from
* bio_list, and call into __generic_make_request again.
*
* The loop was structured like this to make only one call to
* __generic_make_request (which is important as it is large and
* inlined) and to keep the structure simple.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack);
current->bio_list = &bio_list_on_stack;
do {
current->bio_list = bio->bi_next;
if (bio->bi_next == NULL)
current->bio_tail = &current->bio_list;
else
bio->bi_next = NULL;
__generic_make_request(bio);
bio = current->bio_list;
bio = bio_list_pop(current->bio_list);
} while (bio);
current->bio_tail = NULL; /* deactivate */
current->bio_list = NULL; /* deactivate */
}
EXPORT_SYMBOL(generic_make_request);
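
The control flow above flattens recursion into iteration; a self-contained user-space sketch of the same pattern, with ints standing in for struct bio and a plain array for current->bio_list (all names here are illustrative):

#include <stdio.h>

static int pending[16];		/* stand-in for current->bio_list */
static int npending = -1;	/* -1 means no submit() is active */

static void submit(int depth)
{
	if (npending >= 0) {		/* an outer submit() owns the loop */
		pending[npending++] = depth;
		return;
	}
	npending = 0;			/* become the owner */
	for (;;) {
		printf("handling work at depth %d\n", depth);
		if (depth < 3)
			submit(depth + 1);	/* re-entry only queues */
		if (npending == 0)
			break;
		depth = pending[--npending];
	}
	npending = -1;			/* deactivate, as above */
}

int main(void)
{
	submit(0);	/* stack depth stays bounded at two frames */
	return 0;
}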
@ -1617,8 +1614,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
* limitation.
*/
blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
rq->nr_phys_segments > queue_max_hw_segments(q)) {
if (rq->nr_phys_segments > queue_max_segments(q)) {
printk(KERN_ERR "%s: over max segments limit.\n", __func__);
return -EIO;
}


@ -91,7 +91,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
spin_lock_init(&ret->lock);
ret->ioprio_changed = 0;
ret->ioprio = 0;
ret->last_waited = jiffies; /* doesn't matter... */
ret->last_waited = 0; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */
INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ret->cic_list);


@ -206,8 +206,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
{
int nr_phys_segs = bio_phys_segments(q, bio);
if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@ -300,10 +299,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
total_phys_segments--;
}
if (total_phys_segments > queue_max_phys_segments(q))
return 0;
if (total_phys_segments > queue_max_hw_segments(q))
if (total_phys_segments > queue_max_segments(q))
return 0;
/* Merge is OK... */


@ -91,10 +91,9 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
*/
void blk_set_default_limits(struct queue_limits *lim)
{
lim->max_phys_segments = MAX_PHYS_SEGMENTS;
lim->max_hw_segments = MAX_HW_SEGMENTS;
lim->max_segments = BLK_MAX_SEGMENTS;
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->max_segment_size = MAX_SEGMENT_SIZE;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = BLK_DEF_MAX_SECTORS;
lim->max_hw_sectors = INT_MAX;
lim->max_discard_sectors = 0;
@ -154,7 +153,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->unplug_timer.data = (unsigned long)q;
blk_set_default_limits(&q->limits);
blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
/*
* If the caller didn't supply a lock, fall back to our embedded
@ -210,37 +209,32 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
* blk_queue_max_sectors - set max sectors for a request for this queue
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
* @q: the request queue for the device
* @max_sectors: max sectors in the usual 512b unit
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
* Enables a low level driver to set an upper limit on the size of
* received requests.
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
* the device driver based upon the combined capabilities of I/O
* controller and storage device.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
* The soft limit can not exceed max_hw_sectors.
**/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_sectors);
__func__, max_hw_sectors);
}
if (BLK_DEF_MAX_SECTORS > max_sectors)
q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
else {
q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
q->limits.max_hw_sectors = max_sectors;
}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
if (BLK_DEF_MAX_SECTORS > max_sectors)
q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
else
q->limits.max_hw_sectors = max_sectors;
q->limits.max_hw_sectors = max_hw_sectors;
q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
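
As a usage sketch (the 255 below is a made-up hardware limit; BLK_DEF_MAX_SECTORS is 1024 in this tree):

	/* Illustrative driver call after this consolidation: */
	blk_queue_max_hw_sectors(q, 255);
	/*
	 * Leaves q->limits.max_hw_sectors == 255 and, since
	 * min_t(unsigned int, 255, BLK_DEF_MAX_SECTORS) == 255,
	 * q->limits.max_sectors == 255 as well. A controller good for
	 * 2048 sectors would instead see max_sectors clamped to 1024
	 * while max_hw_sectors stays at 2048.
	 */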
@ -257,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
* blk_queue_max_phys_segments - set max phys segments for a request for this queue
* blk_queue_max_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
* physical data segments in a request. This would be the largest sized
* scatter list the driver could handle.
* hw data segments in a request.
**/
void blk_queue_max_phys_segments(struct request_queue *q,
unsigned short max_segments)
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
@ -275,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
__func__, max_segments);
}
q->limits.max_phys_segments = max_segments;
q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);
/**
* blk_queue_max_hw_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
* hw data segments in a request. This would be the largest number of
* address/length pairs the host adapter can actually give at once
* to the device.
**/
void blk_queue_max_hw_segments(struct request_queue *q,
unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_segments);
}
q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segments);
/**
* blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
@ -507,7 +475,7 @@ static unsigned int lcm(unsigned int a, unsigned int b)
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
* @b: the underlying queue limits (bottom, component device)
* @offset: offset to beginning of data within component device
* @start: first data sector within component device
*
* Description:
* This function is used by stacking drivers like MD and DM to ensure
@ -525,10 +493,9 @@ static unsigned int lcm(unsigned int a, unsigned int b)
* the alignment_offset is undefined.
*/
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset)
sector_t start)
{
sector_t alignment;
unsigned int top, bottom, ret = 0;
unsigned int top, bottom, alignment, ret = 0;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@ -537,18 +504,14 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
t->max_phys_segments = min_not_zero(t->max_phys_segments,
b->max_phys_segments);
t->max_hw_segments = min_not_zero(t->max_hw_segments,
b->max_hw_segments);
t->max_segments = min_not_zero(t->max_segments, b->max_segments);
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
t->misaligned |= b->misaligned;
alignment = queue_limit_alignment_offset(b, offset);
alignment = queue_limit_alignment_offset(b, start);
/* Bottom device has different alignment. Check that it is
* compatible with the current top alignment.
@ -611,11 +574,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
/* Discard alignment and granularity */
if (b->discard_granularity) {
unsigned int granularity = b->discard_granularity;
offset &= granularity - 1;
alignment = (granularity + b->discard_alignment - offset)
& (granularity - 1);
alignment = queue_limit_discard_alignment(b, start);
if (t->discard_granularity != 0 &&
t->discard_alignment != alignment) {
@ -657,7 +616,7 @@ int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
start += get_start_sect(bdev);
return blk_stack_limits(t, &bq->limits, start << 9);
return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
@ -668,9 +627,8 @@ EXPORT_SYMBOL(bdev_stack_limits);
* @offset: offset to beginning of data within component device
*
* Description:
* Merges the limits for two queues. Returns 0 if alignment
* didn't change. Returns -1 if adding the bottom device caused
* misalignment.
* Merges the limits for a top level gendisk and a bottom level
* block_device.
*/
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset)
@ -678,9 +636,7 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
struct request_queue *t = disk->queue;
struct request_queue *b = bdev_get_queue(bdev);
offset += get_start_sect(bdev) << 9;
if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
disk_name(disk, 0, top);
@ -752,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
* does is adjust the queue so that the buf is always appended
* silently to the scatterlist.
*
* Note: This routine adjusts max_hw_segments to make room for
* appending the drain buffer. If you call
* blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
* calling this routine, you must set the limit to one fewer than your
* device can support otherwise there won't be room for the drain
* buffer.
* Note: This routine adjusts max_hw_segments to make room for appending
* the drain buffer. If you call blk_queue_max_segments() after calling
* this routine, you must set the limit to one fewer than your device
* can support otherwise there won't be room for the drain buffer.
*/
int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
if (queue_max_segments(q) < 2)
return -EINVAL;
/* make room for appending the drain */
blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
blk_queue_max_segments(q, queue_max_segments(q) - 1);
q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;


@ -189,7 +189,8 @@ static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show(blk_queue_nomerges(q), page);
return queue_var_show((blk_queue_nomerges(q) << 1) |
blk_queue_noxmerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
@ -199,10 +200,12 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
ssize_t ret = queue_var_store(&nm, page, count);
spin_lock_irq(q->queue_lock);
if (nm)
queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
if (nm == 2)
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
else
queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
else if (nm)
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
spin_unlock_irq(q->queue_lock);
return ret;
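
Read back, the two flags are packed into two bits; the mapping, spelled out (a sketch of what queue_nomerges_show() above reports):

/*
 * sysfs value   flag set                 behaviour
 *      0        none                     all merge attempts tried
 *      1        QUEUE_FLAG_NOXMERGES     one-hit cache try only
 *      2        QUEUE_FLAG_NOMERGES      no merging at all
 */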


@ -19,7 +19,7 @@
* tunables
*/
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
@ -46,8 +46,9 @@ static const int cfq_hist_divisor = 4;
#define CFQ_HW_QUEUE_MIN (5)
#define CFQ_SERVICE_SHIFT 12
#define CFQQ_SEEK_THR 8 * 1024
#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
#define CFQQ_SEEK_THR (sector_t)(8 * 100)
#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
@ -77,11 +78,12 @@ struct cfq_rb_root {
struct rb_root rb;
struct rb_node *left;
unsigned count;
unsigned total_weight;
u64 min_vdisktime;
struct rb_node *active;
unsigned total_weight;
};
#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
.count = 0, .min_vdisktime = 0, }
/*
* Per process-grouping structure
@ -115,11 +117,11 @@ struct cfq_queue {
/* time when queue got scheduled in to dispatch first request. */
unsigned long dispatch_start;
unsigned int allocated_slice;
unsigned int slice_dispatch;
/* time when first request from queue completed and slice started. */
unsigned long slice_start;
unsigned long slice_end;
long slice_resid;
unsigned int slice_dispatch;
/* pending metadata requests */
int meta_pending;
@ -130,13 +132,11 @@ struct cfq_queue {
unsigned short ioprio, org_ioprio;
unsigned short ioprio_class, org_ioprio_class;
unsigned int seek_samples;
u64 seek_total;
sector_t seek_mean;
sector_t last_request_pos;
pid_t pid;
u32 seek_history;
sector_t last_request_pos;
struct cfq_rb_root *service_tree;
struct cfq_queue *new_cfqq;
struct cfq_group *cfqg;
@ -223,8 +223,8 @@ struct cfq_data {
unsigned int busy_queues;
int rq_in_driver[2];
int sync_flight;
int rq_in_driver;
int rq_in_flight[2];
/*
* queue-depth detection
@ -417,11 +417,6 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
struct io_context *);
static inline int rq_in_driver(struct cfq_data *cfqd)
{
return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
}
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
bool is_sync)
{
@ -951,10 +946,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
unsigned int major, minor;
/* Do we need to take this reference */
if (!blkiocg_css_tryget(blkcg))
return NULL;
cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
if (cfqg || !create)
goto done;
@ -985,7 +976,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
done:
blkiocg_css_put(blkcg);
return cfqg;
}
@ -1420,9 +1410,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver[rq_is_sync(rq)]++;
cfqd->rq_in_driver++;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
rq_in_driver(cfqd));
cfqd->rq_in_driver);
cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
@ -1430,12 +1420,11 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
const int sync = rq_is_sync(rq);
WARN_ON(!cfqd->rq_in_driver[sync]);
cfqd->rq_in_driver[sync]--;
WARN_ON(!cfqd->rq_in_driver);
cfqd->rq_in_driver--;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
rq_in_driver(cfqd));
cfqd->rq_in_driver);
}
static void cfq_remove_request(struct request *rq)
@ -1673,16 +1662,7 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq, bool for_preempt)
{
sector_t sdist = cfqq->seek_mean;
if (!sample_valid(cfqq->seek_samples))
sdist = CFQQ_SEEK_THR;
/* if seek_mean is big, using it as close criteria is meaningless */
if (sdist > CFQQ_SEEK_THR && !for_preempt)
sdist = CFQQ_SEEK_THR;
return cfq_dist_from_last(cfqd, rq) <= sdist;
return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
}
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@ -1878,8 +1858,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
cfqq->dispatched++;
elv_dispatch_sort(q, rq);
if (cfq_cfqq_sync(cfqq))
cfqd->sync_flight++;
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
cfqq->nr_sectors += blk_rq_sectors(rq);
}
@ -2219,6 +2198,19 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
/* the queue hasn't finished any request, can't estimate */
if (cfq_cfqq_slice_new(cfqq))
return 1;
if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
cfqq->slice_end))
return 1;
return 0;
}
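
A worked instance of the estimate above, with illustrative numbers: with cfq_slice_idle = 8 jiffies and cfqq->dispatched = 4, the check becomes time_after(jiffies + 32, cfqq->slice_end); each request already in flight is charged one idle period against the remaining slice, and once that sum overruns slice_end the slice is treated as already used.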
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned int max_dispatch;
@ -2226,16 +2218,16 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
/*
* Drain async requests before we start sync IO
*/
if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
return false;
/*
* If this is an async queue and we have sync IO in flight, let it wait
*/
if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
return false;
max_dispatch = cfqd->cfq_quantum;
max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
if (cfq_class_idle(cfqq))
max_dispatch = 1;
@ -2252,13 +2244,22 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
/*
* We have other queues, don't allow more IO from this one
*/
if (cfqd->busy_queues > 1)
if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
return false;
/*
* Sole queue user, no limit
*/
max_dispatch = -1;
if (cfqd->busy_queues == 1)
max_dispatch = -1;
else
/*
* Normally we start throttling cfqq when cfq_quantum/2
* requests have been dispatched. But we can drive
* deeper queue depths at the beginning of the slice,
* subject to the upper limit of cfq_quantum.
*/
max_dispatch = cfqd->cfq_quantum;
}
/*
@ -2980,30 +2981,20 @@ static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq)
{
sector_t sdist;
u64 total;
sector_t sdist = 0;
sector_t n_sec = blk_rq_sectors(rq);
if (cfqq->last_request_pos) {
if (cfqq->last_request_pos < blk_rq_pos(rq))
sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
else
sdist = cfqq->last_request_pos - blk_rq_pos(rq);
}
if (!cfqq->last_request_pos)
sdist = 0;
else if (cfqq->last_request_pos < blk_rq_pos(rq))
sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
cfqq->seek_history <<= 1;
if (blk_queue_nonrot(cfqd->queue))
cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
else
sdist = cfqq->last_request_pos - blk_rq_pos(rq);
/*
* Don't allow the seek distance to get too large from the
* odd fragment, pagein, etc
*/
if (cfqq->seek_samples <= 60) /* second&third seek */
sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
else
sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
total = cfqq->seek_total + (cfqq->seek_samples/2);
do_div(total, cfqq->seek_samples);
cfqq->seek_mean = (sector_t)total;
cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
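
The reworked detection reduces to a 32-bit shift register plus a popcount; a self-contained sketch of the rotational-disk path under the same thresholds (the nonrot path in the patch keys on request size instead of distance):

#include <stdint.h>
#include <stdbool.h>

/*
 * Sketch: remember the last 32 requests as one bit each and call the
 * queue seeky when more than 32/8 = 4 of them were seeks. 8 * 100
 * sectors mirrors CFQQ_SEEK_THR above.
 */
struct seek_state {
	uint32_t history;	/* 1 bit per recent request */
	uint64_t last_pos;	/* sector of previous request */
};

static bool record_and_test_seeky(struct seek_state *s, uint64_t pos)
{
	uint64_t sdist = 0;

	if (s->last_pos)
		sdist = pos > s->last_pos ? pos - s->last_pos
					  : s->last_pos - pos;
	s->history = (s->history << 1) | (sdist > 8 * 100);
	s->last_pos = pos;

	return __builtin_popcount(s->history) > 32 / 8;
}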
/*
@ -3028,8 +3019,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_mark_cfqq_deep(cfqq);
if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
(!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
&& CFQQ_SEEKY(cfqq)))
(!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
@ -3215,14 +3205,14 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq = cfqd->active_queue;
if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
if (cfqd->hw_tag == 1)
return;
if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
return;
/*
@ -3232,7 +3222,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
*/
if (cfqq && cfq_cfqq_idle_window(cfqq) &&
cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
return;
if (cfqd->hw_tag_samples++ < 50)
@ -3285,13 +3275,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_update_hw_tag(cfqd);
WARN_ON(!cfqd->rq_in_driver[sync]);
WARN_ON(!cfqd->rq_in_driver);
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver[sync]--;
cfqd->rq_in_driver--;
cfqq->dispatched--;
if (cfq_cfqq_sync(cfqq))
cfqd->sync_flight--;
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
if (sync) {
RQ_CIC(rq)->last_end_request = now;
@ -3345,7 +3334,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}
}
if (!rq_in_driver(cfqd))
if (!cfqd->rq_in_driver)
cfq_schedule_dispatch(cfqd);
}


@ -473,6 +473,15 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
struct request *__rq;
int ret;
/*
* Levels of merges:
* nomerges: No merges at all attempted
* noxmerges: Only simple one-hit cache try
* merges: All merge tries attempted
*/
if (blk_queue_nomerges(q))
return ELEVATOR_NO_MERGE;
/*
* First try one-hit cache.
*/
@ -484,7 +493,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
}
}
if (blk_queue_nomerges(q))
if (blk_queue_noxmerges(q))
return ELEVATOR_NO_MERGE;
/*


@ -1097,7 +1097,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
dev->flags |= ATA_DFLAG_NO_UNLOAD;
/* configure max sectors */
blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
struct request_queue *q = sdev->request_queue;


@ -772,7 +772,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
}
blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
blk_queue_max_segments(sdev->request_queue, sg_tablesize);
ata_port_printk(ap, KERN_INFO,
"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
(unsigned long long)*ap->host->dev->dma_mask,


@ -2534,8 +2534,8 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
RequestQueue->queuedata = Controller;
blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
disk->queue = RequestQueue;
sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
disk->major = MajorNumber;
@ -7134,7 +7134,7 @@ static struct DAC960_privdata DAC960_P_privdata = {
.MemoryWindowSize = DAC960_PD_RegisterWindowSize,
};
static struct pci_device_id DAC960_id_table[] = {
static const struct pci_device_id DAC960_id_table[] = {
{
.vendor = PCI_VENDOR_ID_MYLEX,
.device = PCI_DEVICE_ID_MYLEX_DAC960_GEM,


@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
goto out_free_dev;
blk_queue_make_request(brd->brd_queue, brd_make_request);
blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
blk_queue_max_sectors(brd->brd_queue, 1024);
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
disk = brd->brd_disk = alloc_disk(1 << part_shift);


@ -257,6 +257,79 @@ static inline void removeQ(CommandList_struct *c)
hlist_del_init(&c->list);
}
static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
int nr_cmds)
{
int i;
if (!cmd_sg_list)
return;
for (i = 0; i < nr_cmds; i++) {
kfree(cmd_sg_list[i]);
cmd_sg_list[i] = NULL;
}
kfree(cmd_sg_list);
}
static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
ctlr_info_t *h, int chainsize, int nr_cmds)
{
int j;
SGDescriptor_struct **cmd_sg_list;
if (chainsize <= 0)
return NULL;
cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
if (!cmd_sg_list)
return NULL;
/* Build up chain blocks for each command */
for (j = 0; j < nr_cmds; j++) {
/* Need a block of chainsized s/g elements. */
cmd_sg_list[j] = kmalloc((chainsize *
sizeof(*cmd_sg_list[j])), GFP_KERNEL);
if (!cmd_sg_list[j]) {
dev_err(&h->pdev->dev, "Cannot get memory "
"for s/g chains.\n");
goto clean;
}
}
return cmd_sg_list;
clean:
cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
return NULL;
}
static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
{
SGDescriptor_struct *chain_sg;
u64bit temp64;
if (c->Header.SGTotal <= h->max_cmd_sgentries)
return;
chain_sg = &c->SG[h->max_cmd_sgentries - 1];
temp64.val32.lower = chain_sg->Addr.lower;
temp64.val32.upper = chain_sg->Addr.upper;
pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
SGDescriptor_struct *chain_block, int len)
{
SGDescriptor_struct *chain_sg;
u64bit temp64;
chain_sg = &c->SG[h->max_cmd_sgentries - 1];
chain_sg->Ext = CCISS_SG_CHAIN;
chain_sg->Len = len;
temp64.val = pci_map_single(h->pdev, chain_block, len,
PCI_DMA_TODEVICE);
chain_sg->Addr.lower = temp64.val32.lower;
chain_sg->Addr.upper = temp64.val32.upper;
}
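
To make the chaining layout concrete with illustrative numbers: if max_cmd_sgentries is 32 and a request has 40 segments, the first 31 descriptors occupy SG slots 0-30, slot 31 is rewritten as the chain descriptor, and the remaining 9 descriptors land in the chain block, giving the len passed in here:

	/* Illustrative arithmetic, matching the call in do_cciss_request(): */
	int seg = 40;		/* total segments (example value) */
	int direct = 32 - 1;	/* in-command slots, one reserved for the chain */
	int len = (seg - direct) * sizeof(SGDescriptor_struct);	/* 9 entries */
	cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], len);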
#include "cciss_scsi.c" /* For SCSI tape support */
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
@ -1344,26 +1417,27 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
kfree(buff);
return -ENOMEM;
}
// Fill in the command type
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
// Fill in Command Header
c->Header.ReplyQueue = 0; // unused in simple mode
if (iocommand.buf_size > 0) // buffer to fill
/* Fill in Command Header */
c->Header.ReplyQueue = 0; /* unused in simple mode */
if (iocommand.buf_size > 0) /* buffer to fill */
{
c->Header.SGList = 1;
c->Header.SGTotal = 1;
} else // no buffers to fill
} else /* no buffers to fill */
{
c->Header.SGList = 0;
c->Header.SGTotal = 0;
}
c->Header.LUN = iocommand.LUN_info;
c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
/* use the kernel address of the cmd block for tag */
c->Header.Tag.lower = c->busaddr;
// Fill in Request block
/* Fill in Request block */
c->Request = iocommand.Request;
// Fill in the scatter gather information
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(host->pdev, buff,
iocommand.buf_size,
@ -1371,7 +1445,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
c->SG[0].Ext = 0; // we are not chaining
c->SG[0].Ext = 0; /* we are not chaining */
}
c->waiting = &wait;
@ -1670,14 +1744,9 @@ static void cciss_softirq_done(struct request *rq)
/* unmap the DMA mapping for all the scatter gather elements */
for (i = 0; i < cmd->Header.SGList; i++) {
if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
temp64.val32.lower = cmd->SG[i].Addr.lower;
temp64.val32.upper = cmd->SG[i].Addr.upper;
pci_dma_sync_single_for_cpu(h->pdev, temp64.val,
cmd->SG[i].Len, ddir);
pci_unmap_single(h->pdev, temp64.val,
cmd->SG[i].Len, ddir);
cciss_unmap_sg_chain_block(h, cmd);
/* Point to the next block */
curr_sg = h->cmd_sg_list[cmd->cmdindex]->sgchain;
curr_sg = h->cmd_sg_list[cmd->cmdindex];
sg_index = 0;
}
temp64.val32.lower = curr_sg[sg_index].Addr.lower;
@ -1796,12 +1865,9 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
/* This is a hardware imposed limit. */
blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
blk_queue_max_segments(disk->queue, h->maxsgentries);
/* This is a limit in the driver and could be eliminated. */
blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
blk_queue_softirq_done(disk->queue, cciss_softirq_done);
@ -2425,7 +2491,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
c->Request.Type.Direction = XFER_READ;
c->Request.Timeout = 0;
c->Request.CDB[0] = cmd;
c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
c->Request.CDB[7] = (size >> 16) & 0xFF;
c->Request.CDB[8] = (size >> 8) & 0xFF;
c->Request.CDB[9] = size & 0xFF;
@ -2694,7 +2760,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
"cciss: reading geometry failed, volume "
"does not support reading geometry\n");
drv->heads = 255;
drv->sectors = 32; // Sectors per track
drv->sectors = 32; /* Sectors per track */
drv->cylinders = total_size + 1;
drv->raid_level = RAID_UNKNOWN;
} else {
@ -3082,7 +3148,6 @@ static void do_cciss_request(struct request_queue *q)
SGDescriptor_struct *curr_sg;
drive_info_struct *drv;
int i, dir;
int nseg = 0;
int sg_index = 0;
int chained = 0;
@ -3112,19 +3177,19 @@ static void do_cciss_request(struct request_queue *q)
/* fill in the request */
drv = creq->rq_disk->private_data;
c->Header.ReplyQueue = 0; // unused in simple mode
c->Header.ReplyQueue = 0; /* unused in simple mode */
/* got command from pool, so use the command block index instead */
/* for direct lookups. */
/* The first 2 bits are reserved for controller error reporting. */
c->Header.Tag.lower = (c->cmdindex << 3);
c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
c->Request.Type.Type = TYPE_CMD; // It is a command.
c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */
c->Request.Type.Type = TYPE_CMD; /* It is a command. */
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction =
(rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
c->Request.Timeout = 0; // Don't time out
c->Request.Timeout = 0; /* Don't time out */
c->Request.CDB[0] =
(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
start_blk = blk_rq_pos(creq);
@ -3149,13 +3214,8 @@ static void do_cciss_request(struct request_queue *q)
for (i = 0; i < seg; i++) {
if (((sg_index+1) == (h->max_cmd_sgentries)) &&
!chained && ((seg - i) > 1)) {
nseg = seg - i;
curr_sg[sg_index].Len = (nseg) *
sizeof(SGDescriptor_struct);
curr_sg[sg_index].Ext = CCISS_SG_CHAIN;
/* Point to next chain block. */
curr_sg = h->cmd_sg_list[c->cmdindex]->sgchain;
curr_sg = h->cmd_sg_list[c->cmdindex];
sg_index = 0;
chained = 1;
}
@ -3166,31 +3226,12 @@ static void do_cciss_request(struct request_queue *q)
curr_sg[sg_index].Addr.lower = temp64.val32.lower;
curr_sg[sg_index].Addr.upper = temp64.val32.upper;
curr_sg[sg_index].Ext = 0; /* we are not chaining */
++sg_index;
}
if (chained) {
int len;
curr_sg = c->SG;
sg_index = h->max_cmd_sgentries - 1;
len = curr_sg[sg_index].Len;
/* Setup pointer to next chain block.
* Fill out last element in current chain
* block with address of next chain block.
*/
temp64.val = pci_map_single(h->pdev,
h->cmd_sg_list[c->cmdindex]->sgchain,
len, dir);
h->cmd_sg_list[c->cmdindex]->sg_chain_dma = temp64.val;
curr_sg[sg_index].Addr.lower = temp64.val32.lower;
curr_sg[sg_index].Addr.upper = temp64.val32.upper;
pci_dma_sync_single_for_device(h->pdev,
h->cmd_sg_list[c->cmdindex]->sg_chain_dma,
len, dir);
}
if (chained)
cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
(seg - (h->max_cmd_sgentries - 1)) *
sizeof(SGDescriptor_struct));
/* track how many SG entries we are using */
if (seg > h->maxSG)
@ -3209,11 +3250,11 @@ static void do_cciss_request(struct request_queue *q)
if (likely(blk_fs_request(creq))) {
if(h->cciss_read == CCISS_READ_10) {
c->Request.CDB[1] = 0;
c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
c->Request.CDB[3] = (start_blk >> 16) & 0xff;
c->Request.CDB[4] = (start_blk >> 8) & 0xff;
c->Request.CDB[5] = start_blk & 0xff;
c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
@ -3222,7 +3263,7 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDBLen = 16;
c->Request.CDB[1]= 0;
c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */
c->Request.CDB[3]= (upper32 >> 16) & 0xff;
c->Request.CDB[4]= (upper32 >> 8) & 0xff;
c->Request.CDB[5]= upper32 & 0xff;
@ -4240,37 +4281,10 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
goto clean4;
}
}
hba[i]->cmd_sg_list = kmalloc(sizeof(struct Cmd_sg_list *) *
hba[i]->nr_cmds,
GFP_KERNEL);
if (!hba[i]->cmd_sg_list) {
printk(KERN_ERR "cciss%d: Cannot get memory for "
"s/g chaining.\n", i);
hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i],
hba[i]->chainsize, hba[i]->nr_cmds);
if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0)
goto clean4;
}
/* Build up chain blocks for each command */
if (hba[i]->chainsize > 0) {
for (j = 0; j < hba[i]->nr_cmds; j++) {
hba[i]->cmd_sg_list[j] =
kmalloc(sizeof(struct Cmd_sg_list),
GFP_KERNEL);
if (!hba[i]->cmd_sg_list[j]) {
printk(KERN_ERR "cciss%d: Cannot get memory "
"for chain block.\n", i);
goto clean4;
}
/* Need a block of chainsized s/g elements. */
hba[i]->cmd_sg_list[j]->sgchain =
kmalloc((hba[i]->chainsize *
sizeof(SGDescriptor_struct)),
GFP_KERNEL);
if (!hba[i]->cmd_sg_list[j]->sgchain) {
printk(KERN_ERR "cciss%d: Cannot get memory "
"for s/g chains\n", i);
goto clean4;
}
}
}
spin_lock_init(&hba[i]->lock);
@ -4329,16 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
for (k = 0; k < hba[i]->nr_cmds; k++)
kfree(hba[i]->scatter_list[k]);
kfree(hba[i]->scatter_list);
/* Only free up extra s/g lists if controller supports them */
if (hba[i]->chainsize > 0) {
for (j = 0; j < hba[i]->nr_cmds; j++) {
if (hba[i]->cmd_sg_list[j]) {
kfree(hba[i]->cmd_sg_list[j]->sgchain);
kfree(hba[i]->cmd_sg_list[j]);
}
}
kfree(hba[i]->cmd_sg_list);
}
cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
if (hba[i]->cmd_pool)
pci_free_consistent(hba[i]->pdev,
hba[i]->nr_cmds * sizeof(CommandList_struct),
@ -4456,16 +4461,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
for (j = 0; j < hba[i]->nr_cmds; j++)
kfree(hba[i]->scatter_list[j]);
kfree(hba[i]->scatter_list);
/* Only free up extra s/g lists if controller supports them */
if (hba[i]->chainsize > 0) {
for (j = 0; j < hba[i]->nr_cmds; j++) {
if (hba[i]->cmd_sg_list[j]) {
kfree(hba[i]->cmd_sg_list[j]->sgchain);
kfree(hba[i]->cmd_sg_list[j]);
}
}
kfree(hba[i]->cmd_sg_list);
}
cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
@ -4498,7 +4494,7 @@ static int __init cciss_init(void)
* boundary. Given that we use pci_alloc_consistent() to allocate an
* array of them, the size must be a multiple of 8 bytes.
*/
BUILD_BUG_ON(sizeof(CommandList_struct) % 8);
BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
printk(KERN_INFO DRIVER_NAME "\n");


@ -55,18 +55,12 @@ typedef struct _drive_info_struct
char device_initialized; /* indicates whether dev is initialized */
} drive_info_struct;
struct Cmd_sg_list {
SGDescriptor_struct *sgchain;
dma_addr_t sg_chain_dma;
int chain_block_size;
};
struct ctlr_info
{
int ctlr;
char devname[8];
char *product_name;
char firm_ver[4]; // Firmware version
char firm_ver[4]; /* Firmware version */
struct pci_dev *pdev;
__u32 board_id;
void __iomem *vaddr;
@ -89,7 +83,7 @@ struct ctlr_info
int maxsgentries;
int chainsize;
int max_cmd_sgentries;
struct Cmd_sg_list **cmd_sg_list;
SGDescriptor_struct **cmd_sg_list;
# define DOORBELL_INT 0
# define PERF_MODE_INT 1
@ -103,7 +97,7 @@ struct ctlr_info
BYTE cciss_write;
BYTE cciss_read_capacity;
// information about each logical volume
/* information about each logical volume */
drive_info_struct *drv[CISS_MAX_LUN];
struct access_method access;
@ -116,7 +110,7 @@ struct ctlr_info
unsigned int maxSG;
spinlock_t lock;
//* pointers to command and error info pool */
/* pointers to command and error info pool */
CommandList_struct *cmd_pool;
dma_addr_t cmd_pool_dhandle;
ErrorInfo_struct *errinfo_pool;
@ -134,12 +128,10 @@ struct ctlr_info
*/
int next_to_run;
// Disk structures we need to pass back
/* Disk structures we need to pass back */
struct gendisk *gendisk[CISS_MAX_LUN];
#ifdef CONFIG_CISS_SCSI_TAPE
void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
/* list of block side commands the scsi error handling sucked up */
/* and saved for later processing */
struct cciss_scsi_adapter_data_t *scsi_ctlr;
#endif
unsigned char alive;
struct list_head scan_list;
@ -315,4 +307,3 @@ struct board_type {
#define CCISS_LOCK(i) (&hba[i]->lock)
#endif /* CCISS_H */


@ -1,31 +1,16 @@
#ifndef CCISS_CMD_H
#define CCISS_CMD_H
//###########################################################################
//DEFINES
//###########################################################################
#include <linux/cciss_defs.h>
/* DEFINES */
#define CISS_VERSION "1.00"
//general boundary definitions
#define SENSEINFOBYTES 32//note that this value may vary between host implementations
/* general boundary definitions */
#define MAXSGENTRIES 32
#define CCISS_SG_CHAIN 0x80000000
#define MAXREPLYQS 256
//Command Status value
#define CMD_SUCCESS 0x0000
#define CMD_TARGET_STATUS 0x0001
#define CMD_DATA_UNDERRUN 0x0002
#define CMD_DATA_OVERRUN 0x0003
#define CMD_INVALID 0x0004
#define CMD_PROTOCOL_ERR 0x0005
#define CMD_HARDWARE_ERR 0x0006
#define CMD_CONNECTION_LOST 0x0007
#define CMD_ABORTED 0x0008
#define CMD_ABORT_FAILED 0x0009
#define CMD_UNSOLICITED_ABORT 0x000A
#define CMD_TIMEOUT 0x000B
#define CMD_UNABORTABLE 0x000C
/* Unit Attentions ASC's as defined for the MSA2012sa */
#define POWER_OR_RESET 0x29
#define STATE_CHANGED 0x2a
@ -49,30 +34,13 @@
#define ASYM_ACCESS_CHANGED 0x06
#define LUN_CAPACITY_CHANGED 0x09
//transfer direction
#define XFER_NONE 0x00
#define XFER_WRITE 0x01
#define XFER_READ 0x02
#define XFER_RSVD 0x03
//task attribute
#define ATTR_UNTAGGED 0x00
#define ATTR_SIMPLE 0x04
#define ATTR_HEADOFQUEUE 0x05
#define ATTR_ORDERED 0x06
#define ATTR_ACA 0x07
//cdb type
#define TYPE_CMD 0x00
#define TYPE_MSG 0x01
//config space register offsets
/* config space register offsets */
#define CFG_VENDORID 0x00
#define CFG_DEVICEID 0x02
#define CFG_I2OBAR 0x10
#define CFG_MEM1BAR 0x14
//i2o space register offsets
/* i2o space register offsets */
#define I2O_IBDB_SET 0x20
#define I2O_IBDB_CLEAR 0x70
#define I2O_INT_STATUS 0x30
@ -81,7 +49,7 @@
#define I2O_OBPOST_Q 0x44
#define I2O_DMA1_CFG 0x214
//Configuration Table
/* Configuration Table */
#define CFGTBL_ChangeReq 0x00000001l
#define CFGTBL_AccCmds 0x00000001l
@ -103,24 +71,17 @@ typedef union _u64bit
__u64 val;
} u64bit;
// Type defs used in the following structs
#define BYTE __u8
#define WORD __u16
#define HWORD __u16
#define DWORD __u32
/* Type defs used in the following structs */
#define QWORD vals32
//###########################################################################
//STRUCTURES
//###########################################################################
#define CISS_MAX_LUN 1024
/* STRUCTURES */
#define CISS_MAX_PHYS_LUN 1024
// SCSI-3 Cmmands
/* SCSI-3 Commands */
#pragma pack(1)
#define CISS_INQUIRY 0x12
//Date returned
/* Data returned */
typedef struct _InquiryData_struct
{
BYTE data_byte[36];
@ -128,7 +89,7 @@ typedef struct _InquiryData_struct
#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
// Data returned
/* Data returned */
typedef struct _ReportLUNdata_struct
{
BYTE LUNListLength[4];
@ -139,8 +100,8 @@ typedef struct _ReportLUNdata_struct
#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */
typedef struct _ReadCapdata_struct
{
BYTE total_size[4]; // Total size in blocks
BYTE block_size[4]; // Size of blocks in bytes
BYTE total_size[4]; /* Total size in blocks */
BYTE block_size[4]; /* Size of blocks in bytes */
} ReadCapdata_struct;
#define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
@ -172,52 +133,13 @@ typedef struct _ReadCapdata_struct_16
#define CDB_LEN10 10
#define CDB_LEN16 16
// BMIC commands
/* BMIC commands */
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
#define BMIC_CACHE_FLUSH 0xc2
#define CCISS_CACHE_FLUSH 0x01 //C2 was already being used by CCISS
//Command List Structure
typedef union _SCSI3Addr_struct {
struct {
BYTE Dev;
BYTE Bus:6;
BYTE Mode:2; // b00
} PeripDev;
struct {
BYTE DevLSB;
BYTE DevMSB:6;
BYTE Mode:2; // b01
} LogDev;
struct {
BYTE Dev:5;
BYTE Bus:3;
BYTE Targ:6;
BYTE Mode:2; // b10
} LogUnit;
} SCSI3Addr_struct;
typedef struct _PhysDevAddr_struct {
DWORD TargetId:24;
DWORD Bus:6;
DWORD Mode:2;
SCSI3Addr_struct Target[2]; //2 level target device addr
} PhysDevAddr_struct;
typedef struct _LogDevAddr_struct {
DWORD VolId:30;
DWORD Mode:2;
BYTE reserved[4];
} LogDevAddr_struct;
typedef union _LUNAddr_struct {
BYTE LunAddrBytes[8];
SCSI3Addr_struct SCSI3Lun[4];
PhysDevAddr_struct PhysDev;
LogDevAddr_struct LogDev;
} LUNAddr_struct;
#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */
/* Command List Structure */
#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
typedef struct _CommandListHeader_struct {
@ -227,16 +149,6 @@ typedef struct _CommandListHeader_struct {
QWORD Tag;
LUNAddr_struct LUN;
} CommandListHeader_struct;
typedef struct _RequestBlock_struct {
BYTE CDBLen;
struct {
BYTE Type:3;
BYTE Attribute:3;
BYTE Direction:2;
} Type;
HWORD Timeout;
BYTE CDB[16];
} RequestBlock_struct;
typedef struct _ErrDescriptor_struct {
QWORD Addr;
DWORD Len;
@ -247,28 +159,6 @@ typedef struct _SGDescriptor_struct {
DWORD Ext;
} SGDescriptor_struct;
typedef union _MoreErrInfo_struct{
struct {
BYTE Reserved[3];
BYTE Type;
DWORD ErrorInfo;
}Common_Info;
struct{
BYTE Reserved[2];
BYTE offense_size;//size of offending entry
BYTE offense_num; //byte # of offense 0-base
DWORD offense_value;
}Invalid_Cmd;
}MoreErrInfo_struct;
typedef struct _ErrorInfo_struct {
BYTE ScsiStatus;
BYTE SenseLen;
HWORD CommandStatus;
DWORD ResidualCnt;
MoreErrInfo_struct MoreErrInfo;
BYTE SenseInfo[SENSEINFOBYTES];
} ErrorInfo_struct;
/* Command types */
#define CMD_RWREQ 0x00
#define CMD_IOCTL_PEND 0x01
@ -277,10 +167,18 @@ typedef struct _ErrorInfo_struct {
#define CMD_MSG_TIMEOUT 0x05
#define CMD_MSG_STALE 0xff
/* This structure needs to be divisible by 8 for new
* indexing method.
/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT
* because the low bits of the address are used to indicate
* whether the tag contains an index or an address. PAD_32 and
* PAD_64 can be adjusted independently as needed for 32-bit
* and 64-bit systems.
*/
#define PADSIZE (sizeof(long) - 4)
#define COMMANDLIST_ALIGNMENT (8)
#define IS_64_BIT ((sizeof(long) - 4)/4)
#define IS_32_BIT (!IS_64_BIT)
#define PAD_32 (0)
#define PAD_64 (4)
#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
typedef struct _CommandList_struct {
CommandListHeader_struct Header;
RequestBlock_struct Request;
@ -300,7 +198,7 @@ typedef struct _CommandList_struct {
char pad[PADSIZE];
} CommandList_struct;
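
The padding arithmetic works out as follows: on a 64-bit build sizeof(long) is 8, so IS_64_BIT = (8 - 4) / 4 = 1 and PADSIZE = PAD_64 = 4 bytes; on a 32-bit build IS_64_BIT is 0 and PADSIZE collapses to PAD_32 = 0. Either way the point is to keep sizeof(CommandList_struct) a multiple of COMMANDLIST_ALIGNMENT, which the BUILD_BUG_ON in cciss_init() above now verifies at compile time.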
//Configuration Table Structure
/* Configuration Table Structure */
typedef struct _HostWrite_struct {
DWORD TransportRequest;
DWORD Reserved;
@ -326,4 +224,4 @@ typedef struct _CfgTable_struct {
DWORD MaxPhysicalDrivesPerLogicalUnit;
} CfgTable_struct;
#pragma pack()
#endif // CCISS_CMD_H
#endif /* CCISS_CMD_H */


@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = {
.queuecommand = cciss_scsi_queue_command,
.can_queue = SCSI_CCISS_CAN_QUEUE,
.this_id = 7,
.sg_tablesize = MAXSGENTRIES,
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
/* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
@ -93,11 +92,16 @@ static struct scsi_host_template cciss_driver_template = {
};
#pragma pack(1)
#define SCSI_PAD_32 0
#define SCSI_PAD_64 0
struct cciss_scsi_cmd_stack_elem_t {
CommandList_struct cmd;
ErrorInfo_struct Err;
__u32 busaddr;
__u32 pad;
int cmdindex;
u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64];
};
#pragma pack()
@ -118,16 +122,15 @@ struct cciss_scsi_cmd_stack_t {
struct cciss_scsi_adapter_data_t {
struct Scsi_Host *scsi_host;
struct cciss_scsi_cmd_stack_t cmd_stack;
SGDescriptor_struct **cmd_sg_list;
int registered;
spinlock_t lock; // to protect ccissscsi[ctlr];
};
#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
&(((struct cciss_scsi_adapter_data_t *) \
hba[ctlr]->scsi_ctlr)->lock), flags);
&hba[ctlr]->scsi_ctlr->lock, flags);
#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
&(((struct cciss_scsi_adapter_data_t *) \
hba[ctlr]->scsi_ctlr)->lock), flags);
&hba[ctlr]->scsi_ctlr->lock, flags);
static CommandList_struct *
scsi_cmd_alloc(ctlr_info_t *h)
@ -143,7 +146,7 @@ scsi_cmd_alloc(ctlr_info_t *h)
struct cciss_scsi_cmd_stack_t *stk;
u64bit temp64;
sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
sa = h->scsi_ctlr;
stk = &sa->cmd_stack;
if (stk->top < 0)
@ -154,6 +157,7 @@ scsi_cmd_alloc(ctlr_info_t *h)
memset(&c->Err, 0, sizeof(c->Err));
/* set physical addr of cmd and addr of scsi parameters */
c->cmd.busaddr = c->busaddr;
c->cmd.cmdindex = c->cmdindex;
/* (__u32) (stk->cmd_pool_handle +
(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
@ -182,7 +186,7 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
struct cciss_scsi_adapter_data_t *sa;
struct cciss_scsi_cmd_stack_t *stk;
sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
sa = h->scsi_ctlr;
stk = &sa->cmd_stack;
if (stk->top >= CMD_STACK_SIZE) {
printk("cciss: scsi_cmd_free called too many times.\n");
@ -199,24 +203,31 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
struct cciss_scsi_cmd_stack_t *stk;
size_t size;
sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[ctlr],
hba[ctlr]->chainsize, CMD_STACK_SIZE);
if (!sa->cmd_sg_list && hba[ctlr]->chainsize > 0)
return -ENOMEM;
stk = &sa->cmd_stack;
size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
// pci_alloc_consistent guarantees 32-bit DMA address will
// be used
/* Check alignment, see cciss_cmd.h near CommandList_struct def. */
BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
/* pci_alloc_consistent guarantees 32-bit DMA address will be used */
stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
if (stk->pool == NULL) {
printk("stk->pool is null\n");
return -1;
cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
sa->cmd_sg_list = NULL;
return -ENOMEM;
}
for (i=0; i<CMD_STACK_SIZE; i++) {
stk->elem[i] = &stk->pool[i];
stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
stk->elem[i]->cmdindex = i;
}
stk->top = CMD_STACK_SIZE-1;
return 0;
@ -229,7 +240,7 @@ scsi_cmd_stack_free(int ctlr)
struct cciss_scsi_cmd_stack_t *stk;
size_t size;
sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
sa = hba[ctlr]->scsi_ctlr;
stk = &sa->cmd_stack;
if (stk->top != CMD_STACK_SIZE-1) {
printk( "cciss: %d scsi commands are still outstanding.\n",
@ -241,6 +252,7 @@ scsi_cmd_stack_free(int ctlr)
pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
stk->pool = NULL;
cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
}
#if 0
@ -530,8 +542,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
CPQ_TAPE_LOCK(ctlr, flags);
if (hostno != -1) /* if it's not the first time... */
sh = ((struct cciss_scsi_adapter_data_t *)
hba[ctlr]->scsi_ctlr)->scsi_host;
sh = hba[ctlr]->scsi_ctlr->scsi_host;
/* find any devices in ccissscsi[] that are not in
sd[] and remove them from ccissscsi[] */
@ -702,7 +713,7 @@ cciss_scsi_setup(int cntl_num)
kfree(shba);
shba = NULL;
}
hba[cntl_num]->scsi_ctlr = (void *) shba;
hba[cntl_num]->scsi_ctlr = shba;
return;
}
@ -725,6 +736,8 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
ctlr = hba[cp->ctlr];
scsi_dma_unmap(cmd);
if (cp->Header.SGTotal > ctlr->max_cmd_sgentries)
cciss_unmap_sg_chain_block(ctlr, cp);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
@ -847,9 +860,10 @@ cciss_scsi_detect(int ctlr)
sh->io_port = 0; // good enough? FIXME,
sh->n_io_port = 0; // I don't think we use these two...
sh->this_id = SELF_SCSI_ID;
sh->sg_tablesize = hba[ctlr]->maxsgentries;
((struct cciss_scsi_adapter_data_t *)
hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh;
hba[ctlr]->scsi_ctlr)->scsi_host = sh;
sh->hostdata[0] = (unsigned long) hba[ctlr];
sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT];
sh->unique_id = sh->irq;
@ -1364,34 +1378,54 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
dma mapping and fills in the scatter gather entries of the
cciss command, cp. */
static void
cciss_scatter_gather(struct pci_dev *pdev,
CommandList_struct *cp,
struct scsi_cmnd *cmd)
static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
struct scsi_cmnd *cmd)
{
unsigned int len;
struct scatterlist *sg;
__u64 addr64;
int use_sg, i;
int request_nsgs, i, chained, sg_index;
struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
SGDescriptor_struct *curr_sg;
BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
use_sg = scsi_dma_map(cmd);
if (use_sg) { /* not too many addrs? */
scsi_for_each_sg(cmd, sg, use_sg, i) {
chained = 0;
sg_index = 0;
curr_sg = cp->SG;
request_nsgs = scsi_dma_map(cmd);
if (request_nsgs) {
scsi_for_each_sg(cmd, sg, request_nsgs, i) {
if (sg_index + 1 == h->max_cmd_sgentries &&
!chained && request_nsgs - i > 1) {
chained = 1;
sg_index = 0;
curr_sg = sa->cmd_sg_list[cp->cmdindex];
}
addr64 = (__u64) sg_dma_address(sg);
len = sg_dma_len(sg);
cp->SG[i].Addr.lower =
(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
cp->SG[i].Addr.upper =
(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
cp->SG[i].Len = len;
cp->SG[i].Ext = 0; // we are not chaining
curr_sg[sg_index].Addr.lower =
(__u32) (addr64 & 0x0FFFFFFFFULL);
curr_sg[sg_index].Addr.upper =
(__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
curr_sg[sg_index].Len = len;
curr_sg[sg_index].Ext = 0;
++sg_index;
}
if (chained)
cciss_map_sg_chain_block(h, cp,
sa->cmd_sg_list[cp->cmdindex],
(request_nsgs - (h->max_cmd_sgentries - 1)) *
sizeof(SGDescriptor_struct));
}
cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
/* track how many SG entries we are using */
if (request_nsgs > h->maxSG)
h->maxSG = request_nsgs;
cp->Header.SGTotal = (__u8) request_nsgs + chained;
if (request_nsgs > h->max_cmd_sgentries)
cp->Header.SGList = h->max_cmd_sgentries;
else
cp->Header.SGList = cp->Header.SGTotal;
return;
}
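For reference, the chaining rule implemented above: the command carries a fixed embedded descriptor array, and once a request would overflow it, the last slot is handed over to a pointer at a per-command chain block (mapped afterwards by cciss_map_sg_chain_block()). A standalone sketch of the split arithmetic, with a hypothetical 31-entry embedded limit:

#include <stdio.h>

#define MAX_CMD_SGENTRIES 31	/* hypothetical embedded descriptor slots */

/* How many descriptors land in the embedded array vs. the chain block
 * for a request with nsgs scatter-gather entries. */
static void split_sg(int nsgs)
{
	int embedded = nsgs, chained = 0;

	if (nsgs > MAX_CMD_SGENTRIES) {
		embedded = MAX_CMD_SGENTRIES - 1; /* last slot -> chain pointer */
		chained = nsgs - embedded;
	}
	printf("%2d entries -> %2d embedded, %2d chained\n",
	       nsgs, embedded, chained);
}

int main(void)
{
	split_sg(8);	/* fits: 8 embedded, 0 chained */
	split_sg(40);	/* spills: 30 embedded, 10 chained */
	return 0;
}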
@ -1399,7 +1433,7 @@ cciss_scatter_gather(struct pci_dev *pdev,
static int
cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
{
ctlr_info_t **c;
ctlr_info_t *c;
int ctlr, rc;
unsigned char scsi3addr[8];
CommandList_struct *cp;
@ -1407,8 +1441,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
// We violate cmd->host privacy here. (Is there another way?)
c = (ctlr_info_t **) &cmd->device->host->hostdata[0];
ctlr = (*c)->ctlr;
c = (ctlr_info_t *) cmd->device->host->hostdata[0];
ctlr = c->ctlr;
rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id,
cmd->device->lun, scsi3addr);
@ -1431,7 +1465,7 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
see what the device thinks of it. */
spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
cp = scsi_cmd_alloc(*c);
cp = scsi_cmd_alloc(c);
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
if (cp == NULL) { /* trouble... */
printk("scsi_cmd_alloc returned NULL!\n");
@ -1489,15 +1523,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
BUG();
break;
}
cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list
cciss_scatter_gather(c, cp, cmd);
/* Put the request on the tail of the request queue */
spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
addQ(&(*c)->reqQ, cp);
(*c)->Qdepth++;
start_io(*c);
addQ(&c->reqQ, cp);
c->Qdepth++;
start_io(c);
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* the cmd'll come back via intr handler in complete_scsi_command() */
@ -1514,7 +1547,7 @@ cciss_unregister_scsi(int ctlr)
/* we are being forcibly unloaded, and may not refuse. */
spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
sa = hba[ctlr]->scsi_ctlr;
stk = &sa->cmd_stack;
/* if we weren't ever actually registered, don't unregister */
@ -1541,7 +1574,7 @@ cciss_engage_scsi(int ctlr)
unsigned long flags;
spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
sa = hba[ctlr]->scsi_ctlr;
stk = &sa->cmd_stack;
if (sa->registered) {
@ -1654,14 +1687,14 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
int rc;
CommandList_struct *cmd_in_trouble;
unsigned char lunaddr[8];
ctlr_info_t **c;
ctlr_info_t *c;
int ctlr;
/* find the controller to which the command to be aborted was sent */
c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];
c = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
if (c == NULL) /* paranoia */
return FAILED;
ctlr = (*c)->ctlr;
ctlr = c->ctlr;
printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
/* find the command that's giving us trouble */
cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
@ -1671,7 +1704,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
/* send a reset to the SCSI LUN which the command was sent to */
rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
TYPE_MSG);
if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
if (rc == 0 && wait_for_device_to_become_ready(c, lunaddr) == 0)
return SUCCESS;
printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
return FAILED;
@ -1682,14 +1715,14 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
int rc;
CommandList_struct *cmd_to_abort;
unsigned char lunaddr[8];
ctlr_info_t **c;
ctlr_info_t *c;
int ctlr;
/* find the controller to which the command to be aborted was sent */
c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];
c = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
if (c == NULL) /* paranoia */
return FAILED;
ctlr = (*c)->ctlr;
ctlr = c->ctlr;
printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr);
/* find the command to be aborted */

View file

@ -25,16 +25,16 @@
#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
// the scsi id of the adapter...
/* the scsi id of the adapter... */
#define SELF_SCSI_ID 15
// 15 is somewhat arbitrary, since the scsi-2 bus
// that's presented by the driver to the OS is
// fabricated. The "real" scsi-3 bus the
// hardware presents is fabricated too.
// The actual, honest-to-goodness physical
// bus that the devices are attached to is not
// addressible natively, and may in fact turn
// out to be not scsi at all.
/* 15 is somewhat arbitrary, since the scsi-2 bus
that's presented by the driver to the OS is
fabricated. The "real" scsi-3 bus the
hardware presents is fabricated too.
The actual, honest-to-goodness physical
bus that the devices are attached to is not
addressable natively, and may in fact turn
out to be not scsi at all. */
#define SCSI_CCISS_CAN_QUEUE 2

View file

@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
/* This is a hardware imposed limit. */
blk_queue_max_hw_segments(q, SG_MAX);
blk_queue_max_segments(q, SG_MAX);
/* This is a driver limit and could be eliminated. */
blk_queue_max_phys_segments(q, SG_MAX);
init_timer(&hba[i]->timer);
hba[i]->timer.expires = jiffies + IDA_TIMER;
hba[i]->timer.data = (unsigned long)hba[i];

View file

@ -709,9 +709,8 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
blk_queue_max_sectors(q, max_seg_s >> 9);
blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
blk_queue_max_hw_sectors(q, max_seg_s >> 9);
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_max_segment_size(q, max_seg_s);
blk_queue_logical_block_size(q, 512);
blk_queue_segment_boundary(q, PAGE_SIZE-1);

View file

@ -4234,7 +4234,7 @@ static int __init floppy_init(void)
err = -ENOMEM;
goto out_unreg_driver;
}
blk_queue_max_sectors(floppy_queue, 64);
blk_queue_max_hw_sectors(floppy_queue, 64);
blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
floppy_find, NULL, NULL);

View file

@ -719,7 +719,7 @@ static int __init hd_init(void)
return -ENOMEM;
}
blk_queue_max_sectors(hd_queue, 255);
blk_queue_max_hw_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
blk_queue_logical_block_size(hd_queue, 512);

View file

@ -980,7 +980,7 @@ static int mg_probe(struct platform_device *plat_dev)
__func__, __LINE__);
goto probe_err_6;
}
blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
init_timer(&host->timer);

View file

@ -906,7 +906,7 @@ static int __init pd_init(void)
if (!pd_queue)
goto out1;
blk_queue_max_sectors(pd_queue, cluster);
blk_queue_max_hw_sectors(pd_queue, cluster);
if (register_blkdev(major, name))
goto out2;

View file

@ -956,8 +956,7 @@ static int __init pf_init(void)
return -ENOMEM;
}
blk_queue_max_phys_segments(pf_queue, cluster);
blk_queue_max_hw_segments(pf_queue, cluster);
blk_queue_max_segments(pf_queue, cluster);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
struct gendisk *disk = pf->disk;

View file

@ -569,6 +569,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
}
spin_lock_init(&pkt->lock);
bio_list_init(&pkt->orig_bios);
for (i = 0; i < frames; i++) {
struct bio *bio = pkt_bio_alloc(1);
@ -720,43 +721,6 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
pd->bio_queue_size++;
}
/*
* Add a bio to a single linked list defined by its head and tail pointers.
*/
static void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
{
bio->bi_next = NULL;
if (*list_tail) {
BUG_ON((*list_head) == NULL);
(*list_tail)->bi_next = bio;
(*list_tail) = bio;
} else {
BUG_ON((*list_head) != NULL);
(*list_head) = bio;
(*list_tail) = bio;
}
}
/*
* Remove and return the first bio from a single linked list defined by its
* head and tail pointers.
*/
static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail)
{
struct bio *bio;
if (*list_head == NULL)
return NULL;
bio = *list_head;
*list_head = bio->bi_next;
if (*list_head == NULL)
*list_tail = NULL;
bio->bi_next = NULL;
return bio;
}
/*
* Send a packet_command to the underlying block device and
* wait for completion.
@ -876,13 +840,10 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
spin_lock(&pd->iosched.lock);
if (bio_data_dir(bio) == READ) {
pkt_add_list_last(bio, &pd->iosched.read_queue,
&pd->iosched.read_queue_tail);
} else {
pkt_add_list_last(bio, &pd->iosched.write_queue,
&pd->iosched.write_queue_tail);
}
if (bio_data_dir(bio) == READ)
bio_list_add(&pd->iosched.read_queue, bio);
else
bio_list_add(&pd->iosched.write_queue, bio);
spin_unlock(&pd->iosched.lock);
atomic_set(&pd->iosched.attention, 1);
@ -917,8 +878,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
int reads_queued, writes_queued;
spin_lock(&pd->iosched.lock);
reads_queued = (pd->iosched.read_queue != NULL);
writes_queued = (pd->iosched.write_queue != NULL);
reads_queued = !bio_list_empty(&pd->iosched.read_queue);
writes_queued = !bio_list_empty(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
if (!reads_queued && !writes_queued)
@ -927,7 +888,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
if (pd->iosched.writing) {
int need_write_seek = 1;
spin_lock(&pd->iosched.lock);
bio = pd->iosched.write_queue;
bio = bio_list_peek(&pd->iosched.write_queue);
spin_unlock(&pd->iosched.lock);
if (bio && (bio->bi_sector == pd->iosched.last_write))
need_write_seek = 0;
@ -950,13 +911,10 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
}
spin_lock(&pd->iosched.lock);
if (pd->iosched.writing) {
bio = pkt_get_list_first(&pd->iosched.write_queue,
&pd->iosched.write_queue_tail);
} else {
bio = pkt_get_list_first(&pd->iosched.read_queue,
&pd->iosched.read_queue_tail);
}
if (pd->iosched.writing)
bio = bio_list_pop(&pd->iosched.write_queue);
else
bio = bio_list_pop(&pd->iosched.read_queue);
spin_unlock(&pd->iosched.lock);
if (!bio)
@ -992,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
if ((pd->settings.size << 9) / CD_FRAMESIZE
<= queue_max_phys_segments(q)) {
<= queue_max_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
} else if ((pd->settings.size << 9) / PAGE_SIZE
<= queue_max_phys_segments(q)) {
<= queue_max_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
@ -1114,7 +1072,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
int f;
char written[PACKET_MAX_SIZE];
BUG_ON(!pkt->orig_bios);
BUG_ON(bio_list_empty(&pkt->orig_bios));
atomic_set(&pkt->io_wait, 0);
atomic_set(&pkt->io_errors, 0);
@ -1124,7 +1082,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
*/
memset(written, 0, sizeof(written));
spin_lock(&pkt->lock);
for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
bio_list_for_each(bio, &pkt->orig_bios) {
int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
int num_frames = bio->bi_size / CD_FRAMESIZE;
pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
@ -1363,7 +1321,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
bio_list_add(&pkt->orig_bios, bio);
pkt->write_size += bio->bi_size / CD_FRAMESIZE;
spin_unlock(&pkt->lock);
}
@ -1409,7 +1367,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
*/
frames_write = 0;
spin_lock(&pkt->lock);
for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
bio_list_for_each(bio, &pkt->orig_bios) {
int segment = bio->bi_idx;
int src_offs = 0;
int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
@ -1472,20 +1430,14 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
struct bio *bio, *next;
struct bio *bio;
if (!uptodate)
pkt->cache_valid = 0;
/* Finish all bios corresponding to this packet */
bio = pkt->orig_bios;
while (bio) {
next = bio->bi_next;
bio->bi_next = NULL;
while ((bio = bio_list_pop(&pkt->orig_bios)))
bio_endio(bio, uptodate ? 0 : -EIO);
bio = next;
}
pkt->orig_bios = pkt->orig_bios_tail = NULL;
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
@ -2360,7 +2312,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* even if the size is a multiple of the packet size.
*/
spin_lock_irq(q->queue_lock);
blk_queue_max_sectors(q, pd->settings.size);
blk_queue_max_hw_sectors(q, pd->settings.size);
spin_unlock_irq(q->queue_lock);
set_bit(PACKET_WRITABLE, &pd->flags);
} else {
@ -2567,8 +2519,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
spin_lock(&pkt->lock);
if ((pkt->state == PACKET_WAITING_STATE) ||
(pkt->state == PACKET_READ_WAIT_STATE)) {
pkt_add_list_last(bio, &pkt->orig_bios,
&pkt->orig_bios_tail);
bio_list_add(&pkt->orig_bios, bio);
pkt->write_size += bio->bi_size / CD_FRAMESIZE;
if ((pkt->write_size >= pkt->frames) &&
(pkt->state == PACKET_WAITING_STATE)) {
@ -2662,7 +2613,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
blk_queue_make_request(q, pkt_make_request);
blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
blk_queue_merge_bvec(q, pkt_merge_bvec);
q->queuedata = pd;
}
@ -2898,6 +2849,8 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
spin_lock_init(&pd->lock);
spin_lock_init(&pd->iosched.lock);
bio_list_init(&pd->iosched.read_queue);
bio_list_init(&pd->iosched.write_queue);
sprintf(pd->name, DRIVER_NAME"%d", idx);
init_waitqueue_head(&pd->wqueue);
pd->bio_queue = RB_ROOT;
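The pktcdvd hunks above drop the driver's open-coded head/tail pointer pairs in favor of the generic struct bio_list helpers. A userspace model of the list semantics being relied on (FIFO: add at tail, pop from head); this is a sketch, not the kernel implementation:

#include <stdio.h>

struct bio { int sector; struct bio *bi_next; };
struct bio_list { struct bio *head, *tail; };

static void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;
	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;
	bl->tail = bio;
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bio->bi_next;
		if (!bl->head)
			bl->tail = NULL;
		bio->bi_next = NULL;
	}
	return bio;
}

int main(void)
{
	struct bio a = { 0 }, b = { 8 };
	struct bio_list q;
	struct bio *bio;

	bio_list_init(&q);
	bio_list_add(&q, &a);
	bio_list_add(&q, &b);
	while ((bio = bio_list_pop(&q)))
		printf("popped sector %d\n", bio->sector);	/* 0 then 8 */
	return 0;
}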

View file

@ -474,7 +474,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
blk_queue_max_sectors(queue, dev->bounce_size >> 9);
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
ps3disk_prepare_flush);
blk_queue_max_phys_segments(queue, -1);
blk_queue_max_hw_segments(queue, -1);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
gendisk = alloc_disk(PS3DISK_MINORS);
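ps3disk, like the s390 dasd and tapeblock hunks further down, passes -1 to the limit setters to mean "no limit": the usual C conversions turn it into the maximum value of the parameter's unsigned type. A two-line illustration of the idiom:

#include <stdio.h>

static void set_max_segments(unsigned short max)
{
	printf("max_segments = %u\n", max);	/* -1 arrives as 65535 */
}

int main(void)
{
	set_max_segments(-1);	/* "unlimited" by convention */
	return 0;
}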

View file

@ -751,10 +751,9 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
priv->queue = queue;
queue->queuedata = dev;
blk_queue_make_request(queue, ps3vram_make_request);
blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
blk_queue_max_segment_size(queue, MAX_SEGMENT_SIZE);
blk_queue_max_sectors(queue, SAFE_MAX_SECTORS);
blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
gendisk = alloc_disk(1);
if (!gendisk) {

View file

@ -691,9 +691,8 @@ static int probe_disk(struct vdc_port *port)
port->disk = g;
blk_queue_max_hw_segments(q, port->ring_cookies);
blk_queue_max_phys_segments(q, port->ring_cookies);
blk_queue_max_sectors(q, port->max_xfer_size);
blk_queue_max_segments(q, port->ring_cookies);
blk_queue_max_hw_sectors(q, port->max_xfer_size);
g->major = vdc_major;
g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
strcpy(g->disk_name, port->disk_name);

View file

@ -409,7 +409,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static void carm_remove_one (struct pci_dev *pdev);
static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static struct pci_device_id carm_pci_tbl[] = {
static const struct pci_device_id carm_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
{ } /* terminate list */
@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host)
break;
}
disk->queue = q;
blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
blk_queue_max_segments(q, CARM_MAX_REQ_SG);
blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
q->queuedata = port;

View file

@ -393,7 +393,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum);
#define ub_usb_ids usb_storage_usb_ids
#else
static struct usb_device_id ub_usb_ids[] = {
static const struct usb_device_id ub_usb_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
{ }
};
@ -2320,10 +2320,9 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
disk->queue = q;
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
blk_queue_max_segments(q, UB_MAX_REQ_SG);
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_sectors(q, UB_MAX_SECTORS);
blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
blk_queue_logical_block_size(q, lun->capacity.bsize);
lun->disk = disk;

View file

@ -462,9 +462,8 @@ static int probe_disk(struct viodasd_device *d)
}
d->disk = g;
blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
blk_queue_max_segments(q, VIOMAXBLOCKDMA);
blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
g->major = VIODASD_MAJOR;
g->first_minor = dev_no << PARTITION_SHIFT;
if (dev_no >= 26)

View file

@ -435,7 +435,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
kfree(vblk);
}
static struct virtio_device_id id_table[] = {
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
};

View file

@ -242,7 +242,7 @@ static int __init xd_init(void)
}
/* xd_maxsectors depends on controller - so set after detection */
blk_queue_max_sectors(xd_queue, xd_maxsectors);
blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
for (i = 0; i < xd_drives; i++)
add_disk(xd_gendisk[i]);

View file

@ -346,15 +346,14 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
/* Hard sector size and max sectors impersonate the equiv. hardware. */
blk_queue_logical_block_size(rq, sector_size);
blk_queue_max_sectors(rq, 512);
blk_queue_max_hw_sectors(rq, 512);
/* Each segment in a request is up to an aligned page in size. */
blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
blk_queue_max_segment_size(rq, PAGE_SIZE);
/* Ensure a merged request will fit in a single I/O ring slot. */
blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
/* Make sure buffer addresses are sector-aligned. */
blk_queue_dma_alignment(rq, 511);
@ -1050,7 +1049,7 @@ static const struct block_device_operations xlvbd_block_fops =
};
static struct xenbus_device_id blkfront_ids[] = {
static const struct xenbus_device_id blkfront_ids[] = {
{ "vbd" },
{ "" }
};

View file

@ -1227,7 +1227,7 @@ static int __devexit ace_of_remove(struct of_device *op)
}
/* Match table for of_platform binding */
static struct of_device_id ace_of_match[] __devinitdata = {
static const struct of_device_id ace_of_match[] __devinitconst = {
{ .compatible = "xlnx,opb-sysace-1.00.b", },
{ .compatible = "xlnx,opb-sysace-1.00.c", },
{ .compatible = "xlnx,xps-sysace-1.00.a", },

View file

@ -741,7 +741,7 @@ static int __devinit probe_gdrom_setupqueue(void)
{
blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
blk_queue_max_hw_segments(gd.gdrom_rq, 1);
blk_queue_max_segments(gd.gdrom_rq, 1);
/* set a large max size to get most from DMA */
blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
gd.disk->queue = gd.gdrom_rq;

View file

@ -616,9 +616,8 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
gendisk->first_minor = deviceno;
strncpy(gendisk->disk_name, c->name,
sizeof(gendisk->disk_name));
blk_queue_max_hw_segments(q, 1);
blk_queue_max_phys_segments(q, 1);
blk_queue_max_sectors(q, 4096 / 512);
blk_queue_max_segments(q, 1);
blk_queue_max_hw_sectors(q, 4096 / 512);
gendisk->queue = q;
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;

View file

@ -1571,7 +1571,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->start_stop_pwr_cond = 1;
if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);

View file

@ -679,7 +679,7 @@ static void ide_disk_setup(ide_drive_t *drive)
if (max_s > hwif->rqsize)
max_s = hwif->rqsize;
blk_queue_max_sectors(q, max_s);
blk_queue_max_hw_sectors(q, max_s);
}
printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,

View file

@ -486,7 +486,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
/* This value will be visible in the /proc/ide/hdx/settings */
drive->pc_delay = IDEFLOPPY_PC_DELAY;
blk_queue_max_sectors(drive->queue, 64);
blk_queue_max_hw_sectors(drive->queue, 64);
}
/*
@ -494,7 +494,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
* nasty clicking noises without it, so please don't remove this.
*/
if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
blk_queue_max_sectors(drive->queue, 64);
blk_queue_max_hw_sectors(drive->queue, 64);
drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
/* IOMEGA Clik! drives do not support lock/unlock commands */
drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;

View file

@ -774,7 +774,7 @@ static int ide_init_queue(ide_drive_t *drive)
if (hwif->rqsize < max_sectors)
max_sectors = hwif->rqsize;
blk_queue_max_sectors(q, max_sectors);
blk_queue_max_hw_sectors(q, max_sectors);
#ifdef CONFIG_PCI
/* When we have an IOMMU, we may have a problem where pci_map_sg()
@ -790,8 +790,7 @@ static int ide_init_queue(ide_drive_t *drive)
max_sg_entries >>= 1;
#endif /* CONFIG_PCI */
blk_queue_max_hw_segments(q, max_sg_entries);
blk_queue_max_phys_segments(q, max_sg_entries);
blk_queue_max_segments(q, max_sg_entries);
/* assign drive queue */
drive->queue = q;

View file

@ -2020,7 +2020,7 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
sdev->start_stop_pwr_cond = 1;
if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
return 0;

View file

@ -177,7 +177,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
conf->array_sectors += rdev->sectors;
cnt++;

View file

@ -308,7 +308,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
*/
if (q->merge_bvec_fn &&
queue_max_sectors(q) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
mddev->degraded--;
@ -478,7 +478,7 @@ static int multipath_run (mddev_t *mddev)
* a merge_bvec_fn to be involved in multipath */
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
if (!test_bit(Faulty, &rdev->flags))
conf->working_disks++;

View file

@ -182,7 +182,7 @@ static int create_strip_zones(mddev_t *mddev)
if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
@ -325,7 +325,7 @@ static int raid0_run(mddev_t *mddev)
}
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
ret = create_strip_zones(mddev);

View file

@ -1158,7 +1158,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
@ -2103,7 +2103,7 @@ static int run(mddev_t *mddev)
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
}
mddev->degraded = 0;

View file

@ -1161,7 +1161,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
@ -2260,7 +2260,7 @@ static int run(mddev_t *mddev)
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}

View file

@ -3739,7 +3739,7 @@ static int bio_fits_rdev(struct bio *bi)
if ((bi->bi_size>>9) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
if (bi->bi_phys_segments > queue_max_phys_segments(q))
if (bi->bi_phys_segments > queue_max_segments(q))
return 0;
if (q->merge_bvec_fn)

View file

@ -1226,9 +1226,8 @@ static int mspro_block_init_disk(struct memstick_dev *card)
blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
blk_queue_bounce_limit(msb->queue, limit);
blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
blk_queue_max_segment_size(msb->queue,
MSPRO_BLOCK_MAX_PAGES * msb->page_size);

View file

@ -1065,9 +1065,8 @@ static int i2o_block_probe(struct device *dev)
queue = gd->queue;
queue->queuedata = i2o_blk_dev;
blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
blk_queue_max_sectors(queue, max_sectors);
blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
blk_queue_max_hw_sectors(queue, max_sectors);
blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));
osm_debug("max sectors = %d\n", queue->max_sectors);
osm_debug("phys segments = %d\n", queue->max_phys_segments);

View file

@ -154,9 +154,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
if (mq->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_sectors(mq->queue, bouncesz / 512);
blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
mq->sg = kmalloc(sizeof(struct scatterlist),
@ -180,10 +179,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
if (!mq->bounce_buf) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_sectors(mq->queue,
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
blk_queue_max_segments(mq->queue, host->max_hw_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
mq->sg = kmalloc(sizeof(struct scatterlist) *

View file

@ -2139,9 +2139,8 @@ static void dasd_setup_queue(struct dasd_block *block)
blk_queue_logical_block_size(block->request_queue, block->bp_block);
max = block->base->discipline->max_blocks << block->s2b_shift;
blk_queue_max_sectors(block->request_queue, max);
blk_queue_max_phys_segments(block->request_queue, -1L);
blk_queue_max_hw_segments(block->request_queue, -1L);
blk_queue_max_hw_sectors(block->request_queue, max);
blk_queue_max_segments(block->request_queue, -1L);
/* with page sized segments we can translate each segment into
* one idaw/tidaw
*/

View file

@ -222,9 +222,8 @@ tapeblock_setup_device(struct tape_device * device)
goto cleanup_queue;
blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
blk_queue_max_phys_segments(blkdat->request_queue, -1L);
blk_queue_max_hw_segments(blkdat->request_queue, -1L);
blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
blk_queue_max_segments(blkdat->request_queue, -1L);
blk_queue_max_segment_size(blkdat->request_queue, -1L);
blk_queue_segment_boundary(blkdat->request_queue, -1L);

View file

@ -4195,7 +4195,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
if (tgt->service_parms.class3_parms[0] & 0x80000000)
rport->supported_classes |= FC_COS_CLASS3;
if (rport->rqst_q)
blk_queue_max_hw_segments(rport->rqst_q, 1);
blk_queue_max_segments(rport->rqst_q, 1);
} else
tgt_dbg(tgt, "rport add failed\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@ -4669,7 +4669,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
if (shost_to_fc_host(shost)->rqst_q)
blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost);
spin_lock(&ibmvfc_driver_lock);
list_add_tail(&vhost->queue, &ibmvfc_head);

View file

@ -3674,7 +3674,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
if (ipr_is_vset_device(res)) {
blk_queue_rq_timeout(sdev->request_queue,
IPR_VSET_RW_TIMEOUT);
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
sdev->allow_restart = 1;

View file

@ -235,7 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
scsi_dev->allow_restart = 1;
blk_queue_rq_timeout(scsi_dev->request_queue,
PMCRAID_VSET_IO_TIMEOUT);
blk_queue_max_sectors(scsi_dev->request_queue,
blk_queue_max_hw_sectors(scsi_dev->request_queue,
PMCRAID_VSET_MAX_SECTORS);
}

View file

@ -1630,10 +1630,10 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
/*
* this limit is imposed by hardware restrictions
*/
blk_queue_max_hw_segments(q, shost->sg_tablesize);
blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
SCSI_MAX_SG_CHAIN_SEGMENTS));
blk_queue_max_sectors(q, shost->max_sectors);
blk_queue_max_hw_sectors(q, shost->max_sectors);
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);

View file

@ -879,7 +879,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
* broken RA4x00 Compaq Disk Array
*/
if (*bflags & BLIST_MAX_512)
blk_queue_max_sectors(sdev->request_queue, 512);
blk_queue_max_hw_sectors(sdev->request_queue, 512);
/*
* Some devices may not want to have a start command automatically

View file

@ -287,8 +287,7 @@ sg_open(struct inode *inode, struct file *filp)
if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = min(queue_max_hw_segments(q),
queue_max_phys_segments(q));
sdp->sg_tablesize = queue_max_segments(q);
}
if ((sfp = sg_add_sfp(sdp, dev)))
filp->private_data = sfp;
@ -1376,8 +1375,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp->device = scsidp;
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->o_excl_wait);
sdp->sg_tablesize = min(queue_max_hw_segments(q),
queue_max_phys_segments(q));
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);

View file

@ -3983,8 +3983,7 @@ static int st_probe(struct device *dev)
return -ENODEV;
}
i = min(queue_max_hw_segments(SDp->request_queue),
queue_max_phys_segments(SDp->request_queue));
i = queue_max_segments(SDp->request_queue);
if (st_max_sg_segs < i)
i = st_max_sg_segs;
buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);

View file

@ -363,10 +363,7 @@ static int blkvsc_probe(struct device *device)
blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
blk_queue_max_phys_segments(blkdev->gd->queue,
MAX_MULTIPAGE_BUFFER_COUNT);
blk_queue_max_hw_segments(blkdev->gd->queue,
MAX_MULTIPAGE_BUFFER_COUNT);
blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
blk_queue_dma_alignment(blkdev->gd->queue, 511);

View file

@ -134,14 +134,14 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
if (queue_max_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_sectors(sdev->request_queue,
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
/* Tapes need much higher max_sector limits, so just
* raise it to the maximum possible (4 GB / 512) and
* let the queue segment size sort out the real limit.
*/
blk_queue_max_sectors(sdev->request_queue, 0x7FFFFF);
blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
}
/* Some USB host controllers can't do DMA; they have to use PIO.
@ -495,7 +495,7 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
unsigned short ms;
if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
blk_queue_max_sectors(sdev->request_queue, ms);
blk_queue_max_hw_sectors(sdev->request_queue, ms);
return strlen(buf);
}
return -EINVAL;

View file

@ -507,10 +507,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
int nr_pages;
nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (nr_pages > queue_max_phys_segments(q))
nr_pages = queue_max_phys_segments(q);
if (nr_pages > queue_max_hw_segments(q))
nr_pages = queue_max_hw_segments(q);
if (nr_pages > queue_max_segments(q))
nr_pages = queue_max_segments(q);
return nr_pages;
}
@ -557,7 +555,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
.bi_rw = bio->bi_rw,
};
if (q->merge_bvec_fn(q, &bvm, prev) < len) {
if (q->merge_bvec_fn(q, &bvm, prev) != prev->bv_len) {
prev->bv_len -= len;
return 0;
}
@ -575,8 +573,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* make this too complex.
*/
while (bio->bi_phys_segments >= queue_max_phys_segments(q)
|| bio->bi_phys_segments >= queue_max_hw_segments(q)) {
while (bio->bi_phys_segments >= queue_max_segments(q)) {
if (retried_segments)
return 0;
@ -611,7 +608,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* merge_bvec_fn() returns number of bytes it can accept
* at this offset
*/
if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
if (q->merge_bvec_fn(q, &bvm, bvec) != bvec->bv_len) {
bvec->bv_page = NULL;
bvec->bv_len = 0;
bvec->bv_offset = 0;
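Both checks above move from "< len" to "!= bv_len": by the time merge_bvec_fn is consulted, the bvec has already been grown by len bytes, so any return short of the full enlarged segment, not merely short of the added bytes, must abort the merge. A userspace model of the corrected logic; the 4 KiB per-segment cap stands in for a hypothetical driver's merge_bvec_fn:

#include <stdio.h>

struct bio_vec { unsigned int bv_len; };

/* hypothetical driver callback: accepts at most 4096 bytes per segment */
static unsigned int toy_merge_bvec(const struct bio_vec *bv)
{
	return bv->bv_len > 4096 ? 4096 : bv->bv_len;
}

static int try_grow(struct bio_vec *prev, unsigned int len)
{
	prev->bv_len += len;
	if (toy_merge_bvec(prev) != prev->bv_len) {	/* the fixed check */
		prev->bv_len -= len;			/* roll back the merge */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct bio_vec prev = { 3584 };

	printf("grow by 512:  %s\n", try_grow(&prev, 512) ? "merged" : "refused");
	printf("grow by 4096: %s\n", try_grow(&prev, 4096) ? "merged" : "refused");
	return 0;
}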

View file

@ -412,9 +412,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
pdev = part_to_dev(p);
p->start_sect = start;
p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
p->discard_alignment = queue_sector_discard_alignment(disk->queue,
start);
p->alignment_offset =
queue_limit_alignment_offset(&disk->queue->limits, start);
p->discard_alignment =
queue_limit_discard_alignment(&disk->queue->limits, start);
p->nr_sects = len;
p->partno = partno;
p->policy = get_disk_ro(disk);

View file

@ -43,6 +43,7 @@ header-y += blkpg.h
header-y += bpqether.h
header-y += bsg.h
header-y += can.h
header-y += cciss_defs.h
header-y += cdk.h
header-y += chio.h
header-y += coda_psdev.h

View file

@ -316,8 +316,7 @@ struct queue_limits {
unsigned int discard_alignment;
unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;
unsigned short max_segments;
unsigned char misaligned;
unsigned char discard_misaligned;
@ -462,6 +461,7 @@ struct request_queue
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
@ -587,6 +587,8 @@ enum {
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
@ -918,10 +920,27 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
/* Temporary compatibility wrapper */
static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
{
blk_queue_max_hw_sectors(q, max);
}
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
{
blk_queue_max_segments(q, max);
}
static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
{
blk_queue_max_segments(q, max);
}
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
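The static inline wrappers above let not-yet-converted drivers keep compiling while the two old segment limits collapse into one field. A userspace model of the consolidation; the struct and function bodies are simplified stand-ins, not the kernel definitions:

#include <stdio.h>

struct queue_limits {
	unsigned short max_segments;	/* replaces the phys/hw pair */
};

static void blk_queue_max_segments(struct queue_limits *lim, unsigned short max)
{
	lim->max_segments = max;
}

/* temporary compatibility wrapper, same shape as in the header above */
static void blk_queue_max_phys_segments(struct queue_limits *lim,
					unsigned short max)
{
	blk_queue_max_segments(lim, max);
}

int main(void)
{
	struct queue_limits lim;

	blk_queue_max_phys_segments(&lim, 128);	/* old spelling, same effect */
	printf("max_segments = %u\n", lim.max_segments);
	return 0;
}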
@ -1014,11 +1033,15 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024
#define MAX_SEGMENT_SIZE 65536
#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
BLK_DEF_MAX_SECTORS = 1024,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
@ -1042,14 +1065,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
return q->limits.max_hw_sectors;
}
static inline unsigned short queue_max_hw_segments(struct request_queue *q)
static inline unsigned short queue_max_segments(struct request_queue *q)
{
return q->limits.max_hw_segments;
}
static inline unsigned short queue_max_phys_segments(struct request_queue *q)
{
return q->limits.max_phys_segments;
return q->limits.max_segments;
}
static inline unsigned int queue_max_segment_size(struct request_queue *q)
@ -1110,18 +1128,13 @@ static inline int queue_alignment_offset(struct request_queue *q)
return q->limits.alignment_offset;
}
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
unsigned int alignment = (sector << 9) & (granularity - 1);
offset &= granularity - 1;
return (granularity + lim->alignment_offset - offset) & (granularity - 1);
}
static inline int queue_sector_alignment_offset(struct request_queue *q,
sector_t sector)
{
return queue_limit_alignment_offset(&q->limits, sector << 9);
return (granularity + lim->alignment_offset - alignment)
& (granularity - 1);
}
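The rewritten helper computes the byte offset within the granularity window straight from the sector. A worked example with hypothetical values: a partition starting at sector 63 on a disk with 4096-byte physical blocks and no reported alignment_offset:

#include <stdio.h>

int main(void)
{
	unsigned int granularity = 4096; /* max(physical_block_size, io_min) */
	unsigned int alignment_offset = 0;
	unsigned long long sector = 63;

	unsigned int alignment = (unsigned int)(sector << 9) & (granularity - 1);
	unsigned int off = (granularity + alignment_offset - alignment)
				& (granularity - 1);

	/* start sits 3584 bytes into the 4096-byte window, 512 bytes short */
	printf("alignment = %u, offset to aligned = %u\n", alignment, off);
	return 0;
}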
static inline int bdev_alignment_offset(struct block_device *bdev)
@ -1145,10 +1158,8 @@ static inline int queue_discard_alignment(struct request_queue *q)
return q->limits.discard_alignment;
}
static inline int queue_sector_discard_alignment(struct request_queue *q,
sector_t sector)
static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
struct queue_limits *lim = &q->limits;
unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
return (lim->discard_granularity + lim->discard_alignment - alignment)

include/linux/cciss_defs.h (new file, 130 lines)
View file

@ -0,0 +1,130 @@
#ifndef CCISS_DEFS_H
#define CCISS_DEFS_H
#include <linux/types.h>
/* general boundary definitions */
#define SENSEINFOBYTES 32 /* note that this value may vary
between host implementations */
/* Command Status value */
#define CMD_SUCCESS 0x0000
#define CMD_TARGET_STATUS 0x0001
#define CMD_DATA_UNDERRUN 0x0002
#define CMD_DATA_OVERRUN 0x0003
#define CMD_INVALID 0x0004
#define CMD_PROTOCOL_ERR 0x0005
#define CMD_HARDWARE_ERR 0x0006
#define CMD_CONNECTION_LOST 0x0007
#define CMD_ABORTED 0x0008
#define CMD_ABORT_FAILED 0x0009
#define CMD_UNSOLICITED_ABORT 0x000A
#define CMD_TIMEOUT 0x000B
#define CMD_UNABORTABLE 0x000C
/* transfer direction */
#define XFER_NONE 0x00
#define XFER_WRITE 0x01
#define XFER_READ 0x02
#define XFER_RSVD 0x03
/* task attribute */
#define ATTR_UNTAGGED 0x00
#define ATTR_SIMPLE 0x04
#define ATTR_HEADOFQUEUE 0x05
#define ATTR_ORDERED 0x06
#define ATTR_ACA 0x07
/* cdb type */
#define TYPE_CMD 0x00
#define TYPE_MSG 0x01
/* Type defs used in the following structs */
#define BYTE __u8
#define WORD __u16
#define HWORD __u16
#define DWORD __u32
#define CISS_MAX_LUN 1024
#define LEVEL2LUN 1 /* index into Target(x) structure, due to byte swapping */
#define LEVEL3LUN 0
#pragma pack(1)
/* Command List Structure */
typedef union _SCSI3Addr_struct {
struct {
BYTE Dev;
BYTE Bus:6;
BYTE Mode:2; /* b00 */
} PeripDev;
struct {
BYTE DevLSB;
BYTE DevMSB:6;
BYTE Mode:2; /* b01 */
} LogDev;
struct {
BYTE Dev:5;
BYTE Bus:3;
BYTE Targ:6;
BYTE Mode:2; /* b10 */
} LogUnit;
} SCSI3Addr_struct;
typedef struct _PhysDevAddr_struct {
DWORD TargetId:24;
DWORD Bus:6;
DWORD Mode:2;
SCSI3Addr_struct Target[2]; /* 2 level target device addr */
} PhysDevAddr_struct;
typedef struct _LogDevAddr_struct {
DWORD VolId:30;
DWORD Mode:2;
BYTE reserved[4];
} LogDevAddr_struct;
typedef union _LUNAddr_struct {
BYTE LunAddrBytes[8];
SCSI3Addr_struct SCSI3Lun[4];
PhysDevAddr_struct PhysDev;
LogDevAddr_struct LogDev;
} LUNAddr_struct;
typedef struct _RequestBlock_struct {
BYTE CDBLen;
struct {
BYTE Type:3;
BYTE Attribute:3;
BYTE Direction:2;
} Type;
HWORD Timeout;
BYTE CDB[16];
} RequestBlock_struct;
typedef union _MoreErrInfo_struct{
struct {
BYTE Reserved[3];
BYTE Type;
DWORD ErrorInfo;
} Common_Info;
struct{
BYTE Reserved[2];
BYTE offense_size; /* size of offending entry */
BYTE offense_num; /* byte # of offense 0-base */
DWORD offense_value;
} Invalid_Cmd;
} MoreErrInfo_struct;
typedef struct _ErrorInfo_struct {
BYTE ScsiStatus;
BYTE SenseLen;
HWORD CommandStatus;
DWORD ResidualCnt;
MoreErrInfo_struct MoreErrInfo;
BYTE SenseInfo[SENSEINFOBYTES];
} ErrorInfo_struct;
#pragma pack()
#endif /* CCISS_DEFS_H */
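The #pragma pack(1) bracketing matters because these structures mirror the controller's wire format byte for byte; without it the compiler may insert padding for natural alignment. A small sketch of the difference, with hypothetical field names:

#include <stdio.h>

#pragma pack(1)
struct packed   { unsigned char cdb_len; unsigned short timeout; };
#pragma pack()
struct unpacked { unsigned char cdb_len; unsigned short timeout; };

int main(void)
{
	/* typically prints: packed=3 unpacked=4 */
	printf("packed=%zu unpacked=%zu\n",
	       sizeof(struct packed), sizeof(struct unpacked));
	return 0;
}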

View file

@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/cciss_defs.h>
#define CCISS_IOC_MAGIC 'B'
@ -36,133 +37,6 @@ typedef __u32 DriverVer_type;
#define MAX_KMALLOC_SIZE 128000
#ifndef CCISS_CMD_H
// This defines are duplicated in cciss_cmd.h in the driver directory
//general boundary definitions
#define SENSEINFOBYTES 32//note that this value may vary between host implementations
//Command Status value
#define CMD_SUCCESS 0x0000
#define CMD_TARGET_STATUS 0x0001
#define CMD_DATA_UNDERRUN 0x0002
#define CMD_DATA_OVERRUN 0x0003
#define CMD_INVALID 0x0004
#define CMD_PROTOCOL_ERR 0x0005
#define CMD_HARDWARE_ERR 0x0006
#define CMD_CONNECTION_LOST 0x0007
#define CMD_ABORTED 0x0008
#define CMD_ABORT_FAILED 0x0009
#define CMD_UNSOLICITED_ABORT 0x000A
#define CMD_TIMEOUT 0x000B
#define CMD_UNABORTABLE 0x000C
//transfer direction
#define XFER_NONE 0x00
#define XFER_WRITE 0x01
#define XFER_READ 0x02
#define XFER_RSVD 0x03
//task attribute
#define ATTR_UNTAGGED 0x00
#define ATTR_SIMPLE 0x04
#define ATTR_HEADOFQUEUE 0x05
#define ATTR_ORDERED 0x06
#define ATTR_ACA 0x07
//cdb type
#define TYPE_CMD 0x00
#define TYPE_MSG 0x01
// Type defs used in the following structs
#define BYTE __u8
#define WORD __u16
#define HWORD __u16
#define DWORD __u32
#define CISS_MAX_LUN 1024
#define LEVEL2LUN 1 // index into Target(x) structure, due to byte swapping
#define LEVEL3LUN 0
#pragma pack(1)
//Command List Structure
typedef union _SCSI3Addr_struct {
struct {
BYTE Dev;
BYTE Bus:6;
BYTE Mode:2; // b00
} PeripDev;
struct {
BYTE DevLSB;
BYTE DevMSB:6;
BYTE Mode:2; // b01
} LogDev;
struct {
BYTE Dev:5;
BYTE Bus:3;
BYTE Targ:6;
BYTE Mode:2; // b10
} LogUnit;
} SCSI3Addr_struct;
typedef struct _PhysDevAddr_struct {
DWORD TargetId:24;
DWORD Bus:6;
DWORD Mode:2;
SCSI3Addr_struct Target[2]; //2 level target device addr
} PhysDevAddr_struct;
typedef struct _LogDevAddr_struct {
DWORD VolId:30;
DWORD Mode:2;
BYTE reserved[4];
} LogDevAddr_struct;
typedef union _LUNAddr_struct {
BYTE LunAddrBytes[8];
SCSI3Addr_struct SCSI3Lun[4];
PhysDevAddr_struct PhysDev;
LogDevAddr_struct LogDev;
} LUNAddr_struct;
typedef struct _RequestBlock_struct {
BYTE CDBLen;
struct {
BYTE Type:3;
BYTE Attribute:3;
BYTE Direction:2;
} Type;
HWORD Timeout;
BYTE CDB[16];
} RequestBlock_struct;
typedef union _MoreErrInfo_struct{
struct {
BYTE Reserved[3];
BYTE Type;
DWORD ErrorInfo;
}Common_Info;
struct{
BYTE Reserved[2];
BYTE offense_size;//size of offending entry
BYTE offense_num; //byte # of offense 0-base
DWORD offense_value;
}Invalid_Cmd;
}MoreErrInfo_struct;
typedef struct _ErrorInfo_struct {
BYTE ScsiStatus;
BYTE SenseLen;
HWORD CommandStatus;
DWORD ResidualCnt;
MoreErrInfo_struct MoreErrInfo;
BYTE SenseInfo[SENSEINFOBYTES];
} ErrorInfo_struct;
#pragma pack()
#endif /* CCISS_CMD_H */
typedef struct _IOCTL_Command_struct {
LUNAddr_struct LUN_info;
RequestBlock_struct Request;

View file

@ -385,7 +385,7 @@
/* defines for max_sectors and max_phys_segments */
#define I2O_MAX_SECTORS 1024
#define I2O_MAX_SECTORS_LIMITED 128
#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
#define I2O_MAX_PHYS_SEGMENTS BLK_MAX_SEGMENTS
/*
* Message structures

View file

@ -49,8 +49,8 @@ struct io_context {
/*
* For request batching
*/
unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
unsigned long last_waited; /* Time last woken after wait for request */
struct radix_tree_root radix_root;
struct hlist_head cic_list;
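The reorder above is the entire fix: on LP64 targets an int wedged between 8-byte members leaves a 4-byte hole, and moving it next to the other small members reclaims 8 bytes per io_context. A sketch demonstrating the effect with illustrative member names:

#include <stdio.h>

struct before { unsigned short a, b; unsigned long last; int batch; void *p; };
struct after  { unsigned short a, b; int batch; unsigned long last; void *p; };

int main(void)
{
	/* on x86-64 Linux: before=32 after=24 */
	printf("before=%zu after=%zu\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}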

View file

@ -163,10 +163,8 @@ struct packet_iosched
atomic_t attention; /* Set to non-zero when queue processing is needed */
int writing; /* Non-zero when writing, zero when reading */
spinlock_t lock; /* Protecting read/write queue manipulations */
struct bio *read_queue;
struct bio *read_queue_tail;
struct bio *write_queue;
struct bio *write_queue_tail;
struct bio_list read_queue;
struct bio_list write_queue;
sector_t last_write; /* The sector where the last write ended */
int successive_reads;
};
@ -206,8 +204,8 @@ struct packet_data
spinlock_t lock; /* Lock protecting state transitions and */
/* orig_bios list */
struct bio *orig_bios; /* Original bios passed to pkt_make_request */
struct bio *orig_bios_tail;/* that will be handled by this packet */
struct bio_list orig_bios; /* Original bios passed to pkt_make_request */
/* that will be handled by this packet */
int write_size; /* Total size of all bios in the orig_bios */
/* list, measured in number of frames */

View file

@ -97,7 +97,7 @@ struct sched_param {
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
struct bio_list;
struct fs_struct;
struct bts_context;
struct perf_event_context;
@ -1454,7 +1454,7 @@ struct task_struct {
void *journal_info;
/* stacked block device info */
struct bio *bio_list, **bio_tail;
struct bio_list *bio_list;
/* VM state */
struct reclaim_state *reclaim_state;

View file

@ -540,9 +540,10 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (ret)
return ret;
if (copy_to_user(arg, &buts, sizeof(buts)))
if (copy_to_user(arg, &buts, sizeof(buts))) {
blk_trace_remove(q);
return -EFAULT;
}
return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);