diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fa8f26309444..75642a352a8f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1659,7 +1659,7 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 		if (ioc) {
 			ioc_cgroup_changed(ioc);
-			put_io_context(ioc, NULL);
+			put_io_context(ioc);
 		}
 	}
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index e6c05a97ee2b..3a78b00edd71 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
 		if (rq->elv.icq)
-			put_io_context(rq->elv.icq->ioc, q);
+			put_io_context(rq->elv.icq->ioc);
 	}
 
 	mempool_free(rq, q->rq.rq_pool);
@@ -872,13 +872,15 @@ retry:
 	spin_unlock_irq(q->queue_lock);
 
 	/* create icq if missing */
-	if (unlikely(et->icq_cache && !icq))
+	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
 		icq = ioc_create_icq(q, gfp_mask);
-
-	/* rqs are guaranteed to have icq on elv_set_request() if requested */
-	if (likely(!et->icq_cache || icq))
-		rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+		if (!icq)
+			goto fail_icq;
+	}
+
+	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
 
+fail_icq:
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -1210,7 +1212,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1241,7 +1242,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1255,13 +1255,12 @@
  * on %current's plugged list.  Returns %true if merge was successful,
  * otherwise %false.
  *
- * This function is called without @q->queue_lock; however, elevator is
- * accessed iff there already are requests on the plugged list which in
- * turn guarantees validity of the elevator.
- *
- * Note that, on successful merge, elevator operation
- * elevator_bio_merged_fn() will be called without queue lock.  Elevator
- * must be ready for this.
+ * Plugging coalesces IOs from the same issuer for the same purpose without
+ * going through @q->queue_lock.  As such it's more of an issuing mechanism
+ * than scheduling, and the request, while it may have elvpriv data, is not
+ * added on the elevator at this point.  In addition, we don't have
+ * reliable access to the elevator outside queue lock.  Only check basic
+ * merging parameters without querying the elevator.
 */
 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
			       unsigned int *request_count)
@@ -1280,10 +1279,10 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
 		(*request_count)++;
 
-		if (rq->q != q)
+		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
 
-		el_ret = elv_try_merge(rq, bio);
+		el_ret = blk_try_merge(rq, bio);
 		if (el_ret == ELEVATOR_BACK_MERGE) {
 			ret = bio_attempt_back_merge(q, rq, bio);
 			if (ret)
@@ -1345,12 +1344,14 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	el_ret = elv_merge(q, &req, bio);
 	if (el_ret == ELEVATOR_BACK_MERGE) {
 		if (bio_attempt_back_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_back_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
 		}
 	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
 		if (bio_attempt_front_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_front_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
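Note: with elevator_bio_merged_fn moved out of the lock-free path, plug merging
now leans entirely on the two helpers introduced in blk-merge.c below.  A
minimal sketch of the calling pattern the new attempt_plug_merge() follows
(condensed from the hunk above; error handling elided):

	bool merged = false;

	/* screen with the generic, elevator-free checks first */
	if (rq->q == q && blk_rq_merge_ok(rq, bio)) {
		/* then let sector arithmetic pick the direction */
		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:	/* bio begins where rq ends */
			merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:	/* bio ends where rq begins */
			merged = bio_attempt_front_merge(q, rq, bio);
			break;
		}
	}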
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 27a06e00eaec..8b782a63c297 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -29,21 +29,6 @@ void get_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(get_io_context);
 
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release.  As the ioc's can't be the same, this is okay but
- * makes lockdep whine.  Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
-#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q)		0
-#define ioc_release_depth_inc(q)	do { } while (0)
-#define ioc_release_depth_dec(q)	do { } while (0)
-#endif
-
 static void icq_free_icq_rcu(struct rcu_head *head)
 {
 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
@@ -75,11 +60,8 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn) {
-		ioc_release_depth_inc(q);
+	if (et->ops.elevator_exit_icq_fn)
 		et->ops.elevator_exit_icq_fn(icq);
-		ioc_release_depth_dec(q);
-	}
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -98,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
 	struct request_queue *last_q = NULL;
+	unsigned long flags;
 
-	spin_lock_irq(&ioc->lock);
+	/*
+	 * Exiting icq may call into put_io_context() through the elevator,
+	 * which will trigger a lockdep warning.  The ioc's are guaranteed
+	 * to be different, so use a different locking subclass here.  Use
+	 * the irqsave variant as there's no spin_lock_irq_nested().
+	 */
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -121,15 +110,15 @@
 		 */
 		if (last_q) {
 			spin_unlock(last_q->queue_lock);
-			spin_unlock_irq(&ioc->lock);
+			spin_unlock_irqrestore(&ioc->lock, flags);
 			blk_put_queue(last_q);
 		} else {
-			spin_unlock_irq(&ioc->lock);
+			spin_unlock_irqrestore(&ioc->lock, flags);
 		}
 
 		last_q = this_q;
-		spin_lock_irq(this_q->queue_lock);
-		spin_lock(&ioc->lock);
+		spin_lock_irqsave(this_q->queue_lock, flags);
+		spin_lock_nested(&ioc->lock, 1);
 		continue;
 	}
 	ioc_exit_icq(icq);
@@ -137,10 +126,10 @@
 
 	if (last_q) {
 		spin_unlock(last_q->queue_lock);
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		blk_put_queue(last_q);
 	} else {
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
 
 	kmem_cache_free(iocontext_cachep, ioc);
@@ -149,79 +138,29 @@
 /**
  * put_io_context - put a reference of io_context
  * @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
  *
  * Decrement reference count of @ioc and release it if the count reaches
- * zero.  If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q.  This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
  */
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
 {
-	struct request_queue *last_q = locked_q;
 	unsigned long flags;
 
 	if (ioc == NULL)
 		return;
 
 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-	if (locked_q)
-		lockdep_assert_held(locked_q->queue_lock);
-
-	if (!atomic_long_dec_and_test(&ioc->refcount))
-		return;
 
 	/*
-	 * Destroy @ioc.  This is a bit messy because icq's are chained
-	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
-	 * The inner ioc->lock should be held to walk our icq_list and then
-	 * for each icq the outer matching queue_lock should be grabbed.
-	 * ie. We need to do reverse-order double lock dancing.
-	 *
-	 * Another twist is that we are often called with one of the
-	 * matching queue_locks held as indicated by @locked_q, which
-	 * prevents performing double-lock dance for other queues.
-	 *
-	 * So, we do it in two stages.  The fast path uses the queue_lock
-	 * the caller is holding and, if other queues need to be accessed,
-	 * uses trylock to avoid introducing locking dependency.  This can
-	 * handle most cases, especially if @ioc was performing IO on only
-	 * single device.
-	 *
-	 * If trylock doesn't cut it, we defer to @ioc->release_work which
-	 * can do all the double-locking dancing.
+	 * Releasing ioc requires reverse-order double locking and we may
+	 * already be holding a queue_lock.  Do it asynchronously from wq.
 	 */
-	spin_lock_irqsave_nested(&ioc->lock, flags,
-				 ioc_release_depth(locked_q));
-
-	while (!hlist_empty(&ioc->icq_list)) {
-		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
-						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
-
-		if (this_q != last_q) {
-			if (last_q && last_q != locked_q)
-				spin_unlock(last_q->queue_lock);
-			last_q = NULL;
-
-			if (!spin_trylock(this_q->queue_lock))
-				break;
-			last_q = this_q;
-			continue;
-		}
-		ioc_exit_icq(icq);
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
+		spin_lock_irqsave(&ioc->lock, flags);
+		if (!hlist_empty(&ioc->icq_list))
+			schedule_work(&ioc->release_work);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
-
-	if (last_q && last_q != locked_q)
-		spin_unlock(last_q->queue_lock);
-
-	spin_unlock_irqrestore(&ioc->lock, flags);
-
-	/* if no icq is left, we're done; otherwise, kick release_work */
-	if (hlist_empty(&ioc->icq_list))
-		kmem_cache_free(iocontext_cachep, ioc);
-	else
-		schedule_work(&ioc->release_work);
 }
 EXPORT_SYMBOL(put_io_context);
@@ -236,7 +175,7 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	atomic_dec(&ioc->nr_tasks);
-	put_io_context(ioc, NULL);
+	put_io_context(ioc);
 }
 
 /**
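Note: the put_io_context() rewrite above drops the trylock fast path entirely.
The final put merely schedules release_work, and ioc_release_fn() performs the
reverse-order double locking later, from process context, where no queue_lock
can already be held.  A minimal sketch of the underlying pattern with the icq
specifics stripped away (simplified illustration, not code from the patch):

	struct obj {
		atomic_long_t refcount;
		struct work_struct release_work;
	};

	/* may be called while the caller holds the "outer" lock */
	static void obj_put(struct obj *o)
	{
		if (atomic_long_dec_and_test(&o->refcount))
			schedule_work(&o->release_work); /* no nested locking here */
	}

	/* runs later in process context, free to lock outer-then-inner */
	static void obj_release_fn(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, release_work);

		kfree(o);
	}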
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37cb222b..160035f54882 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -471,3 +471,40 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 	return attempt_merge(q, rq, next);
 }
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+	if (!rq_mergeable(rq))
+		return false;
+
+	/* don't merge file system requests and discard requests */
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+		return false;
+
+	/* don't merge discard requests and secure discard requests */
+	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+		return false;
+
+	/* different data direction or already started, don't merge */
+	if (bio_data_dir(bio) != rq_data_dir(rq))
+		return false;
+
+	/* must be same device and not a special request */
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return false;
+
+	/* only merge integrity protected bio into ditto rq */
+	if (bio_integrity(bio) != blk_integrity_rq(rq))
+		return false;
+
+	return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+		return ELEVATOR_BACK_MERGE;
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+		return ELEVATOR_FRONT_MERGE;
+	return ELEVATOR_NO_MERGE;
+}
diff --git a/block/blk.h b/block/blk.h
index 7efd772336de..9c12f80882b0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -137,6 +137,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 		struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
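Note: blk_try_merge() is pure sector arithmetic over the request's current
span.  A worked example with assumed numbers:

	/*
	 * rq covers sectors 100..107:
	 *   blk_rq_pos(rq) == 100, blk_rq_sectors(rq) == 8
	 *
	 * bio A: bi_sector == 108              -> 100 + 8 == 108 -> ELEVATOR_BACK_MERGE
	 * bio B: bi_sector == 96, 4 sectors    -> 100 - 4 == 96  -> ELEVATOR_FRONT_MERGE
	 * bio C: bi_sector == 200              -> neither holds  -> ELEVATOR_NO_MERGE
	 */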
diff --git a/block/bsg.c b/block/bsg.c
index 4cf703fd98bb..ff64ae3bacee 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -983,7 +983,8 @@ void bsg_unregister_queue(struct request_queue *q)
 
 	mutex_lock(&bsg_mutex);
 	idr_remove(&bsg_minor_idr, bcd->minor);
-	sysfs_remove_link(&q->kobj, "bsg");
+	if (q->kobj.sd)
+		sysfs_remove_link(&q->kobj, "bsg");
 	device_unregister(bcd->class_dev);
 	bcd->class_dev = NULL;
 	kref_put(&bcd->ref, bsg_kref_release_function);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee55019066a1..d0ba50533668 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1699,18 +1699,11 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with and allow
-	 * merge only if rq is queued there.  This function can be called
-	 * from plug merge without queue_lock.  In such cases, ioc of @rq
-	 * and %current are guaranteed to be equal.  Avoid lookup which
-	 * requires queue_lock by using @rq's cic.
+	 * merge only if rq is queued there.
 	 */
-	if (current->io_context == RQ_CIC(rq)->icq.ioc) {
-		cic = RQ_CIC(rq);
-	} else {
-		cic = cfq_cic_lookup(cfqd, current->io_context);
-		if (!cic)
-			return false;
-	}
+	cic = cfq_cic_lookup(cfqd, current->io_context);
+	if (!cic)
+		return false;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	return cfqq == RQ_CFQQ(rq);
@@ -1794,7 +1787,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfqd->active_queue = NULL;
 
 	if (cfqd->active_cic) {
-		put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+		put_io_context(cfqd->active_cic->icq.ioc);
 		cfqd->active_cic = NULL;
 	}
 }
@@ -3117,17 +3110,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
+	cfq_slice_expired(cfqd, 1);
 
 	/*
 	 * workload type is changed, don't save slice, otherwise preempt
 	 * doesn't happen
 	 */
-	if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
+	if (old_type != cfqq_type(cfqq))
 		cfqq->cfqg->saved_workload_slice = 0;
 
-	cfq_slice_expired(cfqd, 1);
-
 	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
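Note: the cfq_preempt_queue() hunk is an ordering fix.  Going by the comment in
the hunk, the saved_workload_slice reset is only meaningful if it happens after
cfq_slice_expired() has saved the slice, so the expiry is moved up; and since
expiry also clears cfqd->active_queue (see the __cfq_slice_expired() hunk), the
old workload type has to be sampled before it.  Condensed:

	enum wl_type_t old_type = cfqq_type(cfqd->active_queue); /* sample first */

	cfq_slice_expired(cfqd, 1);	/* clears active_queue, saves the slice */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;	/* now the reset sticks */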
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9be..f016855a46b0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -70,39 +70,9 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	if (!rq_mergeable(rq))
-		return 0;
-
-	/*
-	 * Don't merge file system requests and discard requests
-	 */
-	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-		return 0;
-
-	/*
-	 * Don't merge discard requests and secure discard requests
-	 */
-	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
-		return 0;
-
-	/*
-	 * different data direction or already started, don't merge
-	 */
-	if (bio_data_dir(bio) != rq_data_dir(rq))
-		return 0;
-
-	/*
-	 * must be same device and not a special request
-	 */
-	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
-		return 0;
-
-	/*
-	 * only merge integrity protected bio into ditto rq
-	 */
-	if (bio_integrity(bio) != blk_integrity_rq(rq))
+	if (!blk_rq_merge_ok(rq, bio))
 		return 0;
 
 	if (!elv_iosched_allow_merge(rq, bio))
@@ -112,23 +82,6 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-int elv_try_merge(struct request *__rq, struct bio *bio)
-{
-	int ret = ELEVATOR_NO_MERGE;
-
-	/*
-	 * we can merge and sequence is ok, check if it's possible
-	 */
-	if (elv_rq_merge_ok(__rq, bio)) {
-		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
-			ret = ELEVATOR_BACK_MERGE;
-		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
-			ret = ELEVATOR_FRONT_MERGE;
-	}
-
-	return ret;
-}
-
 static struct elevator_type *elevator_find(const char *name)
 {
 	struct elevator_type *e;
@@ -478,8 +431,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge) {
-		ret = elv_try_merge(q->last_merge, bio);
+	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+		ret = blk_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
 			return ret;
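Note: after this split the merge checks layer cleanly.  A condensed map of who
calls what (summary, not code from the patch):

	/*
	 * blk_rq_merge_ok(rq, bio)  - generic checks only, safe without queue_lock
	 * blk_try_merge(rq, bio)    - front/back/no-merge by sector arithmetic
	 * elv_rq_merge_ok(rq, bio)  - blk_rq_merge_ok() plus the elevator's
	 *                             allow_merge hook; needs the queue_lock
	 *
	 * attempt_plug_merge(): blk_rq_merge_ok() + blk_try_merge() (lock-free)
	 * elv_merge():          elv_rq_merge_ok() + blk_try_merge() (under lock)
	 */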
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 510fb10ec45a..9baf11e86362 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4368,8 +4368,14 @@ out_unreg_blkdev:
 out_put_disk:
 	while (dr--) {
 		del_timer_sync(&motor_off_timer[dr]);
-		if (disks[dr]->queue)
+		if (disks[dr]->queue) {
 			blk_cleanup_queue(disks[dr]->queue);
+			/*
+			 * put_disk() is not paired with add_disk() and
+			 * will put queue reference one extra time. fix it.
+			 */
+			disks[dr]->queue = NULL;
+		}
 		put_disk(disks[dr]);
 	}
 	return err;
@@ -4579,6 +4585,15 @@ static void __exit floppy_module_exit(void)
 		platform_device_unregister(&floppy_device[drive]);
 	}
 	blk_cleanup_queue(disks[drive]->queue);
+
+	/*
+	 * These disks have not called add_disk().  Don't put down
+	 * queue reference in put_disk().
+	 */
+	if (!(allowed_drive_mask & (1 << drive)) ||
+	    fdc_state[FDC(drive)].version == FDC_NONE)
+		disks[drive]->queue = NULL;
+
 	put_disk(disks[drive]);
 }
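Note: both floppy hunks address the same detail.  Per the comments above,
put_disk() drops a queue reference that is only balanced when add_disk() ran,
so on paths where it never did, ->queue must be cleared before put_disk().  As
a generic pattern (hypothetical driver teardown helper, not from the patch):

	static void my_cleanup_disk(struct gendisk *disk, bool added)
	{
		if (disk->queue) {
			blk_cleanup_queue(disk->queue);
			if (!added)
				/* keep put_disk() from putting the queue again */
				disk->queue = NULL;
		}
		put_disk(disk);
	}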
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f00257782fcc..cd504353b278 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -356,14 +356,14 @@ lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
 	return __splice_from_pipe(pipe, sd, lo_splice_actor);
 }
 
-static int
+static ssize_t
 do_lo_receive(struct loop_device *lo,
 	      struct bio_vec *bvec, int bsize, loff_t pos)
 {
 	struct lo_read_data cookie;
 	struct splice_desc sd;
 	struct file *file;
-	long retval;
+	ssize_t retval;
 
 	cookie.lo = lo;
 	cookie.page = bvec->bv_page;
@@ -379,26 +379,28 @@ do_lo_receive(struct loop_device *lo,
 	file = lo->lo_backing_file;
 	retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
 
-	if (retval < 0)
-		return retval;
-	if (retval != bvec->bv_len)
-		return -EIO;
-	return 0;
+	return retval;
 }
 
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
 	struct bio_vec *bvec;
-	int i, ret = 0;
+	ssize_t s;
+	int i;
 
 	bio_for_each_segment(bvec, bio, i) {
-		ret = do_lo_receive(lo, bvec, bsize, pos);
-		if (ret < 0)
+		s = do_lo_receive(lo, bvec, bsize, pos);
+		if (s < 0)
+			return s;
+
+		if (s != bvec->bv_len) {
+			zero_fill_bio(bio);
 			break;
+		}
 		pos += bvec->bv_len;
 	}
-	return ret;
+	return 0;
 }
 
 static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index b74eab70c3d0..8eb81c96608f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2068,8 +2068,6 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  *	when the read completes.
  * @data	Callback data passed to the callback function
  *	when the read completes.
- * @barrier	If non-zero, this command must be completed before
- *	issuing any other commands.
  * @dir	Direction (read or write)
  *
  * return value
@@ -2077,7 +2075,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  */
 static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
			      int nsect, int nents, int tag, void *callback,
-			      void *data, int barrier, int dir)
+			      void *data, int dir)
 {
 	struct host_to_dev_fis	*fis;
 	struct mtip_port *port = dd->port;
@@ -2108,8 +2106,6 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
 	*((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
 	*((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
 	fis->device	 = 1 << 6;
-	if (barrier)
-		fis->device |= FUA_BIT;
 	fis->features = nsect & 0xFF;
 	fis->features_ex = (nsect >> 8) & 0xFF;
 	fis->sect_count = ((tag << 3) | (tag >> 5));
@@ -3087,7 +3083,6 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 				tag,
 				bio_endio,
 				bio,
-				bio->bi_rw & REQ_FUA,
 				bio_data_dir(bio));
 	} else
 		bio_io_error(bio);
@@ -3187,6 +3182,10 @@ skip_create_disk:
 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
 	blk_queue_physical_block_size(dd->queue, 4096);
 	blk_queue_io_min(dd->queue, 4096);
+	/*
+	 * The device does not support write-back caching.  FUA depends on
+	 * write-back cache support, hence setting flush support to zero.
+	 */
 	blk_queue_flush(dd->queue, 0);
 
 	/* Set the capacity of the device in 512 byte sectors. */
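Note the semantic change in lo_receive() above: a short splice from the backing
file no longer fails the bio with -EIO; the bio is zero-filled and the read
completes successfully, which is what a read past the end of a sparse backing
file should look like.  Only a genuinely failed splice (s < 0) still errors
out.  Condensed view of the new loop:

	bio_for_each_segment(bvec, bio, i) {
		ssize_t s = do_lo_receive(lo, bvec, bsize, pos);

		if (s < 0)
			return s;		/* real I/O error: fail the bio */
		if (s != bvec->bv_len) {
			zero_fill_bio(bio);	/* short read: pad with zeroes */
			break;			/* ...and report success */
		}
		pos += bvec->bv_len;
	}
	return 0;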
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 723d7c4946dc..e0554a8f2233 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -104,9 +104,6 @@
 /* BAR number used to access the HBA registers. */
 #define MTIP_ABAR		5
 
-/* Forced Unit Access Bit */
-#define FUA_BIT			0x80
-
 #ifdef DEBUG
  #define dbg_printk(format, arg...)	\
 	printk(pr_fmt(format), ##arg);
@@ -415,8 +412,6 @@ struct driver_data {
 
 	atomic_t resumeflag; /* Atomic variable to track suspend/resume */
 
-	atomic_t eh_active; /* Flag for error handling tracking */
-
 	struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
 };
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 55eaf474d32c..d620b4495745 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -286,8 +286,6 @@
 
 /* used to tell the module to turn on full debugging messages */
 static bool debug;
-/* used to keep tray locked at all times */
-static int keeplocked;
 /* default compatibility mode */
 static bool autoclose=1;
 static bool autoeject;
@@ -1204,7 +1202,7 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
 		cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
 		cdrom_dvd_rw_close_write(cdi);
 
-		if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+		if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
 			cdinfo(CD_CLOSE, "Unlocking door!\n");
 			cdo->lock_door(cdi, 0);
 		}
@@ -1371,7 +1369,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
 	curslot = info->hdr.curslot;
 	kfree(info);
 
-	if (cdi->use_count > 1 || keeplocked) {
+	if (cdi->use_count > 1 || cdi->keeplocked) {
 		if (slot == CDSL_CURRENT) {
 			return curslot;
 		} else {
@@ -2119,11 +2117,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	if (!nr)
 		return -ENOMEM;
 
-	if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	cgc.data_direction = CGC_DATA_READ;
 	while (nframes > 0) {
 		if (nr > nframes)
@@ -2132,7 +2125,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
 		if (ret)
 			break;
-		if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+		if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
 			ret = -EFAULT;
 			break;
 		}
@@ -2140,7 +2133,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		nframes -= nr;
 		lba += nr;
 	}
-out:
 	kfree(cgc.buffer);
 	return ret;
 }
@@ -2295,7 +2287,7 @@ static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
 	if (!CDROM_CAN(CDC_OPEN_TRAY))
 		return -ENOSYS;
-	if (cdi->use_count != 1 || keeplocked)
+	if (cdi->use_count != 1 || cdi->keeplocked)
 		return -EBUSY;
 	if (CDROM_CAN(CDC_LOCK)) {
 		int ret = cdi->ops->lock_door(cdi, 0);
@@ -2322,7 +2314,7 @@ static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
 	if (!CDROM_CAN(CDC_OPEN_TRAY))
 		return -ENOSYS;
-	if (keeplocked)
+	if (cdi->keeplocked)
 		return -EBUSY;
 
 	cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
@@ -2453,7 +2445,7 @@ static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
 	if (!CDROM_CAN(CDC_LOCK))
 		return -EDRIVE_CANT_DO_THIS;
 
-	keeplocked = arg ? 1 : 0;
+	cdi->keeplocked = arg ? 1 : 0;
 
 	/*
 	 * Don't unlock the door on multiple opens by default, but allow
diff --git a/fs/bio.c b/fs/bio.c
index b1fe82cf88cf..b980ecde026a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -505,13 +505,9 @@ EXPORT_SYMBOL(bio_clone);
 int bio_get_nr_vecs(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	int nr_pages;
-
-	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > queue_max_segments(q))
-		nr_pages = queue_max_segments(q);
-
-	return nr_pages;
+	return min_t(unsigned,
+		     queue_max_segments(q),
+		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
 }
 EXPORT_SYMBOL(bio_get_nr_vecs);
diff --git a/fs/ioprio.c b/fs/ioprio.c
index f84b380d65e5..0f1b9515213b 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -51,7 +51,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 	if (ioc) {
 		ioc_ioprio_changed(ioc, ioprio);
-		put_io_context(ioc, NULL);
+		put_io_context(ioc);
 	}
 
 	return err;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6c6a1f008065..606cf339bb56 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -399,9 +399,6 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
-#ifdef CONFIG_LOCKDEP
-	int			ioc_release_depth;
-#endif
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 35eae4b67503..7c48029dffe6 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -952,7 +952,8 @@ struct cdrom_device_info {
 	char name[20];			/* name of the device type */
 /* per-device flags */
 	__u8 sanyo_slot		: 2;	/* Sanyo 3 CD changer support */
-	__u8 reserved		: 6;	/* not used yet */
+	__u8 keeplocked		: 1;	/* CDROM_LOCKDOOR status */
+	__u8 reserved		: 5;	/* not used yet */
 	int cdda_method;		/* see flags */
 	__u8 last_sense;
 	__u8 media_written;		/* dirty flag, DVD+RW bookkeeping */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c24f3d7fbf1e..7d4e0356f329 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -42,12 +42,6 @@ struct elevator_ops
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
 	elevator_allow_merge_fn *elevator_allow_merge_fn;
-
-	/*
-	 * Used for both plugged list and elevator merging and in the
-	 * former case called without queue_lock.  Read comment on top of
-	 * attempt_plug_merge() for details.
-	 */
 	elevator_bio_merged_fn *elevator_bio_merged_fn;
 
 	elevator_dispatch_fn *elevator_dispatch_fn;
@@ -122,7 +116,6 @@ extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
 extern void elv_add_request(struct request_queue *, struct request *, int);
 extern void __elv_add_request(struct request_queue *, struct request *, int);
 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
-extern int elv_try_merge(struct request *, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
			       struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *, int);
@@ -155,7 +148,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
-extern int elv_rq_merge_ok(struct request *, struct bio *);
+extern bool elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
  * Helper functions.
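Note: the bio_get_nr_vecs() rewrite above folds the page/segment clamp into one
min_t().  PAGE_SIZE >> 9 is the number of 512-byte sectors per page, so
queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1 converts the sector budget to a
page count (the + 1 covers a trailing partial page).  Worked example with
assumed queue limits:

	/*
	 * queue_max_sectors(q)  == 255 (512-byte sectors)
	 * PAGE_SIZE             == 4096 -> PAGE_SIZE >> 9 == 8 sectors/page
	 * queue_max_segments(q) == 128
	 *
	 * 255 / 8 + 1 == 32 pages
	 * min(128, 32) == 32 -> bio_get_nr_vecs() returns 32
	 */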
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 7e1371c4bccf..119773eebe31 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -133,7 +133,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
+void put_io_context(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
@@ -141,8 +141,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
 #else
 struct io_context;
-static inline void put_io_context(struct io_context *ioc,
-				  struct request_queue *locked_q) { }
+static inline void put_io_context(struct io_context *ioc) { }
 static inline void exit_io_context(struct task_struct *task) { }
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 1b2ef3c23ae4..b77fd559c78e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -910,7 +910,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 			return -ENOMEM;
 
 		new_ioc->ioprio = ioc->ioprio;
-		put_io_context(new_ioc, NULL);
+		put_io_context(new_ioc);
 	}
 #endif
 
 	return 0;
 }
diff --git a/kernel/relay.c b/kernel/relay.c
index 4335e1d7ee2d..ab56a1764d4d 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -164,10 +164,14 @@ depopulate:
  */
 static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
-	struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
-	if (!buf)
+	struct rchan_buf *buf;
+
+	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
 		return NULL;
 
+	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
 	buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
 	if (!buf->padding)
 		goto free_buf;
@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
+	if (subbuf_size > UINT_MAX / n_subbufs)
+		return NULL;
 
 	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
 	if (!chan)