diff --git a/block/badblocks.c b/block/badblocks.c
index 6610e282a03e..6ebcef282314 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
 }
 EXPORT_SYMBOL_GPL(badblocks_check);
 
+static void badblocks_update_acked(struct badblocks *bb)
+{
+        u64 *p = bb->page;
+        int i;
+        bool unacked = false;
+
+        if (!bb->unacked_exist)
+                return;
+
+        for (i = 0; i < bb->count; i++) {
+                if (!BB_ACK(p[i])) {
+                        unacked = true;
+                        break;
+                }
+        }
+
+        if (!unacked)
+                bb->unacked_exist = 0;
+}
+
 /**
  * badblocks_set() - Add a range of bad blocks to the table.
  * @bb: the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
         bb->changed = 1;
         if (!acknowledged)
                 bb->unacked_exist = 1;
+        else
+                badblocks_update_acked(bb);
         write_sequnlock_irqrestore(&bb->lock, flags);
 
         return rv;
@@ -401,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
                 }
         }
 
+        badblocks_update_acked(bb);
         bb->changed = 1;
 out:
         write_sequnlock_irq(&bb->lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6a14b68b9135..3c882cbc7541 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -342,6 +342,34 @@ static void flush_data_end_io(struct request *rq, int error)
         struct request_queue *q = rq->q;
         struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+        /*
+         * Updating q->in_flight[] here for making this tag usable
+         * early. Because in blk_queue_start_tag(),
+         * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
+         * reserve tags for sync I/O.
+         *
+         * More importantly, this way can avoid the following I/O
+         * deadlock:
+         *
+         * - suppose there are 40 fua requests coming to flush queue
+         *   and queue depth is 31
+         * - 30 rqs are scheduled, then blk_queue_start_tag() can't alloc
+         *   tag for async I/O any more
+         * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+         *   and flush_data_end_io() is called
+         * - the other rqs still can't go ahead if not updating
+         *   q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
+         *   are held in flush data queue and make no progress of
+         *   handling post flush rq
+         * - only after the post flush rq is handled, all these rqs
+         *   can be completed
+         */
+
+        elv_completed_request(q, rq);
+
+        /* for avoiding double accounting */
+        rq->cmd_flags &= ~REQ_STARTED;
+
         /*
          * After populating an empty queue, kick it to avoid stall. Read
          * the comment in flush_end_io().
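The badblocks hunks above keep bb->unacked_exist in sync by re-scanning the table whenever an acknowledged range is set or a range is cleared. The stand-alone C sketch below mirrors that bookkeeping in user space so it can be compiled and stepped through on its own; the mock_* names and the "top bit = acknowledged" entry encoding are illustrative stand-ins, not the kernel's actual packed badblocks layout.

/*
 * Minimal user-space sketch of the unacked_exist bookkeeping shown in the
 * badblocks diff above. The 64-bit entry encoding here (top bit means
 * "acknowledged") is a hypothetical stand-in, not the real kernel layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_BB_ACK(x)  (((x) >> 63) & 1)       /* hypothetical ack bit */

struct mock_badblocks {
        uint64_t entries[32];
        int count;
        int unacked_exist;
};

/* Re-scan the table; clear unacked_exist once no unacknowledged entry remains. */
static void mock_update_acked(struct mock_badblocks *bb)
{
        bool unacked = false;
        int i;

        if (!bb->unacked_exist)
                return;

        for (i = 0; i < bb->count; i++) {
                if (!MOCK_BB_ACK(bb->entries[i])) {
                        unacked = true;
                        break;
                }
        }

        if (!unacked)
                bb->unacked_exist = 0;
}

int main(void)
{
        struct mock_badblocks bb = {
                .entries = { 1ULL << 63, 42 },  /* one acked, one unacked entry */
                .count = 2,
                .unacked_exist = 1,
        };

        mock_update_acked(&bb);
        printf("unacked_exist=%d\n", bb.unacked_exist);  /* still 1 */

        bb.entries[1] |= 1ULL << 63;                     /* acknowledge the second entry */
        mock_update_acked(&bb);
        printf("unacked_exist=%d\n", bb.unacked_exist);  /* now 0 */
        return 0;
}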
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ddc2eed64771..f3d27a6dee09 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
                 blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
                 rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
         }
 
-        hctx->queued++;
-        data->hctx = hctx;
-        data->ctx = ctx;
+        data->hctx = alloc_data.hctx;
+        data->ctx = alloc_data.ctx;
+        data->hctx->queued++;
         return rq;
 }
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba405b55329f..19a16b2dbb91 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
         spin_lock(&nbd->sock_lock);
 
         if (!nbd->sock) {
-                spin_unlock_irq(&nbd->sock_lock);
+                spin_unlock(&nbd->sock_lock);
                 return;
         }
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1bf81ef91375..744fa611cae0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
-        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
         "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
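The blk-mq hunk above fixes a stale-local pattern: __blk_mq_alloc_request() can block and come back on a different software/hardware context, so the caller must read ctx/hctx back from the alloc_data it passed in rather than from locals captured before the call. Below is a minimal user-space sketch of that pattern under assumed names (struct alloc_data, helper_alloc() and the contexts array are all hypothetical); it only illustrates why the post-call read-back matters.

/*
 * User-space sketch of the stale-local bug fixed in blk_mq_map_request():
 * a helper may rebind the context it was handed, so the caller has to read
 * the result back from the struct it passed in. All names are hypothetical.
 */
#include <stdio.h>

struct ctx { int cpu; };

struct alloc_data {
        struct ctx *ctx;        /* the helper may repoint this */
};

static struct ctx contexts[2] = { { .cpu = 0 }, { .cpu = 1 } };

/* Pretend the first attempt failed and the helper migrated to another context. */
static int helper_alloc(struct alloc_data *data)
{
        data->ctx = &contexts[1];
        return 0;
}

int main(void)
{
        struct ctx *ctx = &contexts[0];
        struct alloc_data data = { .ctx = ctx };

        helper_alloc(&data);

        /* BUG pattern: 'ctx' still points at the pre-call context. */
        printf("stale local: cpu=%d\n", ctx->cpu);
        /* Fixed pattern: read back what the helper actually used. */
        printf("from data:   cpu=%d\n", data.ctx->cpu);
        return 0;
}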