for-linus-20190215

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlxm7pAQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpl6JEACM5qHp7HEf7muuLKDUoX16G2eDOjacVxbL
 q1kqyHNvrYD/aGo+8vcshCef6xno9fL1akIxTyaTcMwYJUk9JSMicsVimxC1OvI6
 a5ZiWItX2L8Nh/heJe+FtutWbrT+Nd+3Q8DqI+U0YkRnjnXaRVgLFtBmjLOxBrqJ
 Ps/VepB4GaxA0oWdPbhos/N3wa42uFy3ixdv3Kv6WmHdqraB9uagt8PwwUti3WzQ
 uxWL6J+JOBSDha8l3fp68Okib1bm/6Nmmc9l8Yz1eFwf+Y+gVgw7wPQxkUD/XaFW
 bDJGwp3NawK07EanIAIzfXUEGfLvgeRJBEP3OGwV/TAiHX5q9zQo/tbM6x8j4aT9
 zGlwU/EnwFixgbRW/hOT5Ox4usBlfB1j0ZiNmgUm8QphHrELFnc35Kd+PR/KONNX
 sI6ZiifEAMR+4S99kTZ5YjHUqcUVm9ndd8iQGW9mvM6vt3o1L6QKeOeEKBMlhMcx
 V+JtViC50ojidYc82kEtQFY9OKRkc5x3k1wBsH49LGMT+fvEwETallOXHTarQKrv
 QAZNN1NINkMmrL5bgBXFqf0qpOy4xHnhis5AilUHNZwa4G8iAe8oqz/2eUCydiV1
 Ogx20a8T1ifeSkI2NXrwnBjVzqnfiO9wOb9py98BiLR6k59x3GYtbCdGtpIXfSFv
 hG79KKoz3Q==
 =8mjO
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-20190215' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Ensure we insert into the hctx dispatch list, if a request is marked
   as DONTPREP (Jianchao)

 - NVMe pull request, single missing unlock on error fix (Keith)

 - MD pull request, single fix for a potentially data corrupting issue
   (Nate)

 - Floppy check_events regression fix (Yufen)

* tag 'for-linus-20190215' of git://git.kernel.dk/linux-block:
  md/raid1: don't clear bitmap bits on interrupted recovery.
  floppy: check_events callback should not return a negative number
  nvme-pci: add missing unlock for reset error
  blk-mq: insert rq with DONTPREP to hctx dispatch list when requeue
commit 24f0a48743
Author: Linus Torvalds
Date:   2019-02-15 09:12:28 -08:00

 4 files changed, 34 insertions(+), 16 deletions(-)

block/blk-mq.c

@@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
 	spin_unlock_irq(&q->requeue_lock);
 
 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
-		if (!(rq->rq_flags & RQF_SOFTBARRIER))
+		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
 			continue;
 
 		rq->rq_flags &= ~RQF_SOFTBARRIER;
 		list_del_init(&rq->queuelist);
-		blk_mq_sched_insert_request(rq, true, false, false);
+		/*
+		 * If RQF_DONTPREP, rq has contained some driver specific
+		 * data, so insert it to hctx dispatch list to avoid any
+		 * merge.
+		 */
+		if (rq->rq_flags & RQF_DONTPREP)
+			blk_mq_request_bypass_insert(rq, false);
+		else
+			blk_mq_sched_insert_request(rq, true, false, false);
 	}
 
 	while (!list_empty(&rq_list)) {

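Illustrative note (not part of the series): RQF_DONTPREP is set by a driver once it has attached driver-specific data to a request, so a requeued request must neither be re-prepared nor merged by the I/O scheduler. The sketch below shows that driver-side pattern against the blk-mq API of this kernel; the demo_* names are hypothetical.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* hypothetical driver helpers, assumed to exist elsewhere */
int demo_prepare_cmd(struct request *rq);
int demo_submit_to_hw(struct request *rq);

/*
 * Hypothetical ->queue_rq() fragment: the request is prepared at most once,
 * and RQF_DONTPREP records that driver-private state now hangs off the rq.
 * With the fix above, a requeued request carrying RQF_DONTPREP is re-inserted
 * through the hctx dispatch list, so that state cannot be invalidated by a
 * merge in the I/O scheduler.
 */
static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	if (!(rq->rq_flags & RQF_DONTPREP)) {
		if (demo_prepare_cmd(rq))
			return BLK_STS_RESOURCE;
		rq->rq_flags |= RQF_DONTPREP;	/* prepared: don't redo it */
	}

	if (demo_submit_to_hw(rq))
		return BLK_STS_RESOURCE;	/* blk-mq will retry dispatch */

	return BLK_STS_OK;
}
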
drivers/block/floppy.c

@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 
 	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
 		if (lock_fdc(drive))
-			return -EINTR;
+			return 0;
 		poll_drive(false, 0);
 		process_fd_request();
 	}

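Illustrative note (not part of the series): the ->check_events() callback in struct block_device_operations returns an unsigned bitmask of DISK_EVENT_* flags, so a negative errno such as -EINTR is misread by callers as a set of pending events. A minimal conforming callback, with hypothetical demo_* helpers, might look like this:

#include <linux/genhd.h>	/* struct gendisk, DISK_EVENT_MEDIA_CHANGE */

/* hypothetical driver helpers, assumed to exist elsewhere */
bool demo_can_poll_media(struct gendisk *disk);
bool demo_media_changed(struct gendisk *disk);

/*
 * Return event bits only.  If the hardware cannot be polled right now,
 * report "no events" (0) rather than an errno, as the floppy fix above does.
 */
static unsigned int demo_check_events(struct gendisk *disk,
				      unsigned int clearing)
{
	if (!demo_can_poll_media(disk))
		return 0;

	return demo_media_changed(disk) ? DISK_EVENT_MEDIA_CHANGE : 0;
}
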
drivers/md/raid1.c

@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
 		reschedule_retry(r1_bio);
 }
 
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+	sector_t sync_blocks = 0;
+	sector_t s = r1_bio->sector;
+	long sectors_to_go = r1_bio->sectors;
+
+	/* make sure these bits don't get cleared. */
+	do {
+		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+		s += sync_blocks;
+		sectors_to_go -= sync_blocks;
+	} while (sectors_to_go > 0);
+}
+
 static void end_sync_write(struct bio *bio)
 {
 	int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
 
 	if (!uptodate) {
-		sector_t sync_blocks = 0;
-		sector_t s = r1_bio->sector;
-		long sectors_to_go = r1_bio->sectors;
-		/* make sure these bits doesn't get cleared. */
-		do {
-			md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
-			s += sync_blocks;
-			sectors_to_go -= sync_blocks;
-		} while (sectors_to_go > 0);
+		abort_sync_write(mddev, r1_bio);
 		set_bit(WriteErrorSeen, &rdev->flags);
 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 			set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		     (i == r1_bio->read_disk ||
 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
 			continue;
-		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+			abort_sync_write(mddev, r1_bio);
 			continue;
+		}
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))

drivers/nvme/host/pci.c

@@ -2560,15 +2560,15 @@ static void nvme_reset_work(struct work_struct *work)
 	mutex_lock(&dev->shutdown_lock);
 	result = nvme_pci_enable(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	result = nvme_pci_configure_admin_queue(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	result = nvme_alloc_admin_tags(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	/*
 	 * Limit the max command size to prevent iod->sg allocations going
@@ -2651,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work)
 	nvme_start_ctrl(&dev->ctrl);
 	return;
 
+ out_unlock:
+	mutex_unlock(&dev->shutdown_lock);
  out:
 	nvme_remove_dead_ctrl(dev, result);
 }
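
Illustrative note (not part of the series): the nvme-pci change is the usual lock-aware goto unwind; every failure taken while shutdown_lock is held must branch to a label that drops the lock before the shared error path runs. A generic sketch of the pattern, with hypothetical demo_* names rather than nvme code:

#include <linux/mutex.h>

struct demo_dev {
	struct mutex lock;
};

/* hypothetical setup/teardown steps, assumed to exist elsewhere */
int demo_enable(struct demo_dev *dev);
int demo_configure(struct demo_dev *dev);
void demo_teardown(struct demo_dev *dev);

/*
 * Failures while dev->lock is held branch to out_unlock, which releases the
 * lock and then runs the common error teardown, mirroring the out_unlock/out
 * label pair added above.
 */
static int demo_reset(struct demo_dev *dev)
{
	int result;

	mutex_lock(&dev->lock);

	result = demo_enable(dev);
	if (result)
		goto out_unlock;

	result = demo_configure(dev);
	if (result)
		goto out_unlock;

	mutex_unlock(&dev->lock);
	return 0;

 out_unlock:
	mutex_unlock(&dev->lock);
	demo_teardown(dev);
	return result;
}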