ceph: only dirty ITER_IOVEC pages for direct read

If a page is already locked, attempting to dirty it leads to a deadlock
in lock_page().  This is what currently happens to ITER_BVEC pages when
a dio-enabled loop device is backed by ceph:

  $ losetup --direct-io /dev/loop0 /mnt/cephfs/img
  $ xfs_io -c 'pread 0 4k' /dev/loop0
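
The deadlock is in the page-release helper itself.  A simplified
sketch of ceph_put_page_vector() (net/ceph/pagevec.c; abbreviated,
not the verbatim kernel code) shows where it goes wrong:
set_page_dirty_lock() takes lock_page(), which blocks forever on a
page its caller is already holding locked:

  void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
  {
          int i;

          for (i = 0; i < num_pages; i++) {
                  if (dirty)
                          /* takes lock_page() internally -- deadlocks
                           * if the page is already locked */
                          set_page_dirty_lock(pages[i]);
                  put_page(pages[i]);
          }
          kvfree(pages);
  }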

Follow other file systems and only dirty ITER_IOVEC pages.
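
For comparison, the block layer's direct I/O path makes the same
distinction when deciding whether to redirty pages after a read
completes; roughly (paraphrased from fs/block_dev.c of that era,
not verbatim):

  /* dirty only pages backing user memory (ITER_IOVEC); bvec/kvec
   * pages belong to some other kernel subsystem, which may still
   * hold them locked */
  dio->should_dirty = is_read && iter_is_iovec(iter);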

Cc: stable@vger.kernel.org
Signed-off-by: "Yan, Zheng" <zyan@redhat.com>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Author:    Yan, Zheng <zyan@redhat.com>
Date:      2018-03-16 11:22:29 +0800
Committer: Ilya Dryomov <idryomov@gmail.com>
parent     3eb2ce825e
commit     85784f9395

fs/ceph/file.c

@@ -640,7 +640,8 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 struct ceph_aio_request {
 	struct kiocb *iocb;
 	size_t total_len;
-	int write;
+	bool write;
+	bool should_dirty;
 	int error;
 	struct list_head osd_reqs;
 	unsigned num_reqs;
@@ -750,7 +751,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 		}
 	}
 
-	ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
+	ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
 	ceph_osdc_put_request(req);
 
 	if (rc < 0)
@@ -847,6 +848,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 	size_t count = iov_iter_count(iter);
 	loff_t pos = iocb->ki_pos;
 	bool write = iov_iter_rw(iter) == WRITE;
+	bool should_dirty = !write && iter_is_iovec(iter);
 
 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 		return -EROFS;
@@ -914,6 +916,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 	if (aio_req) {
 		aio_req->iocb = iocb;
 		aio_req->write = write;
+		aio_req->should_dirty = should_dirty;
 		INIT_LIST_HEAD(&aio_req->osd_reqs);
 		if (write) {
 			aio_req->mtime = mtime;
@@ -971,7 +974,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 			len = ret;
 		}
 
-		ceph_put_page_vector(pages, num_pages, !write);
+		ceph_put_page_vector(pages, num_pages, should_dirty);
 		ceph_osdc_put_request(req);
 		if (ret < 0)
 			break;