new helper: iov_iter_npages()

counts the pages covered by iov_iter, up to a given limit.
do_blockdev_direct_IO() and fuse_iter_npages() switched to it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Al Viro 2014-03-19 01:16:16 -04:00
parent 5b46f25ddc
commit f67da30c1d
4 changed files with 31 additions and 22 deletions
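
The arithmetic behind the new helper is the same page rounding that the open-coded loops below perform: for each segment, take the index of the page just past the segment's last byte and subtract the index of the page holding its first byte. A minimal userspace sketch of that calculation, assuming a 4096-byte page size purely for illustration (the kernel code uses its real PAGE_SIZE):

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed for illustration only */

/* pages spanned by [addr, addr + len): one past the last page index, minus the first page index */
static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
        return (addr + len + PAGE_SIZE - 1) / PAGE_SIZE - addr / PAGE_SIZE;
}

int main(void)
{
        /* 0x20 bytes straddling the page boundary at 0x11000: counts as 2 pages */
        printf("%lu\n", pages_spanned(0x10ff0, 0x20));
        /* a page-aligned, page-sized buffer: counts as 1 page */
        printf("%lu\n", pages_spanned(0x20000, 0x1000));
        return 0;
}

A range that merely straddles a page boundary counts both pages even if it is only a few bytes long, which is exactly what a caller needs when sizing a struct page array.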

fs/direct-io.c

@@ -1100,7 +1100,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         get_block_t get_block, dio_iodone_t end_io,
         dio_submit_t submit_io, int flags)
 {
-        int seg;
         unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
         unsigned blkbits = i_blkbits;
         unsigned blocksize_mask = (1 << blkbits) - 1;
@@ -1108,7 +1107,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         loff_t end = offset + iov_iter_count(iter);
         struct dio *dio;
         struct dio_submit sdio = { 0, };
-        unsigned long user_addr;
         struct buffer_head map_bh = { 0, };
         struct blk_plug plug;
         unsigned long align = offset | iov_iter_alignment(iter);
@@ -1231,12 +1229,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         if (unlikely(sdio.blkfactor))
                 sdio.pages_in_io = 2;
 
-        for (seg = 0; seg < iter->nr_segs; seg++) {
-                user_addr = (unsigned long)iter->iov[seg].iov_base;
-                sdio.pages_in_io +=
-                        ((user_addr + iter->iov[seg].iov_len + PAGE_SIZE-1) /
-                                PAGE_SIZE - user_addr / PAGE_SIZE);
-        }
+        sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);
 
         blk_start_plug(&plug);

fs/fuse/file.c

@@ -1310,7 +1310,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
         while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
                 unsigned npages;
-                size_t start, end, frag_size;
+                size_t start;
                 unsigned n = req->max_pages - req->num_pages;
                 ssize_t ret = iov_iter_get_pages(ii,
                                         &req->pages[req->num_pages],
@@ -1344,19 +1344,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 
 static inline int fuse_iter_npages(const struct iov_iter *ii_p)
 {
-        struct iov_iter ii = *ii_p;
-        int npages = 0;
-
-        while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
-                unsigned long user_addr = fuse_get_user_addr(&ii);
-                unsigned offset = user_addr & ~PAGE_MASK;
-                size_t frag_size = iov_iter_single_seg_count(&ii);
-
-                npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-                iov_iter_advance(&ii, frag_size);
-        }
-
-        return min(npages, FUSE_MAX_PAGES_PER_REQ);
+        return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
 }
 
 ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,

include/linux/uio.h

@@ -73,6 +73,7 @@ void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
                         unsigned long nr_segs, size_t count);
 ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                         size_t maxsize, size_t *start);
+int iov_iter_npages(const struct iov_iter *i, int maxpages);
 
 static inline size_t iov_iter_count(struct iov_iter *i)
 {
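
With the declaration exported in uio.h, a caller can size a page array from the iterator before pinning anything with iov_iter_get_pages(). The sketch below is illustrative only: pin_iter_pages() and the 64-page cap are hypothetical, not part of this patch, and error handling is reduced to the essentials.

#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical caller (not part of this patch): pin at most 64 pages'
 * worth of the iterator and hand the pinned pages back to the caller.
 */
static ssize_t pin_iter_pages(struct iov_iter *i, struct page ***pagesp,
                              size_t *start)
{
        int npages = iov_iter_npages(i, 64);    /* bounded estimate, never > 64 */
        struct page **pages;
        ssize_t bytes;

        if (!npages)
                return 0;

        pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* maxsize keeps iov_iter_get_pages() within the array we just sized */
        bytes = iov_iter_get_pages(i, pages, npages * PAGE_SIZE, start);
        if (bytes < 0) {
                kfree(pages);
                return bytes;
        }

        *pagesp = pages;
        return bytes;   /* bytes of the iterator now backed by pinned pages */
}

Passing a finite cap matters when the result feeds an allocation, which is why fuse caps it at FUSE_MAX_PAGES_PER_REQ, while the direct-IO path above only wants a total estimate and passes INT_MAX.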

mm/iov_iter.c

@@ -262,3 +262,30 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
         return (res == n ? len : res * PAGE_SIZE) - *start;
 }
 EXPORT_SYMBOL(iov_iter_get_pages);
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+        size_t offset = i->iov_offset;
+        size_t size = i->count;
+        const struct iovec *iov = i->iov;
+        int npages = 0;
+        int n;
+
+        for (n = 0; size && n < i->nr_segs; n++, iov++) {
+                unsigned long addr = (unsigned long)iov->iov_base + offset;
+                size_t len = iov->iov_len - offset;
+
+                if (unlikely(!len))     /* empty segment */
+                        continue;
+                if (len > size)
+                        len = size;
+                npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
+                          - addr / PAGE_SIZE;
+                if (npages >= maxpages) /* don't bother going further */
+                        return maxpages;
+                size -= len;
+                offset = 0;
+        }
+        return min(npages, maxpages);
+}
+EXPORT_SYMBOL(iov_iter_npages);
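
Two details of the loop are worth noting. iov_offset is charged only against the first segment: addr and len are adjusted by offset once, and offset is zeroed at the bottom of the loop so later segments are counted from their real base. And because the count is clamped, the function returns maxpages as soon as the running total reaches it, so a caller like fuse_iter_npages() above gets its FUSE_MAX_PAGES_PER_REQ cap without walking the whole iterator. For example (assuming 4096-byte pages), with iov_offset = 0x800 over segments {base 0x20000, len 0x1000} and {base 0x30000, len 0x2000}, the first segment contributes one page (0x20800-0x20fff) and the second two (0x30000-0x31fff), for a total of three.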