io_uring/io-wq: move RLIMIT_FSIZE to io-wq
RLIMIT_FSIZE is needed only for execution from an io-wq context, hence move all preparations from hot path to io-wq work setup. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
327d6d968b
commit
57f1a64958
|
@ -462,6 +462,7 @@ static void io_impersonate_work(struct io_worker *worker,
|
||||||
io_wq_switch_mm(worker, work);
|
io_wq_switch_mm(worker, work);
|
||||||
if (worker->cur_creds != work->creds)
|
if (worker->cur_creds != work->creds)
|
||||||
io_wq_switch_creds(worker, work);
|
io_wq_switch_creds(worker, work);
|
||||||
|
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void io_assign_current_work(struct io_worker *worker,
|
static void io_assign_current_work(struct io_worker *worker,
|
||||||
|
|
|
@ -89,6 +89,7 @@ struct io_wq_work {
|
||||||
struct mm_struct *mm;
|
struct mm_struct *mm;
|
||||||
const struct cred *creds;
|
const struct cred *creds;
|
||||||
struct fs_struct *fs;
|
struct fs_struct *fs;
|
||||||
|
unsigned long fsize;
|
||||||
unsigned flags;
|
unsigned flags;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -645,7 +645,6 @@ struct io_kiocb {
|
||||||
unsigned int flags;
|
unsigned int flags;
|
||||||
refcount_t refs;
|
refcount_t refs;
|
||||||
struct task_struct *task;
|
struct task_struct *task;
|
||||||
unsigned long fsize;
|
|
||||||
u64 user_data;
|
u64 user_data;
|
||||||
|
|
||||||
struct list_head link_list;
|
struct list_head link_list;
|
||||||
|
@ -736,6 +735,7 @@ struct io_op_def {
|
||||||
unsigned pollout : 1;
|
unsigned pollout : 1;
|
||||||
/* op supports buffer selection */
|
/* op supports buffer selection */
|
||||||
unsigned buffer_select : 1;
|
unsigned buffer_select : 1;
|
||||||
|
unsigned needs_fsize : 1;
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct io_op_def io_op_defs[] = {
|
static const struct io_op_def io_op_defs[] = {
|
||||||
|
@ -755,6 +755,7 @@ static const struct io_op_def io_op_defs[] = {
|
||||||
.hash_reg_file = 1,
|
.hash_reg_file = 1,
|
||||||
.unbound_nonreg_file = 1,
|
.unbound_nonreg_file = 1,
|
||||||
.pollout = 1,
|
.pollout = 1,
|
||||||
|
.needs_fsize = 1,
|
||||||
},
|
},
|
||||||
[IORING_OP_FSYNC] = {
|
[IORING_OP_FSYNC] = {
|
||||||
.needs_file = 1,
|
.needs_file = 1,
|
||||||
|
@ -769,6 +770,7 @@ static const struct io_op_def io_op_defs[] = {
|
||||||
.hash_reg_file = 1,
|
.hash_reg_file = 1,
|
||||||
.unbound_nonreg_file = 1,
|
.unbound_nonreg_file = 1,
|
||||||
.pollout = 1,
|
.pollout = 1,
|
||||||
|
.needs_fsize = 1,
|
||||||
},
|
},
|
||||||
[IORING_OP_POLL_ADD] = {
|
[IORING_OP_POLL_ADD] = {
|
||||||
.needs_file = 1,
|
.needs_file = 1,
|
||||||
|
@ -821,6 +823,7 @@ static const struct io_op_def io_op_defs[] = {
|
||||||
},
|
},
|
||||||
[IORING_OP_FALLOCATE] = {
|
[IORING_OP_FALLOCATE] = {
|
||||||
.needs_file = 1,
|
.needs_file = 1,
|
||||||
|
.needs_fsize = 1,
|
||||||
},
|
},
|
||||||
[IORING_OP_OPENAT] = {
|
[IORING_OP_OPENAT] = {
|
||||||
.file_table = 1,
|
.file_table = 1,
|
||||||
|
@ -852,6 +855,7 @@ static const struct io_op_def io_op_defs[] = {
|
||||||
.needs_file = 1,
|
.needs_file = 1,
|
||||||
.unbound_nonreg_file = 1,
|
.unbound_nonreg_file = 1,
|
||||||
.pollout = 1,
|
.pollout = 1,
|
||||||
|
.needs_fsize = 1,
|
||||||
},
|
},
|
||||||
[IORING_OP_FADVISE] = {
|
[IORING_OP_FADVISE] = {
|
||||||
.needs_file = 1,
|
.needs_file = 1,
|
||||||
|
@ -1169,6 +1173,10 @@ static void io_prep_async_work(struct io_kiocb *req)
|
||||||
}
|
}
|
||||||
spin_unlock(&current->fs->lock);
|
spin_unlock(&current->fs->lock);
|
||||||
}
|
}
|
||||||
|
if (def->needs_fsize)
|
||||||
|
req->work.fsize = rlimit(RLIMIT_FSIZE);
|
||||||
|
else
|
||||||
|
req->work.fsize = RLIM_INFINITY;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void io_prep_async_link(struct io_kiocb *req)
|
static void io_prep_async_link(struct io_kiocb *req)
|
||||||
|
@ -3072,8 +3080,6 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||||
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
|
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
|
||||||
return -EBADF;
|
return -EBADF;
|
||||||
|
|
||||||
req->fsize = rlimit(RLIMIT_FSIZE);
|
|
||||||
|
|
||||||
/* either don't need iovec imported or already have it */
|
/* either don't need iovec imported or already have it */
|
||||||
if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
|
if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -3130,17 +3136,11 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
|
||||||
}
|
}
|
||||||
kiocb->ki_flags |= IOCB_WRITE;
|
kiocb->ki_flags |= IOCB_WRITE;
|
||||||
|
|
||||||
if (!force_nonblock)
|
|
||||||
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
|
|
||||||
|
|
||||||
if (req->file->f_op->write_iter)
|
if (req->file->f_op->write_iter)
|
||||||
ret2 = call_write_iter(req->file, kiocb, &iter);
|
ret2 = call_write_iter(req->file, kiocb, &iter);
|
||||||
else
|
else
|
||||||
ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
|
ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
|
||||||
|
|
||||||
if (!force_nonblock)
|
|
||||||
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
|
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
|
||||||
* retry them without IOCB_NOWAIT.
|
* retry them without IOCB_NOWAIT.
|
||||||
|
@ -3335,7 +3335,6 @@ static int io_fallocate_prep(struct io_kiocb *req,
|
||||||
req->sync.off = READ_ONCE(sqe->off);
|
req->sync.off = READ_ONCE(sqe->off);
|
||||||
req->sync.len = READ_ONCE(sqe->addr);
|
req->sync.len = READ_ONCE(sqe->addr);
|
||||||
req->sync.mode = READ_ONCE(sqe->len);
|
req->sync.mode = READ_ONCE(sqe->len);
|
||||||
req->fsize = rlimit(RLIMIT_FSIZE);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3346,11 +3345,8 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
|
||||||
/* fallocate always requiring blocking context */
|
/* fallocate always requiring blocking context */
|
||||||
if (force_nonblock)
|
if (force_nonblock)
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
|
|
||||||
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
|
|
||||||
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
|
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
|
||||||
req->sync.len);
|
req->sync.len);
|
||||||
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
|
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
req_set_fail_links(req);
|
req_set_fail_links(req);
|
||||||
io_req_complete(req, ret);
|
io_req_complete(req, ret);
|
||||||
|
|
Loading…
Reference in New Issue