author     Pavel Begunkov <asml.silence@gmail.com>    2021-04-01 15:43:54 +0100
committer  Jens Axboe <axboe@kernel.dk>               2021-04-11 19:30:34 -0600
commit     75769e3f7357171dbe040a5ed55445c2642295d1 (patch)
tree       8e63709e8e711ec094cb354ba764ce2eaec49cc7 /fs/io_uring.c
parent     0aec38fda2b6e36c0b066a87ff727ace3666cade (diff)
io_uring: improve import_fixed overflow checks
Replace a hand-coded overflow check with a specialised function. Even though compilers are smart enough to generate identical binary (i.e. check the carry bit), the helper is more foolproof and conveys the intention better.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e437dcdc929bacbb6f11a4824ecbbf17225cb82a.1617287883.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
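For readers unfamiliar with the helper: check_add_overflow() from include/linux/overflow.h is effectively a wrapper around the compiler's __builtin_add_overflow(); it returns true when the addition wraps and stores the (possibly wrapped) sum in its third argument. Below is a minimal userspace sketch, not kernel code, showing that the old hand-coded test and the builtin agree on unsigned 64-bit wraparound; the file and function names here are made up for illustration.

/*
 * Userspace sketch only: compares the old hand-coded wraparound test
 * with __builtin_add_overflow(), which check_add_overflow() builds on.
 * Build with: cc -O2 -o overflow_demo overflow_demo.c
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* old style: relies on the unsigned sum wrapping below the start address */
static bool add_overflows_handcoded(uint64_t addr, uint64_t len)
{
	return addr + len < addr;
}

/* new style: the compiler reports the carry and hands back the sum */
static bool add_overflows_builtin(uint64_t addr, uint64_t len, uint64_t *end)
{
	return __builtin_add_overflow(addr, len, end);
}

int main(void)
{
	uint64_t cases[][2] = {
		{ 0x1000, 0x200 },        /* fits comfortably */
		{ UINT64_MAX - 1, 2 },    /* wraps around to 0 */
		{ UINT64_MAX, 0 },        /* boundary case, no wrap */
	};

	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		uint64_t addr = cases[i][0], len = cases[i][1], end;
		bool oldw = add_overflows_handcoded(addr, len);
		bool neww = add_overflows_builtin(addr, len, &end);

		printf("addr=%#llx len=%#llx old=%d new=%d end=%#llx\n",
		       (unsigned long long)addr, (unsigned long long)len,
		       oldw, neww, (unsigned long long)end);
	}
	return 0;
}

Both tests flag the same inputs; the practical difference is that the builtin names the intent and also returns the sum, which the patch then reuses as buf_end.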
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f53d93261e2b..e6508b19e19e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2785,8 +2785,8 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
size_t len = req->rw.len;
struct io_mapped_ubuf *imu;
u16 index, buf_index = req->buf_index;
+ u64 buf_end, buf_addr = req->rw.addr;
size_t offset;
- u64 buf_addr;
if (unlikely(buf_index >= ctx->nr_user_bufs))
return -EFAULT;
@@ -2794,11 +2794,10 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
imu = &ctx->user_bufs[index];
buf_addr = req->rw.addr;
- /* overflow */
- if (buf_addr + len < buf_addr)
+ if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
return -EFAULT;
/* not inside the mapped region */
- if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
+ if (buf_addr < imu->ubuf || buf_end > imu->ubuf + imu->len)
return -EFAULT;
/*
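Taken together, the patched hunk boils down to: compute buf_end = buf_addr + len exactly once while rejecting wraparound, then require the request to sit inside [imu->ubuf, imu->ubuf + imu->len). The following standalone sketch shows that pattern; struct mapped_buf and import_fixed_check() are hypothetical stand-ins for the io_uring types, and only the ubuf/len field names and the two comparisons mirror the diff.

/*
 * Illustrative sketch of the bounds check in the patched hunk, outside
 * the kernel. __builtin_add_overflow() stands in for check_add_overflow().
 */
#include <stdbool.h>
#include <stdint.h>

struct mapped_buf {
	uint64_t ubuf;	/* start of the registered buffer */
	uint64_t len;	/* its length in bytes */
};

/* Returns true if [buf_addr, buf_addr + len) fits inside the mapping. */
static bool import_fixed_check(const struct mapped_buf *imu,
			       uint64_t buf_addr, uint64_t len)
{
	uint64_t buf_end;

	/* reject wraparound; otherwise buf_end holds the exclusive end */
	if (__builtin_add_overflow(buf_addr, len, &buf_end))
		return false;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_end > imu->ubuf + imu->len)
		return false;
	return true;
}

int main(void)
{
	struct mapped_buf imu = { .ubuf = 0x10000, .len = 0x1000 };

	/* inside, straddling the end, and a wrapping request */
	return !(import_fixed_check(&imu, 0x10100, 0x200) &&
		 !import_fixed_check(&imu, 0x10f00, 0x200) &&
		 !import_fixed_check(&imu, UINT64_MAX, 16));
}

Reusing buf_end for the upper bound avoids recomputing buf_addr + len, and the range comparison stays overflow-safe because a wrapped sum never reaches it.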