| author    | Jens Axboe <axboe@kernel.dk> | 2020-06-01 10:00:27 -0600 |
| committer | Jens Axboe <axboe@kernel.dk> | 2020-06-21 20:44:25 -0600 |
| commit    | 4503b7676a2e0abe69c2f2c0d8b03aec53f2f048 | |
| tree      | 1d2e009bdf3fc22219108545dd5c0e8022bf19db | /fs/io_uring.c |
| parent    | ac8691c415e0ce0b8734cb6d9df2df18608eebed | |
io_uring: catch -EIO from buffered issue request failure
-EIO bubbles up like -EAGAIN if we fail to allocate a request at the
lower level. Play it safe and treat it like -EAGAIN in terms of sync
retry, to avoid passing back an errant -EIO.
Catch some of these early for block-based files, as non-mq devices
generally do not support NOWAIT. That saves us the overhead of first
trying and then retrying from async context; we can go straight to the
async punt instead.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
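To make the commit message concrete, here is a small standalone C sketch (not kernel code, just a toy model): supports_nowait(), issue_nonblock(), punt_async() and struct fake_file are hypothetical stand-ins for io_bdev_nowait(), the forced non-blocking issue in io_read(), and the async punt. It only illustrates the two decisions the patch below makes: skip the NOWAIT attempt entirely when the backing queue is not blk-mq, and treat -EIO from a forced non-blocking attempt like -EAGAIN so it is retried rather than passed back.

/*
 * Standalone illustration only -- not kernel code. The struct and the
 * helpers below are hypothetical stand-ins modeling the io_uring paths
 * touched by this patch.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_file {
	bool is_blockdev;
	bool queue_is_mq;	/* blk-mq queue, i.e. understands NOWAIT */
	bool would_block;	/* simulate a transient -EAGAIN on NOWAIT issue */
};

/* Models io_bdev_nowait(): only multiqueue block devices handle NOWAIT. */
static bool supports_nowait(const struct fake_file *f)
{
	return !f->is_blockdev || f->queue_is_mq;
}

/*
 * Models a forced non-blocking issue; for illustration, a non-mq device
 * fails with -EIO, mimicking a failed request allocation at the lower level.
 */
static int issue_nonblock(const struct fake_file *f)
{
	if (!f->queue_is_mq)
		return -EIO;
	return f->would_block ? -EAGAIN : 0;
}

static void punt_async(const char *why)
{
	printf("async punt: %s\n", why);
}

static void submit(const struct fake_file *f)
{
	int ret;

	/* Early check added by the patch: don't even try a doomed NOWAIT issue. */
	if (!supports_nowait(f)) {
		punt_async("file does not support NOWAIT");
		return;
	}

	ret = issue_nonblock(f);

	/* -EIO from the forced non-blocking attempt is retried like -EAGAIN. */
	if (ret == -EAGAIN || ret == -EIO)
		punt_async("non-blocking issue failed, retry from async context");
	else
		printf("completed inline, ret=%d\n", ret);
}

int main(void)
{
	struct fake_file mq_disk      = { .is_blockdev = true, .queue_is_mq = true };
	struct fake_file busy_mq_disk = { .is_blockdev = true, .queue_is_mq = true,
					  .would_block = true };
	struct fake_file legacy_disk  = { .is_blockdev = true, .queue_is_mq = false };

	submit(&mq_disk);	/* completes inline */
	submit(&busy_mq_disk);	/* -EAGAIN, punted for a blocking retry */
	submit(&legacy_disk);	/* never tries NOWAIT, punts immediately */
	return 0;
}

The saving the commit message refers to is visible in submit(): for the legacy (non-mq) device the sketch never performs the non-blocking attempt that could only fail before being retried.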
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 28 |
1 file changed, 23 insertions, 5 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index de894455f6bd..c5ee6d1a92d3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2088,6 +2088,15 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
 	return state->file;
 }
 
+static bool io_bdev_nowait(struct block_device *bdev)
+{
+#ifdef CONFIG_BLOCK
+	return !bdev || queue_is_mq(bdev_get_queue(bdev));
+#else
+	return true;
+#endif
+}
+
 /*
  * If we tracked the file through the SCM inflight mechanism, we could support
  * any file. For now, just ensure that anything potentially problematic is done
@@ -2097,10 +2106,19 @@ static bool io_file_supports_async(struct file *file, int rw)
 {
 	umode_t mode = file_inode(file)->i_mode;
 
-	if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
-		return true;
-	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
+	if (S_ISBLK(mode)) {
+		if (io_bdev_nowait(file->f_inode->i_bdev))
+			return true;
+		return false;
+	}
+	if (S_ISCHR(mode) || S_ISSOCK(mode))
 		return true;
+	if (S_ISREG(mode)) {
+		if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
+		    file->f_op != &io_uring_fops)
+			return true;
+		return false;
+	}
 
 	/* any ->read/write should understand O_NONBLOCK */
 	if (file->f_flags & O_NONBLOCK)
@@ -2650,7 +2668,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 	iov_count = iov_iter_count(&iter);
 	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
 	if (!ret) {
-		ssize_t ret2;
+		ssize_t ret2 = 0;
 
 		if (req->file->f_op->read_iter)
 			ret2 = call_read_iter(req->file, kiocb, &iter);
@@ -2658,7 +2676,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 			ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
 
 		/* Catch -EAGAIN return for forced non-blocking submission */
-		if (!force_nonblock || ret2 != -EAGAIN) {
+		if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
 			kiocb_done(kiocb, ret2);
 		} else {
 copy_iov: