author     Linus Torvalds <torvalds@linux-foundation.org>  2021-08-30 19:22:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-08-30 19:22:52 -0700
commit     c547d89a9a445f6bb757b93247de43d312e722da (patch)
tree       4c209d3f90476fbd9ad841251a3b7818a0d99e5d /net
parent     44d7d3b0d1cdb2119dba33bbedd602ce30528d6c (diff)
parent     87df7fb922d18e96992aa5e824aa34b2065fef59 (diff)
download   lwn-c547d89a9a445f6bb757b93247de43d312e722da.tar.gz
           lwn-c547d89a9a445f6bb757b93247de43d312e722da.zip
Merge tag 'for-5.15/io_uring-2021-08-30' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:
- cancellation cleanups (Hao, Pavel)
- io-wq accounting cleanup (Hao)
- io_uring submit locking fix (Hao)
- io_uring link handling fixes (Hao)
- fixed file improvements (wangyangbo, Pavel)
- allow updates of linked timeouts like regular timeouts (Pavel)
- IOPOLL fix (Pavel)
- remove batched file get optimization (Pavel)
- improve reference handling (Pavel)
- IRQ task_work batching (Pavel)
- allow pure fixed file, and add support for open/accept (Pavel)
- GFP_ATOMIC RT kernel fix
- multiple CQ ring waiter improvement
- funnel IRQ completions through task_work
- add support for limiting async workers explicitly (see the sketch after this list)
- add different clocksource support for timeouts (a sketch follows the commit list below)
- io-wq wakeup race fix
- lots of cleanups and improvements (Pavel et al)
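As a rough illustration of the explicit async worker limits noted in the list above, the sketch below caps the io-wq pools from userspace. This is an assumption-laden example, not part of this merge: it presumes liburing 2.1+, which wraps IORING_REGISTER_IOWQ_MAX_WORKERS in io_uring_register_iowq_max_workers(), and the limits 4/8 are arbitrary.

/*
 * Sketch: cap io-wq worker counts for a ring.  values[0] limits bounded
 * workers (regular file I/O), values[1] limits unbounded workers
 * (blocking network I/O).  A value of 0 leaves that class unchanged;
 * the kernel writes the previous limits back into the array.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	unsigned int values[2] = { 4, 8 };	/* bounded, unbounded (arbitrary) */
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	ret = io_uring_register_iowq_max_workers(&ring, values);
	if (ret < 0)
		fprintf(stderr, "register_iowq_max_workers: %d\n", ret);
	else
		printf("previous limits: bounded=%u unbounded=%u\n",
		       values[0], values[1]);

	io_uring_queue_exit(&ring);
	return 0;
}

Because the old limits are reported back, the same call doubles as a query when both entries are 0.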
* tag 'for-5.15/io_uring-2021-08-30' of git://git.kernel.dk/linux-block: (87 commits)
io-wq: fix wakeup race when adding new work
io-wq: wqe and worker locks no longer need to be IRQ safe
io-wq: check max_worker limits if a worker transitions bound state
io_uring: allow updating linked timeouts
io_uring: keep ltimeouts in a list
io_uring: support CLOCK_BOOTTIME/REALTIME for timeouts
io-wq: provide a way to limit max number of workers
io_uring: add build check for buf_index overflows
io_uring: clarify io_req_task_cancel() locking
io_uring: add task-refs-get helper
io_uring: fix failed linkchain code logic
io_uring: remove redundant req_set_fail()
io_uring: don't free request to slab
io_uring: accept directly into fixed file table
io_uring: hand code io_accept() fd installing
io_uring: openat directly into fixed fd table
net: add accept helper not installing fd
io_uring: fix io_try_cancel_userdata race for iowq
io_uring: IRQ rw completion batching
io_uring: batch task work locking
...
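One of the commits above, "io_uring: support CLOCK_BOOTTIME/REALTIME for timeouts", adds per-request clock selection for timeout SQEs. A minimal sketch, assuming liburing and kernel headers new enough to define IORING_TIMEOUT_BOOTTIME (IORING_TIMEOUT_REALTIME works the same way); without a clock flag, timeouts remain on CLOCK_MONOTONIC as before.

/*
 * Sketch: a 2 second pure timeout measured against CLOCK_BOOTTIME, so it
 * keeps counting across system suspend.  With count == 0 the request
 * completes with -ETIME when the timer fires.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, IORING_TIMEOUT_BOOTTIME);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("timeout completed: res=%d\n", cqe->res);	/* expect -ETIME */
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}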
Diffstat (limited to 'net')
-rw-r--r--  net/socket.c  71
1 file changed, 37 insertions(+), 34 deletions(-)
diff --git a/net/socket.c b/net/socket.c
index 0b2dad3bdf7f..532fff5a3684 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1722,32 +1722,22 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 	return __sys_listen(fd, backlog);
 }
 
-int __sys_accept4_file(struct file *file, unsigned file_flags,
+struct file *do_accept(struct file *file, unsigned file_flags,
 		       struct sockaddr __user *upeer_sockaddr,
-		       int __user *upeer_addrlen, int flags,
-		       unsigned long nofile)
+		       int __user *upeer_addrlen, int flags)
 {
 	struct socket *sock, *newsock;
 	struct file *newfile;
-	int err, len, newfd;
+	int err, len;
 	struct sockaddr_storage address;
 
-	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
-		return -EINVAL;
-
-	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
-		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
-
 	sock = sock_from_file(file);
-	if (!sock) {
-		err = -ENOTSOCK;
-		goto out;
-	}
+	if (!sock)
+		return ERR_PTR(-ENOTSOCK);
 
-	err = -ENFILE;
 	newsock = sock_alloc();
 	if (!newsock)
-		goto out;
+		return ERR_PTR(-ENFILE);
 
 	newsock->type = sock->type;
 	newsock->ops = sock->ops;
@@ -1758,18 +1748,9 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
 	 */
 	__module_get(newsock->ops->owner);
 
-	newfd = __get_unused_fd_flags(flags, nofile);
-	if (unlikely(newfd < 0)) {
-		err = newfd;
-		sock_release(newsock);
-		goto out;
-	}
 	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
-	if (IS_ERR(newfile)) {
-		err = PTR_ERR(newfile);
-		put_unused_fd(newfd);
-		goto out;
-	}
+	if (IS_ERR(newfile))
+		return newfile;
 
 	err = security_socket_accept(sock, newsock);
 	if (err)
@@ -1794,16 +1775,38 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
 	}
 
 	/* File flags are not inherited via accept() unlike another OSes. */
-
-	fd_install(newfd, newfile);
-	err = newfd;
-out:
-	return err;
+	return newfile;
 
 out_fd:
 	fput(newfile);
-	put_unused_fd(newfd);
-	goto out;
+	return ERR_PTR(err);
+}
+
+int __sys_accept4_file(struct file *file, unsigned file_flags,
+		       struct sockaddr __user *upeer_sockaddr,
+		       int __user *upeer_addrlen, int flags,
+		       unsigned long nofile)
+{
+	struct file *newfile;
+	int newfd;
+	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+		return -EINVAL;
+
+	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+	newfd = __get_unused_fd_flags(flags, nofile);
+	if (unlikely(newfd < 0))
+		return newfd;
+
+	newfile = do_accept(file, file_flags, upeer_sockaddr, upeer_addrlen,
+			    flags);
+	if (IS_ERR(newfile)) {
+		put_unused_fd(newfd);
+		return PTR_ERR(newfile);
+	}
+	fd_install(newfd, newfile);
+	return newfd;
 }
 
 /*
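The do_accept() split above is what lets io_uring accept a connection straight into its fixed file table, without ever allocating a process-wide descriptor for the new socket. A userspace sketch of that path, assuming a liburing/kernel combination that provides io_uring_register_files_sparse() and io_uring_prep_accept_direct(); the port and slot index are arbitrary.

/*
 * Sketch: accept directly into fixed-file slot SLOT.  The accepted
 * socket never gets a regular fd; later SQEs reference it by slot
 * index with IOSQE_FIXED_FILE.
 */
#include <liburing.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

#define LISTEN_PORT 8080	/* arbitrary */
#define SLOT        0		/* fixed file table index */

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(LISTEN_PORT),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int lfd, ret;

	lfd = socket(AF_INET, SOCK_STREAM, 0);
	if (lfd < 0 || bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(lfd, 16) < 0) {
		perror("listen socket");
		return 1;
	}

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}
	ret = io_uring_register_files_sparse(&ring, 4);	/* empty 4-slot table */
	if (ret < 0) {
		fprintf(stderr, "register_files_sparse: %d\n", ret);
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_accept_direct(sqe, lfd, NULL, NULL, 0, SLOT);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/* negative res is an error; otherwise the socket lives only in SLOT */
		printf("accept_direct: res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(lfd);
	return 0;
}

This mirrors the kernel-side split in the diff: __sys_accept4_file() still reserves and installs a normal fd around do_accept(), while io_uring's direct variants skip that step and place the struct file into the registered table instead.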