author     Kuniyuki Iwashima <kuniyu@amazon.co.jp>   2022-01-13 09:28:45 +0900
committer  Alexei Starovoitov <ast@kernel.org>       2022-01-18 15:45:06 -0800
commit     4408d55a64677febdcb50d1b44d0dc714ce4187e (patch)
tree       d4b3ce744d08900ceb2af860f8f7c962ed86486c /net/unix
parent     2a1aff6035187d877d7b6f28f81b0a084c00e17a (diff)
af_unix: Refactor unix_next_socket().
Currently, unix_next_socket() is overloaded depending on its 2nd argument.
If it is NULL, unix_next_socket() returns the first socket in the hash
table. If not NULL, it returns the next socket in the same hash list, or
the first socket in the next non-empty hash list.
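
For reference, the overload is visible at the helper's two call sites (both
among the lines this patch removes; see the diff below):

	/* unix_seq_start(): passing NULL asks for the first socket */
	return unix_next_socket(seq, NULL, pos);

	/* unix_seq_next(): passing the current socket asks for the one after it */
	return unix_next_socket(seq, v, pos);
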
This patch refactors unix_next_socket() into two functions, unix_get_first()
and unix_get_next(). unix_get_first() acquires the bucket lock and returns
the first socket in that bucket's list. unix_get_next() returns the next
socket in the same list, or releases the lock and falls back to
unix_get_first() for the next bucket.
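
The split can be modelled in userspace. The sketch below uses pthread
mutexes in place of the per-bucket spinlocks and passes the bucket index
explicitly instead of encoding it in *pos, so all names and types here are
illustrative rather than the kernel's:

	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	#define NBUCKETS 4

	struct node { int val; struct node *next; };

	static struct node *table[NBUCKETS];
	static pthread_mutex_t locks[NBUCKETS] = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	};

	/* Lock buckets starting at *bucket and return the first entry found;
	 * the matching bucket lock is still held on a non-NULL return. */
	static struct node *get_first(size_t *bucket)
	{
		for (; *bucket < NBUCKETS; (*bucket)++) {
			pthread_mutex_lock(&locks[*bucket]);
			if (table[*bucket])
				return table[*bucket];
			pthread_mutex_unlock(&locks[*bucket]);
		}
		return NULL;	/* nothing left, no lock held */
	}

	/* Called with locks[*bucket] held: return the next entry in the same
	 * bucket, or drop the lock and fall back to get_first(). */
	static struct node *get_next(size_t *bucket, struct node *n)
	{
		if (n->next)
			return n->next;
		pthread_mutex_unlock(&locks[*bucket]);
		(*bucket)++;
		return get_first(bucket);
	}

	int main(void)
	{
		struct node a = { 1, NULL }, b = { 2, &a }, c = { 3, NULL };
		size_t bucket = 0;
		struct node *n;

		table[0] = &b;	/* bucket 0: 2 -> 1 */
		table[2] = &c;	/* bucket 2: 3      */

		for (n = get_first(&bucket); n; n = get_next(&bucket, n))
			printf("bucket %zu: %d\n", bucket, n->val);
		return 0;
	}

As in the kernel code, the invariant is that whoever holds a non-NULL
return value also holds that bucket's lock, and a NULL return leaves
nothing locked.
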
In the following patch, the bpf iterator will hold all the sockets of a
bucket in a list and always release the lock before .show(); it then calls
unix_get_first() to acquire the lock again at the start of each iteration.
So, this patch makes that change easier to follow.
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
Link: https://lore.kernel.org/r/20220113002849.4384-2-kuniyu@amazon.co.jp
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'net/unix')
-rw-r--r--   net/unix/af_unix.c   51
1 file changed, 30 insertions(+), 21 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c19569819866..e1c4082accdb 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -3240,49 +3240,58 @@ static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
 	return sk;
 }
 
-static struct sock *unix_next_socket(struct seq_file *seq,
-				     struct sock *sk,
-				     loff_t *pos)
+static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
 {
 	unsigned long bucket = get_bucket(*pos);
+	struct sock *sk;
 
-	while (sk > (struct sock *)SEQ_START_TOKEN) {
-		sk = sk_next(sk);
-		if (!sk)
-			goto next_bucket;
-		if (sock_net(sk) == seq_file_net(seq))
-			return sk;
-	}
-
-	do {
+	while (bucket < ARRAY_SIZE(unix_socket_table)) {
 		spin_lock(&unix_table_locks[bucket]);
+
 		sk = unix_from_bucket(seq, pos);
 		if (sk)
 			return sk;
 
-next_bucket:
-		spin_unlock(&unix_table_locks[bucket++]);
-		*pos = set_bucket_offset(bucket, 1);
-	} while (bucket < ARRAY_SIZE(unix_socket_table));
+		spin_unlock(&unix_table_locks[bucket]);
+
+		*pos = set_bucket_offset(++bucket, 1);
+	}
 
 	return NULL;
 }
 
+static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
+				  loff_t *pos)
+{
+	unsigned long bucket = get_bucket(*pos);
+
+	for (sk = sk_next(sk); sk; sk = sk_next(sk))
+		if (sock_net(sk) == seq_file_net(seq))
+			return sk;
+
+	spin_unlock(&unix_table_locks[bucket]);
+
+	*pos = set_bucket_offset(++bucket, 1);
+
+	return unix_get_first(seq, pos);
+}
+
 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
-	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
-		return NULL;
-
-	return unix_next_socket(seq, NULL, pos);
+	return unix_get_first(seq, pos);
 }
 
 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	++*pos;
-	return unix_next_socket(seq, v, pos);
+
+	if (v == SEQ_START_TOKEN)
+		return unix_get_first(seq, pos);
+
+	return unix_get_next(seq, v, pos);
 }
 
 static void unix_seq_stop(struct seq_file *seq, void *v)
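
These seq_file operations back /proc/net/unix, so the refactored iteration
can be exercised from userspace simply by reading that file. A minimal
sketch (only the proc path is assumed; everything else is standard C):

	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/net/unix", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* Each line printed is one socket the iterator visited,
		 * bucket by bucket. */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
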