author    Trond Myklebust <Trond.Myklebust@netapp.com>    2012-10-22 17:14:36 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2012-10-24 10:46:15 -0400
commit    f878b657ce8e7d3673afe48110ec208a29e38c4a (patch)
tree      ce2f01c75268473a8d4e8879f5f55b73a16caa7d /net
parent    4bc1e68ed6a8b59be8a79eb719be515a55c7bc68 (diff)
SUNRPC: Get rid of the xs_error_report socket callback
Chris Perl reports that we're seeing races between the wakeup call in
xs_error_report and the connect attempts. Basically, Chris has shown that
in certain circumstances, the call to xs_error_report causes the rpc_task
that is responsible for reconnecting to wake up early, thus triggering a
disconnect and retry.

Since the sk->sk_error_report() calls in the socket layer are always
followed by a tcp_done() in the cases where we care about waking up the
rpc_tasks, just let the state_change callbacks take responsibility for
those wake ups.

Reported-by: Chris Perl <chris.perl@gmail.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: stable@vger.kernel.org
Tested-by: Chris Perl <chris.perl@gmail.com>
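For background, the ordering the changelog relies on can be sketched as
follows. This is not code from the patch or from net/ipv4; the function
name example_tcp_error_path is made up for illustration, and only the
ordering of sk->sk_error_report() followed by tcp_done() reflects the
argument made above.

#include <net/sock.h>
#include <net/tcp.h>

/*
 * Illustrative sketch only: roughly how a TCP error (e.g. an incoming
 * RST) reaches the transport.  The error hook fires first, then
 * tcp_done() moves the socket to TCP_CLOSE and invokes the state-change
 * hook, so the state_change callback alone is enough to wake the
 * pending rpc_tasks.
 */
static void example_tcp_error_path(struct sock *sk, int err)
{
	sk->sk_err = err;		/* record the socket error */
	sk->sk_error_report(sk);	/* old hook: xs_error_report() used to
					 * wake the rpc_tasks here, too early */
	tcp_done(sk);			/* sets TCP_CLOSE and calls
					 * sk->sk_state_change(), where the
					 * wake-up now happens */
}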
Diffstat (limited to 'net')
-rw-r--r--    net/sunrpc/xprtsock.c    25
1 file changed, 0 insertions, 25 deletions
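Before the removal diff below, here is a compressed, illustrative view of
where the wake-up responsibility now lives. It is not the actual
xs_tcp_state_change() (which handles the full set of TCP states), and it
assumes the includes and static helpers of net/sunrpc/xprtsock.c; only
xprt_from_sock(), the callback lock, and the
xprt_wake_pending_tasks(xprt, -EAGAIN) call are taken from the code shown
in the diff.

/*
 * Compressed illustration of the retained state-change callback: by the
 * time sk->sk_state reads TCP_CLOSE, tcp_done() has already run, so
 * waking the pending rpc_tasks here cannot race with a connect attempt
 * the way the removed xs_error_report() hook could.
 */
static void example_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		/* socket errors end up here via tcp_done() */
		xprt_wake_pending_tasks(xprt, -EAGAIN);
		break;
	default:
		break;
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}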
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1f105c2d2e15..75853cabf4c9 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,6 @@ struct sock_xprt {
void (*old_data_ready)(struct sock *, int);
void (*old_state_change)(struct sock *);
void (*old_write_space)(struct sock *);
- void (*old_error_report)(struct sock *);
};
/*
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
transport->old_data_ready = sk->sk_data_ready;
transport->old_state_change = sk->sk_state_change;
transport->old_write_space = sk->sk_write_space;
- transport->old_error_report = sk->sk_error_report;
}
static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
sk->sk_data_ready = transport->old_data_ready;
sk->sk_state_change = transport->old_state_change;
sk->sk_write_space = transport->old_write_space;
- sk->sk_error_report = transport->old_error_report;
}
static void xs_reset_transport(struct sock_xprt *transport)
@@ -1546,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
read_unlock_bh(&sk->sk_callback_lock);
}
-/**
- * xs_error_report - callback mainly for catching socket errors
- * @sk: socket
- */
-static void xs_error_report(struct sock *sk)
-{
- struct rpc_xprt *xprt;
-
- read_lock_bh(&sk->sk_callback_lock);
- if (!(xprt = xprt_from_sock(sk)))
- goto out;
- dprintk("RPC: %s client %p...\n"
- "RPC: error %d\n",
- __func__, xprt, sk->sk_err);
- xprt_wake_pending_tasks(xprt, -EAGAIN);
-out:
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
static void xs_write_space(struct sock *sk)
{
struct socket *sock;
@@ -1864,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_local_data_ready;
sk->sk_write_space = xs_udp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_allocation = GFP_ATOMIC;
xprt_clear_connected(xprt);
@@ -1989,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_udp_data_ready;
sk->sk_write_space = xs_udp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_no_check = UDP_CSUM_NORCV;
sk->sk_allocation = GFP_ATOMIC;
@@ -2102,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_data_ready = xs_tcp_data_ready;
sk->sk_state_change = xs_tcp_state_change;
sk->sk_write_space = xs_tcp_write_space;
- sk->sk_error_report = xs_error_report;
sk->sk_allocation = GFP_ATOMIC;
/* socket options */