author     David S. Miller <davem@davemloft.net>  2018-10-03 21:00:17 -0700
committer  David S. Miller <davem@davemloft.net>  2018-10-03 21:00:17 -0700
commit     6f41617bf23a17d9cb7cc6ca8161534f05f80293 (patch)
tree       2844d8d197a61b0603e31f09613e3272635dd1cc /net/rxrpc
parent     7bdaae270cb55d40b7fb73744c7e00a7108ee5b6 (diff)
parent     cec4de302c5ff2c5eb3bfcb0c4845a095f5149b9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor conflict in net/core/rtnetlink.c: David Ahern's bug fix in 'net' overlapped the renaming of a netlink attribute in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/rxrpc')
-rw-r--r--  net/rxrpc/ar-internal.h    36
-rw-r--r--  net/rxrpc/call_accept.c    45
-rw-r--r--  net/rxrpc/call_object.c     2
-rw-r--r--  net/rxrpc/conn_client.c     4
-rw-r--r--  net/rxrpc/conn_object.c    14
-rw-r--r--  net/rxrpc/input.c          90
-rw-r--r--  net/rxrpc/local_object.c   32
-rw-r--r--  net/rxrpc/output.c         54
-rw-r--r--  net/rxrpc/peer_event.c     46
-rw-r--r--  net/rxrpc/peer_object.c    52
-rw-r--r--  net/rxrpc/protocol.h       15
11 files changed, 188 insertions(+), 202 deletions(-)
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index c97558710421..ef9554131434 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
struct rxrpc_connection;
/*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark. skb->priority is used
+ * to pass supplementary information.
*/
enum rxrpc_skb_mark {
- RXRPC_SKB_MARK_DATA, /* data message */
- RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
- RXRPC_SKB_MARK_BUSY, /* server busy message */
- RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
- RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
- RXRPC_SKB_MARK_NET_ERROR, /* network error message */
- RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
- RXRPC_SKB_MARK_NEW_CALL, /* local error message */
+ RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
+ RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
};
/*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
struct hlist_node hash_link;
struct rxrpc_local *local;
struct hlist_head error_targets; /* targets for net error distribution */
- struct work_struct error_distributor;
struct rb_root service_conns; /* Service connections */
struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
time64_t last_tx_at; /* Last time packet sent here */
@@ -304,8 +298,6 @@ struct rxrpc_peer {
unsigned int maxdata; /* data size (MTU - hdrsize) */
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
int debug_id; /* debug ID for printks */
- int error_report; /* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
struct sockaddr_rxrpc srx; /* remote address */
/* calculated RTT cache */
@@ -463,6 +455,16 @@ struct rxrpc_connection {
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
};
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+ return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+ return !rxrpc_to_server(sp);
+}
+
/*
* Flags in call->flags.
*/
@@ -717,6 +719,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+ struct rxrpc_sock *,
+ struct rxrpc_peer *,
struct rxrpc_connection *,
struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -908,7 +912,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
- struct sk_buff *);
+ struct sk_buff *,
+ struct rxrpc_peer **);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1031,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
* peer_event.c
*/
void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1044,13 +1048,11 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
- struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
/*
* proc.c
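
The two new marks defer rejection to rxrpc_reject_packets(): skb->mark selects whether a BUSY or an ABORT goes back, and for aborts skb->priority carries the abort code. A minimal userspace sketch of that two-field encode/dispatch split follows; the names are illustrative, not the kernel's.

/* Sketch only: a reject encoded in two fields - a mark selecting the
 * response type and a priority carrying the abort code - and decoded
 * later by the sender.  Hypothetical names, not the kernel's. */
#include <stdio.h>

enum reject_mark { REJECT_BUSY, REJECT_ABORT };

struct pending_reject {
    enum reject_mark mark;      /* models skb->mark */
    unsigned int priority;      /* models skb->priority (abort code) */
};

static void send_reject(const struct pending_reject *r)
{
    switch (r->mark) {
    case REJECT_BUSY:
        printf("send BUSY\n");                          /* header only */
        break;
    case REJECT_ABORT:
        printf("send ABORT code=%u\n", r->priority);    /* header + code */
        break;
    }
}

int main(void)
{
    struct pending_reject busy  = { REJECT_BUSY, 0 };
    struct pending_reject inval = { REJECT_ABORT, 2 /* illustrative abort code */ };

    send_reject(&busy);
    send_reject(&inval);
    return 0;
}
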
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 9d1e298b784c..9c7f26d06a52 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
*/
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_local *local,
+ struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_backlog *b = rx->backlog;
- struct rxrpc_peer *peer, *xpeer;
struct rxrpc_call *call;
unsigned short call_head, conn_head, peer_head;
unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
return NULL;
if (!conn) {
- /* No connection. We're going to need a peer to start off
- * with. If one doesn't yet exist, use a spare from the
- * preallocation set. We dump the address into the spare in
- * anticipation - and to save on stack space.
- */
- xpeer = b->peer_backlog[peer_tail];
- if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
- return NULL;
-
- peer = rxrpc_lookup_incoming_peer(local, xpeer);
- if (peer == xpeer) {
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ peer = b->peer_backlog[peer_tail];
+ if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+ return NULL;
b->peer_backlog[peer_tail] = NULL;
smp_store_release(&b->peer_backlog_tail,
(peer_tail + 1) &
(RXRPC_BACKLOG_MAX - 1));
+
+ rxrpc_new_incoming_peer(local, peer);
}
/* Now allocate and set up the connection */
@@ -335,45 +332,31 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
* The call is returned with the user access mutex held.
*/
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ struct rxrpc_sock *rx,
+ struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_sock *rx;
struct rxrpc_call *call;
- u16 service_id = sp->hdr.serviceId;
_enter("");
- /* Get the socket providing the service */
- rx = rcu_dereference(local->service);
- if (rx && (service_id == rx->srx.srx_service ||
- service_id == rx->second_service))
- goto found_service;
-
- trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_INVALID_OPERATION, EOPNOTSUPP);
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
- skb->priority = RX_INVALID_OPERATION;
- _leave(" = NULL [service]");
- return NULL;
-
-found_service:
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = RX_INVALID_OPERATION;
_leave(" = NULL [close]");
call = NULL;
goto out;
}
- call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
if (!call) {
- skb->mark = RXRPC_SKB_MARK_BUSY;
+ skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
_leave(" = NULL [busy]");
call = NULL;
goto out;
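
When no existing peer can be pinned, a preallocated one is taken from the backlog ring and peer_backlog_tail is advanced with a release store once the slot is cleared. A standalone sketch of that consume step, using C11 atomics in place of the kernel's smp_store_release(); the ring layout and names are illustrative.

/* Sketch: consuming one slot from a fixed-size preallocation ring.
 * The release store models smp_store_release() on the tail index. */
#include <stdatomic.h>
#include <stdio.h>

#define BACKLOG_MAX 32                  /* must be a power of two */

struct backlog {
    void *slots[BACKLOG_MAX];
    atomic_uint head;                   /* advanced by the preallocator */
    atomic_uint tail;                   /* advanced by the consumer */
};

/* Take one preallocated object, or NULL if the ring is empty. */
static void *backlog_consume(struct backlog *b)
{
    unsigned int tail = atomic_load_explicit(&b->tail, memory_order_relaxed);
    unsigned int head = atomic_load_explicit(&b->head, memory_order_acquire);
    void *obj;

    if (head == tail)
        return NULL;

    obj = b->slots[tail];
    b->slots[tail] = NULL;              /* slot is now free for refilling */
    atomic_store_explicit(&b->tail, (tail + 1) & (BACKLOG_MAX - 1),
                          memory_order_release);
    return obj;
}

int main(void)
{
    static struct backlog b;
    static int peer0;

    b.slots[0] = &peer0;
    atomic_store_explicit(&b.head, 1, memory_order_release);

    printf("consumed %p\n", backlog_consume(&b));
    printf("consumed %p\n", backlog_consume(&b));   /* NULL: ring empty */
    return 0;
}
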
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9486293fef5c..799f75b6900d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
rcu_assign_pointer(conn->channels[chan].call, call);
spin_lock(&conn->params.peer->lock);
- hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+ hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
spin_unlock(&conn->params.peer->lock);
_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index f8f37188a932..8acf74fe24c0 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -710,8 +710,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
}
spin_lock_bh(&call->conn->params.peer->lock);
- hlist_add_head(&call->error_link,
- &call->conn->params.peer->error_targets);
+ hlist_add_head_rcu(&call->error_link,
+ &call->conn->params.peer->error_targets);
spin_unlock_bh(&call->conn->params.peer->lock);
out:
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 77440a356b14..885dae829f4a 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
* If successful, a pointer to the connection is returned, but no ref is taken.
* NULL is returned if there is no match.
*
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
* The caller must be holding the RCU read lock.
*/
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct rxrpc_peer **_peer)
{
struct rxrpc_connection *conn;
struct rxrpc_conn_proto k;
@@ -85,9 +89,6 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
goto not_found;
- k.epoch = sp->hdr.epoch;
- k.cid = sp->hdr.cid & RXRPC_CIDMASK;
-
/* We may have to handle mixing IPv4 and IPv6 */
if (srx.transport.family != local->srx.transport.family) {
pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
@@ -99,7 +100,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
k.epoch = sp->hdr.epoch;
k.cid = sp->hdr.cid & RXRPC_CIDMASK;
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+ if (rxrpc_to_server(sp)) {
/* We need to look up service connections by the full protocol
* parameter set. We look up the peer first as an intermediate
* step and then the connection from the peer's tree.
@@ -107,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
peer = rxrpc_lookup_peer_rcu(local, &srx);
if (!peer)
goto not_found;
+ *_peer = peer;
conn = rxrpc_find_service_conn_rcu(peer, skb);
if (!conn || atomic_read(&conn->usage) == 0)
goto not_found;
@@ -214,7 +216,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_cwnd = call->cong_cwnd;
spin_lock_bh(&conn->params.peer->lock);
- hlist_del_init(&call->error_link);
+ hlist_del_rcu(&call->error_link);
spin_unlock_bh(&conn->params.peer->lock);
if (rxrpc_is_client_call(call))
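
The lookup now reports an intermediate result: if a peer is found but no connection, the peer is handed back through *_peer so a new service call can reuse it. A toy userspace version of that out-parameter pattern, with hypothetical types and data.

/* Sketch: a lookup that reports an intermediate result (the peer)
 * through an out-parameter even when the final object (the
 * connection) is not found.  Hypothetical types and data. */
#include <stddef.h>
#include <stdio.h>

struct peer { int addr; };
struct conn { const struct peer *peer; int cid; };

static const struct peer peers[] = { { .addr = 10 }, { .addr = 20 } };
static const struct conn conns[] = { { .peer = &peers[0], .cid = 100 } };

static const struct conn *find_conn(int addr, int cid, const struct peer **_peer)
{
    const struct peer *peer = NULL;
    size_t i;

    for (i = 0; i < sizeof(peers) / sizeof(peers[0]); i++)
        if (peers[i].addr == addr)
            peer = &peers[i];
    if (!peer)
        return NULL;                    /* no peer at all */
    *_peer = peer;                      /* report it even without a conn */

    for (i = 0; i < sizeof(conns) / sizeof(conns[0]); i++)
        if (conns[i].peer == peer && conns[i].cid == cid)
            return &conns[i];
    return NULL;                        /* peer known, connection not */
}

int main(void)
{
    const struct peer *peer = NULL;

    if (!find_conn(10, 999, &peer) && peer)
        printf("no conn, but peer %d can seed a new call\n", peer->addr);
    return 0;
}
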
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index ee8e7e1d5c0f..4b86b6be6c1f 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -622,13 +622,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
if (!skb)
continue;
+ sent_at = skb->tstamp;
+ smp_rmb(); /* Read timestamp before serial. */
sp = rxrpc_skb(skb);
if (sp->hdr.serial != orig_serial)
continue;
- smp_rmb();
- sent_at = skb->tstamp;
goto found;
}
+
return;
found:
@@ -1124,12 +1125,14 @@ void rxrpc_data_ready(struct sock *udp_sk)
{
struct rxrpc_connection *conn;
struct rxrpc_channel *chan;
- struct rxrpc_call *call;
+ struct rxrpc_call *call = NULL;
struct rxrpc_skb_priv *sp;
struct rxrpc_local *local = udp_sk->sk_user_data;
+ struct rxrpc_peer *peer = NULL;
+ struct rxrpc_sock *rx = NULL;
struct sk_buff *skb;
unsigned int channel;
- int ret, skew;
+ int ret, skew = 0;
_enter("%p", udp_sk);
@@ -1143,6 +1146,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
return;
}
+ if (skb->tstamp == 0)
+ skb->tstamp = ktime_get_real();
+
rxrpc_new_skb(skb, rxrpc_skb_rx_received);
_net("recv skb %p", skb);
@@ -1177,46 +1183,75 @@ void rxrpc_data_ready(struct sock *udp_sk)
trace_rxrpc_rx_packet(sp);
- _net("Rx RxRPC %s ep=%x call=%x:%x",
- sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
- sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
- if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
- !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
- _proto("Rx Bad Packet Type %u", sp->hdr.type);
- goto bad_message;
- }
-
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+ if (rxrpc_to_client(sp))
goto discard;
rxrpc_post_packet_to_local(local, skb);
goto out;
case RXRPC_PACKET_TYPE_BUSY:
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+ if (rxrpc_to_server(sp))
goto discard;
/* Fall through */
+ case RXRPC_PACKET_TYPE_ACK:
+ case RXRPC_PACKET_TYPE_ACKALL:
+ if (sp->hdr.callNumber == 0)
+ goto bad_message;
+ /* Fall through */
+ case RXRPC_PACKET_TYPE_ABORT:
+ break;
case RXRPC_PACKET_TYPE_DATA:
- if (sp->hdr.callNumber == 0)
+ if (sp->hdr.callNumber == 0 ||
+ sp->hdr.seq == 0)
goto bad_message;
if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
!rxrpc_validate_jumbo(skb))
goto bad_message;
break;
+ case RXRPC_PACKET_TYPE_CHALLENGE:
+ if (rxrpc_to_server(sp))
+ goto discard;
+ break;
+ case RXRPC_PACKET_TYPE_RESPONSE:
+ if (rxrpc_to_client(sp))
+ goto discard;
+ break;
+
/* Packet types 9-11 should just be ignored. */
case RXRPC_PACKET_TYPE_PARAMS:
case RXRPC_PACKET_TYPE_10:
case RXRPC_PACKET_TYPE_11:
goto discard;
+
+ default:
+ _proto("Rx Bad Packet Type %u", sp->hdr.type);
+ goto bad_message;
}
+ if (sp->hdr.serviceId == 0)
+ goto bad_message;
+
rcu_read_lock();
- conn = rxrpc_find_connection_rcu(local, skb);
+ if (rxrpc_to_server(sp)) {
+ /* Weed out packets to services we're not offering. Packets
+ * that would begin a call are explicitly rejected and the rest
+ * are just discarded.
+ */
+ rx = rcu_dereference(local->service);
+ if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+ sp->hdr.serviceId != rx->second_service)) {
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ sp->hdr.seq == 1)
+ goto unsupported_service;
+ goto discard_unlock;
+ }
+ }
+
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
if (conn) {
if (sp->hdr.securityIndex != conn->security_ix)
goto wrong_security;
@@ -1280,7 +1315,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
call = rcu_dereference(chan->call);
if (sp->hdr.callNumber > chan->call_id) {
- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
+ if (rxrpc_to_client(sp)) {
rcu_read_unlock();
goto reject_packet;
}
@@ -1297,19 +1332,15 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
}
- } else {
- skew = 0;
- call = NULL;
}
if (!call || atomic_read(&call->usage) == 0) {
- if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
- sp->hdr.callNumber == 0 ||
+ if (rxrpc_to_client(sp) ||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
goto bad_message_unlock;
if (sp->hdr.seq != 1)
goto discard_unlock;
- call = rxrpc_new_incoming_call(local, conn, skb);
+ call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
if (!call) {
rcu_read_unlock();
goto reject_packet;
@@ -1340,6 +1371,13 @@ wrong_security:
skb->priority = RXKADINCONSISTENCY;
goto post_abort;
+unsupported_service:
+ rcu_read_unlock();
+ trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_INVALID_OPERATION, EOPNOTSUPP);
+ skb->priority = RX_INVALID_OPERATION;
+ goto post_abort;
+
reupgrade:
rcu_read_unlock();
trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -1354,7 +1392,7 @@ bad_message:
protocol_error:
skb->priority = RX_PROTOCOL_ERROR;
post_abort:
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
reject_packet:
trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_reject_packet(local, skb);
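
The old RXRPC_SUPPORTED_PACKET_TYPES bitmask test is folded into the switch itself: each recognised type applies its own direction and field checks, and unknown types fall through to bad_message. A compilable userspace sketch of the same validation shape; the header struct and verdict values are simplifications, and the jumbo-packet check is omitted.

/* Sketch: per-type validation in a single switch, replacing a
 * "supported types" bitmask.  Simplified header; not the kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum pkt_type { PKT_DATA = 1, PKT_ACK, PKT_BUSY, PKT_ABORT, PKT_ACKALL,
                PKT_CHALLENGE, PKT_RESPONSE, PKT_DEBUG, PKT_PARAMS, PKT_10,
                PKT_11, PKT_VERSION = 13 };

struct hdr {
    uint8_t  type;
    bool     client_initiated;          /* i.e. destined for a server */
    uint32_t call_number;
    uint32_t seq;
};

enum verdict { ACCEPT, DISCARD, BAD_MESSAGE };

static enum verdict check(const struct hdr *h)
{
    bool to_server = h->client_initiated;

    switch (h->type) {
    case PKT_VERSION:
        return to_server ? ACCEPT : DISCARD;
    case PKT_BUSY:
        if (to_server)
            return DISCARD;
        /* fall through */
    case PKT_ACK:
    case PKT_ACKALL:
        if (h->call_number == 0)
            return BAD_MESSAGE;
        /* fall through */
    case PKT_ABORT:
        return ACCEPT;
    case PKT_DATA:
        if (h->call_number == 0 || h->seq == 0)
            return BAD_MESSAGE;
        return ACCEPT;
    case PKT_CHALLENGE:
        return to_server ? DISCARD : ACCEPT;
    case PKT_RESPONSE:
        return to_server ? ACCEPT : DISCARD;
    case PKT_PARAMS:
    case PKT_10:
    case PKT_11:
        return DISCARD;                 /* types 9-11 are ignored */
    default:
        return BAD_MESSAGE;             /* unknown type */
    }
}

int main(void)
{
    struct hdr data  = { .type = PKT_DATA, .client_initiated = true,
                         .call_number = 1, .seq = 1 };
    struct hdr bogus = { .type = 12 };

    printf("data=%d bogus=%d\n", check(&data), check(&bogus));
    return 0;
}
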
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 777c3ed4cfc0..94d234e9c685 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
switch (local->srx.transport.family) {
- case AF_INET:
- /* we want to receive ICMP errors */
+ case AF_INET6:
+ /* we want to receive ICMPv6 errors */
opt = 1;
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
/* we want to set the don't fragment bit */
- opt = IP_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+ opt = IPV6_PMTUDISC_DO;
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
goto error;
}
- break;
- case AF_INET6:
+ /* Fall through and set IPv4 options too otherwise we don't get
+ * errors from IPv4 packets sent through the IPv6 socket.
+ */
+
+ case AF_INET:
/* we want to receive ICMP errors */
opt = 1;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
/* we want to set the don't fragment bit */
- opt = IPV6_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+ opt = IP_PMTUDISC_DO;
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
goto error;
}
+
+ /* We want receive timestamps. */
+ opt = 1;
+ ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ _debug("setsockopt failed");
+ goto error;
+ }
break;
default:
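
Because an AF_INET6 UDP socket can also carry IPv4 traffic (v4-mapped addresses), the IPv4 error and PMTU options now get set alongside the IPv6 ones, plus SO_TIMESTAMPNS for receive timestamps. A userspace illustration of the same Linux-specific option set, with plain setsockopt() standing in for kernel_setsockopt(); IPPROTO_IP/IPPROTO_IPV6 have the same values as the SOL_IP/SOL_IPV6 levels the kernel code uses.

/* Sketch: the option set applied by the patch, as a userspace program.
 * Error handling is abbreviated. */
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int set_opt(int fd, int level, int name, int val)
{
    if (setsockopt(fd, level, name, &val, sizeof(val)) < 0) {
        fprintf(stderr, "setsockopt(%d,%d): %s\n", level, name,
                strerror(errno));
        return -1;
    }
    return 0;
}

int main(void)
{
    int fd = socket(AF_INET6, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;

    /* IPv6 side: ICMPv6 errors and don't-fragment. */
    set_opt(fd, IPPROTO_IPV6, IPV6_RECVERR, 1);
    set_opt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER, IPV6_PMTUDISC_DO);

    /* IPv4 side too, since v4-mapped traffic shares the socket. */
    set_opt(fd, IPPROTO_IP, IP_RECVERR, 1);
    set_opt(fd, IPPROTO_IP, IP_MTU_DISCOVER, IP_PMTUDISC_DO);

    /* Receive timestamps, used for RTT measurement. */
    set_opt(fd, SOL_SOCKET, SO_TIMESTAMPNS, 1);

    return 0;
}
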
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index ccf5de160444..e8fb8922bca8 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
struct kvec iov[2];
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
- ktime_t now;
size_t len, n;
int ret;
u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
/* We need to stick a time in before we send the packet in case
* the reply gets back before kernel_sendmsg() completes - but
* asking UDP to send the packet can take a relatively long
- * time, so we update the time after, on the assumption that
- * the packet transmission is more likely to happen towards the
- * end of the kernel_sendmsg() call.
+ * time.
*/
call->ping_time = ktime_get_real();
set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
}
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
- now = ktime_get_real();
- if (ping)
- call->ping_time = now;
conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
+ *
+ * However, we mustn't request an ACK on the last reply packet of a
+ * service call, lest OpenAFS incorrectly send us an ACK with some
+ * soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+ if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+ rxrpc_to_server(sp)
+ ) &&
(test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -390,6 +390,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
goto send_fragmentable;
down_read(&conn->params.local->defrag_sem);
+
+ sp->hdr.serial = serial;
+ smp_wmb(); /* Set serial before timestamp */
+ skb->tstamp = ktime_get_real();
+
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
* to go out of the interface
@@ -413,12 +418,8 @@ done:
trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
retrans, lost);
if (ret >= 0) {
- ktime_t now = ktime_get_real();
- skb->tstamp = now;
- smp_wmb();
- sp->hdr.serial = serial;
if (whdr.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = now;
+ call->peer->rtt_last_req = skb->tstamp;
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_usage > 1) {
unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ send_fragmentable:
down_write(&conn->params.local->defrag_sem);
+ sp->hdr.serial = serial;
+ smp_wmb(); /* Set serial before timestamp */
+ skb->tstamp = ktime_get_real();
+
switch (conn->params.local->srx.transport.family) {
case AF_INET:
opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
struct kvec iov[2];
size_t size;
__be32 code;
- int ret;
+ int ret, ioc;
_enter("%d", local->debug_id);
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = &code;
iov[1].iov_len = sizeof(code);
- size = sizeof(whdr) + sizeof(code);
msg.msg_name = &srx.transport;
msg.msg_control = NULL;
@@ -535,17 +539,31 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
msg.msg_flags = 0;
memset(&whdr, 0, sizeof(whdr));
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
while ((skb = skb_dequeue(&local->reject_queue))) {
rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
sp = rxrpc_skb(skb);
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_REJECT_BUSY:
+ whdr.type = RXRPC_PACKET_TYPE_BUSY;
+ size = sizeof(whdr);
+ ioc = 1;
+ break;
+ case RXRPC_SKB_MARK_REJECT_ABORT:
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
+ code = htonl(skb->priority);
+ size = sizeof(whdr) + sizeof(code);
+ ioc = 2;
+ break;
+ default:
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ continue;
+ }
+
if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
msg.msg_namelen = srx.transport_len;
- code = htonl(skb->priority);
-
whdr.epoch = htonl(sp->hdr.epoch);
whdr.cid = htonl(sp->hdr.cid);
whdr.callNumber = htonl(sp->hdr.callNumber);
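
The serial is now stored before the timestamp on the transmit side, with a write barrier between them; this pairs with the ACK-processing side (see the input.c hunk above), which reads the timestamp first and the serial only after a read barrier, so a matching serial guarantees the timestamp belongs to it. A minimal C11 sketch of that publish/consume pairing, with fences standing in for smp_wmb()/smp_rmb() and illustrative field names.

/* Sketch: writer publishes serial then timestamp; reader consumes
 * timestamp then serial.  The fences model smp_wmb()/smp_rmb(). */
#include <stdatomic.h>
#include <stdint.h>

struct tx_record {
    _Atomic uint32_t serial;
    _Atomic int64_t  tstamp;
};

static void publish(struct tx_record *r, uint32_t serial, int64_t now)
{
    atomic_store_explicit(&r->serial, serial, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);      /* models smp_wmb() */
    atomic_store_explicit(&r->tstamp, now, memory_order_relaxed);
}

static int consume(struct tx_record *r, uint32_t want_serial, int64_t *sent_at)
{
    *sent_at = atomic_load_explicit(&r->tstamp, memory_order_relaxed);
    atomic_thread_fence(memory_order_acquire);      /* models smp_rmb() */
    return atomic_load_explicit(&r->serial, memory_order_relaxed) == want_serial;
}

int main(void)
{
    static struct tx_record rec;
    int64_t sent_at;

    publish(&rec, 42, 1000);
    return consume(&rec, 42, &sent_at) ? 0 : 1;     /* sent_at pairs with 42 */
}
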
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 4f9da2f51c69..f3e6fc670da2 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -23,6 +23,8 @@
#include "ar-internal.h"
static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+ enum rxrpc_call_completion);
/*
* Find the peer associated with an ICMP packet.
@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
- /* The ref we obtained is passed off to the work item */
- __rxrpc_queue_peer_error(peer);
_leave("");
}
@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
static void rxrpc_store_error(struct rxrpc_peer *peer,
struct sock_exterr_skb *serr)
{
+ enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
struct sock_extended_err *ee;
int err;
@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
case SO_EE_ORIGIN_NONE:
case SO_EE_ORIGIN_LOCAL:
_proto("Rx Received local error { error=%d }", err);
- err += RXRPC_LOCAL_ERROR_OFFSET;
+ compl = RXRPC_CALL_LOCAL_ERROR;
break;
case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
break;
}
- peer->error_report = err;
+ rxrpc_distribute_error(peer, err, compl);
}
/*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
*/
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+ enum rxrpc_call_completion compl)
{
- struct rxrpc_peer *peer =
- container_of(work, struct rxrpc_peer, error_distributor);
struct rxrpc_call *call;
- enum rxrpc_call_completion compl;
- int error;
-
- _enter("");
-
- error = READ_ONCE(peer->error_report);
- if (error < RXRPC_LOCAL_ERROR_OFFSET) {
- compl = RXRPC_CALL_NETWORK_ERROR;
- } else {
- compl = RXRPC_CALL_LOCAL_ERROR;
- error -= RXRPC_LOCAL_ERROR_OFFSET;
- }
- _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
-
- spin_lock_bh(&peer->lock);
-
- while (!hlist_empty(&peer->error_targets)) {
- call = hlist_entry(peer->error_targets.first,
- struct rxrpc_call, error_link);
- hlist_del_init(&call->error_link);
+ hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
rxrpc_see_call(call);
-
- if (rxrpc_set_call_completion(call, compl, 0, -error))
+ if (call->state < RXRPC_CALL_COMPLETE &&
+ rxrpc_set_call_completion(call, compl, 0, -error))
rxrpc_notify_socket(call);
}
-
- spin_unlock_bh(&peer->lock);
-
- rxrpc_put_peer(peer);
- _leave("");
}
/*
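
Previously peer->error_report squeezed both the error number and its origin into one int by adding RXRPC_LOCAL_ERROR_OFFSET for local errors, and the work item had to decode it again; distributing immediately lets the completion type travel as its own argument. A small sketch contrasting the two encodings, with illustrative names.

/* Sketch: one-int encoding with an offset vs. passing the completion
 * type explicitly alongside the error. */
#include <assert.h>
#include <errno.h>

#define LOCAL_ERROR_OFFSET 1000000

enum completion { NETWORK_ERROR, LOCAL_ERROR };

/* Old style: squeeze both facts into one int... */
static int encode_old(enum completion c, int err)
{
    return c == LOCAL_ERROR ? err + LOCAL_ERROR_OFFSET : err;
}

/* ...and recover them later in the work item. */
static void decode_old(int report, enum completion *c, int *err)
{
    if (report < LOCAL_ERROR_OFFSET) {
        *c = NETWORK_ERROR;
        *err = report;
    } else {
        *c = LOCAL_ERROR;
        *err = report - LOCAL_ERROR_OFFSET;
    }
}

/* New style: no encoding step at all, the pair is passed as-is. */
static void distribute(enum completion c, int err)
{
    (void)c;
    (void)err;          /* would complete each call on the peer's list */
}

int main(void)
{
    enum completion c;
    int err;

    decode_old(encode_old(LOCAL_ERROR, ECONNREFUSED), &c, &err);
    assert(c == LOCAL_ERROR && err == ECONNREFUSED);

    distribute(LOCAL_ERROR, ECONNREFUSED);  /* the replacement path */
    return 0;
}
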
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1dc7648e3eff..01a9febfa367 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
struct rxrpc_net *rxnet = local->rxnet;
hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
- if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
- if (atomic_read(&peer->usage) == 0)
- return NULL;
+ if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+ atomic_read(&peer->usage) > 0)
return peer;
- }
}
return NULL;
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
atomic_set(&peer->usage, 1);
peer->local = local;
INIT_HLIST_HEAD(&peer->error_targets);
- INIT_WORK(&peer->error_distributor,
- &rxrpc_peer_error_distributor);
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
}
/*
- * Set up a new incoming peer. The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer. There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler) that is the only place we can add new peers.
*/
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
- struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
- struct rxrpc_peer *peer;
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
- hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
- prealloc->local = local;
- rxrpc_init_peer(prealloc, hash_key);
+ hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+ peer->local = local;
+ rxrpc_init_peer(peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
-
- /* Need to check that we aren't racing with someone else */
- peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
- if (peer && !rxrpc_get_peer_maybe(peer))
- peer = NULL;
- if (!peer) {
- peer = prealloc;
- hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
- list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
- }
-
+ hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+ list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
spin_unlock(&rxnet->peer_hash_lock);
- return peer;
}
/*
@@ -416,21 +401,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
}
/*
- * Queue a peer record. This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
- const void *here = __builtin_return_address(0);
- int n;
-
- n = atomic_read(&peer->usage);
- if (rxrpc_queue_work(&peer->error_distributor))
- trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
- else
- rxrpc_put_peer(peer);
-}
-
-/*
* Discard a peer record.
*/
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
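
In the RCU lookup, a hash-chain entry whose usage count has already dropped to zero no longer ends the search; the scan skips it and keeps walking the chain. A toy version of that skip-dead-entries lookup on a plain linked list, with hypothetical types.

/* Sketch: skip entries whose refcount has already hit zero instead of
 * aborting the lookup when one is encountered. */
#include <stdio.h>

struct peer {
    int key;
    int usage;                  /* 0 == dying, must not be returned */
    struct peer *next;
};

static struct peer *lookup(struct peer *chain, int key)
{
    for (struct peer *p = chain; p; p = p->next)
        if (p->key == key && p->usage > 0)
            return p;           /* a dead match is skipped, not fatal */
    return NULL;
}

int main(void)
{
    struct peer live = { .key = 7, .usage = 1, .next = NULL };
    struct peer dead = { .key = 7, .usage = 0, .next = &live };

    printf("found usage=%d\n", lookup(&dead, 7)->usage);    /* prints 1 */
    return 0;
}
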
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 93da73bf7098..f9cb83c938f3 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
#define RXRPC_PACKET_TYPE_10 10 /* Ignored */
#define RXRPC_PACKET_TYPE_11 11 /* Ignored */
#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
-#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
uint8_t flags; /* packet flags */
#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
} __packed;
-#define RXRPC_SUPPORTED_PACKET_TYPES ( \
- (1 << RXRPC_PACKET_TYPE_DATA) | \
- (1 << RXRPC_PACKET_TYPE_ACK) | \
- (1 << RXRPC_PACKET_TYPE_BUSY) | \
- (1 << RXRPC_PACKET_TYPE_ABORT) | \
- (1 << RXRPC_PACKET_TYPE_ACKALL) | \
- (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
- (1 << RXRPC_PACKET_TYPE_RESPONSE) | \
- /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
- (1 << RXRPC_PACKET_TYPE_PARAMS) | \
- (1 << RXRPC_PACKET_TYPE_10) | \
- (1 << RXRPC_PACKET_TYPE_11) | \
- (1 << RXRPC_PACKET_TYPE_VERSION))
-
/*****************************************************************************/
/*
* jumbo packet secondary header