/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
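	/* Worked example of the ring arithmetic: with size = 32, head = 5 and
	 * tail = 30, CIRC_CNT() yields (5 - 30) & 31 = 7 slots in use.  The
	 * "& (size - 1)" masks below rely on RXRPC_BACKLOG_MAX being a power
	 * of two.
	 */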
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
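		/* Order the store into the ring slot before the head update
		 * that publishes it to the consumer side.
		 */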
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	write_unlock(&rx->call_lock);

	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

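	/* Keep charging one call/conn/peer set at a time until the rings are
	 * full (-ENOBUFS) or an allocation fails (-ENOMEM).
	 */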
	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
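		/* Preallocated conns were put on the global connection lists
		 * at allocation time, so unlink them before freeing.
		 */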
		write_lock(&rxrpc_connection_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxrpc_connection_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * send a BUSY packet to reject an incoming call
 */
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
		      struct rxrpc_wire_header *whdr)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t len;
	int ret;

	_enter("%d,,", local->debug_id);

	whdr->type	= RXRPC_PACKET_TYPE_BUSY;
	whdr->serial	= htonl(1);

	msg.msg_name	= &srx->transport.sin;
	msg.msg_namelen	= sizeof(srx->transport.sin);
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	iov[0].iov_base	= whdr;
	iov[0].iov_len	= sizeof(*whdr);

	len = iov[0].iov_len;

	_proto("Tx BUSY %%1");

	ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
	if (ret < 0) {
		_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}

/*
 * accept an incoming call that needs peer, transport and/or connection setting
 * up
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
				      struct rxrpc_sock *rx,
				      struct sk_buff *skb,
				      struct sockaddr_rxrpc *srx)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp, *nsp;
	struct rxrpc_call *call;
	struct sk_buff *notification;
	int ret;

	_enter("");

	sp = rxrpc_skb(skb);

	/* get a notification message to send to the server app */
	notification = alloc_skb(0, GFP_NOFS);
	if (!notification) {
		_debug("no memory");
		ret = -ENOMEM;
		goto error_nofree;
	}
	rxrpc_new_skb(notification);
	notification->mark = RXRPC_SKB_MARK_NEW_CALL;

	conn = rxrpc_incoming_connection(local, srx, skb);
	if (IS_ERR(conn)) {
		_debug("no conn");
		ret = PTR_ERR(conn);
		goto error;
	}

	call = rxrpc_incoming_call(rx, conn, skb);
	rxrpc_put_connection(conn);
	if (IS_ERR(call)) {
		_debug("no call");
		ret = PTR_ERR(call);
		goto error;
	}

	/* attach the call to the socket */
	read_lock_bh(&local->services_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE)
		goto invalid_service;

	write_lock(&rx->call_lock);
	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
		rxrpc_get_call(call, rxrpc_call_got);

		spin_lock(&call->conn->state_lock);
		if (sp->hdr.securityIndex > 0 &&
		    call->conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
			_debug("await conn sec");
			list_add_tail(&call->accept_link, &rx->secureq);
			call->conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
			set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
			rxrpc_queue_conn(call->conn);
		} else {
			_debug("conn ready");
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			list_add_tail(&call->accept_link, &rx->acceptq);
			rxrpc_get_call_for_skb(call, notification);
			nsp = rxrpc_skb(notification);
			nsp->call = call;

			ASSERTCMP(atomic_read(&call->usage), >=, 3);

			_debug("notify");
			spin_lock(&call->lock);
			ret = rxrpc_queue_rcv_skb(call, notification, true,
						  false);
			spin_unlock(&call->lock);
			notification = NULL;
			BUG_ON(ret < 0);
		}
		spin_unlock(&call->conn->state_lock);

		_debug("queued");
	}
	write_unlock(&rx->call_lock);

	_debug("process");
	rxrpc_fast_process_packet(call, skb);

	_debug("done");
	read_unlock_bh(&local->services_lock);
	rxrpc_free_skb(notification);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = 0");
	return 0;

invalid_service:
	_debug("invalid");
	read_unlock_bh(&local->services_lock);

	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	ret = -ECONNREFUSED;
error:
	rxrpc_free_skb(notification);
error_nofree:
	_leave(" = %d", ret);
	return ret;
}

/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 */
void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
{
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct rxrpc_wire_header whdr;
	struct sk_buff *skb;
	int ret;

	_enter("%d", local->debug_id);

	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	rxrpc_see_skb(skb);
	sp = rxrpc_skb(skb);

	/* Set up a response packet header in case we need it */
	whdr.epoch	= htonl(sp->hdr.epoch);
	whdr.cid	= htonl(sp->hdr.cid);
	whdr.callNumber	= htonl(sp->hdr.callNumber);
	whdr.seq	= htonl(sp->hdr.seq);
	whdr.serial	= 0;
	whdr.flags	= 0;
	whdr.type	= 0;
	whdr.userStatus	= 0;
	whdr.securityIndex = sp->hdr.securityIndex;
	whdr._rsvd	= 0;
	whdr.serviceId	= htons(sp->hdr.serviceId);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto drop;

	/* get the socket providing the service */
	read_lock_bh(&local->services_lock);
	hlist_for_each_entry(rx, &local->services, listen_link) {
		if (rx->srx.srx_service == sp->hdr.serviceId &&
		    rx->sk.sk_state != RXRPC_CLOSE)
			goto found_service;
	}
	read_unlock_bh(&local->services_lock);
	goto invalid_service;

found_service:
	_debug("found service %hd", rx->srx.srx_service);
	if (sk_acceptq_is_full(&rx->sk))
		goto backlog_full;
	sk_acceptq_added(&rx->sk);
	read_unlock_bh(&local->services_lock);

	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
	if (ret < 0)
		sk_acceptq_removed(&rx->sk);
	switch (ret) {
	case -ECONNRESET: /* old calls are ignored */
	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
	case 0:
		return;
	case -ECONNREFUSED:
		goto invalid_service;
	case -EBUSY:
		goto busy;
	case -EKEYREJECTED:
		goto security_mismatch;
	default:
		BUG();
	}

backlog_full:
	read_unlock_bh(&local->services_lock);
busy:
	rxrpc_busy(local, &srx, &whdr);
	rxrpc_free_skb(skb);
	return;

drop:
	rxrpc_free_skb(skb);
	return;

invalid_service:
	skb->priority = RX_INVALID_OPERATION;
	rxrpc_reject_packet(local, skb);
	return;

	/* can't change connection security type mid-flow */
security_mismatch:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	return;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* check the user ID isn't already in use */
	ret = -EBADSLT;
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto out;
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	rxrpc_get_call(call, rxrpc_call_got_userid);
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();
	if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_queue_call(call);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	_debug("release %p", call);
	rxrpc_release_call(rx, call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
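
/*
 * Illustrative sketch (not part of this file; next_call_id and server_fd
 * are assumed names): rxrpc_accept_call() is normally reached from
 * sendmsg() on an AF_RXRPC server socket carrying an RXRPC_ACCEPT control
 * message plus the user call ID to assign (see
 * Documentation/networking/rxrpc.txt).  Roughly:
 *
 *	unsigned long id = next_call_id++;
 *	char ctrl[CMSG_SPACE(sizeof(id)) + CMSG_SPACE(0)];
 *	struct msghdr msg = {
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(id));
 *	memcpy(CMSG_DATA(cmsg), &id, sizeof(id));
 *
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type  = RXRPC_ACCEPT;
 *	cmsg->cmsg_len   = CMSG_LEN(0);
 *
 *	sendmsg(server_fd, &msg, 0);
 */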

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq)) {
		write_unlock(&rx->call_lock);
		_leave(" = -ENODATA");
		return -ENODATA;
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_set_call_completion(call, RXRPC_CALL_SERVER_BUSY,
					    0, ECONNABORTED);
		if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
			rxrpc_queue_call(call);
		ret = 0;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		break;
	default:
		BUG();
	}

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
 * @sock: The socket on which the impending call is waiting
 * @user_call_ID: The tag to attach to the call
 * @notify_rx: Where to send notifications instead of socket queue
 *
 * Allow a kernel service to accept an incoming call, assuming the incoming
 * call is still valid.  The caller should immediately trigger their own
 * notification as there must be data waiting.
 */
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
					    unsigned long user_call_ID,
					    rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_call *call;

	_enter(",%lx", user_call_ID);
	call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID, notify_rx);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_accept_call);
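
/*
 * Illustrative sketch (srv, my_ctx and my_notify_rx are assumed names): a
 * kernel service might accept the call at the head of the queue like so,
 * then immediately run its own data-ready notification as noted above:
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_accept_call(srv->sock, (unsigned long)my_ctx,
 *					my_notify_rx);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 */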

/**
 * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
 * @sock: The socket on which the impending call is waiting
 *
 * Allow a kernel service to reject an incoming call with a BUSY message,
 * assuming the incoming call is still valid.
 */
int rxrpc_kernel_reject_call(struct socket *sock)
{
	int ret;

	_enter("");
	ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_reject_call);
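
/*
 * Illustrative sketch (srv is an assumed name): a kernel service that can't
 * take the call at the head of the accept queue just rejects it, which
 * BUSYs the caller; -ENODATA means there was nothing queued:
 *
 *	int ret = rxrpc_kernel_reject_call(srv->sock);
 *
 *	if (ret < 0 && ret != -ENODATA)
 *		pr_warn("reject failed: %d\n", ret);
 */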

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
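
/*
 * Illustrative sketch (all my_* names are assumed): a kernel service might
 * keep the backlog charged by preallocating one service-side call record
 * per user ID until -ENOBUFS says the rings are full.  The attach callback
 * keeps the ref on the rxrpc call that the user is given:
 *
 *	static void my_attach_call(struct rxrpc_call *rxcall,
 *				   unsigned long user_call_ID)
 *	{
 *		struct my_call *c = (struct my_call *)user_call_ID;
 *
 *		c->rxcall = rxcall;
 *	}
 *
 *	static void my_charge_backlog(struct my_server *srv)
 *	{
 *		struct my_call *c;
 *
 *		for (;;) {
 *			c = my_alloc_call(srv, GFP_KERNEL);
 *			if (!c)
 *				break;
 *			if (rxrpc_kernel_charge_accept(srv->sock,
 *						       my_notify_rx,
 *						       my_attach_call,
 *						       (unsigned long)c,
 *						       GFP_KERNEL) < 0) {
 *				my_free_call(c);
 *				break;
 *			}
 *		}
 *	}
 */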