Diffstat (limited to 'drivers/nvme/host/tcp.c')
 drivers/nvme/host/tcp.c | 220 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 194 insertions(+), 26 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b127d41dbbfe..26c459f0198d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -8,7 +8,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
@@ -54,6 +53,8 @@ MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
#endif
+static atomic_t nvme_tcp_cpu_queues[NR_CPUS];
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
* sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
@@ -127,6 +128,7 @@ enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
NVME_TCP_Q_POLLING = 2,
+ NVME_TCP_Q_IO_CPU_SET = 3,
};
enum nvme_tcp_recv_state {
@@ -214,6 +216,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+ switch (type) {
+ case nvme_tcp_c2h_term:
+ case nvme_tcp_c2h_data:
+ case nvme_tcp_r2t:
+ case nvme_tcp_rsp:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* Check if the queue is TLS encrypted
*/
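
The nvme_tcp_recv_pdu_supported() helper above whitelists the only PDU types a
controller may legitimately send to the host (C2HTermReq, C2HData, R2T and the
response capsule); anything else seen on the receive path is rejected before
further parsing. A minimal userspace sketch of the same whitelist idea, with
illustrative enum values that are not taken from <linux/nvme-tcp.h>:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical PDU types, for illustration only. */
enum pdu_type { PDU_C2H_TERM, PDU_C2H_DATA, PDU_R2T, PDU_RSP, PDU_H2C_DATA };

static bool recv_pdu_supported(enum pdu_type type)
{
	switch (type) {
	case PDU_C2H_TERM:
	case PDU_C2H_DATA:
	case PDU_R2T:
	case PDU_RSP:
		return true;	/* controller-to-host types we can parse */
	default:
		return false;	/* e.g. host-to-controller types never arrive here */
	}
}

int main(void)
{
	printf("r2t: %d, h2c_data: %d\n",
	       recv_pdu_supported(PDU_R2T),
	       recv_pdu_supported(PDU_H2C_DATA));
	return 0;
}
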
@@ -233,7 +248,7 @@ static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
- return ctrl->opts->tls;
+ return ctrl->opts->tls || ctrl->opts->concat;
}
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
@@ -760,6 +775,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return 0;
}
+static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_term_pdu *pdu)
+{
+ u16 fes;
+ const char *msg;
+ u32 plen = le32_to_cpu(pdu->hdr.plen);
+
+ static const char * const msg_table[] = {
+ [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
+ [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
+ [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
+ [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
+ [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
+ [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
+ };
+
+ if (plen < NVME_TCP_MIN_C2HTERM_PLEN ||
+ plen > NVME_TCP_MAX_C2HTERM_PLEN) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Received a malformed C2HTermReq PDU (plen = %u)\n",
+ plen);
+ return;
+ }
+
+ fes = le16_to_cpu(pdu->fes);
+ if (fes && fes < ARRAY_SIZE(msg_table))
+ msg = msg_table[fes];
+ else
+ msg = "Unknown";
+
+ dev_err(queue->ctrl->ctrl.device,
+ "Received C2HTermReq (FES = %s)\n", msg);
+}
+
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
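
nvme_tcp_handle_c2h_term() above first sanity-checks the advertised PDU length
against the spec limits before trusting any field, then maps the Fatal Error
Status (FES) code onto a message through a designated-initializer table with a
bounds check, so reserved or future codes simply print as "Unknown". A hedged
userspace sketch of that lookup pattern; the FES constants below are
illustrative, the real ones live in <linux/nvme-tcp.h>:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Illustrative codes only. */
enum { FES_INVALID_PDU_HDR = 1, FES_PDU_SEQ_ERR = 2, FES_HDR_DIGEST_ERR = 3 };

static const char * const fes_msg[] = {
	[FES_INVALID_PDU_HDR]	= "Invalid PDU Header Field",
	[FES_PDU_SEQ_ERR]	= "PDU Sequence Error",
	[FES_HDR_DIGEST_ERR]	= "Header Digest Error",
};

static const char *fes_to_msg(unsigned int fes)
{
	/* 0 is not a valid error code; out-of-range or unlisted entries
	 * fall back to "Unknown" instead of indexing past the table. */
	if (fes && fes < ARRAY_SIZE(fes_msg) && fes_msg[fes])
		return fes_msg[fes];
	return "Unknown";
}

int main(void)
{
	printf("%s\n", fes_to_msg(FES_PDU_SEQ_ERR));	/* PDU Sequence Error */
	printf("%s\n", fes_to_msg(42));			/* Unknown */
	return 0;
}
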
@@ -781,6 +830,25 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return 0;
hdr = queue->pdu;
+ if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+ if (!nvme_tcp_recv_pdu_supported(hdr->type))
+ goto unsupported_pdu;
+
+ dev_err(queue->ctrl->ctrl.device,
+ "pdu type %d has unexpected header length (%d)\n",
+ hdr->type, hdr->hlen);
+ return -EPROTO;
+ }
+
+ if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
+ /*
+ * C2HTermReq never includes Header or Data digests.
+ * Skip the checks.
+ */
+ nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
+ return -EINVAL;
+ }
+
if (queue->hdr_digest) {
ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
if (unlikely(ret))
@@ -804,10 +872,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_init_recv_ctx(queue);
return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
default:
- dev_err(queue->ctrl->ctrl.device,
- "unsupported pdu type (%d)\n", hdr->type);
- return -EINVAL;
+ goto unsupported_pdu;
}
+
+unsupported_pdu:
+ dev_err(queue->ctrl->ctrl.device,
+ "unsupported pdu type (%d)\n", hdr->type);
+ return -EINVAL;
}
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
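
Taken together, the receive path now separates three failure classes before any
digest work: a known PDU type with an unexpected header length (-EPROTO), an
unsupported type (-EINVAL via the shared unsupported_pdu label), and a
C2HTermReq, which carries no header or data digest and is logged and then
treated as a fatal connection error. A compressed userspace sketch of that
ordering; the struct, constants and 24-byte header length are stand-ins for the
on-wire definitions, not copies of them:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum pdu_type { PDU_C2H_TERM, PDU_C2H_DATA, PDU_R2T, PDU_RSP, PDU_H2C_DATA };
#define EXPECTED_HLEN	24	/* all headers the host accepts share one length */

struct pdu_hdr {
	enum pdu_type type;
	unsigned char hlen;
};

static bool recv_pdu_supported(enum pdu_type type)
{
	return type == PDU_C2H_TERM || type == PDU_C2H_DATA ||
	       type == PDU_R2T || type == PDU_RSP;
}

/* Returns 0 when the header may be processed further, -errno otherwise. */
static int validate_pdu_hdr(const struct pdu_hdr *hdr)
{
	if (hdr->hlen != EXPECTED_HLEN) {
		if (!recv_pdu_supported(hdr->type))
			return -EINVAL;	/* unknown type: plain rejection */
		return -EPROTO;		/* known type, malformed header */
	}
	if (hdr->type == PDU_C2H_TERM) {
		/* term PDUs carry no digests; report and drop the connection */
		fprintf(stderr, "controller terminated the connection\n");
		return -EINVAL;
	}
	return 0;	/* digest verification and dispatch would follow */
}

int main(void)
{
	struct pdu_hdr ok = { PDU_RSP, 24 };
	struct pdu_hdr bad = { PDU_H2C_DATA, 8 };

	printf("ok: %d, bad: %d\n",
	       validate_pdu_hdr(&ok), validate_pdu_hdr(&bad));
	return 0;
}
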
@@ -1446,8 +1517,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
+ msg.msg_flags = MSG_WAITALL;
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
+ if (ret >= 0 && ret < sizeof(*icresp))
+ ret = -ECONNRESET;
if (ret < 0) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
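
MSG_WAITALL asks the socket layer to keep waiting until the full ICResp has
arrived, but kernel_recvmsg() can still legitimately return fewer bytes (for
example when the peer closes the connection), so a short read is now converted
to -ECONNRESET instead of letting the caller parse a partial structure. A
hedged userspace analogue of the same pattern with recvmsg(2), using a
socketpair to provoke the short read:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Read exactly len bytes or report the connection as reset. */
static int recv_exact(int fd, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t ret;

	ret = recvmsg(fd, &msg, MSG_WAITALL);
	if (ret >= 0 && (size_t)ret < len)
		return -ECONNRESET;	/* peer went away mid-response */
	if (ret < 0)
		return -errno;
	return 0;
}

int main(void)
{
	char resp[16];
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	write(sv[1], "partial!", 8);	/* only half of the expected reply */
	close(sv[1]);
	printf("recv_exact: %d (expected %d)\n",
	       recv_exact(sv[0], resp, sizeof(resp)), -ECONNRESET);
	return 0;
}
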
@@ -1562,23 +1636,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
+/*
+ * Track the number of queues assigned to each cpu using a global per-cpu
+ * counter and select the least used cpu from the mq_map. Our goal is to spread
+ * different controllers' I/O threads across different cpu cores.
+ *
+ * Note that the accounting is not 100% perfect, but it doesn't need to be; we
+ * simply make a best-effort attempt to select the best candidate cpu core we
+ * can find at any given point.
+ */
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
- int qid = nvme_tcp_queue_id(queue);
- int n = 0;
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int qid = nvme_tcp_queue_id(queue) - 1;
+ unsigned int *mq_map = NULL;
+ int cpu, min_queues = INT_MAX, io_cpu;
+
+ if (wq_unbound)
+ goto out;
if (nvme_tcp_default_queue(queue))
- n = qid - 1;
+ mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map;
else if (nvme_tcp_read_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ mq_map = set->map[HCTX_TYPE_READ].mq_map;
else if (nvme_tcp_poll_queue(queue))
- n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
- ctrl->io_queues[HCTX_TYPE_READ] - 1;
- if (wq_unbound)
- queue->io_cpu = WORK_CPU_UNBOUND;
- else
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ mq_map = set->map[HCTX_TYPE_POLL].mq_map;
+
+ if (WARN_ON(!mq_map))
+ goto out;
+
+ /* Search for the least used cpu from the mq_map */
+ io_cpu = WORK_CPU_UNBOUND;
+ for_each_online_cpu(cpu) {
+ int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]);
+
+ if (mq_map[cpu] != qid)
+ continue;
+ if (num_queues < min_queues) {
+ io_cpu = cpu;
+ min_queues = num_queues;
+ }
+ }
+ if (io_cpu != WORK_CPU_UNBOUND) {
+ queue->io_cpu = io_cpu;
+ atomic_inc(&nvme_tcp_cpu_queues[io_cpu]);
+ set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags);
+ }
+out:
+ dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n",
+ qid, queue->io_cpu);
}
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
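
nvme_tcp_set_queue_io_cpu() now walks the online cpus that blk-mq maps to this
queue's hctx and picks the one currently serving the fewest nvme-tcp queues, so
several controllers no longer stack their io_work on the same cores. A small
userspace sketch of just the selection step, with plain arrays standing in for
mq_map and the global per-cpu counters:

#include <limits.h>
#include <stdio.h>

#define NR_CPUS		8
#define CPU_UNBOUND	(-1)

/* Stand-ins: per-cpu queue counters and a blk-mq style cpu -> hctx map. */
static int cpu_queues[NR_CPUS];
static const int mq_map[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

static int pick_io_cpu(int qid)
{
	int cpu, io_cpu = CPU_UNBOUND, min_queues = INT_MAX;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (mq_map[cpu] != qid)
			continue;	/* cpu does not serve this hctx */
		if (cpu_queues[cpu] < min_queues) {
			io_cpu = cpu;
			min_queues = cpu_queues[cpu];
		}
	}
	if (io_cpu != CPU_UNBOUND)
		cpu_queues[io_cpu]++;	/* claim the cpu for this queue */
	return io_cpu;
}

int main(void)
{
	/* two controllers asking for hctx 1 end up on different cpus */
	printf("first controller:  cpu %d\n", pick_io_cpu(1));
	printf("second controller: cpu %d\n", pick_io_cpu(1));
	return 0;
}
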
@@ -1682,7 +1789,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->cmnd_capsule_len = sizeof(struct nvme_command) +
NVME_TCP_ADMIN_CCSZ;
- ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
+ ret = sock_create_kern(current->nsproxy->net_ns,
+ ctrl->addr.ss_family, SOCK_STREAM,
IPPROTO_TCP, &queue->sock);
if (ret) {
dev_err(nctrl->device,
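
The hunk above replaces sock_create() with sock_create_kern(), which takes an
explicit struct net * and creates a kernel-accounted socket in that namespace;
here the namespace of the process issuing the call is passed via
current->nsproxy->net_ns. A minimal kernel-module sketch of that call shape
(a standalone demo under those assumptions, not part of the driver):

#include <linux/module.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/nsproxy.h>
#include <linux/sched.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static struct socket *demo_sock;

static int __init demo_init(void)
{
	/* create a kernel TCP socket in the loading process's netns */
	return sock_create_kern(current->nsproxy->net_ns, AF_INET,
				SOCK_STREAM, IPPROTO_TCP, &demo_sock);
}

static void __exit demo_exit(void)
{
	sock_release(demo_sock);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("sock_create_kern() namespace demo");
MODULE_LICENSE("GPL");
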
@@ -1722,7 +1830,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->sock->sk->sk_allocation = GFP_ATOMIC;
queue->sock->sk->sk_use_task_frag = false;
- nvme_tcp_set_queue_io_cpu(queue);
+ queue->io_cpu = WORK_CPU_UNBOUND;
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
@@ -1844,6 +1952,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
+ if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags))
+ atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]);
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
__nvme_tcp_stop_queue(queue);
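
The new NVME_TCP_Q_IO_CPU_SET flag keeps the per-cpu accounting balanced: the
counter is only incremented when a cpu was actually pinned, and the
test_and_clear_bit() above guarantees it is decremented exactly once even if
the stop path runs repeatedly. A compact userspace sketch of that
inc-once/dec-once pairing using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cpu_queues;	/* stands in for nvme_tcp_cpu_queues[cpu] */

struct queue {
	atomic_bool io_cpu_set;	/* stands in for NVME_TCP_Q_IO_CPU_SET */
};

static void queue_start(struct queue *q)
{
	atomic_fetch_add(&cpu_queues, 1);
	atomic_store(&q->io_cpu_set, true);
}

static void queue_stop(struct queue *q)
{
	/* test-and-clear: only the first stop drops the count */
	if (atomic_exchange(&q->io_cpu_set, false))
		atomic_fetch_sub(&cpu_queues, 1);
}

int main(void)
{
	struct queue q;

	atomic_init(&q.io_cpu_set, false);
	queue_start(&q);
	queue_stop(&q);
	queue_stop(&q);	/* second stop is a no-op, counter stays balanced */
	printf("cpu_queues = %d\n", atomic_load(&cpu_queues));	/* prints 0 */
	return 0;
}
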
@@ -1878,9 +1989,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
nvme_tcp_init_recv_ctx(queue);
nvme_tcp_setup_sock_ops(queue);
- if (idx)
+ if (idx) {
+ nvme_tcp_set_queue_io_cpu(queue);
ret = nvmf_connect_io_queue(nctrl, idx);
- else
+ } else
ret = nvmf_connect_admin_queue(nctrl);
if (!ret) {
@@ -1948,7 +2060,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
if (nvme_tcp_tls_configured(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
- else {
+ else if (ctrl->opts->tls) {
pskid = nvme_tls_psk_default(ctrl->opts->keyring,
ctrl->opts->host->nqn,
ctrl->opts->subsysnqn);
@@ -1978,9 +2090,25 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
- if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) {
- dev_err(ctrl->device, "no PSK negotiated\n");
- return -ENOKEY;
+ if (nvme_tcp_tls_configured(ctrl)) {
+ if (ctrl->opts->concat) {
+ /*
+ * The generated PSK is stored in the
+ * fabric options
+ */
+ if (!ctrl->opts->tls_key) {
+ dev_err(ctrl->device, "no PSK generated\n");
+ return -ENOKEY;
+ }
+ if (ctrl->tls_pskid &&
+ ctrl->tls_pskid != key_serial(ctrl->opts->tls_key)) {
+ dev_err(ctrl->device, "Stale PSK id %08x\n", ctrl->tls_pskid);
+ ctrl->tls_pskid = 0;
+ }
+ } else if (!ctrl->tls_pskid) {
+ dev_err(ctrl->device, "no PSK negotiated\n");
+ return -ENOKEY;
+ }
}
for (i = 1; i < ctrl->queue_count; i++) {
@@ -2198,6 +2326,27 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
}
}
+/*
+ * The TLS key is set by secure concatenation after negotiation has been
+ * completed on the admin queue. We need to revoke the key when:
+ * - concatenation is enabled (otherwise it's a static key set by the user)
+ * and
+ * - the generated key is present in ctrl->opts->tls_key (otherwise there's
+ *   nothing to revoke)
+ * and
+ * - a valid PSK key ID has been set in ctrl->tls_pskid (otherwise TLS
+ * negotiation has not run).
+ *
+ * We cannot always revoke the key as nvme_tcp_alloc_admin_queue() is called
+ * twice during secure concatenation, once on a 'normal' connection to run the
+ * DH-HMAC-CHAP negotiation (which generates the key, so it _must not_ be set),
+ * and once after the negotiation (which uses the key, so it _must_ be set).
+ */
+static bool nvme_tcp_key_revoke_needed(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->concat && ctrl->opts->tls_key && ctrl->tls_pskid;
+}
+
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
struct nvmf_ctrl_options *opts = ctrl->opts;
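
The conditions in nvme_tcp_key_revoke_needed() encode the two-pass nature of
secure channel concatenation: the first admin connection only generates the PSK
via DH-HMAC-CHAP, the second one actually uses it for TLS, and only in that
second state (generated key present and a PSK id recorded) should the error and
reset paths revoke it. A toy walk-through of the three states, with booleans
standing in for the ctrl fields:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for ctrl->opts->concat, ctrl->opts->tls_key and
 * ctrl->tls_pskid; this is not the driver's data structure. */
struct ctrl_state {
	bool concat;
	bool generated_key;	/* PSK produced by DH-HMAC-CHAP */
	bool tls_pskid;		/* TLS established with that PSK */
};

static bool key_revoke_needed(const struct ctrl_state *c)
{
	return c->concat && c->generated_key && c->tls_pskid;
}

int main(void)
{
	struct ctrl_state first_pass  = { true, false, false };
	struct ctrl_state key_only    = { true, true,  false };
	struct ctrl_state second_pass = { true, true,  true  };

	printf("first pass (no key yet):   %d\n", key_revoke_needed(&first_pass));
	printf("key generated, TLS not up: %d\n", key_revoke_needed(&key_only));
	printf("second pass (key in use):  %d\n", key_revoke_needed(&second_pass));
	return 0;
}
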
@@ -2207,6 +2356,16 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
if (ret)
return ret;
+ if (ctrl->opts && ctrl->opts->concat && !ctrl->tls_pskid) {
+ /* See comments for nvme_tcp_key_revoke_needed() */
+ dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n");
+ nvme_stop_keep_alive(ctrl);
+ nvme_tcp_teardown_admin_queue(ctrl, false);
+ ret = nvme_tcp_configure_admin_queue(ctrl, false);
+ if (ret)
+ return ret;
+ }
+
if (ctrl->icdoff) {
ret = -EOPNOTSUPP;
dev_err(ctrl->device, "icdoff is not supported!\n");
@@ -2303,6 +2462,8 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ if (nvme_tcp_key_revoke_needed(ctrl))
+ nvme_auth_revoke_tls_key(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@@ -2343,6 +2504,8 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, reset_work);
int ret;
+ if (nvme_tcp_key_revoke_needed(ctrl))
+ nvme_auth_revoke_tls_key(ctrl);
nvme_stop_ctrl(ctrl);
nvme_tcp_teardown_ctrl(ctrl, false);
@@ -2613,6 +2776,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ int ret;
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
return 0;
@@ -2620,9 +2784,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
- nvme_tcp_try_recv(queue);
+ ret = nvme_tcp_try_recv(queue);
clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
- return queue->nr_cqe;
+ return ret < 0 ? ret : queue->nr_cqe;
}
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
@@ -2838,13 +3002,14 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
- NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
+ NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY | NVMF_OPT_CONCAT,
.create_ctrl = nvme_tcp_create_ctrl,
};
static int __init nvme_tcp_init_module(void)
{
unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+ int cpu;
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
@@ -2862,6 +3027,9 @@ static int __init nvme_tcp_init_module(void)
if (!nvme_tcp_wq)
return -ENOMEM;
+ for_each_possible_cpu(cpu)
+ atomic_set(&nvme_tcp_cpu_queues[cpu], 0);
+
nvmf_register_transport(&nvme_tcp_transport);
return 0;
}
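
Initialization covers every possible cpu, not just the ones online at module
load, so a cpu that is hot-plugged later starts from a well-defined zero
counter while the selection loop in nvme_tcp_set_queue_io_cpu() only ever
considers online cpus. A trivial userspace analogue of that possible-vs-online
distinction:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/* _SC_NPROCESSORS_CONF ~ possible cpus, _SC_NPROCESSORS_ONLN ~ online */
	long possible = sysconf(_SC_NPROCESSORS_CONF);
	long online = sysconf(_SC_NPROCESSORS_ONLN);
	atomic_int *cpu_queues;
	long cpu;

	if (possible < 1)
		return 1;
	cpu_queues = calloc(possible, sizeof(*cpu_queues));
	if (!cpu_queues)
		return 1;
	for (cpu = 0; cpu < possible; cpu++)
		atomic_init(&cpu_queues[cpu], 0);	/* one counter per possible cpu */
	printf("%ld possible cpus (%ld online) initialized\n", possible, online);
	free(cpu_queues);
	return 0;
}
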