From 48e8c6f1612b3d2dccaea2285231def830cc5b8e Mon Sep 17 00:00:00 2001 From: Peter Geis Date: Thu, 20 May 2021 12:32:30 -0400 Subject: net: phy: add driver for Motorcomm yt8511 phy Add a driver for the Motorcomm yt8511 phy that will be used in the production Pine64 rk3566-quartz64 development board. It supports gigabit transfer speeds, rgmii, and 125mhz clk output. Signed-off-by: Peter Geis Signed-off-by: David S. Miller --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index bd7aff0c120f..b9f329249a5a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12378,6 +12378,12 @@ F: Documentation/userspace-api/media/drivers/meye* F: drivers/media/pci/meye/ F: include/uapi/linux/meye.h +MOTORCOMM PHY DRIVER +M: Peter Geis +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/phy/motorcomm.c + MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD S: Orphan F: Documentation/driver-api/serial/moxa-smartio.rst -- cgit v1.2.3 From e860fa9b69e1bf077ba4725ee4be7b9443a3682a Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Thu, 20 May 2021 09:37:48 -0500 Subject: iidc: Introduce iidc.h Introduce a shared header file used by the 'ice' Intel networking driver providing RDMA support and the 'irdma' driver to provide a private interface. Signed-off-by: Dave Ertman Signed-off-by: Shiraz Saleem Signed-off-by: Tony Nguyen --- MAINTAINERS | 1 + include/linux/net/intel/iidc.h | 100 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 include/linux/net/intel/iidc.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index bd7aff0c120f..34d2bf36b5ad 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9130,6 +9130,7 @@ F: Documentation/networking/device_drivers/ethernet/intel/ F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ F: include/linux/avf/virtchnl.h +F: include/linux/net/intel/iidc.h INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) M: Maik Broemme diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h new file mode 100644 index 000000000000..e32f6712aee0 --- /dev/null +++ b/include/linux/net/intel/iidc.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021, Intel Corporation. 
*/ + +#ifndef _IIDC_H_ +#define _IIDC_H_ + +#include +#include +#include +#include +#include +#include + +enum iidc_event_type { + IIDC_EVENT_BEFORE_MTU_CHANGE, + IIDC_EVENT_AFTER_MTU_CHANGE, + IIDC_EVENT_BEFORE_TC_CHANGE, + IIDC_EVENT_AFTER_TC_CHANGE, + IIDC_EVENT_CRIT_ERR, + IIDC_EVENT_NBITS /* must be last */ +}; + +enum iidc_reset_type { + IIDC_PFR, + IIDC_CORER, + IIDC_GLOBR, +}; + +#define IIDC_MAX_USER_PRIORITY 8 + +/* Struct to hold per RDMA Qset info */ +struct iidc_rdma_qset_params { + /* Qset TEID returned to the RDMA driver in + * ice_add_rdma_qset and used by RDMA driver + * for calls to ice_del_rdma_qset + */ + u32 teid; /* Qset TEID */ + u16 qs_handle; /* RDMA driver provides this */ + u16 vport_id; /* VSI index */ + u8 tc; /* TC branch the Qset should belong to */ +}; + +struct iidc_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +/* Struct to pass QoS info */ +struct iidc_qos_params { + struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + u8 up2tc[IIDC_MAX_USER_PRIORITY]; + u8 vport_relative_bw; + u8 vport_priority_type; + u8 num_tc; +}; + +struct iidc_event { + DECLARE_BITMAP(type, IIDC_EVENT_NBITS); + u32 reg; +}; + +struct ice_pf; + +int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); +int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); +int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type); +int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable); +void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos); + +#define IIDC_RDMA_ROCE_NAME "roce" + +/* Structure representing auxiliary driver tailored information about the core + * PCI dev, each auxiliary driver using the IIDC interface will have an + * instance of this struct dedicated to it. + */ + +struct iidc_auxiliary_dev { + struct auxiliary_device adev; + struct ice_pf *pf; +}; + +/* structure representing the auxiliary driver. This struct is to be + * allocated and populated by the auxiliary driver's owner. The core PCI + * driver will access these ops by performing a container_of on the + * auxiliary_device->dev.driver. + */ +struct iidc_auxiliary_drv { + struct auxiliary_driver adrv; + /* This event_handler is meant to be a blocking call. For instance, + * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not + * return until the auxiliary driver is ready for the MTU change to + * happen. + */ + void (*event_handler)(struct ice_pf *pf, struct iidc_event *event); +}; + +#endif /* _IIDC_H_*/ -- cgit v1.2.3 From f0e8cb6106da27039cdc23ecf5b5a776d7c7e66e Mon Sep 17 00:00:00 2001 From: Shai Malin Date: Wed, 2 Jun 2021 21:42:39 +0300 Subject: nvme-tcp-offload: Add nvme-tcp-offload - NVMeTCP HW offload ULP This patch will present the structure for the NVMeTCP offload common layer driver. This module is added under "drivers/nvme/host/" and future offload drivers which will register to it will be placed under "drivers/nvme/hw". This new driver will be enabled by the Kconfig "NVM Express over Fabrics TCP offload commmon layer". In order to support the new transport type, for host mode, no change is needed. Each new vendor-specific offload driver will register to this ULP during its probe function, by filling out the nvme_tcp_ofld_dev->ops and nvme_tcp_ofld_dev->private_data and calling nvme_tcp_ofld_register_dev with the initialized struct. 
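As a rough illustration of the registration flow described above — not part of this patch — a vendor-specific offload driver would fill a struct nvme_tcp_ofld_dev with its struct nvme_tcp_ofld_ops and hand it to nvme_tcp_ofld_register_dev() from its probe path, then call nvme_tcp_ofld_unregister_dev() on teardown. In the sketch below the "myofld" names and the empty callback bodies are hypothetical placeholders; only the ops fields, the callback signatures, and the register/unregister calls come from this series.

/* Minimal sketch of a vendor-specific offload driver, assuming the
 * tcp-offload.h interface added by this patch. All identifiers prefixed
 * with "myofld" are hypothetical.
 */
#include <linux/module.h>

#include "tcp-offload.h"

/* Stubbed vendor callbacks; a real driver would program its hardware here. */
static int myofld_claim_dev(struct nvme_tcp_ofld_dev *dev,
                            struct nvme_tcp_ofld_ctrl *ctrl)
{
        /* Return non-zero only if ctrl->conn_params is reachable via dev. */
        return 0;
}

static int myofld_setup_ctrl(struct nvme_tcp_ofld_ctrl *ctrl) { return 0; }
static int myofld_release_ctrl(struct nvme_tcp_ofld_ctrl *ctrl) { return 0; }

static int myofld_create_queue(struct nvme_tcp_ofld_queue *queue, int qid,
                               size_t queue_size)
{
        return 0;
}

static void myofld_drain_queue(struct nvme_tcp_ofld_queue *queue) { }
static void myofld_destroy_queue(struct nvme_tcp_ofld_queue *queue) { }
static int myofld_poll_queue(struct nvme_tcp_ofld_queue *queue) { return 0; }
static int myofld_send_req(struct nvme_tcp_ofld_req *req) { return 0; }

static struct nvme_tcp_ofld_ops myofld_ops = {
        .name          = "myofld",
        .module        = THIS_MODULE,
        /* All eight callbacks are mandatory; nvme_tcp_ofld_register_dev()
         * rejects the device with -EINVAL if any of them is NULL.
         */
        .claim_dev     = myofld_claim_dev,
        .setup_ctrl    = myofld_setup_ctrl,
        .release_ctrl  = myofld_release_ctrl,
        .create_queue  = myofld_create_queue,
        .drain_queue   = myofld_drain_queue,
        .destroy_queue = myofld_destroy_queue,
        .poll_queue    = myofld_poll_queue,
        .send_req      = myofld_send_req,
};

static struct nvme_tcp_ofld_dev myofld_dev = {
        .ops = &myofld_ops,
};

static int __init myofld_init(void)
{
        /* In a real driver this would sit in the probe path. */
        return nvme_tcp_ofld_register_dev(&myofld_dev);
}

static void __exit myofld_exit(void)
{
        nvme_tcp_ofld_unregister_dev(&myofld_dev);
}

module_init(myofld_init);
module_exit(myofld_exit);
MODULE_LICENSE("GPL v2");

Registration simply validates the ops table and adds the device to the ULP's device list under nvme_tcp_ofld_devices_mutex; the common layer later walks that list in claim_dev() order to find a device that can reach the target address.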
The internal implementation: - tcp-offload.h: Includes all common structs and ops to be used and shared by offload drivers. - tcp-offload.c: Includes the init function which registers as a NVMf transport just like any other transport. Acked-by: Igor Russkikh Signed-off-by: Dean Balandin Signed-off-by: Prabhakar Kushwaha Signed-off-by: Omkar Kulkarni Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Shai Malin Reviewed-by: Hannes Reinecke Reviewed-by: Himanshu Madhani Signed-off-by: David S. Miller --- MAINTAINERS | 8 ++ drivers/nvme/host/Kconfig | 17 ++++ drivers/nvme/host/Makefile | 3 + drivers/nvme/host/tcp-offload.c | 124 +++++++++++++++++++++++++ drivers/nvme/host/tcp-offload.h | 199 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 351 insertions(+) create mode 100644 drivers/nvme/host/tcp-offload.c create mode 100644 drivers/nvme/host/tcp-offload.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9cbc3766fd74..d8e882229a48 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13107,6 +13107,14 @@ F: drivers/nvme/host/ F: include/linux/nvme.h F: include/uapi/linux/nvme_ioctl.h +NVM EXPRESS TCP OFFLOAD TRANSPORT DRIVERS +M: Shai Malin +M: Ariel Elior +L: linux-nvme@lists.infradead.org +S: Supported +F: drivers/nvme/host/tcp-offload.c +F: drivers/nvme/host/tcp-offload.h + NVM EXPRESS FC TRANSPORT DRIVERS M: James Smart L: linux-nvme@lists.infradead.org diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index a44d49d63968..caedc35e1f0d 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -84,3 +84,20 @@ config NVME_TCP from https://github.com/linux-nvme/nvme-cli. If unsure, say N. + +config NVME_TCP_OFFLOAD + tristate "NVM Express over Fabrics TCP offload common layer" + default m + depends on BLOCK + depends on INET + select NVME_CORE + select NVME_FABRICS + help + This provides support for the NVMe over Fabrics protocol using + the TCP offload transport. This allows you to use remote block devices + exported using the NVMe protocol set. + + To configure a NVMe over Fabrics controller use the nvme-cli tool + from https://github.com/linux-nvme/nvme-cli. + + If unsure, say N. diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index cbc509784b2e..3c3fdf83ce38 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o obj-$(CONFIG_NVME_FC) += nvme-fc.o obj-$(CONFIG_NVME_TCP) += nvme-tcp.o +obj-$(CONFIG_NVME_TCP_OFFLOAD) += nvme-tcp-offload.o nvme-core-y := core.o ioctl.o nvme-core-$(CONFIG_TRACING) += trace.o @@ -26,3 +27,5 @@ nvme-rdma-y += rdma.o nvme-fc-y += fc.o nvme-tcp-y += tcp.o + +nvme-tcp-offload-y += tcp-offload.o diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c new file mode 100644 index 000000000000..f7aa49f337dc --- /dev/null +++ b/drivers/nvme/host/tcp-offload.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2021 Marvell. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/* Kernel includes */ +#include +#include + +/* Driver includes */ +#include "tcp-offload.h" + +static LIST_HEAD(nvme_tcp_ofld_devices); +static DEFINE_MUTEX(nvme_tcp_ofld_devices_mutex); + +/** + * nvme_tcp_ofld_register_dev() - NVMeTCP Offload Library registration + * function. + * @dev: NVMeTCP offload device instance to be registered to the + * common tcp offload instance. 
+ * + * API function that registers the type of vendor specific driver + * being implemented to the common NVMe over TCP offload library. Part of + * the overall init sequence of starting up an offload driver. + */ +int nvme_tcp_ofld_register_dev(struct nvme_tcp_ofld_dev *dev) +{ + struct nvme_tcp_ofld_ops *ops = dev->ops; + + if (!ops->claim_dev || + !ops->setup_ctrl || + !ops->release_ctrl || + !ops->create_queue || + !ops->drain_queue || + !ops->destroy_queue || + !ops->poll_queue || + !ops->send_req) + return -EINVAL; + + mutex_lock(&nvme_tcp_ofld_devices_mutex); + list_add_tail(&dev->entry, &nvme_tcp_ofld_devices); + mutex_unlock(&nvme_tcp_ofld_devices_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_tcp_ofld_register_dev); + +/** + * nvme_tcp_ofld_unregister_dev() - NVMeTCP Offload Library unregistration + * function. + * @dev: NVMeTCP offload device instance to be unregistered from the + * common tcp offload instance. + * + * API function that unregisters the type of vendor specific driver being + * implemented from the common NVMe over TCP offload library. + * Part of the overall exit sequence of unloading the implemented driver. + */ +void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev) +{ + mutex_lock(&nvme_tcp_ofld_devices_mutex); + list_del(&dev->entry); + mutex_unlock(&nvme_tcp_ofld_devices_mutex); +} +EXPORT_SYMBOL_GPL(nvme_tcp_ofld_unregister_dev); + +/** + * nvme_tcp_ofld_report_queue_err() - NVMeTCP Offload report error event + * callback function. Pointed to by nvme_tcp_ofld_queue->report_err. + * @queue: NVMeTCP offload queue instance on which the error has occurred. + * + * API function that allows the vendor specific offload driver to reports errors + * to the common offload layer, to invoke error recovery. + */ +int nvme_tcp_ofld_report_queue_err(struct nvme_tcp_ofld_queue *queue) +{ + /* Placeholder - invoke error recovery flow */ + + return 0; +} + +/** + * nvme_tcp_ofld_req_done() - NVMeTCP Offload request done callback + * function. Pointed to by nvme_tcp_ofld_req->done. + * Handles both NVME_TCP_F_DATA_SUCCESS flag and NVMe CQ. + * @req: NVMeTCP offload request to complete. + * @result: The nvme_result. + * @status: The completion status. + * + * API function that allows the vendor specific offload driver to report request + * completions to the common offload layer. + */ +void nvme_tcp_ofld_req_done(struct nvme_tcp_ofld_req *req, + union nvme_result *result, + __le16 status) +{ + /* Placeholder - complete request with/without error */ +} + +static struct nvmf_transport_ops nvme_tcp_ofld_transport = { + .name = "tcp_offload", + .module = THIS_MODULE, + .required_opts = NVMF_OPT_TRADDR, + .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_NR_WRITE_QUEUES | + NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | + NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_HDR_DIGEST | + NVMF_OPT_DATA_DIGEST | NVMF_OPT_NR_POLL_QUEUES | + NVMF_OPT_TOS, +}; + +static int __init nvme_tcp_ofld_init_module(void) +{ + nvmf_register_transport(&nvme_tcp_ofld_transport); + + return 0; +} + +static void __exit nvme_tcp_ofld_cleanup_module(void) +{ + nvmf_unregister_transport(&nvme_tcp_ofld_transport); +} + +module_init(nvme_tcp_ofld_init_module); +module_exit(nvme_tcp_ofld_cleanup_module); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h new file mode 100644 index 000000000000..520a0ea6f4b8 --- /dev/null +++ b/drivers/nvme/host/tcp-offload.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2021 Marvell. 
All rights reserved. + */ + +/* Linux includes */ +#include +#include +#include +#include + +/* Driver includes */ +#include "nvme.h" +#include "fabrics.h" + +/* Forward declarations */ +struct nvme_tcp_ofld_ops; + +/* Representation of a vendor-specific device. This is the struct used to + * register to the offload layer by the vendor-specific driver during its probe + * function. + * Allocated by vendor-specific driver. + */ +struct nvme_tcp_ofld_dev { + struct list_head entry; + struct net_device *ndev; + struct nvme_tcp_ofld_ops *ops; + + /* Vendor specific driver context */ + int num_hw_vectors; +}; + +/* Per IO struct holding the nvme_request and command + * Allocated by blk-mq. + */ +struct nvme_tcp_ofld_req { + struct nvme_request req; + struct nvme_command nvme_cmd; + struct list_head queue_entry; + struct nvme_tcp_ofld_queue *queue; + + /* Vendor specific driver context */ + void *private_data; + + /* async flag is used to distinguish between async and IO flow + * in common send_req() of nvme_tcp_ofld_ops. + */ + bool async; + + void (*done)(struct nvme_tcp_ofld_req *req, + union nvme_result *result, + __le16 status); +}; + +enum nvme_tcp_ofld_queue_flags { + NVME_TCP_OFLD_Q_ALLOCATED = 0, + NVME_TCP_OFLD_Q_LIVE = 1, +}; + +/* Allocated by nvme_tcp_ofld */ +struct nvme_tcp_ofld_queue { + /* Offload device associated to this queue */ + struct nvme_tcp_ofld_dev *dev; + struct nvme_tcp_ofld_ctrl *ctrl; + unsigned long flags; + size_t cmnd_capsule_len; + + u8 hdr_digest; + u8 data_digest; + u8 tos; + + /* Vendor specific driver context */ + void *private_data; + + /* Error callback function */ + int (*report_err)(struct nvme_tcp_ofld_queue *queue); +}; + +/* Connectivity (routing) params used for establishing a connection */ +struct nvme_tcp_ofld_ctrl_con_params { + struct sockaddr_storage remote_ip_addr; + + /* If NVMF_OPT_HOST_TRADDR is provided it will be set in local_ip_addr + * in nvme_tcp_ofld_create_ctrl(). + * If NVMF_OPT_HOST_TRADDR is not provided the local_ip_addr will be + * initialized by claim_dev(). + */ + struct sockaddr_storage local_ip_addr; +}; + +/* Allocated by nvme_tcp_ofld */ +struct nvme_tcp_ofld_ctrl { + struct nvme_ctrl nctrl; + struct list_head list; + struct nvme_tcp_ofld_dev *dev; + + /* admin and IO queues */ + struct blk_mq_tag_set tag_set; + struct blk_mq_tag_set admin_tag_set; + struct nvme_tcp_ofld_queue *queues; + + struct work_struct err_work; + struct delayed_work connect_work; + + /* + * Each entry in the array indicates the number of queues of + * corresponding type. + */ + u32 io_queues[HCTX_MAX_TYPES]; + + /* Connectivity params */ + struct nvme_tcp_ofld_ctrl_con_params conn_params; + + /* Vendor specific driver context */ + void *private_data; +}; + +struct nvme_tcp_ofld_ops { + const char *name; + struct module *module; + + /* For vendor-specific driver to report what opts it supports. + * It could be different than the ULP supported opts due to hardware + * limitations. Also it could be different among different vendor + * drivers. + */ + int required_opts; /* bitmap using enum nvmf_parsing_opts */ + int allowed_opts; /* bitmap using enum nvmf_parsing_opts */ + + /* For vendor-specific max num of segments and IO sizes */ + u32 max_hw_sectors; + u32 max_segments; + + /** + * claim_dev: Return True if addr is reachable via offload device. + * @dev: The offload device to check. + * @ctrl: The offload ctrl have the conn_params field. The + * conn_params is to be filled with routing params by the lower + * driver. 
+ */ + int (*claim_dev)(struct nvme_tcp_ofld_dev *dev, + struct nvme_tcp_ofld_ctrl *ctrl); + + /** + * setup_ctrl: Setup device specific controller structures. + * @ctrl: The offload ctrl. + */ + int (*setup_ctrl)(struct nvme_tcp_ofld_ctrl *ctrl); + + /** + * release_ctrl: Release/Free device specific controller structures. + * @ctrl: The offload ctrl. + */ + int (*release_ctrl)(struct nvme_tcp_ofld_ctrl *ctrl); + + /** + * create_queue: Create offload queue and establish TCP + NVMeTCP + * (icreq+icresp) connection. Return true on successful connection. + * Based on nvme_tcp_alloc_queue. + * @queue: The queue itself - used as input and output. + * @qid: The queue ID associated with the requested queue. + * @q_size: The queue depth. + */ + int (*create_queue)(struct nvme_tcp_ofld_queue *queue, int qid, + size_t queue_size); + + /** + * drain_queue: Drain a given queue - blocking function call. + * Return from this function ensures that no additional + * completions will arrive on this queue and that the HW will + * not access host memory. + * @queue: The queue to drain. + */ + void (*drain_queue)(struct nvme_tcp_ofld_queue *queue); + + /** + * destroy_queue: Close the TCP + NVMeTCP connection of a given queue + * and make sure its no longer active (no completions will arrive on the + * queue). + * @queue: The queue to destroy. + */ + void (*destroy_queue)(struct nvme_tcp_ofld_queue *queue); + + /** + * poll_queue: Poll a given queue for completions. + * @queue: The queue to poll. + */ + int (*poll_queue)(struct nvme_tcp_ofld_queue *queue); + + /** + * send_req: Dispatch a request. Returns the execution status. + * @req: Ptr to request to be sent. + */ + int (*send_req)(struct nvme_tcp_ofld_req *req); +}; + +/* Exported functions for lower vendor specific offload drivers */ +int nvme_tcp_ofld_register_dev(struct nvme_tcp_ofld_dev *dev); +void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev); -- cgit v1.2.3 From daf6e8c9caa0955e8d190a606b1bacf9a903d3c1 Mon Sep 17 00:00:00 2001 From: Shai Malin Date: Wed, 9 Jun 2021 13:49:18 +0300 Subject: Revert "nvme-tcp-offload: ULP Series" This reverts commits: - 762411542050dbe27c7c96f13c57f93da5d9b89a nvme: NVME_TCP_OFFLOAD should not default to m - 5ff5622ea1f16d535f1be4e478e712ef48fe183b: Merge branch 'NVMeTCP-Offload-ULP' As requested on the mailing-list: https://lore.kernel.org/netdev/SJ0PR18MB3882C20793EA35A3E8DAE300CC379@SJ0PR18MB3882.namprd18.prod.outlook.com/ This patch will revert the nvme-tcp-offload ULP from net-next. The nvme-tcp-offload ULP series will continue to be considered only on linux-nvme@lists.infradead.org. Signed-off-by: Prabhakar Kushwaha Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Shai Malin Signed-off-by: David S. 
Miller --- MAINTAINERS | 8 - drivers/nvme/host/Kconfig | 16 - drivers/nvme/host/Makefile | 3 - drivers/nvme/host/fabrics.c | 12 +- drivers/nvme/host/fabrics.h | 9 - drivers/nvme/host/tcp-offload.c | 1318 --------------------------------------- drivers/nvme/host/tcp-offload.h | 206 ------ 7 files changed, 9 insertions(+), 1563 deletions(-) delete mode 100644 drivers/nvme/host/tcp-offload.c delete mode 100644 drivers/nvme/host/tcp-offload.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 85a87a93e194..e69c1991ec3b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13107,14 +13107,6 @@ F: drivers/nvme/host/ F: include/linux/nvme.h F: include/uapi/linux/nvme_ioctl.h -NVM EXPRESS TCP OFFLOAD TRANSPORT DRIVERS -M: Shai Malin -M: Ariel Elior -L: linux-nvme@lists.infradead.org -S: Supported -F: drivers/nvme/host/tcp-offload.c -F: drivers/nvme/host/tcp-offload.h - NVM EXPRESS FC TRANSPORT DRIVERS M: James Smart L: linux-nvme@lists.infradead.org diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index a01e3f380e74..a44d49d63968 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -84,19 +84,3 @@ config NVME_TCP from https://github.com/linux-nvme/nvme-cli. If unsure, say N. - -config NVME_TCP_OFFLOAD - tristate "NVM Express over Fabrics TCP offload common layer" - depends on BLOCK - depends on INET - select NVME_CORE - select NVME_FABRICS - help - This provides support for the NVMe over Fabrics protocol using - the TCP offload transport. This allows you to use remote block devices - exported using the NVMe protocol set. - - To configure a NVMe over Fabrics controller use the nvme-cli tool - from https://github.com/linux-nvme/nvme-cli. - - If unsure, say N. diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index 3c3fdf83ce38..cbc509784b2e 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o obj-$(CONFIG_NVME_FC) += nvme-fc.o obj-$(CONFIG_NVME_TCP) += nvme-tcp.o -obj-$(CONFIG_NVME_TCP_OFFLOAD) += nvme-tcp-offload.o nvme-core-y := core.o ioctl.o nvme-core-$(CONFIG_TRACING) += trace.o @@ -27,5 +26,3 @@ nvme-rdma-y += rdma.o nvme-fc-y += fc.o nvme-tcp-y += tcp.o - -nvme-tcp-offload-y += tcp-offload.o diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index ceb263eb50fb..a2bb7fc63a73 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -860,8 +860,8 @@ out: return ret; } -int nvmf_check_required_opts(struct nvmf_ctrl_options *opts, - unsigned int required_opts) +static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts, + unsigned int required_opts) { if ((opts->mask & required_opts) != required_opts) { int i; @@ -879,7 +879,6 @@ int nvmf_check_required_opts(struct nvmf_ctrl_options *opts, return 0; } -EXPORT_SYMBOL_GPL(nvmf_check_required_opts); bool nvmf_ip_options_match(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts) @@ -943,6 +942,13 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts) } EXPORT_SYMBOL_GPL(nvmf_free_options); +#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN) +#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \ + NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \ + NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\ + NVMF_OPT_DISABLE_SQFLOW |\ + NVMF_OPT_FAIL_FAST_TMO) + static struct nvme_ctrl * nvmf_create_ctrl(struct device *dev, const char *buf) { diff --git a/drivers/nvme/host/fabrics.h 
b/drivers/nvme/host/fabrics.h index 8399fcc063ef..d7f7974dc208 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -68,13 +68,6 @@ enum { NVMF_OPT_FAIL_FAST_TMO = 1 << 20, }; -#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN) -#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \ - NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \ - NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\ - NVMF_OPT_DISABLE_SQFLOW |\ - NVMF_OPT_FAIL_FAST_TMO) - /** * struct nvmf_ctrl_options - Used to hold the options specified * with the parsing opts enum. @@ -193,7 +186,5 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); bool nvmf_ip_options_match(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts); -int nvmf_check_required_opts(struct nvmf_ctrl_options *opts, - unsigned int required_opts); #endif /* _NVME_FABRICS_H */ diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c deleted file mode 100644 index c76822e5ada7..000000000000 --- a/drivers/nvme/host/tcp-offload.c +++ /dev/null @@ -1,1318 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright 2021 Marvell. All rights reserved. - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -/* Kernel includes */ -#include -#include - -/* Driver includes */ -#include "tcp-offload.h" - -static LIST_HEAD(nvme_tcp_ofld_devices); -static DEFINE_MUTEX(nvme_tcp_ofld_devices_mutex); -static LIST_HEAD(nvme_tcp_ofld_ctrl_list); -static DEFINE_MUTEX(nvme_tcp_ofld_ctrl_mutex); -static struct blk_mq_ops nvme_tcp_ofld_admin_mq_ops; -static struct blk_mq_ops nvme_tcp_ofld_mq_ops; - -static inline struct nvme_tcp_ofld_ctrl *to_tcp_ofld_ctrl(struct nvme_ctrl *nctrl) -{ - return container_of(nctrl, struct nvme_tcp_ofld_ctrl, nctrl); -} - -static inline int nvme_tcp_ofld_qid(struct nvme_tcp_ofld_queue *queue) -{ - return queue - queue->ctrl->queues; -} - -/** - * nvme_tcp_ofld_register_dev() - NVMeTCP Offload Library registration - * function. - * @dev: NVMeTCP offload device instance to be registered to the - * common tcp offload instance. - * - * API function that registers the type of vendor specific driver - * being implemented to the common NVMe over TCP offload library. Part of - * the overall init sequence of starting up an offload driver. - */ -int nvme_tcp_ofld_register_dev(struct nvme_tcp_ofld_dev *dev) -{ - struct nvme_tcp_ofld_ops *ops = dev->ops; - - if (!ops->claim_dev || - !ops->setup_ctrl || - !ops->release_ctrl || - !ops->create_queue || - !ops->drain_queue || - !ops->destroy_queue || - !ops->poll_queue || - !ops->send_req) - return -EINVAL; - - mutex_lock(&nvme_tcp_ofld_devices_mutex); - list_add_tail(&dev->entry, &nvme_tcp_ofld_devices); - mutex_unlock(&nvme_tcp_ofld_devices_mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(nvme_tcp_ofld_register_dev); - -/** - * nvme_tcp_ofld_unregister_dev() - NVMeTCP Offload Library unregistration - * function. - * @dev: NVMeTCP offload device instance to be unregistered from the - * common tcp offload instance. - * - * API function that unregisters the type of vendor specific driver being - * implemented from the common NVMe over TCP offload library. - * Part of the overall exit sequence of unloading the implemented driver. 
- */ -void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev) -{ - mutex_lock(&nvme_tcp_ofld_devices_mutex); - list_del(&dev->entry); - mutex_unlock(&nvme_tcp_ofld_devices_mutex); -} -EXPORT_SYMBOL_GPL(nvme_tcp_ofld_unregister_dev); - -/** - * nvme_tcp_ofld_error_recovery() - NVMeTCP Offload library error recovery. - * function. - * @nctrl: NVMe controller instance to change to resetting. - * - * API function that change the controller state to resseting. - * Part of the overall controller reset sequence. - */ -void nvme_tcp_ofld_error_recovery(struct nvme_ctrl *nctrl) -{ - if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_RESETTING)) - return; - - queue_work(nvme_reset_wq, &to_tcp_ofld_ctrl(nctrl)->err_work); -} -EXPORT_SYMBOL_GPL(nvme_tcp_ofld_error_recovery); - -/** - * nvme_tcp_ofld_report_queue_err() - NVMeTCP Offload report error event - * callback function. Pointed to by nvme_tcp_ofld_queue->report_err. - * @queue: NVMeTCP offload queue instance on which the error has occurred. - * - * API function that allows the vendor specific offload driver to reports errors - * to the common offload layer, to invoke error recovery. - */ -int nvme_tcp_ofld_report_queue_err(struct nvme_tcp_ofld_queue *queue) -{ - pr_err("nvme-tcp-offload queue error\n"); - nvme_tcp_ofld_error_recovery(&queue->ctrl->nctrl); - - return 0; -} - -/** - * nvme_tcp_ofld_req_done() - NVMeTCP Offload request done callback - * function. Pointed to by nvme_tcp_ofld_req->done. - * Handles both NVME_TCP_F_DATA_SUCCESS flag and NVMe CQ. - * @req: NVMeTCP offload request to complete. - * @result: The nvme_result. - * @status: The completion status. - * - * API function that allows the vendor specific offload driver to report request - * completions to the common offload layer. - */ -void nvme_tcp_ofld_req_done(struct nvme_tcp_ofld_req *req, - union nvme_result *result, - __le16 status) -{ - struct request *rq = blk_mq_rq_from_pdu(req); - - if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), *result)) - nvme_complete_rq(rq); -} - -/** - * nvme_tcp_ofld_async_req_done() - NVMeTCP Offload request done callback - * function for async request. Pointed to by nvme_tcp_ofld_req->done. - * Handles both NVME_TCP_F_DATA_SUCCESS flag and NVMe CQ. - * @req: NVMeTCP offload request to complete. - * @result: The nvme_result. - * @status: The completion status. - * - * API function that allows the vendor specific offload driver to report request - * completions to the common offload layer. 
- */ -void nvme_tcp_ofld_async_req_done(struct nvme_tcp_ofld_req *req, - union nvme_result *result, __le16 status) -{ - struct nvme_tcp_ofld_queue *queue = req->queue; - struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl; - - nvme_complete_async_event(&ctrl->nctrl, status, result); -} - -static struct nvme_tcp_ofld_dev * -nvme_tcp_ofld_lookup_dev(struct nvme_tcp_ofld_ctrl *ctrl) -{ - struct nvme_tcp_ofld_dev *dev; - - mutex_lock(&nvme_tcp_ofld_devices_mutex); - list_for_each_entry(dev, &nvme_tcp_ofld_devices, entry) { - if (dev->ops->claim_dev(dev, ctrl)) - goto out; - } - - dev = NULL; -out: - mutex_unlock(&nvme_tcp_ofld_devices_mutex); - - return dev; -} - -static struct blk_mq_tag_set * -nvme_tcp_ofld_alloc_tagset(struct nvme_ctrl *nctrl, bool admin) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct blk_mq_tag_set *set; - int rc; - - if (admin) { - set = &ctrl->admin_tag_set; - memset(set, 0, sizeof(*set)); - set->ops = &nvme_tcp_ofld_admin_mq_ops; - set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; - set->reserved_tags = NVMF_RESERVED_TAGS; - set->numa_node = nctrl->numa_node; - set->flags = BLK_MQ_F_BLOCKING; - set->cmd_size = sizeof(struct nvme_tcp_ofld_req); - set->driver_data = ctrl; - set->nr_hw_queues = 1; - set->timeout = NVME_ADMIN_TIMEOUT; - } else { - set = &ctrl->tag_set; - memset(set, 0, sizeof(*set)); - set->ops = &nvme_tcp_ofld_mq_ops; - set->queue_depth = nctrl->sqsize + 1; - set->reserved_tags = NVMF_RESERVED_TAGS; - set->numa_node = nctrl->numa_node; - set->flags = BLK_MQ_F_SHOULD_MERGE; - set->cmd_size = sizeof(struct nvme_tcp_ofld_req); - set->driver_data = ctrl; - set->nr_hw_queues = nctrl->queue_count - 1; - set->timeout = NVME_IO_TIMEOUT; - set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; - } - - rc = blk_mq_alloc_tag_set(set); - if (rc) - return ERR_PTR(rc); - - return set; -} - -static void __nvme_tcp_ofld_stop_queue(struct nvme_tcp_ofld_queue *queue) -{ - queue->dev->ops->drain_queue(queue); -} - -static void nvme_tcp_ofld_stop_queue(struct nvme_ctrl *nctrl, int qid) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvme_tcp_ofld_queue *queue = &ctrl->queues[qid]; - - mutex_lock(&queue->queue_lock); - if (test_and_clear_bit(NVME_TCP_OFLD_Q_LIVE, &queue->flags)) - __nvme_tcp_ofld_stop_queue(queue); - mutex_unlock(&queue->queue_lock); -} - -static void nvme_tcp_ofld_stop_io_queues(struct nvme_ctrl *ctrl) -{ - int i; - - for (i = 1; i < ctrl->queue_count; i++) - nvme_tcp_ofld_stop_queue(ctrl, i); -} - -static void __nvme_tcp_ofld_free_queue(struct nvme_tcp_ofld_queue *queue) -{ - queue->dev->ops->destroy_queue(queue); -} - -static void nvme_tcp_ofld_free_queue(struct nvme_ctrl *nctrl, int qid) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvme_tcp_ofld_queue *queue = &ctrl->queues[qid]; - - if (test_and_clear_bit(NVME_TCP_OFLD_Q_ALLOCATED, &queue->flags)) { - __nvme_tcp_ofld_free_queue(queue); - mutex_destroy(&queue->queue_lock); - } -} - -static void -nvme_tcp_ofld_free_io_queues(struct nvme_ctrl *nctrl) -{ - int i; - - for (i = 1; i < nctrl->queue_count; i++) - nvme_tcp_ofld_free_queue(nctrl, i); -} - -static void nvme_tcp_ofld_destroy_io_queues(struct nvme_ctrl *nctrl, bool remove) -{ - nvme_tcp_ofld_stop_io_queues(nctrl); - if (remove) { - blk_cleanup_queue(nctrl->connect_q); - blk_mq_free_tag_set(nctrl->tagset); - } - nvme_tcp_ofld_free_io_queues(nctrl); -} - -static void nvme_tcp_ofld_destroy_admin_queue(struct nvme_ctrl *nctrl, bool remove) -{ - nvme_tcp_ofld_stop_queue(nctrl, 0); - 
if (remove) { - blk_cleanup_queue(nctrl->admin_q); - blk_cleanup_queue(nctrl->fabrics_q); - blk_mq_free_tag_set(nctrl->admin_tagset); - } - nvme_tcp_ofld_free_queue(nctrl, 0); -} - -static int nvme_tcp_ofld_start_queue(struct nvme_ctrl *nctrl, int qid) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvme_tcp_ofld_queue *queue = &ctrl->queues[qid]; - int rc; - - queue = &ctrl->queues[qid]; - if (qid) { - queue->cmnd_capsule_len = nctrl->ioccsz * 16; - rc = nvmf_connect_io_queue(nctrl, qid, false); - } else { - queue->cmnd_capsule_len = sizeof(struct nvme_command) + NVME_TCP_ADMIN_CCSZ; - rc = nvmf_connect_admin_queue(nctrl); - } - - if (!rc) { - set_bit(NVME_TCP_OFLD_Q_LIVE, &queue->flags); - } else { - if (test_bit(NVME_TCP_OFLD_Q_ALLOCATED, &queue->flags)) - __nvme_tcp_ofld_stop_queue(queue); - dev_err(nctrl->device, - "failed to connect queue: %d ret=%d\n", qid, rc); - } - - return rc; -} - -static int nvme_tcp_ofld_configure_admin_queue(struct nvme_ctrl *nctrl, - bool new) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvme_tcp_ofld_queue *queue = &ctrl->queues[0]; - int rc; - - mutex_init(&queue->queue_lock); - - rc = ctrl->dev->ops->create_queue(queue, 0, NVME_AQ_DEPTH); - if (rc) - return rc; - - set_bit(NVME_TCP_OFLD_Q_ALLOCATED, &queue->flags); - if (new) { - nctrl->admin_tagset = - nvme_tcp_ofld_alloc_tagset(nctrl, true); - if (IS_ERR(nctrl->admin_tagset)) { - rc = PTR_ERR(nctrl->admin_tagset); - nctrl->admin_tagset = NULL; - goto out_free_queue; - } - - nctrl->fabrics_q = blk_mq_init_queue(nctrl->admin_tagset); - if (IS_ERR(nctrl->fabrics_q)) { - rc = PTR_ERR(nctrl->fabrics_q); - nctrl->fabrics_q = NULL; - goto out_free_tagset; - } - - nctrl->admin_q = blk_mq_init_queue(nctrl->admin_tagset); - if (IS_ERR(nctrl->admin_q)) { - rc = PTR_ERR(nctrl->admin_q); - nctrl->admin_q = NULL; - goto out_cleanup_fabrics_q; - } - } - - rc = nvme_tcp_ofld_start_queue(nctrl, 0); - if (rc) - goto out_cleanup_queue; - - rc = nvme_enable_ctrl(nctrl); - if (rc) - goto out_stop_queue; - - blk_mq_unquiesce_queue(nctrl->admin_q); - - rc = nvme_init_ctrl_finish(nctrl); - if (rc) - goto out_quiesce_queue; - - return 0; - -out_quiesce_queue: - blk_mq_quiesce_queue(nctrl->admin_q); - blk_sync_queue(nctrl->admin_q); -out_stop_queue: - nvme_tcp_ofld_stop_queue(nctrl, 0); - nvme_cancel_admin_tagset(nctrl); -out_cleanup_queue: - if (new) - blk_cleanup_queue(nctrl->admin_q); -out_cleanup_fabrics_q: - if (new) - blk_cleanup_queue(nctrl->fabrics_q); -out_free_tagset: - if (new) - blk_mq_free_tag_set(nctrl->admin_tagset); -out_free_queue: - nvme_tcp_ofld_free_queue(nctrl, 0); - - return rc; -} - -static unsigned int nvme_tcp_ofld_nr_io_queues(struct nvme_ctrl *nctrl) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvme_tcp_ofld_dev *dev = ctrl->dev; - u32 hw_vectors = dev->num_hw_vectors; - u32 nr_write_queues, nr_poll_queues; - u32 nr_io_queues, nr_total_queues; - - nr_io_queues = min3(nctrl->opts->nr_io_queues, num_online_cpus(), - hw_vectors); - nr_write_queues = min3(nctrl->opts->nr_write_queues, num_online_cpus(), - hw_vectors); - nr_poll_queues = min3(nctrl->opts->nr_poll_queues, num_online_cpus(), - hw_vectors); - - nr_total_queues = nr_io_queues + nr_write_queues + nr_poll_queues; - - return nr_total_queues; -} - -static void -nvme_tcp_ofld_set_io_queues(struct nvme_ctrl *nctrl, unsigned int nr_io_queues) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvmf_ctrl_options *opts = nctrl->opts; - - if 
(opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { - /* - * separate read/write queues - * hand out dedicated default queues only after we have - * sufficient read queues. - */ - ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; - ctrl->io_queues[HCTX_TYPE_DEFAULT] = - min(opts->nr_write_queues, nr_io_queues); - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } else { - /* - * shared read/write queues - * either no write queues were requested, or we don't have - * sufficient queue count to have dedicated default queues. - */ - ctrl->io_queues[HCTX_TYPE_DEFAULT] = - min(opts->nr_io_queues, nr_io_queues); - nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } - - if (opts->nr_poll_queues && nr_io_queues) { - /* map dedicated poll queues only if we have queues left */ - ctrl->io_queues[HCTX_TYPE_POLL] = - min(opts->nr_poll_queues, nr_io_queues); - } -} - -static int nvme_tcp_ofld_create_io_queues(struct nvme_ctrl *nctrl) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - int i, rc; - - for (i = 1; i < nctrl->queue_count; i++) { - mutex_init(&ctrl->queues[i].queue_lock); - - rc = ctrl->dev->ops->create_queue(&ctrl->queues[i], - i, nctrl->sqsize + 1); - if (rc) - goto out_free_queues; - - set_bit(NVME_TCP_OFLD_Q_ALLOCATED, &ctrl->queues[i].flags); - } - - return 0; - -out_free_queues: - for (i--; i >= 1; i--) - nvme_tcp_ofld_free_queue(nctrl, i); - - return rc; -} - -static int nvme_tcp_ofld_alloc_io_queues(struct nvme_ctrl *nctrl) -{ - unsigned int nr_io_queues; - int rc; - - nr_io_queues = nvme_tcp_ofld_nr_io_queues(nctrl); - rc = nvme_set_queue_count(nctrl, &nr_io_queues); - if (rc) - return rc; - - nctrl->queue_count = nr_io_queues + 1; - if (nctrl->queue_count < 2) { - dev_err(nctrl->device, - "unable to set any I/O queues\n"); - - return -ENOMEM; - } - - dev_info(nctrl->device, "creating %d I/O queues.\n", nr_io_queues); - nvme_tcp_ofld_set_io_queues(nctrl, nr_io_queues); - - return nvme_tcp_ofld_create_io_queues(nctrl); -} - -static int nvme_tcp_ofld_start_io_queues(struct nvme_ctrl *nctrl) -{ - int i, rc = 0; - - for (i = 1; i < nctrl->queue_count; i++) { - rc = nvme_tcp_ofld_start_queue(nctrl, i); - if (rc) - goto out_stop_queues; - } - - return 0; - -out_stop_queues: - for (i--; i >= 1; i--) - nvme_tcp_ofld_stop_queue(nctrl, i); - - return rc; -} - -static int -nvme_tcp_ofld_configure_io_queues(struct nvme_ctrl *nctrl, bool new) -{ - int rc = nvme_tcp_ofld_alloc_io_queues(nctrl); - - if (rc) - return rc; - - if (new) { - nctrl->tagset = nvme_tcp_ofld_alloc_tagset(nctrl, false); - if (IS_ERR(nctrl->tagset)) { - rc = PTR_ERR(nctrl->tagset); - nctrl->tagset = NULL; - goto out_free_io_queues; - } - - nctrl->connect_q = blk_mq_init_queue(nctrl->tagset); - if (IS_ERR(nctrl->connect_q)) { - rc = PTR_ERR(nctrl->connect_q); - nctrl->connect_q = NULL; - goto out_free_tag_set; - } - } - - rc = nvme_tcp_ofld_start_io_queues(nctrl); - if (rc) - goto out_cleanup_connect_q; - - if (!new) { - nvme_start_queues(nctrl); - if (!nvme_wait_freeze_timeout(nctrl, NVME_IO_TIMEOUT)) { - /* - * If we timed out waiting for freeze we are likely to - * be stuck. Fail the controller initialization just - * to be safe. 
- */ - rc = -ENODEV; - goto out_wait_freeze_timed_out; - } - blk_mq_update_nr_hw_queues(nctrl->tagset, nctrl->queue_count - 1); - nvme_unfreeze(nctrl); - } - - return 0; - -out_wait_freeze_timed_out: - nvme_stop_queues(nctrl); - nvme_sync_io_queues(nctrl); - nvme_tcp_ofld_stop_io_queues(nctrl); -out_cleanup_connect_q: - nvme_cancel_tagset(nctrl); - if (new) - blk_cleanup_queue(nctrl->connect_q); -out_free_tag_set: - if (new) - blk_mq_free_tag_set(nctrl->tagset); -out_free_io_queues: - nvme_tcp_ofld_free_io_queues(nctrl); - - return rc; -} - -static void nvme_tcp_ofld_reconnect_or_remove(struct nvme_ctrl *nctrl) -{ - /* If we are resetting/deleting then do nothing */ - if (nctrl->state != NVME_CTRL_CONNECTING) { - WARN_ON_ONCE(nctrl->state == NVME_CTRL_NEW || - nctrl->state == NVME_CTRL_LIVE); - - return; - } - - if (nvmf_should_reconnect(nctrl)) { - dev_info(nctrl->device, "Reconnecting in %d seconds...\n", - nctrl->opts->reconnect_delay); - queue_delayed_work(nvme_wq, - &to_tcp_ofld_ctrl(nctrl)->connect_work, - nctrl->opts->reconnect_delay * HZ); - } else { - dev_info(nctrl->device, "Removing controller...\n"); - nvme_delete_ctrl(nctrl); - } -} - -static int -nvme_tcp_ofld_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) -{ - struct nvme_tcp_ofld_ctrl *ctrl = data; - - hctx->driver_data = &ctrl->queues[0]; - - return 0; -} - -static int nvme_tcp_ofld_setup_ctrl(struct nvme_ctrl *nctrl, bool new) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvmf_ctrl_options *opts = nctrl->opts; - int rc = 0; - - rc = ctrl->dev->ops->setup_ctrl(ctrl); - if (rc) - return rc; - - rc = nvme_tcp_ofld_configure_admin_queue(nctrl, new); - if (rc) - goto out_release_ctrl; - - if (nctrl->icdoff) { - dev_err(nctrl->device, "icdoff is not supported!\n"); - rc = -EINVAL; - goto destroy_admin; - } - - if (!(nctrl->sgls & ((1 << 0) | (1 << 1)))) { - dev_err(nctrl->device, "Mandatory sgls are not supported!\n"); - goto destroy_admin; - } - - if (opts->queue_size > nctrl->sqsize + 1) - dev_warn(nctrl->device, - "queue_size %zu > ctrl sqsize %u, clamping down\n", - opts->queue_size, nctrl->sqsize + 1); - - if (nctrl->sqsize + 1 > nctrl->maxcmd) { - dev_warn(nctrl->device, - "sqsize %u > ctrl maxcmd %u, clamping down\n", - nctrl->sqsize + 1, nctrl->maxcmd); - nctrl->sqsize = nctrl->maxcmd - 1; - } - - if (nctrl->queue_count > 1) { - rc = nvme_tcp_ofld_configure_io_queues(nctrl, new); - if (rc) - goto destroy_admin; - } - - if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_LIVE)) { - /* - * state change failure is ok if we started ctrl delete, - * unless we're during creation of a new controller to - * avoid races with teardown flow. 
- */ - WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING && - nctrl->state != NVME_CTRL_DELETING_NOIO); - WARN_ON_ONCE(new); - rc = -EINVAL; - goto destroy_io; - } - - nvme_start_ctrl(nctrl); - - return 0; - -destroy_io: - if (nctrl->queue_count > 1) { - nvme_stop_queues(nctrl); - nvme_sync_io_queues(nctrl); - nvme_tcp_ofld_stop_io_queues(nctrl); - nvme_cancel_tagset(nctrl); - nvme_tcp_ofld_destroy_io_queues(nctrl, new); - } -destroy_admin: - blk_mq_quiesce_queue(nctrl->admin_q); - blk_sync_queue(nctrl->admin_q); - nvme_tcp_ofld_stop_queue(nctrl, 0); - nvme_cancel_admin_tagset(nctrl); - nvme_tcp_ofld_destroy_admin_queue(nctrl, new); -out_release_ctrl: - ctrl->dev->ops->release_ctrl(ctrl); - - return rc; -} - -static int -nvme_tcp_ofld_check_dev_opts(struct nvmf_ctrl_options *opts, - struct nvme_tcp_ofld_ops *ofld_ops) -{ - unsigned int nvme_tcp_ofld_opt_mask = NVMF_ALLOWED_OPTS | - ofld_ops->allowed_opts | ofld_ops->required_opts; - struct nvmf_ctrl_options dev_opts_mask; - - if (opts->mask & ~nvme_tcp_ofld_opt_mask) { - pr_warn("One or more nvmf options missing from ofld drvr %s.\n", - ofld_ops->name); - - dev_opts_mask.mask = nvme_tcp_ofld_opt_mask; - - return nvmf_check_required_opts(&dev_opts_mask, opts->mask); - } - - return 0; -} - -static void nvme_tcp_ofld_free_ctrl(struct nvme_ctrl *nctrl) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - struct nvme_tcp_ofld_dev *dev = ctrl->dev; - - if (list_empty(&ctrl->list)) - goto free_ctrl; - - ctrl->dev->ops->release_ctrl(ctrl); - - mutex_lock(&nvme_tcp_ofld_ctrl_mutex); - list_del(&ctrl->list); - mutex_unlock(&nvme_tcp_ofld_ctrl_mutex); - - nvmf_free_options(nctrl->opts); -free_ctrl: - module_put(dev->ops->module); - kfree(ctrl->queues); - kfree(ctrl); -} - -static void nvme_tcp_ofld_set_sg_null(struct nvme_command *c) -{ - struct nvme_sgl_desc *sg = &c->common.dptr.sgl; - - sg->addr = 0; - sg->length = 0; - sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A; -} - -inline void nvme_tcp_ofld_set_sg_inline(struct nvme_tcp_ofld_queue *queue, - struct nvme_command *c, u32 data_len) -{ - struct nvme_sgl_desc *sg = &c->common.dptr.sgl; - - sg->addr = cpu_to_le64(queue->ctrl->nctrl.icdoff); - sg->length = cpu_to_le32(data_len); - sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; -} - -static void nvme_tcp_ofld_map_data(struct nvme_command *c, u32 data_len) -{ - struct nvme_sgl_desc *sg = &c->common.dptr.sgl; - - sg->addr = 0; - sg->length = cpu_to_le32(data_len); - sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A; -} - -static void nvme_tcp_ofld_submit_async_event(struct nvme_ctrl *arg) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(arg); - struct nvme_tcp_ofld_queue *queue = &ctrl->queues[0]; - struct nvme_tcp_ofld_dev *dev = queue->dev; - struct nvme_tcp_ofld_ops *ops = dev->ops; - - ctrl->async_req.nvme_cmd.common.opcode = nvme_admin_async_event; - ctrl->async_req.nvme_cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH; - ctrl->async_req.nvme_cmd.common.flags |= NVME_CMD_SGL_METABUF; - - nvme_tcp_ofld_set_sg_null(&ctrl->async_req.nvme_cmd); - - ctrl->async_req.async = true; - ctrl->async_req.queue = queue; - ctrl->async_req.done = nvme_tcp_ofld_async_req_done; - - ops->send_req(&ctrl->async_req); -} - -static void -nvme_tcp_ofld_teardown_admin_queue(struct nvme_ctrl *nctrl, bool remove) -{ - blk_mq_quiesce_queue(nctrl->admin_q); - blk_sync_queue(nctrl->admin_q); - - nvme_tcp_ofld_stop_queue(nctrl, 0); - nvme_cancel_admin_tagset(nctrl); - - if (remove) - 
blk_mq_unquiesce_queue(nctrl->admin_q); - - nvme_tcp_ofld_destroy_admin_queue(nctrl, remove); -} - -static void -nvme_tcp_ofld_teardown_io_queues(struct nvme_ctrl *nctrl, bool remove) -{ - if (nctrl->queue_count <= 1) - return; - - blk_mq_quiesce_queue(nctrl->admin_q); - nvme_start_freeze(nctrl); - nvme_stop_queues(nctrl); - nvme_sync_io_queues(nctrl); - nvme_tcp_ofld_stop_io_queues(nctrl); - nvme_cancel_tagset(nctrl); - - if (remove) - nvme_start_queues(nctrl); - - nvme_tcp_ofld_destroy_io_queues(nctrl, remove); -} - -static void nvme_tcp_ofld_reconnect_ctrl_work(struct work_struct *work) -{ - struct nvme_tcp_ofld_ctrl *ctrl = - container_of(to_delayed_work(work), - struct nvme_tcp_ofld_ctrl, - connect_work); - struct nvme_ctrl *nctrl = &ctrl->nctrl; - - ++nctrl->nr_reconnects; - - if (nvme_tcp_ofld_setup_ctrl(nctrl, false)) - goto requeue; - - dev_info(nctrl->device, "Successfully reconnected (%d attempt)\n", - nctrl->nr_reconnects); - - nctrl->nr_reconnects = 0; - - return; - -requeue: - dev_info(nctrl->device, "Failed reconnect attempt %d\n", - nctrl->nr_reconnects); - nvme_tcp_ofld_reconnect_or_remove(nctrl); -} - -static void nvme_tcp_ofld_error_recovery_work(struct work_struct *work) -{ - struct nvme_tcp_ofld_ctrl *ctrl = - container_of(work, struct nvme_tcp_ofld_ctrl, err_work); - struct nvme_ctrl *nctrl = &ctrl->nctrl; - - nvme_stop_keep_alive(nctrl); - nvme_tcp_ofld_teardown_io_queues(nctrl, false); - /* unquiesce to fail fast pending requests */ - nvme_start_queues(nctrl); - nvme_tcp_ofld_teardown_admin_queue(nctrl, false); - blk_mq_unquiesce_queue(nctrl->admin_q); - - if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_CONNECTING)) { - /* state change failure is ok if we started nctrl delete */ - WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING && - nctrl->state != NVME_CTRL_DELETING_NOIO); - - return; - } - - nvme_tcp_ofld_reconnect_or_remove(nctrl); -} - -static void -nvme_tcp_ofld_teardown_ctrl(struct nvme_ctrl *nctrl, bool shutdown) -{ - struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(nctrl); - - cancel_work_sync(&ctrl->err_work); - cancel_delayed_work_sync(&ctrl->connect_work); - nvme_tcp_ofld_teardown_io_queues(nctrl, shutdown); - blk_mq_quiesce_queue(nctrl->admin_q); - if (shutdown) - nvme_shutdown_ctrl(nctrl); - else - nvme_disable_ctrl(nctrl); - nvme_tcp_ofld_teardown_admin_queue(nctrl, shutdown); -} - -static void nvme_tcp_ofld_delete_ctrl(struct nvme_ctrl *nctrl) -{ - nvme_tcp_ofld_teardown_ctrl(nctrl, true); -} - -static void nvme_tcp_ofld_reset_ctrl_work(struct work_struct *work) -{ - struct nvme_ctrl *nctrl = - container_of(work, struct nvme_ctrl, reset_work); - - nvme_stop_ctrl(nctrl); - nvme_tcp_ofld_teardown_ctrl(nctrl, false); - - if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_CONNECTING)) { - /* state change failure is ok if we started ctrl delete */ - WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING && - nctrl->state != NVME_CTRL_DELETING_NOIO); - - return; - } - - if (nvme_tcp_ofld_setup_ctrl(nctrl, false)) - goto out_fail; - - return; - -out_fail: - ++nctrl->nr_reconnects; - nvme_tcp_ofld_reconnect_or_remove(nctrl); -} - -static int -nvme_tcp_ofld_init_request(struct blk_mq_tag_set *set, - struct request *rq, - unsigned int hctx_idx, - unsigned int numa_node) -{ - struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_ofld_ctrl *ctrl = set->driver_data; - int qid; - - qid = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; - req->queue = &ctrl->queues[qid]; - nvme_req(rq)->ctrl = &ctrl->nctrl; - nvme_req(rq)->cmd = &req->nvme_cmd; - req->done = nvme_tcp_ofld_req_done; - - return 0; -} - -inline size_t nvme_tcp_ofld_inline_data_size(struct nvme_tcp_ofld_queue *queue) -{ - return queue->cmnd_capsule_len - sizeof(struct nvme_command); -} -EXPORT_SYMBOL_GPL(nvme_tcp_ofld_inline_data_size); - -static blk_status_t -nvme_tcp_ofld_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd) -{ - struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(bd->rq); - struct nvme_tcp_ofld_queue *queue = hctx->driver_data; - struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl; - struct nvme_ns *ns = hctx->queue->queuedata; - struct nvme_tcp_ofld_dev *dev = queue->dev; - struct nvme_tcp_ofld_ops *ops = dev->ops; - struct nvme_command *nvme_cmd; - struct request *rq = bd->rq; - bool queue_ready; - u32 data_len; - int rc; - - queue_ready = test_bit(NVME_TCP_OFLD_Q_LIVE, &queue->flags); - - req->async = false; - - if (!nvme_check_ready(&ctrl->nctrl, rq, queue_ready)) - return nvme_fail_nonready_command(&ctrl->nctrl, rq); - - rc = nvme_setup_cmd(ns, rq); - if (unlikely(rc)) - return rc; - - blk_mq_start_request(rq); - - nvme_cmd = &req->nvme_cmd; - nvme_cmd->common.flags |= NVME_CMD_SGL_METABUF; - - data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; - if (!data_len) - nvme_tcp_ofld_set_sg_null(&req->nvme_cmd); - else if ((rq_data_dir(rq) == WRITE) && - data_len <= nvme_tcp_ofld_inline_data_size(queue)) - nvme_tcp_ofld_set_sg_inline(queue, nvme_cmd, data_len); - else - nvme_tcp_ofld_map_data(nvme_cmd, data_len); - - rc = ops->send_req(req); - if (unlikely(rc)) - return rc; - - return BLK_STS_OK; -} - -static void -nvme_tcp_ofld_exit_request(struct blk_mq_tag_set *set, - struct request *rq, unsigned int hctx_idx) -{ - /* - * Nothing is allocated in nvme_tcp_ofld_init_request, - * hence empty. 
- */ -} - -static int -nvme_tcp_ofld_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) -{ - struct nvme_tcp_ofld_ctrl *ctrl = data; - - hctx->driver_data = &ctrl->queues[hctx_idx + 1]; - - return 0; -} - -static int nvme_tcp_ofld_map_queues(struct blk_mq_tag_set *set) -{ - struct nvme_tcp_ofld_ctrl *ctrl = set->driver_data; - struct nvmf_ctrl_options *opts = ctrl->nctrl.opts; - - if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { - /* separate read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; - set->map[HCTX_TYPE_READ].nr_queues = - ctrl->io_queues[HCTX_TYPE_READ]; - set->map[HCTX_TYPE_READ].queue_offset = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - } else { - /* shared read/write queues */ - set->map[HCTX_TYPE_DEFAULT].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; - set->map[HCTX_TYPE_READ].nr_queues = - ctrl->io_queues[HCTX_TYPE_DEFAULT]; - set->map[HCTX_TYPE_READ].queue_offset = 0; - } - blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); - blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); - - if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { - /* map dedicated poll queues only if we have queues left */ - set->map[HCTX_TYPE_POLL].nr_queues = - ctrl->io_queues[HCTX_TYPE_POLL]; - set->map[HCTX_TYPE_POLL].queue_offset = - ctrl->io_queues[HCTX_TYPE_DEFAULT] + - ctrl->io_queues[HCTX_TYPE_READ]; - blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); - } - - dev_info(ctrl->nctrl.device, - "mapped %d/%d/%d default/read/poll queues.\n", - ctrl->io_queues[HCTX_TYPE_DEFAULT], - ctrl->io_queues[HCTX_TYPE_READ], - ctrl->io_queues[HCTX_TYPE_POLL]); - - return 0; -} - -static int nvme_tcp_ofld_poll(struct blk_mq_hw_ctx *hctx) -{ - struct nvme_tcp_ofld_queue *queue = hctx->driver_data; - struct nvme_tcp_ofld_dev *dev = queue->dev; - struct nvme_tcp_ofld_ops *ops = dev->ops; - - return ops->poll_queue(queue); -} - -static void nvme_tcp_ofld_complete_timed_out(struct request *rq) -{ - struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq); - struct nvme_ctrl *nctrl = &req->queue->ctrl->nctrl; - - nvme_tcp_ofld_stop_queue(nctrl, nvme_tcp_ofld_qid(req->queue)); - if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { - nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; - blk_mq_complete_request(rq); - } -} - -static enum blk_eh_timer_return nvme_tcp_ofld_timeout(struct request *rq, bool reserved) -{ - struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_ofld_ctrl *ctrl = req->queue->ctrl; - - dev_warn(ctrl->nctrl.device, - "queue %d: timeout request %#x type %d\n", - nvme_tcp_ofld_qid(req->queue), rq->tag, req->nvme_cmd.common.opcode); - - if (ctrl->nctrl.state != NVME_CTRL_LIVE) { - /* - * If we are resetting, connecting or deleting we should - * complete immediately because we may block controller - * teardown or setup sequence - * - ctrl disable/shutdown fabrics requests - * - connect requests - * - initialization admin requests - * - I/O requests that entered after unquiescing and - * the controller stopped responding - * - * All other requests should be cancelled by the error - * recovery work, so it's fine that we fail it here. 
- */ - nvme_tcp_ofld_complete_timed_out(rq); - - return BLK_EH_DONE; - } - - nvme_tcp_ofld_error_recovery(&ctrl->nctrl); - - return BLK_EH_RESET_TIMER; -} - -static struct blk_mq_ops nvme_tcp_ofld_mq_ops = { - .queue_rq = nvme_tcp_ofld_queue_rq, - .complete = nvme_complete_rq, - .init_request = nvme_tcp_ofld_init_request, - .exit_request = nvme_tcp_ofld_exit_request, - .init_hctx = nvme_tcp_ofld_init_hctx, - .timeout = nvme_tcp_ofld_timeout, - .map_queues = nvme_tcp_ofld_map_queues, - .poll = nvme_tcp_ofld_poll, -}; - -static struct blk_mq_ops nvme_tcp_ofld_admin_mq_ops = { - .queue_rq = nvme_tcp_ofld_queue_rq, - .complete = nvme_complete_rq, - .init_request = nvme_tcp_ofld_init_request, - .exit_request = nvme_tcp_ofld_exit_request, - .init_hctx = nvme_tcp_ofld_init_admin_hctx, - .timeout = nvme_tcp_ofld_timeout, -}; - -static const struct nvme_ctrl_ops nvme_tcp_ofld_ctrl_ops = { - .name = "tcp_offload", - .module = THIS_MODULE, - .flags = NVME_F_FABRICS, - .reg_read32 = nvmf_reg_read32, - .reg_read64 = nvmf_reg_read64, - .reg_write32 = nvmf_reg_write32, - .free_ctrl = nvme_tcp_ofld_free_ctrl, - .submit_async_event = nvme_tcp_ofld_submit_async_event, - .delete_ctrl = nvme_tcp_ofld_delete_ctrl, - .get_address = nvmf_get_address, -}; - -static bool -nvme_tcp_ofld_existing_controller(struct nvmf_ctrl_options *opts) -{ - struct nvme_tcp_ofld_ctrl *ctrl; - bool found = false; - - mutex_lock(&nvme_tcp_ofld_ctrl_mutex); - list_for_each_entry(ctrl, &nvme_tcp_ofld_ctrl_list, list) { - found = nvmf_ip_options_match(&ctrl->nctrl, opts); - if (found) - break; - } - mutex_unlock(&nvme_tcp_ofld_ctrl_mutex); - - return found; -} - -static struct nvme_ctrl * -nvme_tcp_ofld_create_ctrl(struct device *ndev, struct nvmf_ctrl_options *opts) -{ - struct nvme_tcp_ofld_queue *queue; - struct nvme_tcp_ofld_ctrl *ctrl; - struct nvme_tcp_ofld_dev *dev; - struct nvme_ctrl *nctrl; - int i, rc = 0; - - ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); - if (!ctrl) - return ERR_PTR(-ENOMEM); - - INIT_LIST_HEAD(&ctrl->list); - nctrl = &ctrl->nctrl; - nctrl->opts = opts; - nctrl->queue_count = opts->nr_io_queues + opts->nr_write_queues + - opts->nr_poll_queues + 1; - nctrl->sqsize = opts->queue_size - 1; - nctrl->kato = opts->kato; - INIT_DELAYED_WORK(&ctrl->connect_work, - nvme_tcp_ofld_reconnect_ctrl_work); - INIT_WORK(&ctrl->err_work, nvme_tcp_ofld_error_recovery_work); - INIT_WORK(&nctrl->reset_work, nvme_tcp_ofld_reset_ctrl_work); - if (!(opts->mask & NVMF_OPT_TRSVCID)) { - opts->trsvcid = - kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL); - if (!opts->trsvcid) { - rc = -ENOMEM; - goto out_free_ctrl; - } - opts->mask |= NVMF_OPT_TRSVCID; - } - - rc = inet_pton_with_scope(&init_net, AF_UNSPEC, opts->traddr, - opts->trsvcid, - &ctrl->conn_params.remote_ip_addr); - if (rc) { - pr_err("malformed address passed: %s:%s\n", - opts->traddr, opts->trsvcid); - goto out_free_ctrl; - } - - if (opts->mask & NVMF_OPT_HOST_TRADDR) { - rc = inet_pton_with_scope(&init_net, AF_UNSPEC, - opts->host_traddr, NULL, - &ctrl->conn_params.local_ip_addr); - if (rc) { - pr_err("malformed src address passed: %s\n", - opts->host_traddr); - goto out_free_ctrl; - } - } - - if (!opts->duplicate_connect && - nvme_tcp_ofld_existing_controller(opts)) { - rc = -EALREADY; - goto out_free_ctrl; - } - - /* Find device that can reach the dest addr */ - dev = nvme_tcp_ofld_lookup_dev(ctrl); - if (!dev) { - pr_info("no device found for addr %s:%s.\n", - opts->traddr, opts->trsvcid); - rc = -EINVAL; - goto out_free_ctrl; - } - - /* Increase driver refcnt */ - 
if (!try_module_get(dev->ops->module)) { - pr_err("try_module_get failed\n"); - dev = NULL; - goto out_free_ctrl; - } - - rc = nvme_tcp_ofld_check_dev_opts(opts, dev->ops); - if (rc) - goto out_module_put; - - ctrl->dev = dev; - - if (ctrl->dev->ops->max_hw_sectors) - nctrl->max_hw_sectors = ctrl->dev->ops->max_hw_sectors; - if (ctrl->dev->ops->max_segments) - nctrl->max_segments = ctrl->dev->ops->max_segments; - - ctrl->queues = kcalloc(nctrl->queue_count, - sizeof(struct nvme_tcp_ofld_queue), - GFP_KERNEL); - if (!ctrl->queues) { - rc = -ENOMEM; - goto out_module_put; - } - - for (i = 0; i < nctrl->queue_count; ++i) { - queue = &ctrl->queues[i]; - queue->ctrl = ctrl; - queue->dev = dev; - queue->report_err = nvme_tcp_ofld_report_queue_err; - } - - rc = nvme_init_ctrl(nctrl, ndev, &nvme_tcp_ofld_ctrl_ops, 0); - if (rc) - goto out_free_queues; - - if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_CONNECTING)) { - WARN_ON_ONCE(1); - rc = -EINTR; - goto out_uninit_ctrl; - } - - rc = nvme_tcp_ofld_setup_ctrl(nctrl, true); - if (rc) - goto out_uninit_ctrl; - - dev_info(nctrl->device, "new ctrl: NQN \"%s\", addr %pISp\n", - opts->subsysnqn, &ctrl->conn_params.remote_ip_addr); - - mutex_lock(&nvme_tcp_ofld_ctrl_mutex); - list_add_tail(&ctrl->list, &nvme_tcp_ofld_ctrl_list); - mutex_unlock(&nvme_tcp_ofld_ctrl_mutex); - - return nctrl; - -out_uninit_ctrl: - nvme_uninit_ctrl(nctrl); - nvme_put_ctrl(nctrl); -out_free_queues: - kfree(ctrl->queues); -out_module_put: - module_put(dev->ops->module); -out_free_ctrl: - kfree(ctrl); - - return ERR_PTR(rc); -} - -static struct nvmf_transport_ops nvme_tcp_ofld_transport = { - .name = "tcp_offload", - .module = THIS_MODULE, - .required_opts = NVMF_OPT_TRADDR, - .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_NR_WRITE_QUEUES | - NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | - NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_HDR_DIGEST | - NVMF_OPT_DATA_DIGEST | NVMF_OPT_NR_POLL_QUEUES | - NVMF_OPT_TOS, - .create_ctrl = nvme_tcp_ofld_create_ctrl, -}; - -static int __init nvme_tcp_ofld_init_module(void) -{ - nvmf_register_transport(&nvme_tcp_ofld_transport); - - return 0; -} - -static void __exit nvme_tcp_ofld_cleanup_module(void) -{ - struct nvme_tcp_ofld_ctrl *ctrl; - - nvmf_unregister_transport(&nvme_tcp_ofld_transport); - - mutex_lock(&nvme_tcp_ofld_ctrl_mutex); - list_for_each_entry(ctrl, &nvme_tcp_ofld_ctrl_list, list) - nvme_delete_ctrl(&ctrl->nctrl); - mutex_unlock(&nvme_tcp_ofld_ctrl_mutex); - flush_workqueue(nvme_delete_wq); -} - -module_init(nvme_tcp_ofld_init_module); -module_exit(nvme_tcp_ofld_cleanup_module); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h deleted file mode 100644 index 2ac5b2428612..000000000000 --- a/drivers/nvme/host/tcp-offload.h +++ /dev/null @@ -1,206 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright 2021 Marvell. All rights reserved. - */ - -/* Linux includes */ -#include -#include -#include -#include - -/* Driver includes */ -#include "nvme.h" -#include "fabrics.h" - -/* Forward declarations */ -struct nvme_tcp_ofld_ops; - -/* Representation of a vendor-specific device. This is the struct used to - * register to the offload layer by the vendor-specific driver during its probe - * function. - * Allocated by vendor-specific driver. 
- */ -struct nvme_tcp_ofld_dev { - struct list_head entry; - struct net_device *ndev; - struct nvme_tcp_ofld_ops *ops; - - /* Vendor specific driver context */ - int num_hw_vectors; -}; - -/* Per IO struct holding the nvme_request and command - * Allocated by blk-mq. - */ -struct nvme_tcp_ofld_req { - struct nvme_request req; - struct nvme_command nvme_cmd; - struct list_head queue_entry; - struct nvme_tcp_ofld_queue *queue; - - /* Vendor specific driver context */ - void *private_data; - - /* async flag is used to distinguish between async and IO flow - * in common send_req() of nvme_tcp_ofld_ops. - */ - bool async; - - void (*done)(struct nvme_tcp_ofld_req *req, - union nvme_result *result, - __le16 status); -}; - -enum nvme_tcp_ofld_queue_flags { - NVME_TCP_OFLD_Q_ALLOCATED = 0, - NVME_TCP_OFLD_Q_LIVE = 1, -}; - -/* Allocated by nvme_tcp_ofld */ -struct nvme_tcp_ofld_queue { - /* Offload device associated to this queue */ - struct nvme_tcp_ofld_dev *dev; - struct nvme_tcp_ofld_ctrl *ctrl; - unsigned long flags; - size_t cmnd_capsule_len; - - /* mutex used during stop_queue */ - struct mutex queue_lock; - - u8 hdr_digest; - u8 data_digest; - u8 tos; - - /* Vendor specific driver context */ - void *private_data; - - /* Error callback function */ - int (*report_err)(struct nvme_tcp_ofld_queue *queue); -}; - -/* Connectivity (routing) params used for establishing a connection */ -struct nvme_tcp_ofld_ctrl_con_params { - struct sockaddr_storage remote_ip_addr; - - /* If NVMF_OPT_HOST_TRADDR is provided it will be set in local_ip_addr - * in nvme_tcp_ofld_create_ctrl(). - * If NVMF_OPT_HOST_TRADDR is not provided the local_ip_addr will be - * initialized by claim_dev(). - */ - struct sockaddr_storage local_ip_addr; -}; - -/* Allocated by nvme_tcp_ofld */ -struct nvme_tcp_ofld_ctrl { - struct nvme_ctrl nctrl; - struct list_head list; - struct nvme_tcp_ofld_dev *dev; - - /* admin and IO queues */ - struct blk_mq_tag_set tag_set; - struct blk_mq_tag_set admin_tag_set; - struct nvme_tcp_ofld_queue *queues; - - struct work_struct err_work; - struct delayed_work connect_work; - - /* - * Each entry in the array indicates the number of queues of - * corresponding type. - */ - u32 io_queues[HCTX_MAX_TYPES]; - - /* Connectivity params */ - struct nvme_tcp_ofld_ctrl_con_params conn_params; - - struct nvme_tcp_ofld_req async_req; - - /* Vendor specific driver context */ - void *private_data; -}; - -struct nvme_tcp_ofld_ops { - const char *name; - struct module *module; - - /* For vendor-specific driver to report what opts it supports. - * It could be different than the ULP supported opts due to hardware - * limitations. Also it could be different among different vendor - * drivers. - */ - int required_opts; /* bitmap using enum nvmf_parsing_opts */ - int allowed_opts; /* bitmap using enum nvmf_parsing_opts */ - - /* For vendor-specific max num of segments and IO sizes */ - u32 max_hw_sectors; - u32 max_segments; - - /** - * claim_dev: Return True if addr is reachable via offload device. - * @dev: The offload device to check. - * @ctrl: The offload ctrl have the conn_params field. The - * conn_params is to be filled with routing params by the lower - * driver. - */ - int (*claim_dev)(struct nvme_tcp_ofld_dev *dev, - struct nvme_tcp_ofld_ctrl *ctrl); - - /** - * setup_ctrl: Setup device specific controller structures. - * @ctrl: The offload ctrl. - */ - int (*setup_ctrl)(struct nvme_tcp_ofld_ctrl *ctrl); - - /** - * release_ctrl: Release/Free device specific controller structures. 
- * @ctrl: The offload ctrl. - */ - int (*release_ctrl)(struct nvme_tcp_ofld_ctrl *ctrl); - - /** - * create_queue: Create offload queue and establish TCP + NVMeTCP - * (icreq+icresp) connection. Return true on successful connection. - * Based on nvme_tcp_alloc_queue. - * @queue: The queue itself - used as input and output. - * @qid: The queue ID associated with the requested queue. - * @q_size: The queue depth. - */ - int (*create_queue)(struct nvme_tcp_ofld_queue *queue, int qid, - size_t queue_size); - - /** - * drain_queue: Drain a given queue - blocking function call. - * Return from this function ensures that no additional - * completions will arrive on this queue and that the HW will - * not access host memory. - * @queue: The queue to drain. - */ - void (*drain_queue)(struct nvme_tcp_ofld_queue *queue); - - /** - * destroy_queue: Close the TCP + NVMeTCP connection of a given queue - * and make sure its no longer active (no completions will arrive on the - * queue). - * @queue: The queue to destroy. - */ - void (*destroy_queue)(struct nvme_tcp_ofld_queue *queue); - - /** - * poll_queue: Poll a given queue for completions. - * @queue: The queue to poll. - */ - int (*poll_queue)(struct nvme_tcp_ofld_queue *queue); - - /** - * send_req: Dispatch a request. Returns the execution status. - * @req: Ptr to request to be sent. - */ - int (*send_req)(struct nvme_tcp_ofld_req *req); -}; - -/* Exported functions for lower vendor specific offload drivers */ -int nvme_tcp_ofld_register_dev(struct nvme_tcp_ofld_dev *dev); -void nvme_tcp_ofld_unregister_dev(struct nvme_tcp_ofld_dev *dev); -void nvme_tcp_ofld_error_recovery(struct nvme_ctrl *nctrl); -inline size_t nvme_tcp_ofld_inline_data_size(struct nvme_tcp_ofld_queue *queue); -- cgit v1.2.3 From bc1bee3b87ee48bd97ef7fd306445132ba2041b0 Mon Sep 17 00:00:00 2001 From: Calvin Johnson Date: Fri, 11 Jun 2021 13:53:54 +0300 Subject: net: mdiobus: Introduce fwnode_mdiobus_register_phy() Introduce fwnode_mdiobus_register_phy() to register PHYs on the mdiobus. From the compatible string, identify whether the PHY is c45 and based on this create a PHY device instance which is registered on the mdiobus. Along with fwnode_mdiobus_register_phy() also introduce fwnode_find_mii_timestamper() and fwnode_mdiobus_phy_device_register() since they are needed. While at it, also use the newly introduced fwnode operation in of_mdiobus_phy_device_register(). Signed-off-by: Calvin Johnson Signed-off-by: Ioana Ciornei Acked-by: Grant Likely Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + drivers/net/mdio/Kconfig | 7 ++ drivers/net/mdio/Makefile | 3 +- drivers/net/mdio/fwnode_mdio.c | 144 +++++++++++++++++++++++++++++++++++++++++ drivers/net/mdio/of_mdio.c | 44 ++----------- include/linux/fwnode_mdio.h | 35 ++++++++++ 6 files changed, 194 insertions(+), 40 deletions(-) create mode 100644 drivers/net/mdio/fwnode_mdio.c create mode 100644 include/linux/fwnode_mdio.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index e69c1991ec3b..e8f8b6c33a51 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6811,6 +6811,7 @@ F: Documentation/devicetree/bindings/net/mdio* F: Documentation/devicetree/bindings/net/qca,ar803x.yaml F: Documentation/networking/phy.rst F: drivers/net/mdio/ +F: drivers/net/mdio/fwnode_mdio.c F: drivers/net/mdio/of_mdio.c F: drivers/net/pcs/ F: drivers/net/phy/ diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index d06e06f5e31a..422e9e042a3c 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -19,6 +19,13 @@ config MDIO_BUS reflects whether the mdio_bus/mdio_device code is built as a loadable module or built-in. +config FWNODE_MDIO + def_tristate PHYLIB + depends on (ACPI || OF) || COMPILE_TEST + select FIXED_PHY + help + FWNODE MDIO bus (Ethernet PHY) accessors + config OF_MDIO def_tristate PHYLIB depends on OF diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile index c3ec0ef989df..2e6813c709eb 100644 --- a/drivers/net/mdio/Makefile +++ b/drivers/net/mdio/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Linux MDIO bus drivers -obj-$(CONFIG_OF_MDIO) += of_mdio.o +obj-$(CONFIG_FWNODE_MDIO) += fwnode_mdio.o +obj-$(CONFIG_OF_MDIO) += of_mdio.o obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c new file mode 100644 index 000000000000..e96766da8de4 --- /dev/null +++ b/drivers/net/mdio/fwnode_mdio.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * fwnode helpers for the MDIO (Ethernet PHY) API + * + * This file provides helper functions for extracting PHY device information + * out of the fwnode and using it to populate an mii_bus. 
+ */ + +#include +#include +#include +#include + +MODULE_AUTHOR("Calvin Johnson "); +MODULE_LICENSE("GPL"); + +static struct mii_timestamper * +fwnode_find_mii_timestamper(struct fwnode_handle *fwnode) +{ + struct of_phandle_args arg; + int err; + + if (is_acpi_node(fwnode)) + return NULL; + + err = of_parse_phandle_with_fixed_args(to_of_node(fwnode), + "timestamper", 1, 0, &arg); + if (err == -ENOENT) + return NULL; + else if (err) + return ERR_PTR(err); + + if (arg.args_count != 1) + return ERR_PTR(-EINVAL); + + return register_mii_timestamper(arg.np, arg.args[0]); +} + +int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct fwnode_handle *child, u32 addr) +{ + int rc; + + rc = fwnode_irq_get(child, 0); + if (rc == -EPROBE_DEFER) + return rc; + + if (rc > 0) { + phy->irq = rc; + mdio->irq[addr] = rc; + } else { + phy->irq = mdio->irq[addr]; + } + + if (fwnode_property_read_bool(child, "broken-turn-around")) + mdio->phy_ignore_ta_mask |= 1 << addr; + + fwnode_property_read_u32(child, "reset-assert-us", + &phy->mdio.reset_assert_delay); + fwnode_property_read_u32(child, "reset-deassert-us", + &phy->mdio.reset_deassert_delay); + + /* Associate the fwnode with the device structure so it + * can be looked up later + */ + fwnode_handle_get(child); + phy->mdio.dev.fwnode = child; + + /* All data is now stored in the phy struct; + * register it + */ + rc = phy_device_register(phy); + if (rc) { + fwnode_handle_put(child); + return rc; + } + + dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n", + child, addr); + return 0; +} +EXPORT_SYMBOL(fwnode_mdiobus_phy_device_register); + +int fwnode_mdiobus_register_phy(struct mii_bus *bus, + struct fwnode_handle *child, u32 addr) +{ + struct mii_timestamper *mii_ts = NULL; + struct phy_device *phy; + bool is_c45 = false; + u32 phy_id; + int rc; + + mii_ts = fwnode_find_mii_timestamper(child); + if (IS_ERR(mii_ts)) + return PTR_ERR(mii_ts); + + rc = fwnode_property_match_string(child, "compatible", + "ethernet-phy-ieee802.3-c45"); + if (rc >= 0) + is_c45 = true; + + if (is_c45 || fwnode_get_phy_id(child, &phy_id)) + phy = get_phy_device(bus, addr, is_c45); + else + phy = phy_device_create(bus, addr, phy_id, 0, NULL); + if (IS_ERR(phy)) { + unregister_mii_timestamper(mii_ts); + return PTR_ERR(phy); + } + + if (is_acpi_node(child)) { + phy->irq = bus->irq[addr]; + + /* Associate the fwnode with the device structure so it + * can be looked up later. + */ + phy->mdio.dev.fwnode = child; + + /* All data is now stored in the phy struct, so register it */ + rc = phy_device_register(phy); + if (rc) { + phy_device_free(phy); + fwnode_handle_put(phy->mdio.dev.fwnode); + return rc; + } + } else if (is_of_node(child)) { + rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr); + if (rc) { + unregister_mii_timestamper(mii_ts); + phy_device_free(phy); + return rc; + } + } + + /* phy->mii_ts may already be defined by the PHY driver. A + * mii_timestamper probed via the device tree will still have + * precedence. 
+ */ + if (mii_ts) + phy->mii_ts = mii_ts; + return 0; +} +EXPORT_SYMBOL(fwnode_mdiobus_register_phy); diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index d73c0570f19c..17327bbc1de4 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -51,46 +52,11 @@ static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node) } int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, - struct device_node *child, u32 addr) + struct device_node *child, u32 addr) { - int rc; - - rc = of_irq_get(child, 0); - if (rc == -EPROBE_DEFER) - return rc; - - if (rc > 0) { - phy->irq = rc; - mdio->irq[addr] = rc; - } else { - phy->irq = mdio->irq[addr]; - } - - if (of_property_read_bool(child, "broken-turn-around")) - mdio->phy_ignore_ta_mask |= 1 << addr; - - of_property_read_u32(child, "reset-assert-us", - &phy->mdio.reset_assert_delay); - of_property_read_u32(child, "reset-deassert-us", - &phy->mdio.reset_deassert_delay); - - /* Associate the OF node with the device structure so it - * can be looked up later */ - of_node_get(child); - phy->mdio.dev.of_node = child; - phy->mdio.dev.fwnode = of_fwnode_handle(child); - - /* All data is now stored in the phy struct; - * register it */ - rc = phy_device_register(phy); - if (rc) { - of_node_put(child); - return rc; - } - - dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n", - child, addr); - return 0; + return fwnode_mdiobus_phy_device_register(mdio, phy, + of_fwnode_handle(child), + addr); } EXPORT_SYMBOL(of_mdiobus_phy_device_register); diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h new file mode 100644 index 000000000000..faf603c48c86 --- /dev/null +++ b/include/linux/fwnode_mdio.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * FWNODE helper for the MDIO (Ethernet PHY) API + */ + +#ifndef __LINUX_FWNODE_MDIO_H +#define __LINUX_FWNODE_MDIO_H + +#include + +#if IS_ENABLED(CONFIG_FWNODE_MDIO) +int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct fwnode_handle *child, u32 addr); + +int fwnode_mdiobus_register_phy(struct mii_bus *bus, + struct fwnode_handle *child, u32 addr); + +#else /* CONFIG_FWNODE_MDIO */ +int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct fwnode_handle *child, u32 addr) +{ + return -EINVAL; +} + +static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus, + struct fwnode_handle *child, + u32 addr) +{ + return -EINVAL; +} +#endif + +#endif /* __LINUX_FWNODE_MDIO_H */ -- cgit v1.2.3 From 803ca24d2f92e2cf393df4705423f7b09a5eabd9 Mon Sep 17 00:00:00 2001 From: Calvin Johnson Date: Fri, 11 Jun 2021 13:53:57 +0300 Subject: net: mdio: Add ACPI support code for mdio Define acpi_mdiobus_register() to Register mii_bus and create PHYs for each ACPI child node. Signed-off-by: Calvin Johnson Signed-off-by: Ioana Ciornei Acked-by: Rafael J. Wysocki Acked-by: Grant Likely Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + drivers/net/mdio/Kconfig | 7 ++++++ drivers/net/mdio/Makefile | 1 + drivers/net/mdio/acpi_mdio.c | 58 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi_mdio.h | 26 ++++++++++++++++++++ 5 files changed, 93 insertions(+) create mode 100644 drivers/net/mdio/acpi_mdio.c create mode 100644 include/linux/acpi_mdio.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index e8f8b6c33a51..2172f594be8f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6811,6 +6811,7 @@ F: Documentation/devicetree/bindings/net/mdio* F: Documentation/devicetree/bindings/net/qca,ar803x.yaml F: Documentation/networking/phy.rst F: drivers/net/mdio/ +F: drivers/net/mdio/acpi_mdio.c F: drivers/net/mdio/fwnode_mdio.c F: drivers/net/mdio/of_mdio.c F: drivers/net/pcs/ diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index 422e9e042a3c..99a6c13a11af 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -34,6 +34,13 @@ config OF_MDIO help OpenFirmware MDIO bus (Ethernet PHY) accessors +config ACPI_MDIO + def_tristate PHYLIB + depends on ACPI + depends on PHYLIB + help + ACPI MDIO bus (Ethernet PHY) accessors + if MDIO_BUS config MDIO_DEVRES diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile index 2e6813c709eb..15f8dc4042ce 100644 --- a/drivers/net/mdio/Makefile +++ b/drivers/net/mdio/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Linux MDIO bus drivers +obj-$(CONFIG_ACPI_MDIO) += acpi_mdio.o obj-$(CONFIG_FWNODE_MDIO) += fwnode_mdio.o obj-$(CONFIG_OF_MDIO) += of_mdio.o diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c new file mode 100644 index 000000000000..d77c987fda9c --- /dev/null +++ b/drivers/net/mdio/acpi_mdio.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ACPI helpers for the MDIO (Ethernet PHY) API + * + * This file provides helper functions for extracting PHY device information + * out of the ACPI ASL and using it to populate an mii_bus. + */ + +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Calvin Johnson "); +MODULE_LICENSE("GPL"); + +/** + * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL. + * @mdio: pointer to mii_bus structure + * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent + * an ACPI device object corresponding to the MDIO bus and its children are + * expected to correspond to the PHY devices on that bus. + * + * This function registers the mii_bus structure and registers a phy_device + * for each child node of @fwnode. + */ +int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) +{ + struct fwnode_handle *child; + u32 addr; + int ret; + + /* Mask out all PHYs from auto probing. 
*/ + mdio->phy_mask = GENMASK(31, 0); + ret = mdiobus_register(mdio); + if (ret) + return ret; + + ACPI_COMPANION_SET(&mdio->dev, to_acpi_device_node(fwnode)); + + /* Loop over the child nodes and register a phy_device for each PHY */ + fwnode_for_each_child_node(fwnode, child) { + ret = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &addr); + if (ret || addr >= PHY_MAX_ADDR) + continue; + + ret = fwnode_mdiobus_register_phy(mdio, child, addr); + if (ret == -ENODEV) + dev_err(&mdio->dev, + "MDIO device at address %d is missing.\n", + addr); + } + return 0; +} +EXPORT_SYMBOL(acpi_mdiobus_register); diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h new file mode 100644 index 000000000000..0a24ab7cb66f --- /dev/null +++ b/include/linux/acpi_mdio.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * ACPI helper for the MDIO (Ethernet PHY) API + */ + +#ifndef __LINUX_ACPI_MDIO_H +#define __LINUX_ACPI_MDIO_H + +#include + +#if IS_ENABLED(CONFIG_ACPI_MDIO) +int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode); +#else /* CONFIG_ACPI_MDIO */ +static inline int +acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) +{ + /* + * Fall back to mdiobus_register() function to register a bus. + * This way, we don't have to keep compat bits around in drivers. + */ + + return mdiobus_register(mdio); +} +#endif + +#endif /* __LINUX_ACPI_MDIO_H */ -- cgit v1.2.3 From d4433d5b7b34fa316c473769d51c79b2755953e4 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 11 Jun 2021 23:05:22 +0300 Subject: net: pcs: xpcs: move register bit descriptions to a header file Vendors which integrate the Designware XPCS might modify a few things here and there, and to support those, it's best to create separate C files in order to not clutter up the main pcs-xpcs.c. Because the vendor files might want to access the common xpcs registers too, let's move them in a header file which is local to this driver and can be included by vendor files as appropriate. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + drivers/net/pcs/pcs-xpcs.c | 97 +----------------------------------------- drivers/net/pcs/pcs-xpcs.h | 103 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 105 insertions(+), 96 deletions(-) create mode 100644 drivers/net/pcs/pcs-xpcs.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 2172f594be8f..c8214235380e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17676,6 +17676,7 @@ M: Jose Abreu L: netdev@vger.kernel.org S: Supported F: drivers/net/pcs/pcs-xpcs.c +F: drivers/net/pcs/pcs-xpcs.h F: include/linux/pcs/pcs-xpcs.h SYNOPSYS DESIGNWARE I2C DRIVER diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index a2cbb2d926b7..8ca7592b02ec 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -11,102 +11,7 @@ #include #include #include - -#define SYNOPSYS_XPCS_ID 0x7996ced0 -#define SYNOPSYS_XPCS_MASK 0xffffffff - -/* Vendor regs access */ -#define DW_VENDOR BIT(15) - -/* VR_XS_PCS */ -#define DW_USXGMII_RST BIT(10) -#define DW_USXGMII_EN BIT(9) -#define DW_VR_XS_PCS_DIG_STS 0x0010 -#define DW_RXFIFO_ERR GENMASK(6, 5) - -/* SR_MII */ -#define DW_USXGMII_FULL BIT(8) -#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5)) -#define DW_USXGMII_10000 (BIT(13) | BIT(6)) -#define DW_USXGMII_5000 (BIT(13) | BIT(5)) -#define DW_USXGMII_2500 (BIT(5)) -#define DW_USXGMII_1000 (BIT(6)) -#define DW_USXGMII_100 (BIT(13)) -#define DW_USXGMII_10 (0) - -/* SR_AN */ -#define DW_SR_AN_ADV1 0x10 -#define DW_SR_AN_ADV2 0x11 -#define DW_SR_AN_ADV3 0x12 -#define DW_SR_AN_LP_ABL1 0x13 -#define DW_SR_AN_LP_ABL2 0x14 -#define DW_SR_AN_LP_ABL3 0x15 - -/* Clause 73 Defines */ -/* AN_LP_ABL1 */ -#define DW_C73_PAUSE BIT(10) -#define DW_C73_ASYM_PAUSE BIT(11) -#define DW_C73_AN_ADV_SF 0x1 -/* AN_LP_ABL2 */ -#define DW_C73_1000KX BIT(5) -#define DW_C73_10000KX4 BIT(6) -#define DW_C73_10000KR BIT(7) -/* AN_LP_ABL3 */ -#define DW_C73_2500KX BIT(0) -#define DW_C73_5000KR BIT(1) - -/* Clause 37 Defines */ -/* VR MII MMD registers offsets */ -#define DW_VR_MII_MMD_CTRL 0x0000 -#define DW_VR_MII_DIG_CTRL1 0x8000 -#define DW_VR_MII_AN_CTRL 0x8001 -#define DW_VR_MII_AN_INTR_STS 0x8002 -/* Enable 2.5G Mode */ -#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2) -/* EEE Mode Control Register */ -#define DW_VR_MII_EEE_MCTRL0 0x8006 -#define DW_VR_MII_EEE_MCTRL1 0x800b - -/* VR_MII_DIG_CTRL1 */ -#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) - -/* VR_MII_AN_CTRL */ -#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 -#define DW_VR_MII_TX_CONFIG_MASK BIT(3) -#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 -#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0 -#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1 -#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1) -#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0 -#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 - -/* VR_MII_AN_INTR_STS */ -#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) -#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2 -#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) -#define DW_VR_MII_C37_ANSGM_SP_10 0x0 -#define DW_VR_MII_C37_ANSGM_SP_100 0x1 -#define DW_VR_MII_C37_ANSGM_SP_1000 0x2 -#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4) - -/* SR MII MMD Control defines */ -#define AN_CL37_EN BIT(12) /* Enable Clause 37 auto-nego */ -#define SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */ -#define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */ - -/* VR MII EEE Control 0 defines */ -#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */ -#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */ -#define 
DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */ -#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */ -#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */ -#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */ - -#define DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT 8 -#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8) - -/* VR MII EEE Control 1 defines */ -#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */ +#include "pcs-xpcs.h" #define phylink_pcs_to_xpcs(pl_pcs) \ container_of((pl_pcs), struct dw_xpcs, pcs) diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h new file mode 100644 index 000000000000..867537a68c63 --- /dev/null +++ b/drivers/net/pcs/pcs-xpcs.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare XPCS helpers + * + * Author: Jose Abreu + */ + +#define SYNOPSYS_XPCS_ID 0x7996ced0 +#define SYNOPSYS_XPCS_MASK 0xffffffff + +/* Vendor regs access */ +#define DW_VENDOR BIT(15) + +/* VR_XS_PCS */ +#define DW_USXGMII_RST BIT(10) +#define DW_USXGMII_EN BIT(9) +#define DW_VR_XS_PCS_DIG_STS 0x0010 +#define DW_RXFIFO_ERR GENMASK(6, 5) + +/* SR_MII */ +#define DW_USXGMII_FULL BIT(8) +#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5)) +#define DW_USXGMII_10000 (BIT(13) | BIT(6)) +#define DW_USXGMII_5000 (BIT(13) | BIT(5)) +#define DW_USXGMII_2500 (BIT(5)) +#define DW_USXGMII_1000 (BIT(6)) +#define DW_USXGMII_100 (BIT(13)) +#define DW_USXGMII_10 (0) + +/* SR_AN */ +#define DW_SR_AN_ADV1 0x10 +#define DW_SR_AN_ADV2 0x11 +#define DW_SR_AN_ADV3 0x12 +#define DW_SR_AN_LP_ABL1 0x13 +#define DW_SR_AN_LP_ABL2 0x14 +#define DW_SR_AN_LP_ABL3 0x15 + +/* Clause 73 Defines */ +/* AN_LP_ABL1 */ +#define DW_C73_PAUSE BIT(10) +#define DW_C73_ASYM_PAUSE BIT(11) +#define DW_C73_AN_ADV_SF 0x1 +/* AN_LP_ABL2 */ +#define DW_C73_1000KX BIT(5) +#define DW_C73_10000KX4 BIT(6) +#define DW_C73_10000KR BIT(7) +/* AN_LP_ABL3 */ +#define DW_C73_2500KX BIT(0) +#define DW_C73_5000KR BIT(1) + +/* Clause 37 Defines */ +/* VR MII MMD registers offsets */ +#define DW_VR_MII_MMD_CTRL 0x0000 +#define DW_VR_MII_DIG_CTRL1 0x8000 +#define DW_VR_MII_AN_CTRL 0x8001 +#define DW_VR_MII_AN_INTR_STS 0x8002 +/* Enable 2.5G Mode */ +#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2) +/* EEE Mode Control Register */ +#define DW_VR_MII_EEE_MCTRL0 0x8006 +#define DW_VR_MII_EEE_MCTRL1 0x800b + +/* VR_MII_DIG_CTRL1 */ +#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) + +/* VR_MII_AN_CTRL */ +#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 +#define DW_VR_MII_TX_CONFIG_MASK BIT(3) +#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 +#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0 +#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1 +#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1) +#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0 +#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 + +/* VR_MII_AN_INTR_STS */ +#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) +#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2 +#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) +#define DW_VR_MII_C37_ANSGM_SP_10 0x0 +#define DW_VR_MII_C37_ANSGM_SP_100 0x1 +#define DW_VR_MII_C37_ANSGM_SP_1000 0x2 +#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4) + +/* SR MII MMD Control defines */ +#define AN_CL37_EN BIT(12) /* Enable Clause 37 auto-nego */ +#define SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */ +#define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */ + +/* VR MII EEE Control 0 defines */ +#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx 
Enable */ +#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */ +#define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */ +#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */ +#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */ +#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */ + +#define DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT 8 +#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8) + +/* VR MII EEE Control 1 defines */ +#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */ -- cgit v1.2.3 From dd0721ea4c7a6c2ec8b309ff57d74d88f08d4c23 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 11 Jun 2021 23:05:25 +0300 Subject: net: pcs: xpcs: add support for NXP SJA1105 The NXP SJA1105 DSA switch integrates a Synopsys SGMII XPCS on port 4. The generic code works fine, except there is an integration issue which needs to be dealt with: in this switch, the XPCS is integrated with a PMA that has the TX lane polarity inverted by default (PLUS is MINUS, MINUS is PLUS). To obtain normal non-inverted behavior, the TX lane polarity must be inverted in the PCS, via the DIGITAL_CONTROL_2 register. We introduce a pma_config() method in xpcs_compat which is called by the phylink_pcs_config() implementation. Also, the NXP SJA1105 returns all zeroes in the PHY ID registers 2 and 3. We need to hack up an ad-hoc PHY ID (OUI is zero, device ID is 1) in order for the XPCS driver to recognize it. This PHY ID is added to the public include/linux/pcs/pcs-xpcs.h for that reason (for the sja1105 driver to be able to use it in a later patch). Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- MAINTAINERS | 1 + drivers/net/pcs/Makefile | 4 +++- drivers/net/pcs/pcs-xpcs-nxp.c | 16 ++++++++++++++++ drivers/net/pcs/pcs-xpcs.c | 25 +++++++++++++++++++++++-- drivers/net/pcs/pcs-xpcs.h | 10 ++++++++++ include/linux/pcs/pcs-xpcs.h | 2 ++ 6 files changed, 55 insertions(+), 3 deletions(-) create mode 100644 drivers/net/pcs/pcs-xpcs-nxp.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index c8214235380e..349a87b42d3c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13203,6 +13203,7 @@ M: Vladimir Oltean L: linux-kernel@vger.kernel.org S: Maintained F: drivers/net/dsa/sja1105 +F: drivers/net/pcs/pcs-xpcs-nxp.c NXP TDA998X DRM DRIVER M: Russell King diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile index c23146755972..0603d469bd57 100644 --- a/drivers/net/pcs/Makefile +++ b/drivers/net/pcs/Makefile @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Linux PCS drivers -obj-$(CONFIG_PCS_XPCS) += pcs-xpcs.o +pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o + +obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o diff --git a/drivers/net/pcs/pcs-xpcs-nxp.c b/drivers/net/pcs/pcs-xpcs-nxp.c new file mode 100644 index 000000000000..51b2fc7d36a9 --- /dev/null +++ b/drivers/net/pcs/pcs-xpcs-nxp.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright 2021 NXP Semiconductors + */ +#include +#include "pcs-xpcs.h" + +/* In NXP SJA1105, the PCS is integrated with a PMA that has the TX lane + * polarity inverted by default (PLUS is MINUS, MINUS is PLUS). To obtain + * normal non-inverted behavior, the TX lane polarity must be inverted in the + * PCS, via the DIGITAL_CONTROL_2 register. 
+ */ +int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs) +{ + return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL2, + DW_VR_MII_DIG_CTRL2_TX_POL_INV); +} diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index ecf5011977d3..3b1baacfaf8f 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -117,6 +117,7 @@ struct xpcs_compat { const phy_interface_t *interface; int num_interfaces; int an_mode; + int (*pma_config)(struct dw_xpcs *xpcs); }; struct xpcs_id { @@ -168,7 +169,7 @@ static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat, #define xpcs_linkmode_supported(compat, mode) \ __xpcs_linkmode_supported(compat, ETHTOOL_LINK_MODE_ ## mode ## _BIT) -static int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg) +int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg) { u32 reg_addr = mdiobus_c45_addr(dev, reg); struct mii_bus *bus = xpcs->mdiodev->bus; @@ -177,7 +178,7 @@ static int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg) return mdiobus_read(bus, addr, reg_addr); } -static int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val) +int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val) { u32 reg_addr = mdiobus_c45_addr(dev, reg); struct mii_bus *bus = xpcs->mdiodev->bus; @@ -788,6 +789,12 @@ static int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, return -1; } + if (compat->pma_config) { + ret = compat->pma_config(xpcs); + if (ret) + return ret; + } + return 0; } @@ -1022,11 +1029,25 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { }, }; +static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { + [DW_XPCS_SGMII] = { + .supported = xpcs_sgmii_features, + .interface = xpcs_sgmii_interfaces, + .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces), + .an_mode = DW_AN_C37_SGMII, + .pma_config = nxp_sja1105_sgmii_pma_config, + }, +}; + static const struct xpcs_id xpcs_id_list[] = { { .id = SYNOPSYS_XPCS_ID, .mask = SYNOPSYS_XPCS_MASK, .compat = synopsys_xpcs_compat, + }, { + .id = NXP_SJA1105_XPCS_ID, + .mask = SYNOPSYS_XPCS_MASK, + .compat = nxp_sja1105_xpcs_compat, }, }; diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index 867537a68c63..3daf4276a158 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -60,10 +60,15 @@ /* EEE Mode Control Register */ #define DW_VR_MII_EEE_MCTRL0 0x8006 #define DW_VR_MII_EEE_MCTRL1 0x800b +#define DW_VR_MII_DIG_CTRL2 0x80e1 /* VR_MII_DIG_CTRL1 */ #define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) +/* VR_MII_DIG_CTRL2 */ +#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4) +#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0) + /* VR_MII_AN_CTRL */ #define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3 #define DW_VR_MII_TX_CONFIG_MASK BIT(3) @@ -101,3 +106,8 @@ /* VR MII EEE Control 1 defines */ #define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */ + +int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg); +int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val); + +int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs); diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index 4f1cdf6f3d4c..c594f7cdc304 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -10,6 +10,8 @@ #include #include +#define NXP_SJA1105_XPCS_ID 0x00000010 + /* AN mode */ #define DW_AN_C73 1 #define DW_AN_C37_SGMII 2 -- cgit v1.2.3 From f7af616c632ee2ac3af0876fe33bf9e0232e665a Mon Sep 17 00:00:00 2001 From: M Chetan Kumar Date: Sun, 13 Jun 2021 
18:20:23 +0530 Subject: net: iosm: infrastructure 1) Kconfig & Makefile changes for IOSM Driver compilation. 2) Add IOSM Driver documentation. 3) Modified MAINTAINER file for IOSM Driver addition. Signed-off-by: M Chetan Kumar Signed-off-by: David S. Miller --- Documentation/networking/device_drivers/index.rst | 1 + .../networking/device_drivers/wwan/index.rst | 18 ++++ .../networking/device_drivers/wwan/iosm.rst | 96 ++++++++++++++++++++++ MAINTAINERS | 7 ++ drivers/net/wwan/Kconfig | 12 +++ drivers/net/wwan/Makefile | 1 + drivers/net/wwan/iosm/Makefile | 26 ++++++ 7 files changed, 161 insertions(+) create mode 100644 Documentation/networking/device_drivers/wwan/index.rst create mode 100644 Documentation/networking/device_drivers/wwan/iosm.rst create mode 100644 drivers/net/wwan/iosm/Makefile (limited to 'MAINTAINERS') diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index d8279de7bf25..3a5a1d46e77e 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -18,6 +18,7 @@ Contents: qlogic/index wan/index wifi/index + wwan/index .. only:: subproject and html diff --git a/Documentation/networking/device_drivers/wwan/index.rst b/Documentation/networking/device_drivers/wwan/index.rst new file mode 100644 index 000000000000..1cb8c7371401 --- /dev/null +++ b/Documentation/networking/device_drivers/wwan/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +WWAN Device Drivers +=================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + iosm + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/device_drivers/wwan/iosm.rst b/Documentation/networking/device_drivers/wwan/iosm.rst new file mode 100644 index 000000000000..cd12f57d980a --- /dev/null +++ b/Documentation/networking/device_drivers/wwan/iosm.rst @@ -0,0 +1,96 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +.. Copyright (C) 2020-21 Intel Corporation + +.. _iosm_driver_doc: + +=========================================== +IOSM Driver for Intel M.2 PCIe based Modems +=========================================== +The IOSM (IPC over Shared Memory) driver is a WWAN PCIe host driver developed +for linux or chrome platform for data exchange over PCIe interface between +Host platform & Intel M.2 Modem. The driver exposes interface conforming to the +MBIM protocol [1]. Any front end application ( eg: Modem Manager) could easily +manage the MBIM interface to enable data communication towards WWAN. + +Basic usage +=========== +MBIM functions are inactive when unmanaged. The IOSM driver only provides a +userspace interface MBIM "WWAN PORT" representing MBIM control channel and does +not play any role in managing the functionality. It is the job of a userspace +application to detect port enumeration and enable MBIM functionality. + +Examples of few such userspace application are: +- mbimcli (included with the libmbim [2] library), and +- Modem Manager [3] + +Management Applications to carry out below required actions for establishing +MBIM IP session: +- open the MBIM control channel +- configure network connection settings +- connect to network +- configure IP network interface + +Management application development +================================== +The driver and userspace interfaces are described below. The MBIM protocol is +described in [1] Mobile Broadband Interface Model v1.0 Errata-1. 
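As an illustration of the first of the required actions listed above (opening the MBIM control channel), a minimal userspace sketch is given below. It is not part of the driver or of this patch: the /dev/wwan0mbim0 node follows the ABI described in the next section, the 16-byte MBIM_OPEN_MSG layout (MessageType 0x00000001, MessageLength, TransactionId, MaxControlTransfer) is taken from the MBIM specification [1], and the 4096-byte maximum control transfer is only an example value.

  /* Hypothetical sketch: open the IOSM MBIM control channel and issue
   * MBIM_OPEN_MSG. Error handling is kept to a bare minimum.
   */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  struct mbim_open_msg {
          uint32_t message_type;          /* MBIM_OPEN_MSG = 0x00000001 */
          uint32_t message_length;        /* 16 bytes for this message */
          uint32_t transaction_id;        /* chosen by the host */
          uint32_t max_control_transfer;  /* example value, normally negotiated */
  };

  int main(void)
  {
          struct mbim_open_msg msg = {
                  .message_type = 0x00000001,
                  .message_length = sizeof(msg),
                  .transaction_id = 1,
                  .max_control_transfer = 4096,
          };
          int fd = open("/dev/wwan0mbim0", O_RDWR);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          /* MBIM fields are little-endian; this sketch assumes an LE host. */
          if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
                  perror("write");
          close(fd);
          return 0;
  }

A real management application would then wait for MBIM_OPEN_DONE and continue with configuration and connect commands, typically through libmbim [2] rather than hand-rolled messages.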
+ +MBIM control channel userspace ABI +---------------------------------- + +/dev/wwan0mbim0 character device +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The driver exposes an MBIM interface to the MBIM function by implementing +MBIM WWAN Port. The userspace end of the control channel pipe is a +/dev/wwan0mbim0 character device. Application shall use this interface for +MBIM protocol communication. + +Fragmentation +~~~~~~~~~~~~~ +The userspace application is responsible for all control message fragmentation +and defragmentation as per MBIM specification. + +/dev/wwan0mbim0 write() +~~~~~~~~~~~~~~~~~~~~~ +The MBIM control messages from the management application must not exceed the +negotiated control message size. + +/dev/wwan0mbim0 read() +~~~~~~~~~~~~~~~~~~~~ +The management application must accept control messages of up the negotiated +control message size. + +MBIM data channel userspace ABI +------------------------------- + +wwan0-X network device +~~~~~~~~~~~~~~~~~~~~ +The IOSM driver exposes IP link interface "wwan0-X" of type "wwan" for IP +traffic. Iproute network utility is used for creating "wwan0-X" network +interface and for associating it with MBIM IP session. The Driver supports +upto 8 IP sessions for simultaneous IP communication. + +The userspace management application is responsible for creating new IP link +prior to establishing MBIM IP session where the SessionId is greater than 0. + +For example, creating new IP link for a MBIM IP session with SessionId 1: + + ip link add dev wwan0-1 parentdev-name wwan0 type wwan linkid 1 + +The driver will automatically map the "wwan0-1" network device to MBIM IP +session 1. + +References +========== +[1] "MBIM (Mobile Broadband Interface Model) Errata-1" + - https://www.usb.org/document-library/ + +[2] libmbim - "a glib-based library for talking to WWAN modems and + devices which speak the Mobile Interface Broadband Model (MBIM) + protocol" + - http://www.freedesktop.org/wiki/Software/libmbim/ + +[3] Modem Manager - "a DBus-activated daemon which controls mobile + broadband (2G/3G/4G) devices and connections" + - http://www.freedesktop.org/wiki/Software/ModemManager/ diff --git a/MAINTAINERS b/MAINTAINERS index 349a87b42d3c..183cc61e2dc0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9453,6 +9453,13 @@ L: Dell.Client.Kernel@dell.com S: Maintained F: drivers/platform/x86/intel-wmi-thunderbolt.c +INTEL WWAN IOSM DRIVER +M: M Chetan Kumar +M: Intel Corporation +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/wwan/iosm/ + INTEL(R) TRACE HUB M: Alexander Shishkin S: Supported diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index ec0b194a373c..13613a4f53d8 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -44,4 +44,16 @@ config MHI_WWAN_CTRL To compile this driver as a module, choose M here: the module will be called mhi_wwan_ctrl. +config IOSM + tristate "IOSM Driver for Intel M.2 WWAN Device" + select WWAN_CORE + depends on INTEL_IOMMU + help + This driver enables Intel M.2 WWAN Device communication. + + If you have one of those Intel M.2 WWAN Modules and wish to use it in + Linux say Y/M here. + + If unsure, say N. 
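As a companion to the control channel read() description earlier in this documentation, the sketch below shows the receive side. It is not part of the patch: the 4096-byte buffer stands in for whatever MaxControlTransfer was negotiated during MBIM_OPEN, and defragmentation of larger messages (which the documentation assigns to userspace) is omitted.

  /* Hypothetical sketch: receive one MBIM control message from the port.
   * The buffer must be at least as large as the negotiated MaxControlTransfer;
   * 4096 is only an example value.
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          unsigned char buf[4096];
          ssize_t n;
          int fd = open("/dev/wwan0mbim0", O_RDWR);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          n = read(fd, buf, sizeof(buf));  /* blocks until a message arrives */
          if (n > 0)
                  printf("received %zd byte MBIM control message\n", n);
          close(fd);
          return 0;
  }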
+ endif # WWAN diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile index f33f77ca1021..3e565d3f984f 100644 --- a/drivers/net/wwan/Makefile +++ b/drivers/net/wwan/Makefile @@ -9,3 +9,4 @@ wwan-objs += wwan_core.o obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o +obj-$(CONFIG_IOSM) += iosm/ diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile new file mode 100644 index 000000000000..cdeeb9357af6 --- /dev/null +++ b/drivers/net/wwan/iosm/Makefile @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: (GPL-2.0-only) +# +# Copyright (C) 2020-21 Intel Corporation. +# + +iosm-y = \ + iosm_ipc_task_queue.o \ + iosm_ipc_imem.o \ + iosm_ipc_imem_ops.o \ + iosm_ipc_mmio.o \ + iosm_ipc_port.o \ + iosm_ipc_wwan.o \ + iosm_ipc_uevent.o \ + iosm_ipc_pm.o \ + iosm_ipc_pcie.o \ + iosm_ipc_irq.o \ + iosm_ipc_chnl_cfg.o \ + iosm_ipc_protocol.o \ + iosm_ipc_protocol_ops.o \ + iosm_ipc_mux.o \ + iosm_ipc_mux_codec.o + +obj-$(CONFIG_IOSM) := iosm.o + +# compilation flags +ccflags-y += -DDEBUG -- cgit v1.2.3 From 8744365e258459775bd9b49b705a82d66a21c2b4 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 28 May 2021 10:48:49 +0200 Subject: MAINTAINERS: netfilter: add irc channel The community #netfilter IRC channel is now live on the libera.chat network (https://libera.chat/). CC: Arturo Borrero Gonzalez Link: https://marc.info/?l=netfilter&m=162210948632717 Signed-off-by: Nicolas Dichtel Signed-off-by: Pablo Neira Ayuso --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index bfb3d0931cba..f3d44262d16e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12657,6 +12657,7 @@ W: http://www.netfilter.org/ W: http://www.iptables.org/ W: http://www.nftables.org/ Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/ +C: irc://irc.libera.chat/netfilter T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git F: include/linux/netfilter* -- cgit v1.2.3 From 5e90abf49c2adfbd6954429c2a1aafdfe9fcab92 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Fri, 18 Jun 2021 19:36:10 +0200 Subject: net: wwan: Add RPMSG WWAN CTRL driver The remote processor messaging (rpmsg) subsystem provides an interface to communicate with other remote processors. On many Qualcomm SoCs this is used to communicate with an integrated modem DSP that implements most of the modem functionality and provides high-level protocols like QMI or AT to allow controlling the modem. For QMI, most older Qualcomm SoCs (e.g. MSM8916/MSM8974) have a standalone "DATA5_CNTL" channel that allows exchanging QMI messages. Note that newer SoCs (e.g. SDM845) only allow exchanging QMI messages via a shared QRTR channel that is available via a socket API on Linux. For AT, the "DATA4" channel accepts at least a limited set of AT commands, on many older and newer Qualcomm SoCs, although QMI is typically the preferred control protocol. Often there are additional QMI/AT channels (usually named DATA*_CNTL for QMI and DATA* for AT), but it is not clear if those are really functional on all devices. Also, at the moment there is no use case for having multiple QMI/AT ports. If needed more channels could be added later after more testing. Note that the data path (network interface) is entirely separate from the control path and varies between Qualcomm SoCs, e.g. "IPA" on newer Qualcomm SoCs or "BAM-DMUX" on some older ones. 
The RPMSG WWAN CTRL driver exposes the QMI/AT control ports via the WWAN subsystem, and therefore allows userspace like ModemManager to set up the modem. Until now, ModemManager had to use the RPMSG-specific rpmsg-char where the channels must be explicitly exposed as a char device first and don't show up directly in sysfs. The driver is a fairly simple glue layer between WWAN and RPMSG and is mostly based on the existing mhi_wwan_ctrl.c and rpmsg_char.c. Cc: Loic Poulain Cc: Bjorn Andersson Signed-off-by: Stephan Gerhold Signed-off-by: David S. Miller --- MAINTAINERS | 7 ++ drivers/net/wwan/Kconfig | 18 +++++ drivers/net/wwan/Makefile | 1 + drivers/net/wwan/rpmsg_wwan_ctrl.c | 143 +++++++++++++++++++++++++++++++++++++ 4 files changed, 169 insertions(+) create mode 100644 drivers/net/wwan/rpmsg_wwan_ctrl.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 183cc61e2dc0..fbf792962d7b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15587,6 +15587,13 @@ F: include/linux/rpmsg/ F: include/uapi/linux/rpmsg.h F: samples/rpmsg/ +REMOTE PROCESSOR MESSAGING (RPMSG) WWAN CONTROL DRIVER +M: Stephan Gerhold +L: netdev@vger.kernel.org +L: linux-remoteproc@vger.kernel.org +S: Maintained +F: drivers/net/wwan/rpmsg_wwan_ctrl.c + RENESAS CLOCK DRIVERS M: Geert Uytterhoeven L: linux-renesas-soc@vger.kernel.org diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index 249b3f1ed62b..de9384326bc8 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -38,6 +38,24 @@ config MHI_WWAN_CTRL To compile this driver as a module, choose M here: the module will be called mhi_wwan_ctrl. +config RPMSG_WWAN_CTRL + tristate "RPMSG WWAN control driver" + depends on RPMSG + help + RPMSG WWAN CTRL allows modems available via RPMSG channels to expose + different modem protocols/ports to userspace, including AT and QMI. + These protocols can be accessed directly from userspace + (e.g. AT commands) or via libraries/tools (e.g. libqmi, libqcdm...). + + This is mainly used for modems integrated into many Qualcomm SoCs, + e.g. for AT and QMI on Qualcomm MSM8916 or MSM8974. Note that many + newer Qualcomm SoCs (e.g. SDM845) still provide an AT port through + this driver but the QMI messages can only be sent through + QRTR network sockets (CONFIG_QRTR). + + To compile this driver as a module, choose M here: the module will be + called rpmsg_wwan_ctrl. 
+ config IOSM tristate "IOSM Driver for Intel M.2 WWAN Device" depends on INTEL_IOMMU diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile index 83dd3482ffc3..d90ac33abaef 100644 --- a/drivers/net/wwan/Makefile +++ b/drivers/net/wwan/Makefile @@ -9,4 +9,5 @@ wwan-objs += wwan_core.o obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o +obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o obj-$(CONFIG_IOSM) += iosm/ diff --git a/drivers/net/wwan/rpmsg_wwan_ctrl.c b/drivers/net/wwan/rpmsg_wwan_ctrl.c new file mode 100644 index 000000000000..de226cdb69fd --- /dev/null +++ b/drivers/net/wwan/rpmsg_wwan_ctrl.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021, Stephan Gerhold */ +#include +#include +#include +#include +#include +#include + +struct rpmsg_wwan_dev { + /* Lower level is a rpmsg dev, upper level is a wwan port */ + struct rpmsg_device *rpdev; + struct wwan_port *wwan_port; + struct rpmsg_endpoint *ept; +}; + +static int rpmsg_wwan_ctrl_callback(struct rpmsg_device *rpdev, + void *buf, int len, void *priv, u32 src) +{ + struct rpmsg_wwan_dev *rpwwan = priv; + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, buf, len); + wwan_port_rx(rpwwan->wwan_port, skb); + return 0; +} + +static int rpmsg_wwan_ctrl_start(struct wwan_port *port) +{ + struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port); + struct rpmsg_channel_info chinfo = { + .src = rpwwan->rpdev->src, + .dst = RPMSG_ADDR_ANY, + }; + + strncpy(chinfo.name, rpwwan->rpdev->id.name, RPMSG_NAME_SIZE); + rpwwan->ept = rpmsg_create_ept(rpwwan->rpdev, rpmsg_wwan_ctrl_callback, + rpwwan, chinfo); + if (!rpwwan->ept) + return -EREMOTEIO; + + return 0; +} + +static void rpmsg_wwan_ctrl_stop(struct wwan_port *port) +{ + struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port); + + rpmsg_destroy_ept(rpwwan->ept); + rpwwan->ept = NULL; +} + +static int rpmsg_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) +{ + struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port); + int ret; + + ret = rpmsg_trysend(rpwwan->ept, skb->data, skb->len); + if (ret) + return ret; + + consume_skb(skb); + return 0; +} + +static const struct wwan_port_ops rpmsg_wwan_pops = { + .start = rpmsg_wwan_ctrl_start, + .stop = rpmsg_wwan_ctrl_stop, + .tx = rpmsg_wwan_ctrl_tx, +}; + +static struct device *rpmsg_wwan_find_parent(struct device *dev) +{ + /* Select first platform device as parent for the WWAN ports. + * On Qualcomm platforms this is usually the platform device that + * represents the modem remote processor. This might need to be + * adjusted when adding device IDs for other platforms. 
+ */ + for (dev = dev->parent; dev; dev = dev->parent) { + if (dev_is_platform(dev)) + return dev; + } + return NULL; +} + +static int rpmsg_wwan_ctrl_probe(struct rpmsg_device *rpdev) +{ + struct rpmsg_wwan_dev *rpwwan; + struct wwan_port *port; + struct device *parent; + + parent = rpmsg_wwan_find_parent(&rpdev->dev); + if (!parent) + return -ENODEV; + + rpwwan = devm_kzalloc(&rpdev->dev, sizeof(*rpwwan), GFP_KERNEL); + if (!rpwwan) + return -ENOMEM; + + rpwwan->rpdev = rpdev; + dev_set_drvdata(&rpdev->dev, rpwwan); + + /* Register as a wwan port, id.driver_data contains wwan port type */ + port = wwan_create_port(parent, rpdev->id.driver_data, + &rpmsg_wwan_pops, rpwwan); + if (IS_ERR(port)) + return PTR_ERR(port); + + rpwwan->wwan_port = port; + + return 0; +}; + +static void rpmsg_wwan_ctrl_remove(struct rpmsg_device *rpdev) +{ + struct rpmsg_wwan_dev *rpwwan = dev_get_drvdata(&rpdev->dev); + + wwan_remove_port(rpwwan->wwan_port); +} + +static const struct rpmsg_device_id rpmsg_wwan_ctrl_id_table[] = { + /* RPMSG channels for Qualcomm SoCs with integrated modem */ + { .name = "DATA5_CNTL", .driver_data = WWAN_PORT_QMI }, + { .name = "DATA4", .driver_data = WWAN_PORT_AT }, + {}, +}; +MODULE_DEVICE_TABLE(rpmsg, rpmsg_wwan_ctrl_id_table); + +static struct rpmsg_driver rpmsg_wwan_ctrl_driver = { + .drv.name = "rpmsg_wwan_ctrl", + .id_table = rpmsg_wwan_ctrl_id_table, + .probe = rpmsg_wwan_ctrl_probe, + .remove = rpmsg_wwan_ctrl_remove, +}; +module_rpmsg_driver(rpmsg_wwan_ctrl_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("RPMSG WWAN CTRL Driver"); +MODULE_AUTHOR("Stephan Gerhold "); -- cgit v1.2.3 From 1b134d8d756a944deb5f8cc14e7ea6573730442f Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Tue, 22 Jun 2021 16:21:40 +0200 Subject: MAINTAINERS: network: add entry for WWAN This patch adds maintainer info for drivers/net/wwan subdir, including WWAN core and drivers. Adding Sergey and myself as maintainers and Johannes as reviewer. Signed-off-by: Loic Poulain Acked-by: Sergey Ryazanov Signed-off-by: David S. Miller --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 395b052635ca..cc375fda89d0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -19803,6 +19803,16 @@ F: Documentation/core-api/workqueue.rst F: include/linux/workqueue.h F: kernel/workqueue.c +WWAN DRIVERS +M: Loic Poulain +M: Sergey Ryazanov +R: Johannes Berg +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/wwan/ +F: include/linux/wwan.h +F: include/uapi/linux/wwan.h + X-POWERS AXP288 PMIC DRIVERS M: Hans de Goede S: Maintained -- cgit v1.2.3