Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 3
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 4
-rw-r--r--  drivers/scsi/aacraid/linit.c | 2
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 2
-rw-r--r--  drivers/scsi/cxlflash/Kconfig | 15
-rw-r--r--  drivers/scsi/cxlflash/Makefile | 5
-rw-r--r--  drivers/scsi/cxlflash/backend.h | 48
-rw-r--r--  drivers/scsi/cxlflash/common.h | 340
-rw-r--r--  drivers/scsi/cxlflash/cxl_hw.c | 177
-rw-r--r--  drivers/scsi/cxlflash/lunmgt.c | 278
-rw-r--r--  drivers/scsi/cxlflash/main.c | 3970
-rw-r--r--  drivers/scsi/cxlflash/main.h | 129
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.c | 1399
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.h | 72
-rw-r--r--  drivers/scsi/cxlflash/sislite.h | 560
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 2218
-rw-r--r--  drivers/scsi/cxlflash/superpipe.h | 150
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 1336
-rw-r--r--  drivers/scsi/cxlflash/vlun.h | 82
-rw-r--r--  drivers/scsi/elx/efct/efct_driver.c | 2
-rw-r--r--  drivers/scsi/fnic/fdls_disc.c | 57
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 5
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 3
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 28
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 6
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 4
-rw-r--r--  drivers/scsi/hpsa.c | 19
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 4
-rw-r--r--  drivers/scsi/ips.c | 8
-rw-r--r--  drivers/scsi/isci/init.c | 14
-rw-r--r--  drivers/scsi/isci/isci.h | 7
-rw-r--r--  drivers/scsi/isci/remote_device.h | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 60
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 4
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 91
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 26
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 35
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 2
-rw-r--r--  drivers/scsi/megaraid.c | 10
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 10
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 10
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 4
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_image.h | 8
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_init.h | 11
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 21
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_tool.h | 1
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 20
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h | 34
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_app.c | 129
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 159
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 101
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_transport.c | 8
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 54
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 23
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 79
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 279
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h | 49
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 8
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 10
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 1
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 4
-rw-r--r--  drivers/scsi/scsi.c | 28
-rw-r--r--  drivers/scsi/scsi_debug.c | 928
-rw-r--r--  drivers/scsi/scsi_error.c | 19
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 5
-rw-r--r--  drivers/scsi/scsi_sysctl.c | 4
-rw-r--r--  drivers/scsi/st.c | 80
-rw-r--r--  drivers/scsi/st.h | 6
-rw-r--r--  drivers/scsi/storvsc_drv.c | 4
86 files changed, 2026 insertions(+), 11402 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 37c24ffea65c..5a3c670aec27 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -303,9 +303,9 @@ if SCSI_LOWLEVEL && SCSI
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
depends on SCSI && INET
+ select CRC32
select CRYPTO
select CRYPTO_MD5
- select CRYPTO_CRC32C
select SCSI_ISCSI_ATTRS
help
The iSCSI Driver provides a host with the ability to access storage
@@ -336,7 +336,6 @@ source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
-source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1313ddf2fd1a..16de3e41f94c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
-obj-$(CONFIG_CXLFLASH) += cxlflash/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index abf6a82b74af..0be719f38377 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3221,8 +3221,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
break;
}
fallthrough;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 91170a67cc91..4b12e6dd8f07 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -2029,7 +2029,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
-static struct pci_error_handlers aac_pci_err_handler = {
+static const struct pci_error_handlers aac_pci_err_handler = {
.error_detected = aac_pci_error_detected,
.mmio_enabled = aac_pci_mmio_enabled,
.slot_reset = aac_pci_slot_reset,
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index e50a3dbf9de3..ef21b85cf014 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -591,7 +591,7 @@ datadir_t acornscsi_datadirection(int command)
case CHANGE_DEFINITION: case COMPARE: case COPY:
case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
- case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE_6:
case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
case WRITE_6: case WRITE_10: case WRITE_VERIFY:
case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 76a1e373386e..a8b399ed98fc 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5776,7 +5776,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
-static struct pci_error_handlers beiscsi_eeh_handlers = {
+static const struct pci_error_handlers beiscsi_eeh_handlers = {
.error_detected = beiscsi_eeh_err_detected,
.slot_reset = beiscsi_eeh_reset,
.resume = beiscsi_eeh_resume,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6aa1d3a7e24b..f015c53de0d4 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1642,7 +1642,7 @@ MODULE_DEVICE_TABLE(pci, bfad_id_table);
/*
* PCI error recovery handlers.
*/
-static struct pci_error_handlers bfad_err_handler = {
+static const struct pci_error_handlers bfad_err_handler = {
.error_detected = bfad_pci_error_detected,
.slot_reset = bfad_pci_slot_reset,
.mmio_enabled = bfad_pci_mmio_enabled,
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 9a3f2ed050bd..79c8dafdd49e 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1162,7 +1162,7 @@ err_resume_exit:
dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}
-static struct pci_error_handlers csio_err_handler = {
+static const struct pci_error_handlers csio_err_handler = {
.error_detected = csio_pci_error_detected,
.slot_reset = csio_pci_slot_reset,
.resume = csio_pci_resume,
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
deleted file mode 100644
index c424d36e89a6..000000000000
--- a/drivers/scsi/cxlflash/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IBM CXL-attached Flash Accelerator SCSI Driver
-#
-
-config CXLFLASH
- tristate "Support for IBM CAPI Flash (DEPRECATED)"
- depends on PCI && SCSI && (CXL || OCXL) && EEH
- select IRQ_POLL
- help
- The cxlflash driver is deprecated and will be removed in a future
- kernel release.
-
- Allows CAPI Accelerated IO to Flash
- If unsure, say N.
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
deleted file mode 100644
index fd2f0dd9daf9..000000000000
--- a/drivers/scsi/cxlflash/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
-cxlflash-$(CONFIG_CXL) += cxl_hw.o
-cxlflash-$(CONFIG_OCXL) += ocxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
deleted file mode 100644
index 181e0445ed42..000000000000
--- a/drivers/scsi/cxlflash/backend.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#ifndef _CXLFLASH_BACKEND_H
-#define _CXLFLASH_BACKEND_H
-
-extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
-extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
-
-struct cxlflash_backend_ops {
- struct module *module;
- void __iomem * (*psa_map)(void *ctx_cookie);
- void (*psa_unmap)(void __iomem *addr);
- int (*process_element)(void *ctx_cookie);
- int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
- void *cookie, char *name);
- void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
- u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
- int (*start_context)(void *ctx_cookie);
- int (*stop_context)(void *ctx_cookie);
- int (*afu_reset)(void *ctx_cookie);
- void (*set_master)(void *ctx_cookie);
- void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
- void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
- int (*release_context)(void *ctx_cookie);
- void (*perst_reloads_same_image)(void *afu_cookie, bool image);
- ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
- size_t count);
- int (*allocate_afu_irqs)(void *ctx_cookie, int num);
- void (*free_afu_irqs)(void *ctx_cookie);
- void * (*create_afu)(struct pci_dev *dev);
- void (*destroy_afu)(void *afu_cookie);
- struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
- int *fd);
- void * (*fops_get_context)(struct file *file);
- int (*start_work)(void *ctx_cookie, u64 irqs);
- int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
- int (*fd_release)(struct inode *inode, struct file *file);
-};
-
-#endif /* _CXLFLASH_BACKEND_H */
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
deleted file mode 100644
index de6229e27b48..000000000000
--- a/drivers/scsi/cxlflash/common.h
+++ /dev/null
@@ -1,340 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_COMMON_H
-#define _CXLFLASH_COMMON_H
-
-#include <linux/async.h>
-#include <linux/cdev.h>
-#include <linux/irq_poll.h>
-#include <linux/list.h>
-#include <linux/rwsem.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-extern const struct file_operations cxlflash_cxl_fops;
-
-#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
-#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */
-#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */
-
-#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK))
-#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1))
-
-#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */
-#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */
-#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */
-
-#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */
-#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */
-#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants
- * max_sectors
- * in units of
- * 512 byte
- * sectors
- */
-
-#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
-
-/* AFU command retry limit */
-#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */
-
-/* Command management definitions */
-#define CXLFLASH_MAX_CMDS 256
-#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
-
-/* RRQ for master issued cmds */
-#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* SQ for master issued cmds */
-#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* Hardware queue definitions */
-#define CXLFLASH_DEF_HWQS 1
-#define CXLFLASH_MAX_HWQS 8
-#define PRIMARY_HWQ 0
-
-
-static inline void check_sizes(void)
-{
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK);
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS);
-}
-
-/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
-#define CMD_BUFSIZE SIZE_4K
-
-enum cxlflash_lr_state {
- LINK_RESET_INVALID,
- LINK_RESET_REQUIRED,
- LINK_RESET_COMPLETE
-};
-
-enum cxlflash_init_state {
- INIT_STATE_NONE,
- INIT_STATE_PCI,
- INIT_STATE_AFU,
- INIT_STATE_SCSI,
- INIT_STATE_CDEV
-};
-
-enum cxlflash_state {
- STATE_PROBING, /* Initial state during probe */
- STATE_PROBED, /* Temporary state, probe completed but EEH occurred */
- STATE_NORMAL, /* Normal running state, everything good */
- STATE_RESET, /* Reset state, trying to reset/recover */
- STATE_FAILTERM /* Failed/terminating state, error out users/threads */
-};
-
-enum cxlflash_hwq_mode {
- HWQ_MODE_RR, /* Roundrobin (default) */
- HWQ_MODE_TAG, /* Distribute based on block MQ tag */
- HWQ_MODE_CPU, /* CPU affinity */
- MAX_HWQ_MODE
-};
-
-/*
- * Each context has its own set of resource handles that is visible
- * only from that context.
- */
-
-struct cxlflash_cfg {
- struct afu *afu;
-
- const struct cxlflash_backend_ops *ops;
- struct pci_dev *dev;
- struct pci_device_id *dev_id;
- struct Scsi_Host *host;
- int num_fc_ports;
- struct cdev cdev;
- struct device *chardev;
-
- ulong cxlflash_regs_pci;
-
- struct work_struct work_q;
- enum cxlflash_init_state init_state;
- enum cxlflash_lr_state lr_state;
- int lr_port;
- atomic_t scan_host_needed;
-
- void *afu_cookie;
-
- atomic_t recovery_threads;
- struct mutex ctx_recovery_mutex;
- struct mutex ctx_tbl_list_mutex;
- struct rw_semaphore ioctl_rwsem;
- struct ctx_info *ctx_tbl[MAX_CONTEXT];
- struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
- struct file_operations cxl_fops;
-
- /* Parameters that are LUN table related */
- int last_lun_index[MAX_FC_PORTS];
- int promote_lun_index;
- struct list_head lluns; /* list of llun_info structs */
-
- wait_queue_head_t tmf_waitq;
- spinlock_t tmf_slock;
- bool tmf_active;
- bool ws_unmap; /* Write-same unmap supported */
- wait_queue_head_t reset_waitq;
- enum cxlflash_state state;
- async_cookie_t async_reset_cookie;
-};
-
-struct afu_cmd {
- struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
- struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- struct afu *parent;
- struct scsi_cmnd *scp;
- struct completion cevent;
- struct list_head queue;
- u32 hwq_index;
-
- u8 cmd_tmf:1,
- cmd_aborted:1;
-
- struct list_head list; /* Pending commands link */
-
- /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
- * However for performance reasons the IOARCB/IOASA should be
- * cache line aligned.
- */
-} __aligned(cache_line_size());
-
-static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
-{
- return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
-}
-
-static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- INIT_LIST_HEAD(&afuc->queue);
- return afuc;
-}
-
-static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- memset(afuc, 0, sizeof(*afuc));
- return sc_to_afuci(sc);
-}
-
-struct hwq {
- /* Stuff requiring alignment go first. */
- struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
-
- /* Beware of alignment till here. Preferably introduce new
- * fields after this point
- */
- struct afu *afu;
- void *ctx_cookie;
- struct sisl_host_map __iomem *host_map; /* MC host map */
- struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
- ctx_hndl_t ctx_hndl; /* master's context handle */
- u32 index; /* Index of this hwq */
- int num_irqs; /* Number of interrupts requested for context */
- struct list_head pending_cmds; /* Commands pending completion */
-
- atomic_t hsq_credits;
- spinlock_t hsq_slock; /* Hardware send queue lock */
- struct sisl_ioarcb *hsq_start;
- struct sisl_ioarcb *hsq_end;
- struct sisl_ioarcb *hsq_curr;
- spinlock_t hrrq_slock;
- u64 *hrrq_start;
- u64 *hrrq_end;
- u64 *hrrq_curr;
- bool toggle;
- bool hrrq_online;
-
- s64 room;
-
- struct irq_poll irqpoll;
-} __aligned(cache_line_size());
-
-struct afu {
- struct hwq hwqs[CXLFLASH_MAX_HWQS];
- int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
- int (*context_reset)(struct hwq *hwq);
-
- /* AFU HW */
- struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
-
- atomic_t cmds_active; /* Number of currently active AFU commands */
- struct mutex sync_active; /* Mutex to serialize AFU commands */
- u64 hb;
- u32 internal_lun; /* User-desired LUN mode for this AFU */
-
- u32 num_hwqs; /* Number of hardware queues */
- u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */
- enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */
- u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */
-
- char version[16];
- u64 interface_version;
-
- u32 irqpoll_weight;
- struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
-};
-
-static inline struct hwq *get_hwq(struct afu *afu, u32 index)
-{
- WARN_ON(index >= CXLFLASH_MAX_HWQS);
-
- return &afu->hwqs[index];
-}
-
-static inline bool afu_is_irqpoll_enabled(struct afu *afu)
-{
- return !!afu->irqpoll_weight;
-}
-
-static inline bool afu_has_cap(struct afu *afu, u64 cap)
-{
- u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
-
- return afu_cap & cap;
-}
-
-static inline bool afu_is_ocxl_lisn(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
-}
-
-static inline bool afu_is_afu_debug(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
-}
-
-static inline bool afu_is_lun_provision(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
-}
-
-static inline bool afu_is_sq_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
-}
-
-static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
-}
-
-static inline u64 lun_to_lunid(u64 lun)
-{
- __be64 lun_id;
-
- int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
- return be64_to_cpu(lun_id);
-}
-
-static inline struct fc_port_bank __iomem *get_fc_port_bank(
- struct cxlflash_cfg *cfg, int i)
-{
- struct afu *afu = cfg->afu;
-
- return &afu->afu_map->global.bank[CHAN2PORTBANK(i)];
-}
-
-static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0];
-}
-
-static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0];
-}
-
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode);
-void cxlflash_list_init(void);
-void cxlflash_term_global_luns(void);
-void cxlflash_free_errpage(void);
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg);
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_COMMON_H */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
deleted file mode 100644
index b814130f3f5c..000000000000
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <misc/cxl.h>
-
-#include "backend.h"
-
-/*
- * The following routines map the cxlflash backend operations to existing CXL
- * kernel API function and are largely simple shims that provide an abstraction
- * for converting generic context and AFU cookies into cxl_context or cxl_afu
- * pointers.
- */
-
-static void __iomem *cxlflash_psa_map(void *ctx_cookie)
-{
- return cxl_psa_map(ctx_cookie);
-}
-
-static void cxlflash_psa_unmap(void __iomem *addr)
-{
- cxl_psa_unmap(addr);
-}
-
-static int cxlflash_process_element(void *ctx_cookie)
-{
- return cxl_process_element(ctx_cookie);
-}
-
-static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
-}
-
-static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- cxl_unmap_afu_irq(ctx_cookie, num, cookie);
-}
-
-static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- /* Dummy fop for cxl */
- return 0;
-}
-
-static int cxlflash_start_context(void *ctx_cookie)
-{
- return cxl_start_context(ctx_cookie, 0, NULL);
-}
-
-static int cxlflash_stop_context(void *ctx_cookie)
-{
- return cxl_stop_context(ctx_cookie);
-}
-
-static int cxlflash_afu_reset(void *ctx_cookie)
-{
- return cxl_afu_reset(ctx_cookie);
-}
-
-static void cxlflash_set_master(void *ctx_cookie)
-{
- cxl_set_master(ctx_cookie);
-}
-
-static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_get_context(dev);
-}
-
-static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_dev_context_init(dev);
-}
-
-static int cxlflash_release_context(void *ctx_cookie)
-{
- return cxl_release_context(ctx_cookie);
-}
-
-static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- cxl_perst_reloads_same_image(afu_cookie, image);
-}
-
-static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
- void *buf, size_t count)
-{
- return cxl_read_adapter_vpd(dev, buf, count);
-}
-
-static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return cxl_allocate_afu_irqs(ctx_cookie, num);
-}
-
-static void cxlflash_free_afu_irqs(void *ctx_cookie)
-{
- cxl_free_afu_irqs(ctx_cookie);
-}
-
-static void *cxlflash_create_afu(struct pci_dev *dev)
-{
- return cxl_pci_to_afu(dev);
-}
-
-static void cxlflash_destroy_afu(void *afu)
-{
- /* Dummy fop for cxl */
-}
-
-static struct file *cxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- return cxl_get_fd(ctx_cookie, fops, fd);
-}
-
-static void *cxlflash_fops_get_context(struct file *file)
-{
- return cxl_fops_get_context(file);
-}
-
-static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
-{
- struct cxl_ioctl_start_work work = { 0 };
-
- work.num_interrupts = irqs;
- work.flags = CXL_START_WORK_NUM_IRQS;
-
- return cxl_start_work(ctx_cookie, &work);
-}
-
-static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
-{
- return cxl_fd_mmap(file, vm);
-}
-
-static int cxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return cxl_fd_release(inode, file);
-}
-
-const struct cxlflash_backend_ops cxlflash_cxl_ops = {
- .module = THIS_MODULE,
- .psa_map = cxlflash_psa_map,
- .psa_unmap = cxlflash_psa_unmap,
- .process_element = cxlflash_process_element,
- .map_afu_irq = cxlflash_map_afu_irq,
- .unmap_afu_irq = cxlflash_unmap_afu_irq,
- .get_irq_objhndl = cxlflash_get_irq_objhndl,
- .start_context = cxlflash_start_context,
- .stop_context = cxlflash_stop_context,
- .afu_reset = cxlflash_afu_reset,
- .set_master = cxlflash_set_master,
- .get_context = cxlflash_get_context,
- .dev_context_init = cxlflash_dev_context_init,
- .release_context = cxlflash_release_context,
- .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
- .read_adapter_vpd = cxlflash_read_adapter_vpd,
- .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
- .free_afu_irqs = cxlflash_free_afu_irqs,
- .create_afu = cxlflash_create_afu,
- .destroy_afu = cxlflash_destroy_afu,
- .get_fd = cxlflash_get_fd,
- .fops_get_context = cxlflash_fops_get_context,
- .start_work = cxlflash_start_work,
- .fd_mmap = cxlflash_fd_mmap,
- .fd_release = cxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
deleted file mode 100644
index 962c797fda07..000000000000
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/unaligned.h>
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * create_local() - allocate and initialize a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated local llun_info structure on success, NULL on failure
- */
-static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
-
- lli = kzalloc(sizeof(*lli), GFP_KERNEL);
- if (unlikely(!lli)) {
- dev_err(dev, "%s: could not allocate lli\n", __func__);
- goto out;
- }
-
- lli->sdev = sdev;
- lli->host_no = sdev->host->host_no;
- lli->in_table = false;
-
- memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return lli;
-}
-
-/**
- * create_global() - allocate and initialize a global LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated global glun_info structure on success, NULL on failure
- */
-static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = NULL;
-
- gli = kzalloc(sizeof(*gli), GFP_KERNEL);
- if (unlikely(!gli)) {
- dev_err(dev, "%s: could not allocate gli\n", __func__);
- goto out;
- }
-
- mutex_init(&gli->mutex);
- memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return gli;
-}
-
-/**
- * lookup_local() - find a local LUN information structure by WWID
- * @cfg: Internal structure associated with the host.
- * @wwid: WWID associated with LUN.
- *
- * Return: Found local lun_info structure on success, NULL on failure
- */
-static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid)
-{
- struct llun_info *lli, *temp;
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
- if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return lli;
-
- return NULL;
-}
-
-/**
- * lookup_global() - find a global LUN information structure by WWID
- * @wwid: WWID associated with LUN.
- *
- * Return: Found global lun_info structure on success, NULL on failure
- */
-static struct glun_info *lookup_global(u8 *wwid)
-{
- struct glun_info *gli, *temp;
-
- list_for_each_entry_safe(gli, temp, &global.gluns, list)
- if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return gli;
-
- return NULL;
-}
-
-/**
- * find_and_create_lun() - find or create a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: WWID associated with LUN.
- *
- * The LUN is kept both in a local list (per adapter) and in a global list
- * (across all adapters). Certain attributes of the LUN are local to the
- * adapter (such as index, port selection mask, etc.).
- *
- * The block allocation map is shared across all adapters (i.e. associated
- * wih the global list). Since different attributes are associated with
- * the per adapter and global entries, allocate two separate structures for each
- * LUN (one local, one global).
- *
- * Keep a pointer back from the local to the global entry.
- *
- * This routine assumes the caller holds the global mutex.
- *
- * Return: Found/Allocated local lun_info structure on success, NULL on failure
- */
-static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- struct glun_info *gli = NULL;
-
- if (unlikely(!wwid))
- goto out;
-
- lli = lookup_local(cfg, wwid);
- if (lli)
- goto out;
-
- lli = create_local(sdev, wwid);
- if (unlikely(!lli))
- goto out;
-
- gli = lookup_global(wwid);
- if (gli) {
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
- goto out;
- }
-
- gli = create_global(sdev, wwid);
- if (unlikely(!gli)) {
- kfree(lli);
- lli = NULL;
- goto out;
- }
-
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
-
- list_add(&gli->list, &global.gluns);
-
-out:
- dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli);
- return lli;
-}
-
-/**
- * cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- list_del(&lli->list);
- kfree(lli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_list_init() - initializes the global LUN list
- */
-void cxlflash_list_init(void)
-{
- INIT_LIST_HEAD(&global.gluns);
- mutex_init(&global.mutex);
- global.err_page = NULL;
-}
-
-/**
- * cxlflash_term_global_luns() - frees resources associated with global LUN list
- */
-void cxlflash_term_global_luns(void)
-{
- struct glun_info *gli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(gli, temp, &global.gluns, list) {
- list_del(&gli->list);
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- kfree(gli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_manage_lun() - handles LUN management activities
- * @sdev: SCSI device associated with LUN.
- * @arg: Manage ioctl data structure.
- *
- * This routine is used to notify the driver about a LUN's WWID and associate
- * SCSI devices (sdev) with a global LUN instance. Additionally it serves to
- * change a LUN's operating mode: legacy or superpipe.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_manage_lun(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_manage_lun *manage = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- int rc = 0;
- u64 flags = manage->hdr.flags;
- u32 chan = sdev->channel;
-
- mutex_lock(&global.mutex);
- lli = find_and_create_lun(sdev, manage->wwid);
- dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n",
- __func__, get_unaligned_be64(&manage->wwid[0]),
- get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli);
- if (unlikely(!lli)) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
- /*
- * Update port selection mask based upon channel, store off LUN
- * in unpacked, AFU-friendly format, and hang LUN reference in
- * the sdev.
- */
- lli->port_sel |= CHAN2PORTMASK(chan);
- lli->lun_id[chan] = lun_to_lunid(sdev->lun);
- sdev->hostdata = lli;
- } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
- if (lli->parent->mode != MODE_NONE)
- rc = -EBUSY;
- else {
- /*
- * Clean up local LUN for this port and reset table
- * tracking when no more references exist.
- */
- sdev->hostdata = NULL;
- lli->port_sel &= ~CHAN2PORTMASK(chan);
- if (lli->port_sel == 0U)
- lli->in_table = false;
- }
- }
-
- dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n",
- __func__, lli->port_sel, chan, lli->lun_id[chan]);
-
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
deleted file mode 100644
index ae626e389c8b..000000000000
--- a/drivers/scsi/cxlflash/main.c
+++ /dev/null
@@ -1,3970 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <linux/unaligned.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "main.h"
-#include "sislite.h"
-#include "common.h"
-
-MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
-MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
-MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
-static const struct class cxlflash_class = {
- .name = "cxlflash",
- .devnode = cxlflash_devnode,
-};
-
-static u32 cxlflash_major;
-static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
-
-/**
- * process_cmd_err() - command error handler
- * @cmd: AFU command that experienced the error.
- * @scp: SCSI command associated with the AFU command in error.
- *
- * Translates error bits from AFU command to SCSI command results.
- */
-static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
-{
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioasa *ioasa;
- u32 resid;
-
- ioasa = &(cmd->sa);
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
- resid = ioasa->resid;
- scsi_set_resid(scp, resid);
- dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
- __func__, cmd, scp, resid);
- }
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
- dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n",
- __func__, cmd, scp);
- scp->result = (DID_ERROR << 16);
- }
-
- dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
- "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
- ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
- ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
-
- if (ioasa->rc.scsi_rc) {
- /* We have a SCSI status */
- if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
- memcpy(scp->sense_buffer, ioasa->sense_data,
- SISL_SENSE_DATA_LEN);
- scp->result = ioasa->rc.scsi_rc;
- } else
- scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
- }
-
- /*
- * We encountered an error. Set scp->result based on nature
- * of error.
- */
- if (ioasa->rc.fc_rc) {
- /* We have an FC status */
- switch (ioasa->rc.fc_rc) {
- case SISL_FC_RC_LINKDOWN:
- scp->result = (DID_REQUEUE << 16);
- break;
- case SISL_FC_RC_RESID:
- /* This indicates an FCP resid underrun */
- if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
- /* If the SISL_RC_FLAGS_OVERRUN flag was set,
- * then we will handle this error else where.
- * If not then we must handle it here.
- * This is probably an AFU bug.
- */
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_FC_RC_RESIDERR:
- /* Resid mismatch between adapter and device */
- case SISL_FC_RC_TGTABORT:
- case SISL_FC_RC_ABORTOK:
- case SISL_FC_RC_ABORTFAIL:
- case SISL_FC_RC_NOLOGI:
- case SISL_FC_RC_ABORTPEND:
- case SISL_FC_RC_WRABORTPEND:
- case SISL_FC_RC_NOEXP:
- case SISL_FC_RC_INUSE:
- scp->result = (DID_ERROR << 16);
- break;
- }
- }
-
- if (ioasa->rc.afu_rc) {
- /* We have an AFU error */
- switch (ioasa->rc.afu_rc) {
- case SISL_AFU_RC_NO_CHANNELS:
- scp->result = (DID_NO_CONNECT << 16);
- break;
- case SISL_AFU_RC_DATA_DMA_ERR:
- switch (ioasa->afu_extra) {
- case SISL_AFU_DMA_ERR_PAGE_IN:
- /* Retry */
- scp->result = (DID_IMM_RETRY << 16);
- break;
- case SISL_AFU_DMA_ERR_INVALID_EA:
- default:
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_AFU_RC_OUT_OF_DATA_BUFS:
- /* Retry */
- scp->result = (DID_ERROR << 16);
- break;
- default:
- scp->result = (DID_ERROR << 16);
- }
- }
-}
-
-/**
- * cmd_complete() - command completion handler
- * @cmd: AFU command that has completed.
- *
- * For SCSI commands this routine prepares and submits commands that have
- * either completed or timed out to the SCSI stack. For internal commands
- * (TMF or AFU), this routine simply notifies the originator that the
- * command has completed.
- */
-static void cmd_complete(struct afu_cmd *cmd)
-{
- struct scsi_cmnd *scp;
- ulong lock_flags;
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- if (cmd->scp) {
- scp = cmd->scp;
- if (unlikely(cmd->sa.ioasc))
- process_cmd_err(cmd, scp);
- else
- scp->result = (DID_OK << 16);
-
- dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
- __func__, scp, scp->result, cmd->sa.ioasc);
- scsi_done(scp);
- } else if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- } else
- complete(&cmd->cevent);
-}
-
-/**
- * flush_pending_cmds() - flush all pending commands on this hardware queue
- * @hwq: Hardware queue to flush.
- *
- * The hardware send queue lock associated with this hardware queue must be
- * held when calling this routine.
- */
-static void flush_pending_cmds(struct hwq *hwq)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct afu_cmd *cmd, *tmp;
- struct scsi_cmnd *scp;
- ulong lock_flags;
-
- list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
- /* Bypass command when on a doneq, cmd_complete() will handle */
- if (!list_empty(&cmd->queue))
- continue;
-
- list_del(&cmd->list);
-
- if (cmd->scp) {
- scp = cmd->scp;
- scp->result = (DID_IMM_RETRY << 16);
- scsi_done(scp);
- } else {
- cmd->cmd_aborted = true;
-
- if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock,
- lock_flags);
- } else
- complete(&cmd->cevent);
- }
- }
-}
-
-/**
- * context_reset() - reset context via specified register
- * @hwq: Hardware queue owning the context to be reset.
- * @reset_reg: MMIO register to perform reset.
- *
- * When the reset is successful, the SISLite specification guarantees that
- * the AFU has aborted all currently pending I/O. Accordingly, these commands
- * must be flushed.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = -ETIMEDOUT;
- int nretry = 0;
- u64 val = 0x1;
- ulong lock_flags;
-
- dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- writeq_be(val, reset_reg);
- do {
- val = readq_be(reset_reg);
- if ((val & 0x1) == 0x0) {
- rc = 0;
- break;
- }
-
- /* Double delay each time */
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- if (!rc)
- flush_pending_cmds(hwq);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
- __func__, rc, val, nretry);
- return rc;
-}
-
-/**
- * context_reset_ioarrin() - reset context via IOARRIN register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_ioarrin(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->ioarrin);
-}
-
-/**
- * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_sq(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
-}
-
-/**
- * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- s64 room;
- ulong lock_flags;
-
- /*
- * To avoid the performance penalty of MMIO, spread the update of
- * 'room' over multiple commands.
- */
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- if (--hwq->room < 0) {
- room = readq_be(&hwq->host_map->cmd_room);
- if (room <= 0) {
- dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
- "0x%02X, room=0x%016llX\n",
- __func__, cmd->rcb.cdb[0], room);
- hwq->room = 0;
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- hwq->room = room - 1;
- }
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
-out:
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
- __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
- return rc;
-}
-
-/**
- * send_cmd_sq() - sends an AFU command via SQ ring
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- int newval;
- ulong lock_flags;
-
- newval = atomic_dec_if_positive(&hwq->hsq_credits);
- if (newval <= 0) {
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
- cmd->rcb.ioasa = &cmd->sa;
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- *hwq->hsq_curr = cmd->rcb;
- if (hwq->hsq_curr < hwq->hsq_end)
- hwq->hsq_curr++;
- else
- hwq->hsq_curr = hwq->hsq_start;
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-out:
- dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
- "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
- cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
- readq_be(&hwq->host_map->sq_head),
- readq_be(&hwq->host_map->sq_tail));
- return rc;
-}
-
-/**
- * wait_resp() - polls for a response or timeout to a sent AFU command
- * @afu: AFU associated with the host.
- * @cmd: AFU command that was sent.
- *
- * Return: 0 on success, -errno on failure
- */
-static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
-
- timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- rc = -ETIMEDOUT;
-
- if (cmd->cmd_aborted)
- rc = -EAGAIN;
-
- if (unlikely(cmd->sa.ioasc != 0)) {
- dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
- __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
- rc = -EIO;
- }
-
- return rc;
-}
-
-/**
- * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- * @afu: SCSI command to send.
- *
- * Hashes a command based upon the hardware queue mode.
- *
- * Return: Trusted index of target hardware queue
- */
-static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
- struct afu *afu)
-{
- u32 tag;
- u32 hwq = 0;
-
- if (afu->num_hwqs == 1)
- return 0;
-
- switch (afu->hwq_mode) {
- case HWQ_MODE_RR:
- hwq = afu->hwq_rr_count++ % afu->num_hwqs;
- break;
- case HWQ_MODE_TAG:
- tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
- hwq = blk_mq_unique_tag_to_hwq(tag);
- break;
- case HWQ_MODE_CPU:
- hwq = smp_processor_id() % afu->num_hwqs;
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- return hwq;
-}
-
-/**
- * send_tmf() - sends a Task Management Function (TMF)
- * @cfg: Internal structure associated with the host.
- * @sdev: SCSI device destined for TMF.
- * @tmfcmd: TMF command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
- */
-static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
- u64 tmfcmd)
-{
- struct afu *afu = cfg->afu;
- struct afu_cmd *cmd = NULL;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- bool needs_deletion = false;
- char *buf = NULL;
- ulong lock_flags;
- int rc = 0;
- ulong to;
-
- buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
- INIT_LIST_HEAD(&cmd->queue);
-
- /* When Task Management Function is active do not send another */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- cfg->tmf_active = true;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- cmd->parent = afu;
- cmd->cmd_tmf = true;
- cmd->hwq_index = hwq->index;
-
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
- cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN |
- SISL_REQ_FLAGS_TMF_CMD);
- memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- goto out;
- }
-
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- to = msecs_to_jiffies(5000);
- to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock,
- to);
- if (!to) {
- dev_err(dev, "%s: TMF timed out\n", __func__);
- rc = -ETIMEDOUT;
- needs_deletion = true;
- } else if (cmd->cmd_aborted) {
- dev_err(dev, "%s: TMF aborted\n", __func__);
- rc = -EAGAIN;
- } else if (cmd->sa.ioasc) {
- dev_err(dev, "%s: TMF failed ioasc=%08x\n",
- __func__, cmd->sa.ioasc);
- rc = -EIO;
- }
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- if (needs_deletion) {
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- }
-out:
- kfree(buf);
- return rc;
-}
-
-/**
- * cxlflash_driver_info() - information handler for this host driver
- * @host: SCSI host associated with device.
- *
- * Return: A string describing the device.
- */
-static const char *cxlflash_driver_info(struct Scsi_Host *host)
-{
- return CXLFLASH_ADAPTER_NAME;
-}
-
-/**
- * cxlflash_queuecommand() - sends a mid-layer request
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- *
- * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
-{
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = sc_to_afuci(scp);
- struct scatterlist *sg = scsi_sglist(scp);
- int hwq_index = cmd_to_target_hwq(host, scp, afu);
- struct hwq *hwq = get_hwq(afu, hwq_index);
- u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
- ulong lock_flags;
- int rc = 0;
-
- dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n",
- __func__, scp, host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- /*
- * If a Task Management Function is active, wait for it to complete
- * before continuing with regular commands.
- */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active) {
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- switch (cfg->state) {
- case STATE_PROBING:
- case STATE_PROBED:
- case STATE_RESET:
- dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- case STATE_FAILTERM:
- dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
- scp->result = (DID_NO_CONNECT << 16);
- scsi_done(scp);
- rc = 0;
- goto out;
- default:
- atomic_inc(&afu->cmds_active);
- break;
- }
-
- if (likely(sg)) {
- cmd->rcb.data_len = sg->length;
- cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
- }
-
- cmd->scp = scp;
- cmd->parent = afu;
- cmd->hwq_index = hwq_index;
-
- cmd->sa.ioasc = 0;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
- cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- if (scp->sc_data_direction == DMA_TO_DEVICE)
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- cmd->rcb.req_flags = req_flags;
- memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
-
- rc = afu->send_cmd(afu, cmd);
- atomic_dec(&afu->cmds_active);
-out:
- return rc;
-}
-
-/**
- * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
-
- if (pci_channel_offline(pdev))
- wait_event_timeout(cfg->reset_waitq,
- !pci_channel_offline(pdev),
- CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
-}
-
-/**
- * free_mem() - free memory associated with the AFU
- * @cfg: Internal structure associated with the host.
- */
-static void free_mem(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
-
- if (cfg->afu) {
- free_pages((ulong)afu, get_order(sizeof(struct afu)));
- cfg->afu = NULL;
- }
-}
-
-/**
- * cxlflash_reset_sync() - synchronizing point for asynchronous resets
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
-{
- if (cfg->async_reset_cookie == 0)
- return;
-
- /* Wait until all async calls prior to this cookie have completed */
- async_synchronize_cookie(cfg->async_reset_cookie + 1);
- cfg->async_reset_cookie = 0;
-}
-
-/**
- * stop_afu() - stops the AFU command timers and unmaps the MMIO space
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU in a partially allocated/initialized state.
- *
- * Cancels scheduled worker threads, waits for any active internal AFU
- * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
- */
-static void stop_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- int i;
-
- cancel_work_sync(&cfg->work_q);
- if (!current_is_async())
- cxlflash_reset_sync(cfg);
-
- if (likely(afu)) {
- while (atomic_read(&afu->cmds_active))
- ssleep(1);
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- if (likely(afu->afu_map)) {
- cfg->ops->psa_unmap(afu->afu_map);
- afu->afu_map = NULL;
- }
- }
-}
-
-/**
- * term_intr() - disables all AFU interrupts
- * @cfg: Internal structure associated with the host.
- * @level: Depth of allocation, where to begin waterfall tear down.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
- u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- switch (level) {
- case UNMAP_THREE:
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (index == PRIMARY_HWQ)
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
- fallthrough;
- case UNMAP_TWO:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
- fallthrough;
- case UNMAP_ONE:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
- fallthrough;
- case FREE_IRQ:
- cfg->ops->free_afu_irqs(hwq->ctx_cookie);
- fallthrough;
- case UNDO_NOOP:
- /* No action required */
- break;
- }
-}
-
-/**
- * term_mc() - terminates the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- ulong lock_flags;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
- if (index != PRIMARY_HWQ)
- WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
- hwq->ctx_cookie = NULL;
-
- spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
- hwq->hrrq_online = false;
- spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- flush_pending_cmds(hwq);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-}
-
-/**
- * term_afu() - terminates the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_afu(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int k;
-
- /*
- * Tear down is carefully orchestrated to ensure
- * no interrupts can come in when the problem state
- * area is unmapped.
- *
- * 1) Disable all AFU interrupts for each master
- * 2) Unmap the problem state area
- * 3) Stop each master context
- */
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_intr(cfg, UNMAP_THREE, k);
-
- stop_afu(cfg);
-
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_mc(cfg, k);
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * notify_shutdown() - notifies device of pending shutdown
- * @cfg: Internal structure associated with the host.
- * @wait: Whether to wait for shutdown processing to complete.
- *
- * This function will notify the AFU that the adapter is being shutdown
- * and will wait for shutdown processing to complete if wait is true.
- * This notification should flush pending I/Os to the device and halt
- * further I/Os until the next AFU reset is issued and device restarted.
- */
-static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct dev_dependent_vals *ddv;
- __be64 __iomem *fc_port_regs;
- u64 reg, status;
- int i, retry_cnt = 0;
-
- ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
- if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
- return;
-
- if (!afu || !afu->afu_map) {
- dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
- return;
- }
-
- /* Notify AFU */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg |= SISL_FC_SHUTDOWN_NORMAL;
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
- }
-
- if (!wait)
- return;
-
- /* Wait up to 1.5 seconds for shutdown processing to complete */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
- retry_cnt = 0;
-
- while (true) {
- status = readq_be(&fc_port_regs[FC_STATUS / 8]);
- if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
- break;
- if (++retry_cnt >= MC_RETRY_CNT) {
- dev_dbg(dev, "%s: port %d shutdown processing "
- "not yet completed\n", __func__, i);
- break;
- }
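- /* Back off a little longer on each pass */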
- msleep(100 * retry_cnt);
- }
- }
-}
-
-/**
- * cxlflash_get_minor() - gets the first available minor number
- *
- * Return: Unique minor number that can be used to create the character
- * device, or -1 when no minor numbers are available.
- */
-static int cxlflash_get_minor(void)
-{
- int minor;
- long bit;
-
- bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
- if (bit >= CXLFLASH_MAX_ADAPTERS)
- return -1;
-
- minor = bit & MINORMASK;
- set_bit(minor, cxlflash_minor);
- return minor;
-}
-
-/**
- * cxlflash_put_minor() - releases the minor number
- * @minor: Minor number that is no longer needed.
- */
-static void cxlflash_put_minor(int minor)
-{
- clear_bit(minor, cxlflash_minor);
-}
-
-/**
- * cxlflash_release_chrdev() - release the character device for the host
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
-{
- device_unregister(cfg->chardev);
- cfg->chardev = NULL;
- cdev_del(&cfg->cdev);
- cxlflash_put_minor(MINOR(cfg->cdev.dev));
-}
-
-/**
- * cxlflash_remove() - PCI entry point to tear down host
- * @pdev: PCI device associated with the host.
- *
- * Safe to use as a cleanup in partially allocated/initialized state. Note that
- * the reset_waitq is flushed as part of the stop/termination of user contexts.
- */
-static void cxlflash_remove(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- ulong lock_flags;
-
- if (!pci_is_enabled(pdev)) {
- dev_dbg(dev, "%s: Device is disabled\n", __func__);
- return;
- }
-
- /* Yield to running recovery threads before continuing with remove */
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- /* Notify AFU and wait for shutdown processing to complete */
- notify_shutdown(cfg, true);
-
- cfg->state = STATE_FAILTERM;
- cxlflash_stop_term_user_contexts(cfg);
-
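- /*
- * Unwind initialization in reverse order; each state falls through
- * to the teardown steps for the states beneath it.
- */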
- switch (cfg->init_state) {
- case INIT_STATE_CDEV:
- cxlflash_release_chrdev(cfg);
- fallthrough;
- case INIT_STATE_SCSI:
- cxlflash_term_local_luns(cfg);
- scsi_remove_host(cfg->host);
- fallthrough;
- case INIT_STATE_AFU:
- term_afu(cfg);
- fallthrough;
- case INIT_STATE_PCI:
- cfg->ops->destroy_afu(cfg->afu_cookie);
- pci_disable_device(pdev);
- fallthrough;
- case INIT_STATE_NONE:
- free_mem(cfg);
- scsi_host_put(cfg->host);
- break;
- }
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * alloc_mem() - allocates the AFU and its command pool
- * @cfg: Internal structure associated with the host.
- *
- * A partially allocated state remains on failure.
- *
- * Return:
- * 0 on success
- * -ENOMEM on failure to allocate memory
- */
-static int alloc_mem(struct cxlflash_cfg *cfg)
-{
- int rc = 0;
- struct device *dev = &cfg->dev->dev;
-
- /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
- cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(sizeof(struct afu)));
- if (unlikely(!cfg->afu)) {
- dev_err(dev, "%s: cannot get %d free pages\n",
- __func__, get_order(sizeof(struct afu)));
- rc = -ENOMEM;
- goto out;
- }
- cfg->afu->parent = cfg;
- cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
- cfg->afu->afu_map = NULL;
-out:
- return rc;
-}
-
-/**
- * init_pci() - initializes the host as a PCI device
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_pci(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = pci_enable_device(pdev);
- if (rc || pci_channel_offline(pdev)) {
- if (pci_channel_offline(pdev)) {
- cxlflash_wait_for_pci_err_recovery(cfg);
- rc = pci_enable_device(pdev);
- }
-
- if (rc) {
- dev_err(dev, "%s: Cannot enable adapter\n", __func__);
- cxlflash_wait_for_pci_err_recovery(cfg);
- goto out;
- }
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_scsi() - adds the host to the SCSI stack and kicks off host scan
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_scsi(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = scsi_add_host(cfg->host, &pdev->dev);
- if (rc) {
- dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- scsi_scan_host(cfg->host);
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * set_port_online() - transitions the specified host FC port to online state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. Online state means
- * that the FC link layer has synced, completed the handshaking process, and
- * is ready for login to start.
- */
-static void set_port_online(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * set_port_offline() - transitions the specified host FC port to offline state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call.
- */
-static void set_port_offline(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * wait_port_online() - waits for the specified host FC port to come online
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call. This will timeout
- * when the cable is not plugged in.
- *
- * Return:
- * TRUE (1) when the specified port is online
- * FALSE (0) when the specified port fails to come online after timeout
- */
-static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
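- /* An all-ones status typically means the MMIO read failed; trim the retry budget */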
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
-}
-
-/**
- * wait_port_offline() - waits for the specified host FC port to go offline
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call.
- *
- * Return:
- * TRUE (1) when the specified port is offline
- * FALSE (0) when the specified port fails to go offline after timeout
- */
-static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
-}
-
-/**
- * afu_set_wwpn() - configures the WWPN for the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- * @wwpn: The world-wide-port-number previously discovered for port.
- *
- * The provided MMIO region must be mapped prior to call. As part of the
- * sequence to configure the WWPN, the port is toggled offline and then back
- * online. This toggling action can cause this routine to delay up to a few
- * seconds. When configured to use the internal LUN feature of the AFU, a
- * failure to come online is overridden.
- */
-static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
- u64 wwpn)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
- }
-
- writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
- }
-}
-
-/**
- * afu_link_reset() - resets the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. The sequence to
- * reset the port involves toggling it offline and then back online. This
- * action can cause this routine to delay up to a few seconds. An effort
- * is made to maintain the link with the device by switching the host to
- * use the alternate port exclusively while the reset takes place. A
- * failure to come back online is logged but does not abort the reset.
- */
-static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 port_sel;
-
- /* first switch the AFU to the other links, if any */
- port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
- port_sel &= ~(1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
-
- /* switch back to include this port */
- port_sel |= (1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
-}
-
-/**
- * afu_err_intr_init() - clears and initializes the AFU for error interrupts
- * @afu: AFU associated with the host.
- */
-static void afu_err_intr_init(struct afu *afu)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- __be64 __iomem *fc_port_regs;
- int i;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 reg;
-
- /* global async interrupts: AFU clears afu_ctrl on context exit
- * if async interrupts were sent to that context. This prevents
- * the AFU from sending further async interrupts when there is
- * nobody to receive them.
- */
-
- /* mask all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
- /* set LISN# to send and point to primary master context */
- reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
-
- if (afu->internal_lun)
- reg |= 1; /* Bit 63 indicates local lun */
- writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
- /* clear all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
- /* unmask bits that are of interest */
- /* note: afu can send an interrupt after this step */
- writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
- /* clear again in case a bit came on after previous clear but before */
- /* unmask */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
-
- /* Clear/Set internal lun bits */
- fc_port_regs = get_fc_port_regs(cfg, 0);
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg &= SISL_FC_INTERNAL_MASK;
- if (afu->internal_lun)
- reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
-
- /* now clear FC errors */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
- /* sync interrupts for master's IOARRIN write */
- /* note that unlike asyncs, there can be no pending sync interrupts */
- /* at this time (this is a fresh context and master has not written */
- /* IOARRIN yet), so there is nothing to clear. */
-
- /* set LISN#, it is always sent to the context that wrote IOARRIN */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
- reg |= SISL_MSI_SYNC_ERROR;
- writeq_be(reg, &hwq->host_map->ctx_ctrl);
- writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
- }
-}
-
-/**
- * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always returns IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 reg;
- u64 reg_unmasked;
-
- reg = readq_be(&hwq->host_map->intr_status);
- reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
-
- if (reg_unmasked == 0UL) {
- dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
- __func__, reg);
- goto cxlflash_sync_err_irq_exit;
- }
-
- dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
- __func__, reg);
-
- writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
-
-cxlflash_sync_err_irq_exit:
- return IRQ_HANDLED;
-}
-
-/**
- * process_hrrq() - process the read-response queue
- * @hwq: HWQ associated with the host.
- * @doneq: Queue of commands harvested from the RRQ.
- * @budget: Threshold of RRQ entries to process.
- *
- * This routine must be called holding the disabled RRQ spin lock.
- *
- * Return: The number of entries processed.
- */
-static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
-{
- struct afu *afu = hwq->afu;
- struct afu_cmd *cmd;
- struct sisl_ioasa *ioasa;
- struct sisl_ioarcb *ioarcb;
- bool toggle = hwq->toggle;
- int num_hrrq = 0;
- u64 entry,
- *hrrq_start = hwq->hrrq_start,
- *hrrq_end = hwq->hrrq_end,
- *hrrq_curr = hwq->hrrq_curr;
-
- /* Process ready RRQ entries up to the specified budget (if any) */
- while (true) {
- entry = *hrrq_curr;
-
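- /*
- * The toggle bit distinguishes entries posted on the current pass
- * through the ring from stale entries left from the previous pass.
- */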
- if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
- break;
-
- entry &= ~SISL_RESP_HANDLE_T_BIT;
-
- if (afu_is_sq_cmd_mode(afu)) {
- ioasa = (struct sisl_ioasa *)entry;
- cmd = container_of(ioasa, struct afu_cmd, sa);
- } else {
- ioarcb = (struct sisl_ioarcb *)entry;
- cmd = container_of(ioarcb, struct afu_cmd, rcb);
- }
-
- list_add_tail(&cmd->queue, doneq);
-
- /* Advance to next entry or wrap and flip the toggle bit */
- if (hrrq_curr < hrrq_end)
- hrrq_curr++;
- else {
- hrrq_curr = hrrq_start;
- toggle ^= SISL_RESP_HANDLE_T_BIT;
- }
-
- atomic_inc(&hwq->hsq_credits);
- num_hrrq++;
-
- if (budget > 0 && num_hrrq >= budget)
- break;
- }
-
- hwq->hrrq_curr = hrrq_curr;
- hwq->toggle = toggle;
-
- return num_hrrq;
-}
-
-/**
- * process_cmd_doneq() - process a queue of harvested RRQ commands
- * @doneq: Queue of completed commands.
- *
- * Note that upon return the queue can no longer be trusted.
- */
-static void process_cmd_doneq(struct list_head *doneq)
-{
- struct afu_cmd *cmd, *tmp;
-
- WARN_ON(list_empty(doneq));
-
- list_for_each_entry_safe(cmd, tmp, doneq, queue)
- cmd_complete(cmd);
-}
-
-/**
- * cxlflash_irqpoll() - IRQ poll handler that harvests and processes RRQ entries
- * @irqpoll: IRQ poll structure associated with queue to poll.
- * @budget: Threshold of RRQ entries to process per poll.
- *
- * Return: The number of entries processed.
- */
-static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
-{
- struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- num_entries = process_hrrq(hwq, &doneq, budget);
- if (num_entries < budget)
- irq_poll_complete(irqpoll);
-
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- process_cmd_doneq(&doneq);
- return num_entries;
-}
-
-/**
- * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
- */
-static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- /* Silently drop spurious interrupts when queue is not online */
- if (!hwq->hrrq_online) {
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- irq_poll_sched(&hwq->irqpoll);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- num_entries = process_hrrq(hwq, &doneq, -1);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- if (num_entries == 0)
- return IRQ_NONE;
-
- process_cmd_doneq(&doneq);
- return IRQ_HANDLED;
-}
-
-/*
- * Asynchronous interrupt information table
- *
- * NOTE:
- * - Order matters here as this array is indexed by bit position.
- *
- * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
- * as complex and complains due to a lack of parentheses/braces.
- */
-#define ASTATUS_FC(_a, _b, _c, _d) \
- { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
-
-#define BUILD_SISL_ASTATUS_FC_PORT(_a) \
- ASTATUS_FC(_a, LINK_UP, "link up", 0), \
- ASTATUS_FC(_a, LINK_DN, "link down", 0), \
- ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
- ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
- ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
- ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
- ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
- ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
-
-static const struct asyc_intr_info ainfo[] = {
- BUILD_SISL_ASTATUS_FC_PORT(1),
- BUILD_SISL_ASTATUS_FC_PORT(0),
- BUILD_SISL_ASTATUS_FC_PORT(3),
- BUILD_SISL_ASTATUS_FC_PORT(2)
-};
-
-/**
- * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always returns IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- const struct asyc_intr_info *info;
- struct sisl_global_map __iomem *global = &afu->afu_map->global;
- __be64 __iomem *fc_port_regs;
- u64 reg_unmasked;
- u64 reg;
- u64 bit;
- u8 port;
-
- reg = readq_be(&global->regs.aintr_status);
- reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
-
- if (unlikely(reg_unmasked == 0)) {
- dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
- __func__, reg);
- goto out;
- }
-
- /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
- writeq_be(reg_unmasked, &global->regs.aintr_clear);
-
- /* Check each bit that is on */
- for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
- if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- info = &ainfo[bit];
- if (unlikely(info->status != 1ULL << bit)) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- port = info->port;
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
- __func__, port, info->desc,
- readq_be(&fc_port_regs[FC_STATUS / 8]));
-
- /*
- * Do link reset first, some OTHER errors will set FC_ERROR
- * again if cleared before or w/o a reset
- */
- if (info->action & LINK_RESET) {
- dev_err(dev, "%s: FC Port %d: resetting link\n",
- __func__, port);
- cfg->lr_state = LINK_RESET_REQUIRED;
- cfg->lr_port = port;
- schedule_work(&cfg->work_q);
- }
-
- if (info->action & CLR_FC_ERROR) {
- reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
-
- /*
- * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
- * should be the same and tracing one is sufficient.
- */
-
- dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
- __func__, port, reg);
-
- writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
- if (info->action & SCAN_HOST) {
- atomic_inc(&cfg->scan_host_needed);
- schedule_work(&cfg->work_q);
- }
- }
-
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * read_vpd() - obtains the WWPNs from VPD
- * @cfg: Internal structure associated with the host.
- * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
-{
- struct device *dev = &cfg->dev->dev;
- struct pci_dev *pdev = cfg->dev;
- int i, k, rc = 0;
- unsigned int kw_size;
- ssize_t vpd_size;
- char vpd_data[CXLFLASH_VPD_LEN];
- char tmp_buf[WWPN_BUF_LEN] = { 0 };
- const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
- cfg->dev_id->driver_data;
- const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
- const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
-
- /* Get the VPD data from the device */
- vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
- if (unlikely(vpd_size <= 0)) {
- dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
- __func__, vpd_size);
- rc = -ENODEV;
- goto out;
- }
-
- /*
- * Find the offset of the WWPN tag within the read only
- * VPD data and validate the found field (partials are
- * no good to us). Convert the ASCII data to an integer
- * value. Note that we must copy to a temporary buffer
- * because the conversion service requires that the ASCII
- * string be terminated.
- *
- * Allow for WWPN not being found for all devices, setting
- * the returned WWPN to zero when not found. Notify with a
- * log error for cards that should have had WWPN keywords
- * in the VPD - cards requiring WWPN will not have their
- * ports programmed and operate in an undefined state.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
- wwpn_vpd_tags[k], &kw_size);
- if (i == -ENOENT) {
- if (wwpn_vpd_required)
- dev_err(dev, "%s: Port %d WWPN not found\n",
- __func__, k);
- wwpn[k] = 0ULL;
- continue;
- }
-
- if (i < 0 || kw_size != WWPN_LEN) {
- dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
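- /* WWPN_LEN (16) conveniently doubles as the hex base for the conversion */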
- rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
- if (unlikely(rc)) {
- dev_err(dev, "%s: WWPN conversion failed for port %d\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_pcr() - initialize the provisioning and control registers
- * @cfg: Internal structure associated with the host.
- *
- * Also sets up fast access to the mapped registers and initializes AFU
- * command fields that never change.
- */
-static void init_pcr(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map;
- struct hwq *hwq;
- void *cookie;
- int i;
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctrl_map = &afu->afu_map->ctrls[i].ctrl;
- /* Disrupt any clients that could be running */
- /* e.g. clients that survived a master restart */
- writeq_be(0, &ctrl_map->rht_start);
- writeq_be(0, &ctrl_map->rht_cnt_id);
- writeq_be(0, &ctrl_map->ctx_cap);
- }
-
- /* Copy frequently used fields into hwq */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- cookie = hwq->ctx_cookie;
-
- hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
- hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
- hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
-
- /* Program the Endian Control for the master context */
- writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
- }
-}
-
-/**
- * init_global() - initialize AFU global registers
- * @cfg: Internal structure associated with the host.
- */
-static int init_global(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- struct sisl_host_map __iomem *hmap;
- __be64 __iomem *fc_port_regs;
- u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
- int i = 0, num_ports = 0;
- int rc = 0;
- int j;
- void *ctx;
- u64 reg;
-
- rc = read_vpd(cfg, &wwpn[0]);
- if (rc) {
- dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
- goto out;
- }
-
- /* Set up RRQ and SQ in HWQ for master issued cmds */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- hmap = hwq->host_map;
-
- writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
- writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
- hwq->hrrq_online = true;
-
- if (afu_is_sq_cmd_mode(afu)) {
- writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
- writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
- }
- }
-
- /* AFU configuration */
- reg = readq_be(&afu->afu_map->global.regs.afu_config);
- reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
- /* enable all auto retry options and control endianness */
- /* leave others at default: */
- /* CTX_CAP write protected, mbox_r does not clear on read and */
- /* checker on if dual afu */
- writeq_be(reg, &afu->afu_map->global.regs.afu_config);
-
- /* Global port select: select either port */
- if (afu->internal_lun) {
- /* Only use port 0 */
- writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = 0;
- } else {
- writeq_be(PORT_MASK(cfg->num_fc_ports),
- &afu->afu_map->global.regs.afu_port_sel);
- num_ports = cfg->num_fc_ports;
- }
-
- for (i = 0; i < num_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- /* Unmask all errors (but they are still masked at AFU) */
- writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
- /* Clear CRC error cnt & set a threshold */
- (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
- writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
-
- /* Set WWPNs. If already programmed, wwpn[i] is 0 */
- if (wwpn[i] != 0)
- afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
- /* Programming WWPN back to back causes additional
- * offline/online transitions and a PLOGI
- */
- msleep(100);
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each master */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- ctx = hwq->ctx_cookie;
-
- for (j = 0; j < hwq->num_irqs; j++) {
- reg = cfg->ops->get_irq_objhndl(ctx, j);
- writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
- }
-
- reg = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(reg, reg),
- &hwq->ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, reg),
- &hwq->ctrl_map->lisn_pasid[1]);
- }
- }
-
- /* Set up master's own CTX_CAP to allow real mode, host translation */
- /* tables, afu cmds and read/write GSCSI cmds. */
- /* First, unlock ctx_cap write by reading mbox */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
- writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
- SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
- SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
- &hwq->ctrl_map->ctx_cap);
- }
-
- /*
- * Determine write-same unmap support for host by evaluating the unmap
- * sector support bit of the context control register associated with
- * the primary hardware queue. Note that while this status is reflected
- * in a context register, the outcome can be assumed to be host-wide.
- */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
- cfg->ws_unmap = true;
-
- /* Initialize heartbeat */
- afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
-out:
- return rc;
-}
-
-/**
- * start_afu() - initializes and starts the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int start_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int rc = 0;
- int i;
-
- init_pcr(cfg);
-
- /* Initialize each HWQ */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- /* After an AFU reset, RRQ entries are stale, clear them */
- memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
-
- /* Initialize RRQ pointers */
- hwq->hrrq_start = &hwq->rrq_entry[0];
- hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
- hwq->hrrq_curr = hwq->hrrq_start;
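- /*
- * Start with the toggle bit set; the freshly cleared RRQ memory has
- * the bit clear, so only entries actually posted by the AFU are
- * consumed on the first pass through the ring.
- */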
- hwq->toggle = 1;
-
- /* Initialize spin locks */
- spin_lock_init(&hwq->hrrq_slock);
- spin_lock_init(&hwq->hsq_slock);
-
- /* Initialize SQ */
- if (afu_is_sq_cmd_mode(afu)) {
- memset(&hwq->sq, 0, sizeof(hwq->sq));
- hwq->hsq_start = &hwq->sq[0];
- hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
- hwq->hsq_curr = hwq->hsq_start;
-
- atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
- }
-
- /* Initialize IRQ poll */
- if (afu_is_irqpoll_enabled(afu))
- irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
- cxlflash_irqpoll);
-
- }
-
- rc = init_global(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_intr() - setup interrupt handlers for the master context
- * @cfg: Internal structure associated with the host.
- * @hwq: Hardware queue to initialize.
- *
- * Return: UNDO_NOOP (0) on success, otherwise the undo level describing
- * how much of the interrupt setup must be unwound
- */
-static enum undo_level init_intr(struct cxlflash_cfg *cfg,
- struct hwq *hwq)
-{
- struct device *dev = &cfg->dev->dev;
- void *ctx = hwq->ctx_cookie;
- int rc = 0;
- enum undo_level level = UNDO_NOOP;
- bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
- int num_irqs = hwq->num_irqs;
-
- rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
- if (unlikely(rc)) {
- dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
- __func__, rc);
- level = UNDO_NOOP;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
- level = FREE_IRQ;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
- level = UNMAP_ONE;
- goto out;
- }
-
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (!is_primary_hwq)
- goto out;
-
- rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
- level = UNMAP_TWO;
- goto out;
- }
-out:
- return level;
-}
-
-/**
- * init_mc() - create and register as the master context
- * @cfg: Internal structure associated with the host.
- * @index: HWQ Index of the master context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- void *ctx;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
- int num_irqs;
- enum undo_level level;
-
- hwq->afu = cfg->afu;
- hwq->index = index;
- INIT_LIST_HEAD(&hwq->pending_cmds);
-
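- /* The primary HWQ also owns the async error interrupt, hence one extra IRQ */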
- if (index == PRIMARY_HWQ) {
- ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
- num_irqs = 3;
- } else {
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- num_irqs = 2;
- }
- if (IS_ERR_OR_NULL(ctx)) {
- rc = -ENOMEM;
- goto err1;
- }
-
- WARN_ON(hwq->ctx_cookie);
- hwq->ctx_cookie = ctx;
- hwq->num_irqs = num_irqs;
-
- /* Set it up as a master with the CXL */
- cfg->ops->set_master(ctx);
-
- /* Reset AFU when initializing primary context */
- if (index == PRIMARY_HWQ) {
- rc = cfg->ops->afu_reset(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU reset failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- }
-
- level = init_intr(cfg, hwq);
- if (unlikely(level)) {
- dev_err(dev, "%s: interrupt init failed level=%d\n", __func__, level);
- rc = -EIO; /* init_intr() reports an undo level, not an errno */
- goto err2;
- }
-
- /* Finally, activate the context by starting it */
- rc = cfg->ops->start_context(hwq->ctx_cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
- level = UNMAP_THREE;
- goto err2;
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- term_intr(cfg, level, index);
- if (index != PRIMARY_HWQ)
- cfg->ops->release_context(ctx);
-err1:
- hwq->ctx_cookie = NULL;
- goto out;
-}
-
-/**
- * get_num_afu_ports() - determines and configures the number of AFU ports
- * @cfg: Internal structure associated with the host.
- *
- * This routine determines the number of AFU ports by converting the global
- * port selection mask. The converted value is only valid following an AFU
- * reset (explicit or power-on). This routine must be invoked shortly after
- * mapping as other routines are dependent on the number of ports during the
- * initialization sequence.
- *
- * To support legacy AFUs that might not have reflected an initial global
- * port mask (value read is 0), default to the number of ports originally
- * supported by the cxlflash driver (2) before hardware with other port
- * offerings was introduced.
- */
-static void get_num_afu_ports(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- u64 port_mask;
- int num_fc_ports = LEGACY_FC_PORTS;
-
- port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
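- /*
- * The select mask is expected to be contiguous from bit 0, so the
- * position of its highest set bit yields the port count.
- */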
- if (port_mask != 0ULL)
- num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
-
- dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
- __func__, port_mask, num_fc_ports);
-
- cfg->num_fc_ports = num_fc_ports;
- cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
-}
-
-/**
- * init_afu() - setup as master context and start AFU
- * @cfg: Internal structure associated with the host.
- *
- * This routine is a higher level of control for configuring the
- * AFU on probe and reset paths.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_afu(struct cxlflash_cfg *cfg)
-{
- u64 reg;
- int rc = 0;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int i;
-
- cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
-
- mutex_init(&afu->sync_active);
- afu->num_hwqs = afu->desired_hwqs;
- for (i = 0; i < afu->num_hwqs; i++) {
- rc = init_mc(cfg, i);
- if (rc) {
- dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
- __func__, rc, i);
- goto err1;
- }
- }
-
- /* Map the entire MMIO space of the AFU using the first context */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
- if (!afu->afu_map) {
- dev_err(dev, "%s: psa_map failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- /* No byte reverse on reading afu_version or string will be backwards */
- reg = readq(&afu->afu_map->global.regs.afu_version);
- memcpy(afu->version, &reg, sizeof(reg));
- afu->interface_version =
- readq_be(&afu->afu_map->global.regs.interface_version);
- if ((afu->interface_version + 1) == 0) {
- dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
- "interface version %016llx\n", afu->version,
- afu->interface_version);
- rc = -EINVAL;
- goto err1;
- }
-
- if (afu_is_sq_cmd_mode(afu)) {
- afu->send_cmd = send_cmd_sq;
- afu->context_reset = context_reset_sq;
- } else {
- afu->send_cmd = send_cmd_ioarrin;
- afu->context_reset = context_reset_ioarrin;
- }
-
- dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
- afu->version, afu->interface_version);
-
- get_num_afu_ports(cfg);
-
- rc = start_afu(cfg);
- if (rc) {
- dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
- goto err1;
- }
-
- afu_err_intr_init(cfg->afu);
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- hwq->room = readq_be(&hwq->host_map->cmd_room);
- }
-
- /* Restore the LUN mappings */
- cxlflash_restore_luntable(cfg);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err1:
- for (i = afu->num_hwqs - 1; i >= 0; i--) {
- term_intr(cfg, UNMAP_THREE, i);
- term_mc(cfg, i);
- }
- goto out;
-}
-
-/**
- * afu_reset() - resets the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- /* Stop the context before the reset. Since the context is
- * no longer available once stopped, restart it after the reset
- * completes.
- */
- term_afu(cfg);
-
- rc = init_afu(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
- *
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
- */
-static void drain_ioctls(struct cxlflash_cfg *cfg)
-{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
-}
-
-/**
- * cxlflash_async_reset_host() - asynchronous host reset handler
- * @data: Private data provided while scheduling reset.
- * @cookie: Cookie that can be used for checkpointing.
- */
-static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
-{
- struct cxlflash_cfg *cfg = data;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- if (cfg->state != STATE_RESET) {
- dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
-
-out:
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Not performing reset state=%d\n",
- __func__, cfg->state);
- return;
- }
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
- cfg);
-}
-
-/**
- * send_afu_cmd() - builds and sends an internal AFU command
- * @afu: AFU associated with the host.
- * @rcb: Pre-populated IOARCB describing command to send.
- *
- * The AFU can only take one internal AFU command at a time. This limitation is
- * enforced by using a mutex to provide exclusive access to the AFU during the
- * operation. This design point requires that calling threads not be in
- * interrupt context due to the possibility of sleeping during concurrent
- * AFU operations.
- *
- * The command status is optionally passed back to the caller when the caller
- * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
- *
- * Return:
- * 0 on success, -errno on failure
- */
-static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = NULL;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- ulong lock_flags;
- char *buf = NULL;
- int rc = 0;
- int nretry = 0;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Sync not required state=%u\n",
- __func__, cfg->state);
- return 0;
- }
-
- mutex_lock(&afu->sync_active);
- atomic_inc(&afu->cmds_active);
- buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
-
-retry:
- memset(cmd, 0, sizeof(*cmd));
- memcpy(&cmd->rcb, rcb, sizeof(*rcb));
- INIT_LIST_HEAD(&cmd->queue);
- init_completion(&cmd->cevent);
- cmd->parent = afu;
- cmd->hwq_index = hwq->index;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
-
- dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
- __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- rc = -ENOBUFS;
- goto out;
- }
-
- rc = wait_resp(afu, cmd);
- switch (rc) {
- case -ETIMEDOUT:
- rc = afu->context_reset(hwq);
- if (rc) {
- /* Delete the command from pending_cmds list */
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- cxlflash_schedule_async_reset(cfg);
- break;
- }
- fallthrough; /* to retry */
- case -EAGAIN:
- if (++nretry < 2)
- goto retry;
- fallthrough; /* to exit */
- default:
- break;
- }
-
- if (rcb->ioasa)
- *rcb->ioasa = cmd->sa;
-out:
- atomic_dec(&afu->cmds_active);
- mutex_unlock(&afu->sync_active);
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_afu_sync() - builds and sends an AFU sync command
- * @afu: AFU associated with the host.
- * @ctx: Identifies context requesting sync.
- * @res: Identifies resource requesting sync.
- * @mode: Type of sync to issue (lightweight, heavyweight, global).
- *
- * AFU sync operations are only necessary and allowed when the device is
- * operating normally. When not operating normally, sync requests can occur as
- * part of cleaning up resources associated with an adapter prior to removal.
- * In this scenario, these requests are simply ignored (safe due to the AFU
- * going away).
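- *
- * As an example from this driver, afu_link_reset() issues a global sync
- * via cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC) after updating the port
- * selection mask.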
- *
- * Return:
- * 0 on success, -errno on failure
- */
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb = { 0 };
-
- dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
- __func__, afu, ctx, res, mode);
-
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_SYNC_TIMEOUT;
-
- rcb.cdb[0] = SISL_AFU_CMD_SYNC;
- rcb.cdb[1] = mode;
- put_unaligned_be16(ctx, &rcb.cdb[2]);
- put_unaligned_be32(res, &rcb.cdb[4]);
-
- return send_afu_cmd(afu, &rcb);
-}
-
-/**
- * cxlflash_eh_abort_handler() - abort a SCSI command
- * @scp: SCSI command to abort.
- *
- * CXL Flash devices do not support a single command abort. Reset the context
- * as per SISLite specification. Flush any pending commands in the hardware
- * queue before the reset.
- *
- * Return: SUCCESS/FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
-{
- int rc = FAILED;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu_cmd *cmd = sc_to_afuc(scp);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- /* When the state is not normal, another reset/reload is in progress.
- * Return failed and the mid-layer will invoke host reset handler.
- */
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- rc = afu->context_reset(hwq);
- if (unlikely(rc))
- goto out;
-
- rc = SUCCESS;
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_device_reset_handler() - reset a single LUN
- * @scp: SCSI command from stack identifying the LUN to reset.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- struct scsi_device *sdev = scp->device;
- struct Scsi_Host *host = sdev->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
- int rcr = 0;
-
- dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
- host->host_no, sdev->channel, sdev->id, sdev->lun);
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
- if (unlikely(rcr))
- rc = FAILED;
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- goto retry;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_host_reset_handler() - reset the host adapter
- * @scp: SCSI command from stack identifying host.
- *
- * Following a reset, the state is evaluated again in case an EEH occurred
- * during the reset. In such a scenario, the host reset will either yield
- * until the EEH recovery is complete or return success or failure based
- * upon the current device state.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- int rcr = 0;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
-
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rcr = afu_reset(cfg);
- if (rcr) {
- rc = FAILED;
- cfg->state = STATE_FAILTERM;
- } else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- fallthrough;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- break;
- fallthrough;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_change_queue_depth() - change the queue depth for the device
- * @sdev: SCSI device destined for queue depth change.
- * @qdepth: Requested queue depth value to set.
- *
- * The requested queue depth is capped to the maximum supported value.
- *
- * Return: The actual queue depth set.
- */
-static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
-{
-
- if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
- qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
-
- scsi_change_queue_depth(sdev, qdepth);
- return sdev->queue_depth;
-}
-
-/**
- * cxlflash_show_port_status() - queries and presents the current port status
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_status(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- char *disp_status;
- u64 status;
- __be64 __iomem *fc_port_regs;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_regs = get_fc_port_regs(cfg, port);
- status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
- status &= FC_MTIP_STATUS_MASK;
-
- if (status == FC_MTIP_STATUS_ONLINE)
- disp_status = "online";
- else if (status == FC_MTIP_STATUS_OFFLINE)
- disp_status = "offline";
- else
- disp_status = "unknown";
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
-}
-
-/**
- * port0_show() - queries and presents the current status of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(0, cfg, buf);
-}
-
-/**
- * port1_show() - queries and presents the current status of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(1, cfg, buf);
-}
-
-/**
- * port2_show() - queries and presents the current status of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(2, cfg, buf);
-}
-
-/**
- * port3_show() - queries and presents the current status of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(3, cfg, buf);
-}
-
-/**
- * lun_mode_show() - presents the current LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t lun_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
-}
-
-/**
- * lun_mode_store() - sets the LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
- * @count: Length of data residing in @buf.
- *
- * The CXL Flash AFU supports a dummy LUN mode where the external
- * links and storage are not required. Space on the FPGA is used
- * to create 1 or 2 small LUNs which are presented to the system
- * as if they were a normal storage device. This feature is useful
- * during development and also provides manufacturing with a way
- * to test the AFU without an actual device.
- *
- * 0 = external LUN[s] (default)
- * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
- * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
- * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
- * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
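- *
- * As an illustration, writing "1" to this attribute (exposed via the
- * host's lun_mode sysfs entry) selects a single internal 64K LUN with
- * 512B blocks and triggers an AFU reset followed by a host rescan.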
- *
- * Return: The number of bytes consumed from @buf (@count).
- */
-static ssize_t lun_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct afu *afu = cfg->afu;
- int rc;
- u32 lun_mode;
-
- rc = kstrtouint(buf, 10, &lun_mode);
- if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
- afu->internal_lun = lun_mode;
-
- /*
- * When configured for internal LUN, there is only one channel,
- * channel number 0, else there will be one less than the number
- * of fc ports for this card.
- */
- if (afu->internal_lun)
- shost->max_channel = 0;
- else
- shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
-
- afu_reset(cfg);
- scsi_scan_host(cfg->host);
- }
-
- return count;
-}
-
-/**
- * ioctl_version_show() - presents the current ioctl version of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the ioctl version.
- * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t ioctl_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- ssize_t bytes = 0;
-
- bytes = scnprintf(buf, PAGE_SIZE,
- "disk: %u\n", DK_CXLFLASH_VERSION_0);
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "host: %u\n", HT_CXLFLASH_VERSION_0);
-
- return bytes;
-}
-
-/**
- * cxlflash_show_port_lun_table() - queries and presents the port LUN table
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_lun_table(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
- int i;
- ssize_t bytes = 0;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_luns = get_fc_port_luns(cfg, port);
-
- for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "%03d: %016llx\n",
- i, readq_be(&fc_port_luns[i]));
- return bytes;
-}
-
-/**
- * port0_lun_table_show() - presents the current LUN table of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(0, cfg, buf);
-}
-
-/**
- * port1_lun_table_show() - presents the current LUN table of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(1, cfg, buf);
-}
-
-/**
- * port2_lun_table_show() - presents the current LUN table of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(2, cfg, buf);
-}
-
-/**
- * port3_lun_table_show() - presents the current LUN table of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(3, cfg, buf);
-}
-
-/**
- * irqpoll_weight_show() - presents the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
- * weight in ASCII.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t irqpoll_weight_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
-}
-
-/**
- * irqpoll_weight_store() - sets the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
- * weight in ASCII.
- * @count:	Length of data residing in @buf.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: @count on success, -errno on failure.
- */
-static ssize_t irqpoll_weight_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- u32 weight;
- int rc, i;
-
- rc = kstrtouint(buf, 10, &weight);
- if (rc)
- return -EINVAL;
-
- if (weight > 256) {
- dev_info(cfgdev,
- "Invalid IRQ poll weight. It must be 256 or less.\n");
- return -EINVAL;
- }
-
- if (weight == afu->irqpoll_weight) {
- dev_info(cfgdev,
-			 "Specified IRQ poll weight matches the current weight.\n");
- return -EINVAL;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- afu->irqpoll_weight = weight;
-
- if (weight > 0) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
- }
- }
-
- return count;
-}
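-
-/*
- * Usage sketch (illustrative only; the SCSI host number is an assumption
- * that depends on probe order):
- *
- *   echo 0  > /sys/class/scsi_host/host0/irqpoll_weight    # disable polling
- *   echo 64 > /sys/class/scsi_host/host0/irqpoll_weight    # poll budget of 64
- */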
-
-/**
- * num_hwqs_show() - presents the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
- * queues in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t num_hwqs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
-}
-
-/**
- * num_hwqs_store() - sets the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE containing the number of hardware
- * queues in ASCII.
- * @count:	Length of data residing in @buf.
- *
- * n > 0: num_hwqs = n
- * n = 0: num_hwqs = num_online_cpus()
- * n < 0: num_hwqs = num_online_cpus() / abs(n)
- *
- * Return: @count on success, -errno on failure.
- */
-static ssize_t num_hwqs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- int rc;
- int nhwqs, num_hwqs;
-
- rc = kstrtoint(buf, 10, &nhwqs);
- if (rc)
- return -EINVAL;
-
- if (nhwqs >= 1)
- num_hwqs = nhwqs;
- else if (nhwqs == 0)
- num_hwqs = num_online_cpus();
- else
- num_hwqs = num_online_cpus() / abs(nhwqs);
-
- afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
- WARN_ON_ONCE(afu->desired_hwqs == 0);
-
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- goto retry;
- fallthrough;
- default:
- /* Ideally should not happen */
- dev_err(dev, "%s: Device is not ready, state=%d\n",
- __func__, cfg->state);
- break;
- }
-
- return count;
-}
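-
-/*
- * Worked example of the num_hwqs mapping documented above, assuming a
- * system with 16 online CPUs (the CPU count is purely illustrative):
- *
- *   echo 4  -> num_hwqs = 4
- *   echo 0  -> num_hwqs = num_online_cpus()     = 16
- *   echo -2 -> num_hwqs = num_online_cpus() / 2 = 8
- *
- * The result is then capped at CXLFLASH_MAX_HWQS before the AFU is reset
- * to apply the new queue count.
- */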
-
-static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
-
-/**
- * hwq_mode_show() - presents the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
- * as a character string.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t hwq_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
-}
-
-/**
- * hwq_mode_store() - sets the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
- * as a character string.
- * @count:	Length of data residing in @buf.
- *
- * rr = Round-Robin
- * tag = Block MQ Tagging
- * cpu = CPU Affinity
- *
- * Return: @count on success, -errno on failure.
- */
-static ssize_t hwq_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- int i;
- u32 mode = MAX_HWQ_MODE;
-
- for (i = 0; i < MAX_HWQ_MODE; i++) {
- if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
- mode = i;
- break;
- }
- }
-
- if (mode >= MAX_HWQ_MODE) {
- dev_info(cfgdev, "Invalid HWQ steering mode.\n");
- return -EINVAL;
- }
-
- afu->hwq_mode = mode;
-
- return count;
-}
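-
-/*
- * Usage sketch (illustrative only; the host number is an assumption). The
- * accepted strings are those listed in hwq_mode_name[]:
- *
- *   echo rr  > /sys/class/scsi_host/host0/hwq_mode    # round-robin
- *   echo tag > /sys/class/scsi_host/host0/hwq_mode    # block MQ tag based
- *   echo cpu > /sys/class/scsi_host/host0/hwq_mode    # CPU affinity
- */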
-
-/**
- * mode_show() - presents the current mode of the device
- * @dev: Generic device associated with the device.
- * @attr: Device attribute representing the device mode.
- * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- sdev->hostdata ? "superpipe" : "legacy");
-}
-
-/*
- * Host attributes
- */
-static DEVICE_ATTR_RO(port0);
-static DEVICE_ATTR_RO(port1);
-static DEVICE_ATTR_RO(port2);
-static DEVICE_ATTR_RO(port3);
-static DEVICE_ATTR_RW(lun_mode);
-static DEVICE_ATTR_RO(ioctl_version);
-static DEVICE_ATTR_RO(port0_lun_table);
-static DEVICE_ATTR_RO(port1_lun_table);
-static DEVICE_ATTR_RO(port2_lun_table);
-static DEVICE_ATTR_RO(port3_lun_table);
-static DEVICE_ATTR_RW(irqpoll_weight);
-static DEVICE_ATTR_RW(num_hwqs);
-static DEVICE_ATTR_RW(hwq_mode);
-
-static struct attribute *cxlflash_host_attrs[] = {
- &dev_attr_port0.attr,
- &dev_attr_port1.attr,
- &dev_attr_port2.attr,
- &dev_attr_port3.attr,
- &dev_attr_lun_mode.attr,
- &dev_attr_ioctl_version.attr,
- &dev_attr_port0_lun_table.attr,
- &dev_attr_port1_lun_table.attr,
- &dev_attr_port2_lun_table.attr,
- &dev_attr_port3_lun_table.attr,
- &dev_attr_irqpoll_weight.attr,
- &dev_attr_num_hwqs.attr,
- &dev_attr_hwq_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_host);
-
-/*
- * Device attributes
- */
-static DEVICE_ATTR_RO(mode);
-
-static struct attribute *cxlflash_dev_attrs[] = {
- &dev_attr_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_dev);
-
-/*
- * Host template
- */
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = CXLFLASH_ADAPTER_NAME,
- .info = cxlflash_driver_info,
- .ioctl = cxlflash_ioctl,
- .proc_name = CXLFLASH_NAME,
- .queuecommand = cxlflash_queuecommand,
- .eh_abort_handler = cxlflash_eh_abort_handler,
- .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
- .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
- .change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
- .can_queue = CXLFLASH_MAX_CMDS,
- .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
- .this_id = -1,
- .sg_tablesize = 1, /* No scatter gather support */
- .max_sectors = CXLFLASH_MAX_SECTORS,
- .shost_groups = cxlflash_host_groups,
- .sdev_groups = cxlflash_dev_groups,
-};
-
-/*
- * Device dependent values
- */
-static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_WWPN_VPD_REQUIRED };
-static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_NOTIFY_SHUTDOWN };
-static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
- (CXLFLASH_NOTIFY_SHUTDOWN |
- CXLFLASH_OCXL_DEV) };
-
-/*
- * PCI device binding table
- */
-static const struct pci_device_id cxlflash_pci_table[] = {
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
-
-/**
- * cxlflash_worker_thread() - work thread handler for the AFU
- * @work: Work structure contained within cxlflash associated with host.
- *
- * Handles the following events:
- * - Link reset which cannot be performed on interrupt context due to
- * blocking up to a few seconds
- * - Rescan the host
- */
-static void cxlflash_worker_thread(struct work_struct *work)
-{
- struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
- work_q);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_regs;
- int port;
- ulong lock_flags;
-
- /* Avoid MMIO if the device has failed */
-
- if (cfg->state != STATE_NORMAL)
- return;
-
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
-
- if (cfg->lr_state == LINK_RESET_REQUIRED) {
- port = cfg->lr_port;
- if (port < 0)
- dev_err(dev, "%s: invalid port index %d\n",
- __func__, port);
- else {
- spin_unlock_irqrestore(cfg->host->host_lock,
- lock_flags);
-
- /* The reset can block... */
- fc_port_regs = get_fc_port_regs(cfg, port);
- afu_link_reset(afu, port, fc_port_regs);
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
- }
-
- cfg->lr_state = LINK_RESET_COMPLETE;
- }
-
- spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
-
- if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
- scsi_scan_host(cfg->host);
-}
-
-/**
- * cxlflash_chr_open() - character device open handler
- * @inode: Device inode associated with this character device.
- * @file: File pointer for this device.
- *
- * Only users with admin privileges are allowed to open the character device.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_chr_open(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
- file->private_data = cfg;
-
- return 0;
-}
-
-/**
- * decode_hioctl() - translates encoded host ioctl to easily identifiable string
- * @cmd: The host ioctl command to decode.
- *
- * Return: A string identifying the decoded host ioctl.
- */
-static char *decode_hioctl(unsigned int cmd)
-{
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
- }
-
- return "UNKNOWN";
-}
-
-/**
- * cxlflash_lun_provision() - host LUN provisioning handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_lun_provision *lunprov = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- __be64 __iomem *fc_port_regs;
- u16 port = lunprov->port;
- u16 scmd = lunprov->hdr.subcmd;
- u16 type;
- u64 reg;
- u64 size;
- u64 lun_id;
- int rc = 0;
-
- if (!afu_is_lun_provision(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (port >= cfg->num_fc_ports) {
- rc = -EINVAL;
- goto out;
- }
-
- switch (scmd) {
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
- type = SISL_AFU_LUN_PROVISION_CREATE;
- size = lunprov->size;
- lun_id = 0;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
- type = SISL_AFU_LUN_PROVISION_DELETE;
- size = 0;
- lun_id = lunprov->lun_id;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
- lunprov->max_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
- lunprov->cur_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
- lunprov->max_cap_port = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
- lunprov->cur_cap_port = reg;
-
- goto out;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.lun_id = lun_id;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_LUN_PROV_TIMEOUT;
- rcb.ioasa = &asa;
-
- rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
- rcb.cdb[1] = type;
- rcb.cdb[2] = port;
- put_unaligned_be64(size, &rcb.cdb[8]);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
- lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
- memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
- }
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
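-
-/*
- * Minimal userspace sketch of exercising this handler (illustrative only;
- * error handling omitted, CAP_SYS_ADMIN is required, and the device path
- * and size units are assumptions):
- *
- *   struct ht_cxlflash_lun_provision prov = { 0 };
- *   int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
- *
- *   prov.hdr.version = HT_CXLFLASH_VERSION_0;
- *   prov.hdr.subcmd  = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
- *   prov.port        = 0;
- *   prov.size        = nblocks;    (requested capacity; 4K blocks assumed)
- *
- *   ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &prov);
- *
- * On success, prov.lun_id and prov.wwid identify the newly created LUN.
- */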
-
-/**
- * cxlflash_afu_debug() - host AFU debug handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * For debug requests requiring a data buffer, always provide an aligned
- * (cache line) buffer to the AFU to appease any alignment requirements.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_afu_debug *afu_dbg = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- char *buf = NULL;
- char *kbuf = NULL;
- void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
- u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
- u32 ulen = afu_dbg->data_len;
- bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
- int rc = 0;
-
- if (!afu_is_afu_debug(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (ulen) {
- req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
-
- if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
- rc = -EINVAL;
- goto out;
- }
-
- buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- rc = -ENOMEM;
- goto out;
- }
-
- kbuf = PTR_ALIGN(buf, cache_line_size());
-
- if (is_write) {
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- if (copy_from_user(kbuf, ubuf, ulen)) {
- rc = -EFAULT;
- goto out;
- }
- }
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
-
- rcb.req_flags = req_flags;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
- rcb.ioasa = &asa;
-
- if (ulen) {
- rcb.data_len = ulen;
- rcb.data_ea = (uintptr_t)kbuf;
- }
-
- rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
- memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
- HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (ulen && !is_write) {
- if (copy_to_user(ubuf, kbuf, ulen))
- rc = -EFAULT;
- }
-out:
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_chr_ioctl() - character device IOCTL handler
- * @file: File pointer for this device.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- typedef int (*hioctl) (struct cxlflash_cfg *, void *);
-
- struct cxlflash_cfg *cfg = file->private_data;
- struct device *dev = &cfg->dev->dev;
- char buf[sizeof(union cxlflash_ht_ioctls)];
- void __user *uarg = (void __user *)arg;
- struct ht_cxlflash_hdr *hdr;
- size_t size = 0;
- bool known_ioctl = false;
- int idx = 0;
- int rc = 0;
- hioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- hioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- { sizeof(struct ht_cxlflash_lun_provision), cxlflash_lun_provision },
- { sizeof(struct ht_cxlflash_afu_debug), cxlflash_afu_debug },
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
- __func__, cmd, idx, sizeof(ioctl_tbl));
-
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- case HT_CXLFLASH_AFU_DEBUG:
- known_ioctl = true;
-		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(copy_from_user(&buf, uarg, size))) {
- dev_err(dev, "%s: copy_from_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- goto out;
- }
-
- hdr = (struct ht_cxlflash_hdr *)&buf;
- if (hdr->version != HT_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_hioctl(cmd));
- rc = -EINVAL;
- goto out;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = do_ioctl(cfg, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(uarg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-out:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- return rc;
-}
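-
-/*
- * For reference, the "drain" described above amounts to briefly taking the
- * write side of the same semaphore, conceptually (see the driver's
- * drain_ioctls() helper defined earlier in this file):
- *
- *   down_write(&cfg->ioctl_rwsem);    wait for in-flight ioctls (readers)
- *   up_write(&cfg->ioctl_rwsem);      allow new ioctls to proceed
- */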
-
-/*
- * Character device file operations
- */
-static const struct file_operations cxlflash_chr_fops = {
- .owner = THIS_MODULE,
- .open = cxlflash_chr_open,
- .unlocked_ioctl = cxlflash_chr_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
-
-/**
- * init_chrdev() - initialize the character device for the host
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_chrdev(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct device *char_dev;
- dev_t devno;
- int minor;
- int rc = 0;
-
- minor = cxlflash_get_minor();
- if (unlikely(minor < 0)) {
- dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
- rc = -ENOSPC;
- goto out;
- }
-
- devno = MKDEV(cxlflash_major, minor);
- cdev_init(&cfg->cdev, &cxlflash_chr_fops);
-
- rc = cdev_add(&cfg->cdev, devno, 1);
- if (rc) {
- dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- char_dev = device_create(&cxlflash_class, NULL, devno,
- NULL, "cxlflash%d", minor);
- if (IS_ERR(char_dev)) {
- rc = PTR_ERR(char_dev);
- dev_err(dev, "%s: device_create failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- cfg->chardev = char_dev;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- cdev_del(&cfg->cdev);
-err1:
- cxlflash_put_minor(minor);
- goto out;
-}
-
-/**
- * cxlflash_probe() - PCI entry point to add host
- * @pdev: PCI device associated with the host.
- * @dev_id: PCI device id associated with device.
- *
- * The device will initially start out in a 'probing' state and
- * transition to the 'normal' state at the end of a successful
- * probe. Should an EEH event occur during probe, the notification
- * thread (error_detected()) will wait until the probe handler
- * is nearly complete. At that time, the device will be moved to
- * a 'probed' state and the EEH thread woken up to drive the slot
- * reset and recovery (device moves to 'normal' state). Meanwhile,
- * the probe will be allowed to exit successfully.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_probe(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
-{
- struct Scsi_Host *host;
- struct cxlflash_cfg *cfg = NULL;
- struct device *dev = &pdev->dev;
- struct dev_dependent_vals *ddv;
- int rc = 0;
- int k;
-
- dev_err_once(&pdev->dev, "DEPRECATION: cxlflash is deprecated and will be removed in a future kernel release\n");
-
- dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
- __func__, pdev->irq);
-
- ddv = (struct dev_dependent_vals *)dev_id->driver_data;
- driver_template.max_sectors = ddv->max_sectors;
-
- host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
- if (!host) {
- dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
- host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
- host->unique_id = host->host_no;
- host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
-
- cfg = shost_priv(host);
- cfg->state = STATE_PROBING;
- cfg->host = host;
- rc = alloc_mem(cfg);
- if (rc) {
- dev_err(dev, "%s: alloc_mem failed\n", __func__);
- rc = -ENOMEM;
- scsi_host_put(cfg->host);
- goto out;
- }
-
- cfg->init_state = INIT_STATE_NONE;
- cfg->dev = pdev;
- cfg->cxl_fops = cxlflash_cxl_fops;
- cfg->ops = cxlflash_assign_ops(ddv);
- WARN_ON_ONCE(!cfg->ops);
-
- /*
- * Promoted LUNs move to the top of the LUN table. The rest stay on
- * the bottom half. The bottom half grows from the end (index = 255),
- * whereas the top half grows from the beginning (index = 0).
- *
- * Initialize the last LUN index for all possible ports.
- */
- cfg->promote_lun_index = 0;
-
- for (k = 0; k < MAX_FC_PORTS; k++)
- cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
-
- cfg->dev_id = (struct pci_device_id *)dev_id;
-
- init_waitqueue_head(&cfg->tmf_waitq);
- init_waitqueue_head(&cfg->reset_waitq);
-
- INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
- cfg->lr_state = LINK_RESET_INVALID;
- cfg->lr_port = -1;
- spin_lock_init(&cfg->tmf_slock);
- mutex_init(&cfg->ctx_tbl_list_mutex);
- mutex_init(&cfg->ctx_recovery_mutex);
- init_rwsem(&cfg->ioctl_rwsem);
- INIT_LIST_HEAD(&cfg->ctx_err_recovery);
- INIT_LIST_HEAD(&cfg->lluns);
-
- pci_set_drvdata(pdev, cfg);
-
- rc = init_pci(cfg);
- if (rc) {
- dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_PCI;
-
- cfg->afu_cookie = cfg->ops->create_afu(pdev);
- if (unlikely(!cfg->afu_cookie)) {
- dev_err(dev, "%s: create_afu failed\n", __func__);
- rc = -ENOMEM;
- goto out_remove;
- }
-
- rc = init_afu(cfg);
- if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
- dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_AFU;
-
- rc = init_scsi(cfg);
- if (rc) {
- dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_SCSI;
-
- rc = init_chrdev(cfg);
- if (rc) {
- dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_CDEV;
-
- if (wq_has_sleeper(&cfg->reset_waitq)) {
- cfg->state = STATE_PROBED;
- wake_up_all(&cfg->reset_waitq);
- } else
- cfg->state = STATE_NORMAL;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-out_remove:
- cfg->state = STATE_PROBED;
- cxlflash_remove(pdev);
- goto out;
-}
-
-/**
- * cxlflash_pci_error_detected() - called when a PCI error is detected
- * @pdev: PCI device struct.
- * @state: PCI channel state.
- *
- * When an EEH occurs during an active reset, wait until the reset is
- * complete and then take action based upon the device state.
- *
- * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
-
- switch (state) {
- case pci_channel_io_frozen:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- if (cfg->state == STATE_FAILTERM)
- return PCI_ERS_RESULT_DISCONNECT;
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- drain_ioctls(cfg);
- rc = cxlflash_mark_contexts_error(cfg);
- if (unlikely(rc))
- dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
- __func__, rc);
- term_afu(cfg);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- cfg->state = STATE_FAILTERM;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
- return PCI_ERS_RESULT_DISCONNECT;
- default:
- break;
- }
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * cxlflash_pci_slot_reset() - called when PCI slot has been reset
- * @pdev: PCI device struct.
- *
- * This routine is called by the pci error recovery code after the PCI
- * slot has been reset, just before we should resume normal operations.
- *
- * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- rc = init_afu(cfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * cxlflash_pci_resume() - called when normal operation can resume
- * @pdev: PCI device struct
- */
-static void cxlflash_pci_resume(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
- * @dev: Character device.
- * @mode: Mode that can be used to verify access.
- *
- * Return: Allocated string describing the devtmpfs structure.
- */
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
-}
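-
-/*
- * Combined with the "cxlflash%d" name passed to device_create() in
- * init_chrdev(), this yields devtmpfs nodes of the form
- * /dev/cxlflash/cxlflash0, /dev/cxlflash/cxlflash1, ... (minor numbers
- * assumed to be allocated starting at 0).
- */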
-
-/**
- * cxlflash_class_init() - create character device class
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_class_init(void)
-{
- dev_t devno;
- int rc = 0;
-
- rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
- if (unlikely(rc)) {
- pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- cxlflash_major = MAJOR(devno);
-
- rc = class_register(&cxlflash_class);
- if (rc) {
-		pr_err("%s: class_register failed rc=%d\n", __func__, rc);
- goto err;
- }
-
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
- goto out;
-}
-
-/**
- * cxlflash_class_exit() - destroy character device class
- */
-static void cxlflash_class_exit(void)
-{
- dev_t devno = MKDEV(cxlflash_major, 0);
-
- class_unregister(&cxlflash_class);
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
-}
-
-static const struct pci_error_handlers cxlflash_err_handler = {
- .error_detected = cxlflash_pci_error_detected,
- .slot_reset = cxlflash_pci_slot_reset,
- .resume = cxlflash_pci_resume,
-};
-
-/*
- * PCI device structure
- */
-static struct pci_driver cxlflash_driver = {
- .name = CXLFLASH_NAME,
- .id_table = cxlflash_pci_table,
- .probe = cxlflash_probe,
- .remove = cxlflash_remove,
- .shutdown = cxlflash_remove,
- .err_handler = &cxlflash_err_handler,
-};
-
-/**
- * init_cxlflash() - module entry point
- *
- * Return: 0 on success, -errno on failure
- */
-static int __init init_cxlflash(void)
-{
- int rc;
-
- check_sizes();
- cxlflash_list_init();
- rc = cxlflash_class_init();
- if (unlikely(rc))
- goto out;
-
- rc = pci_register_driver(&cxlflash_driver);
- if (unlikely(rc))
- goto err;
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- cxlflash_class_exit();
- goto out;
-}
-
-/**
- * exit_cxlflash() - module exit point
- */
-static void __exit exit_cxlflash(void)
-{
- cxlflash_term_global_luns();
- cxlflash_free_errpage();
-
- pci_unregister_driver(&cxlflash_driver);
- cxlflash_class_exit();
-}
-
-module_init(init_cxlflash);
-module_exit(exit_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
deleted file mode 100644
index 0bfb98effce0..000000000000
--- a/drivers/scsi/cxlflash/main.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_MAIN_H
-#define _CXLFLASH_MAIN_H
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-#define CXLFLASH_NAME "cxlflash"
-#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
-#define CXLFLASH_MAX_ADAPTERS 32
-
-#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
-#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
-#define PCI_DEVICE_ID_IBM_BRIARD 0x0624
-
-/* Since there is only one target, make it 0 */
-#define CXLFLASH_TARGET 0
-#define CXLFLASH_MAX_CDB_LEN 16
-
-/* Really only one target per bus since the Texan is directly attached */
-#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1
-#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536
-
-#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
-
-/* FC defines */
-#define FC_MTIP_CMDCONFIG 0x010
-#define FC_MTIP_STATUS 0x018
-#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */
-#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */
-#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */
-#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */
-
-#define FC_PNAME 0x300
-#define FC_CONFIG 0x320
-#define FC_CONFIG2 0x328
-#define FC_STATUS 0x330
-#define FC_ERROR 0x380
-#define FC_ERRCAP 0x388
-#define FC_ERRMSK 0x390
-#define FC_CNT_CRCERR 0x538
-#define FC_CRC_THRESH 0x580
-
-#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL
-#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL
-
-#define FC_MTIP_STATUS_MASK 0x30ULL
-#define FC_MTIP_STATUS_ONLINE 0x20ULL
-#define FC_MTIP_STATUS_OFFLINE 0x10ULL
-
-/* TIMEOUT and RETRY definitions */
-
-/* AFU command timeout values */
-#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
-#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */
-#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */
-
-/* AFU command room retry limit */
-#define MC_ROOM_RETRY_CNT 10
-
-/* FC CRC clear periodic timer */
-#define MC_CRC_THRESH 100 /* threshold in 5 mins */
-
-#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */
-#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */
-
-/* VPD defines */
-#define CXLFLASH_VPD_LEN 256
-#define WWPN_LEN 16
-#define WWPN_BUF_LEN (WWPN_LEN + 1)
-
-enum undo_level {
- UNDO_NOOP = 0,
- FREE_IRQ,
- UNMAP_ONE,
- UNMAP_TWO,
- UNMAP_THREE
-};
-
-struct dev_dependent_vals {
- u64 max_sectors;
- u64 flags;
-#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
-#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
-#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL
-};
-
-static inline const struct cxlflash_backend_ops *
-cxlflash_assign_ops(struct dev_dependent_vals *ddv)
-{
- const struct cxlflash_backend_ops *ops = NULL;
-
-#ifdef CONFIG_OCXL_BASE
- if (ddv->flags & CXLFLASH_OCXL_DEV)
- ops = &cxlflash_ocxl_ops;
-#endif
-
-#ifdef CONFIG_CXL_BASE
- if (!(ddv->flags & CXLFLASH_OCXL_DEV))
- ops = &cxlflash_cxl_ops;
-#endif
-
- return ops;
-}
-
-struct asyc_intr_info {
- u64 status;
- char *desc;
- u8 port;
- u8 action;
-#define CLR_FC_ERROR 0x01
-#define LINK_RESET 0x02
-#define SCAN_HOST 0x04
-};
-
-#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
deleted file mode 100644
index 6542818e595a..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ /dev/null
@@ -1,1399 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <linux/file.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/poll.h>
-#include <linux/sched/signal.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <asm/xive.h>
-#include <misc/ocxl.h>
-
-#include <uapi/misc/cxl.h>
-
-#include "backend.h"
-#include "ocxl_hw.h"
-
-/*
- * Pseudo-filesystem to allocate inodes.
- */
-
-#define OCXLFLASH_FS_MAGIC 0x1697698f
-
-static int ocxlflash_fs_cnt;
-static struct vfsmount *ocxlflash_vfs_mount;
-
-static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type ocxlflash_fs_type = {
- .name = "ocxlflash",
- .owner = THIS_MODULE,
- .init_fs_context = ocxlflash_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-/*
- * ocxlflash_release_mapping() - release the memory mapping
- * @ctx: Context whose mapping is to be released.
- */
-static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
-{
- if (ctx->mapping)
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
- ctx->mapping = NULL;
-}
-
-/*
- * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
- * @dev: Generic device of the host.
- * @name: Name of the pseudo filesystem.
- * @fops: File operations.
- * @priv: Private data.
- * @flags: Flags for the file.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_getfile(struct device *dev, const char *name,
- const struct file_operations *fops,
- void *priv, int flags)
-{
- struct file *file;
- struct inode *inode;
- int rc;
-
- if (fops->owner && !try_module_get(fops->owner)) {
- dev_err(dev, "%s: Owner does not exist\n", __func__);
- rc = -ENOENT;
- goto err1;
- }
-
- rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
- &ocxlflash_fs_cnt);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- rc = PTR_ERR(inode);
- dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
- flags & (O_ACCMODE | O_NONBLOCK), fops);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: alloc_file failed rc=%d\n",
- __func__, rc);
- goto err4;
- }
-
- file->private_data = priv;
-out:
- return file;
-err4:
- iput(inode);
-err3:
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
-err2:
- module_put(fops->owner);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_psa_map() - map the process specific MMIO space
- * @ctx_cookie: Adapter context for which the mapping needs to be done.
- *
- * Return: MMIO pointer of the mapped region
- */
-static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return NULL;
- }
- mutex_unlock(&ctx->state_mutex);
-
- return ioremap(ctx->psn_phys, ctx->psn_size);
-}
-
-/**
- * ocxlflash_psa_unmap() - unmap the process specific MMIO space
- * @addr: MMIO pointer to unmap.
- */
-static void ocxlflash_psa_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-
-/**
- * ocxlflash_process_element() - get process element of the adapter context
- * @ctx_cookie: Adapter context associated with the process element.
- *
- * Return: process element of the adapter context
- */
-static int ocxlflash_process_element(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return ctx->pe;
-}
-
-/**
- * afu_map_irq() - map the interrupt of the adapter context
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
- struct xive_irq_data *xd;
- u32 virq;
- int rc = 0;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- rc = -ENOENT;
- goto out;
- }
-
- irq = &ctx->irqs[num];
- virq = irq_create_mapping(NULL, irq->hwirq);
- if (unlikely(!virq)) {
- dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- rc = request_irq(virq, handler, 0, name, cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- xd = irq_get_handler_data(virq);
- if (unlikely(!xd)) {
- dev_err(dev, "%s: Can't get interrupt data\n", __func__);
- rc = -ENXIO;
- goto err2;
- }
-
- irq->virq = virq;
- irq->vtrig = xd->trig_mmio;
-out:
- return rc;
-err2:
- free_irq(virq, cookie);
-err1:
- irq_dispose_mapping(virq);
- goto out;
-}
-
-/**
- * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie,
- char *name)
-{
- return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
-}
-
-/**
- * afu_unmap_irq() - unmap the interrupt
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- void *cookie)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- return;
- }
-
- irq = &ctx->irqs[num];
-
- if (irq_find_mapping(NULL, irq->hwirq)) {
- free_irq(irq->virq, cookie);
- irq_dispose_mapping(irq->virq);
- }
-
- memset(irq, 0, sizeof(*irq));
-}
-
-/**
- * ocxlflash_unmap_afu_irq() - unmap the interrupt
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- return afu_unmap_irq(0, ctx_cookie, num, cookie);
-}
-
-/**
- * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
- * @ctx_cookie: Context associated with the interrupt.
- * @irq: Interrupt number.
- *
- * Return: effective address of the mapped region
- */
-static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- if (irq < 0 || irq >= ctx->num_irqs)
- return 0;
-
- return (__force u64)ctx->irqs[irq].vtrig;
-}
-
-/**
- * ocxlflash_xsl_fault() - callback when translation error is triggered
- * @data: Private data provided at callback registration, the context.
- * @addr: Address that triggered the error.
- * @dsisr: Value of dsisr register.
- */
-static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
-{
- struct ocxlflash_context *ctx = data;
-
- spin_lock(&ctx->slock);
- ctx->fault_addr = addr;
- ctx->fault_dsisr = dsisr;
- ctx->pending_fault = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-}
-
-/**
- * start_context() - local routine to start a context
- * @ctx: Adapter context to be started.
- *
- * Assign the context specific MMIO space, add and enable the PE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int start_context(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- void *link_token = afu->link_token;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- bool master = ctx->master;
- struct mm_struct *mm;
- int rc = 0;
- u32 pid;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != OPENED) {
- dev_err(dev, "%s: Context state invalid, state=%d\n",
- __func__, ctx->state);
- rc = -EINVAL;
- goto out;
- }
-
- if (master) {
- ctx->psn_size = acfg->global_mmio_size;
- ctx->psn_phys = afu->gmmio_phys;
- } else {
- ctx->psn_size = acfg->pp_mmio_stride;
- ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
- }
-
- /* pid and mm not set for master contexts */
- if (master) {
- pid = 0;
- mm = NULL;
- } else {
- pid = current->mm->context.id;
- mm = current->mm;
- }
-
- rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
- pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
- ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- ctx->state = STARTED;
-out:
- mutex_unlock(&ctx->state_mutex);
- return rc;
-}
-
-/**
- * ocxlflash_start_context() - start a kernel context
- * @ctx_cookie: Adapter context to be started.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return start_context(ctx);
-}
-
-/**
- * ocxlflash_stop_context() - stop a context
- * @ctx_cookie: Adapter context to be stopped.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_stop_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- enum ocxlflash_ctx_state state;
- int rc = 0;
-
- mutex_lock(&ctx->state_mutex);
- state = ctx->state;
- ctx->state = CLOSED;
- mutex_unlock(&ctx->state_mutex);
- if (state != STARTED)
- goto out;
-
- rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
- ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
- __func__, rc);
- /* If EBUSY, PE could be referenced in future by the AFU */
- if (rc == -EBUSY)
- goto out;
- }
-
- rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-out:
- return rc;
-}
-
-/**
- * ocxlflash_afu_reset() - reset the AFU
- * @ctx_cookie: Adapter context.
- */
-static int ocxlflash_afu_reset(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- /* Pending implementation from OCXL transport services */
- dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
-
- /* Silently return success until it is implemented */
- return 0;
-}
-
-/**
- * ocxlflash_set_master() - sets the context as master
- * @ctx_cookie: Adapter context to set as master.
- */
-static void ocxlflash_set_master(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- ctx->master = true;
-}
-
-/**
- * ocxlflash_get_context() - obtains the context associated with the host
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: returns the pointer to host adapter context
- */
-static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- return afu->ocxl_ctx;
-}
-
-/**
- * ocxlflash_dev_context_init() - allocate and initialize an adapter context
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: returns the adapter context on success, ERR_PTR on failure
- */
-static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- struct device *dev = afu->dev;
- struct ocxlflash_context *ctx;
- int rc;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(!ctx)) {
- dev_err(dev, "%s: Context allocation failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- idr_preload(GFP_KERNEL);
- rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
- idr_preload_end();
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
- spin_lock_init(&ctx->slock);
- init_waitqueue_head(&ctx->wq);
- mutex_init(&ctx->state_mutex);
-
- ctx->state = OPENED;
- ctx->pe = rc;
- ctx->master = false;
- ctx->mapping = NULL;
- ctx->hw_afu = afu;
- ctx->irq_bitmap = 0;
- ctx->pending_irq = false;
- ctx->pending_fault = false;
-out:
- return ctx;
-err2:
- kfree(ctx);
-err1:
- ctx = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_release_context() - releases an adapter context
- * @ctx_cookie: Adapter context to be released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_release_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev;
- int rc = 0;
-
- if (!ctx)
- goto out;
-
- dev = ctx->hw_afu->dev;
- mutex_lock(&ctx->state_mutex);
- if (ctx->state >= STARTED) {
- dev_err(dev, "%s: Context in use, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- rc = -EBUSY;
- goto out;
- }
- mutex_unlock(&ctx->state_mutex);
-
- idr_remove(&ctx->hw_afu->idr, ctx->pe);
- ocxlflash_release_mapping(ctx);
- kfree(ctx);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_perst_reloads_same_image() - sets the image reload policy
- * @afu_cookie: Hardware AFU associated with the host.
- * @image: Whether to load the same image on PERST.
- */
-static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- afu->perst_same_image = image;
-}
-
-/**
- * ocxlflash_read_adapter_vpd() - reads the adapter VPD
- * @pdev: PCI device associated with the host.
- * @buf: Buffer to get the VPD data.
- * @count: Size of buffer (maximum bytes that can be read).
- *
- * Return: size of VPD on success, -errno on failure
- */
-static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
- size_t count)
-{
- return pci_read_vpd(pdev, 0, count, buf);
-}
-
-/**
- * free_afu_irqs() - internal service to free interrupts
- * @ctx: Adapter context.
- */
-static void free_afu_irqs(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- int i;
-
- if (!ctx->irqs) {
- dev_err(dev, "%s: Interrupts not allocated\n", __func__);
- return;
- }
-
-	for (i = ctx->num_irqs - 1; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
-
- kfree(ctx->irqs);
- ctx->irqs = NULL;
-}
-
-/**
- * alloc_afu_irqs() - internal service to allocate interrupts
- * @ctx: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irqs;
- int rc = 0;
- int hwirq;
- int i;
-
- if (ctx->irqs) {
- dev_err(dev, "%s: Interrupts already allocated\n", __func__);
- rc = -EEXIST;
- goto out;
- }
-
- if (num > OCXL_MAX_IRQS) {
- dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
- rc = -EINVAL;
- goto out;
- }
-
- irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
- if (unlikely(!irqs)) {
- dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < num; i++) {
- rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- irqs[i].hwirq = hwirq;
- }
-
- ctx->irqs = irqs;
- ctx->num_irqs = num;
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
- kfree(irqs);
- goto out;
-}
-
-/**
- * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
- * @ctx_cookie: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return alloc_afu_irqs(ctx_cookie, num);
-}
-
-/**
- * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
- * @ctx_cookie: Adapter context.
- */
-static void ocxlflash_free_afu_irqs(void *ctx_cookie)
-{
- free_afu_irqs(ctx_cookie);
-}
-
-/**
- * ocxlflash_unconfig_afu() - unconfigure the AFU
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
-{
- if (afu->gmmio_virt) {
- iounmap(afu->gmmio_virt);
- afu->gmmio_virt = NULL;
- }
-}
-
-/**
- * ocxlflash_destroy_afu() - destroy the AFU structure
- * @afu_cookie: AFU to be freed.
- */
-static void ocxlflash_destroy_afu(void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- int pos;
-
- if (!afu)
- return;
-
- ocxlflash_release_context(afu->ocxl_ctx);
- idr_destroy(&afu->idr);
-
- /* Disable the AFU */
- pos = afu->acfg.dvsec_afu_control_pos;
- ocxl_config_set_afu_state(afu->pdev, pos, 0);
-
- ocxlflash_unconfig_afu(afu);
- kfree(afu);
-}
-
-/**
- * ocxlflash_config_fn() - configure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- u16 base, enabled, supported;
- int rc = 0;
-
- /* Read DVSEC config of the function */
- rc = ocxl_config_read_function(pdev, fcfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Check if function has AFUs defined, only 1 per function supported */
- if (fcfg->max_afu_index >= 0) {
- afu->is_present = true;
- if (fcfg->max_afu_index != 0)
- dev_warn(dev, "%s: Unexpected AFU index value %d\n",
- __func__, fcfg->max_afu_index);
- }
-
- rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- afu->fn_actag_base = base;
- afu->fn_actag_enabled = enabled;
-
- ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
- dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
- __func__, base, enabled);
-
- rc = ocxl_link_setup(pdev, 0, &afu->link_token);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- ocxl_link_release(pdev, afu->link_token);
- goto out;
-}
-
-/**
- * ocxlflash_unconfig_fn() - unconfigure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- ocxl_link_release(pdev, afu->link_token);
-}
-
-/**
- * ocxlflash_map_mmio() - map the AFU MMIO space
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- phys_addr_t gmmio, ppmmio;
- int rc = 0;
-
- rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
- __func__, rc);
- goto out;
- }
- gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
- gmmio += acfg->global_mmio_offset;
-
- rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
- ppmmio += acfg->pp_mmio_offset;
-
- afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
- if (unlikely(!afu->gmmio_virt)) {
- dev_err(dev, "%s: MMIO mapping failed\n", __func__);
- rc = -ENOMEM;
- goto err2;
- }
-
- afu->gmmio_phys = gmmio;
- afu->ppmmio_phys = ppmmio;
-out:
- return rc;
-err2:
- pci_release_region(pdev, acfg->pp_mmio_bar);
-err1:
- pci_release_region(pdev, acfg->global_mmio_bar);
- goto out;
-}
-
-/**
- * ocxlflash_config_afu() - configure the host AFU
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Must be called _after_ host function configuration.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- int count;
- int base;
- int pos;
- int rc = 0;
-
- /* This HW AFU function does not have any AFUs defined */
- if (!afu->is_present)
- goto out;
-
- /* Read AFU config at index 0 */
- rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Only one AFU per function is supported, so actag_base is same */
- base = afu->fn_actag_base;
- count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
- pos = acfg->dvsec_afu_control_pos;
-
- ocxl_config_set_afu_actag(pdev, pos, base, count);
- dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
- afu->afu_actag_base = base;
- afu->afu_actag_enabled = count;
- afu->max_pasid = 1 << acfg->pasid_supported_log;
-
- ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
-
- rc = ocxlflash_map_mmio(afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Enable the AFU */
- ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_create_afu() - create the AFU for OCXL
- * @pdev: PCI device associated with the host.
- *
- * Return: AFU on success, NULL on failure
- */
-static void *ocxlflash_create_afu(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ocxlflash_context *ctx;
- struct ocxl_hw_afu *afu;
- int rc;
-
- afu = kzalloc(sizeof(*afu), GFP_KERNEL);
- if (unlikely(!afu)) {
- dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
- goto out;
- }
-
- afu->pdev = pdev;
- afu->dev = dev;
- idr_init(&afu->idr);
-
- rc = ocxlflash_config_fn(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Function configuration failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- rc = ocxlflash_config_afu(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU configuration failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx = ocxlflash_dev_context_init(pdev, afu);
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- afu->ocxl_ctx = ctx;
-out:
- return afu;
-err3:
- ocxlflash_unconfig_afu(afu);
-err2:
- ocxlflash_unconfig_fn(pdev, afu);
-err1:
- idr_destroy(&afu->idr);
- kfree(afu);
- afu = NULL;
- goto out;
-}
-
-/**
- * ctx_event_pending() - check for any event pending on the context
- * @ctx: Context to be checked.
- *
- * Return: true if there is an event pending, false if none pending
- */
-static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
-{
- if (ctx->pending_irq || ctx->pending_fault)
- return true;
-
- return false;
-}
-
-/**
- * afu_poll() - poll the AFU for events on the context
- * @file: File associated with the adapter context.
- * @poll: Poll structure from the user.
- *
- * Return: poll mask
- */
-static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- ulong lock_flags;
- int mask = 0;
-
- poll_wait(file, &ctx->wq, poll);
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
- if (ctx_event_pending(ctx))
- mask |= POLLIN | POLLRDNORM;
- else if (ctx->state == CLOSED)
- mask |= POLLERR;
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
- __func__, ctx->pe, mask);
-
- return mask;
-}
-
-/**
- * afu_read() - perform a read on the context for any event
- * @file: File associated with the adapter context.
- * @buf: Buffer to receive the data.
- * @count: Size of buffer (maximum bytes that can be read).
- * @off: Offset.
- *
- * Return: size of the data read on success, -errno on failure
- */
-static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- struct cxl_event event;
- ulong lock_flags;
- ssize_t esize;
- ssize_t rc;
- int bit;
- DEFINE_WAIT(event_wait);
-
- if (*off != 0) {
- dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
- __func__, *off);
- rc = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
-
- for (;;) {
- prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
-
- if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- dev_err(dev, "%s: File cannot be blocked on I/O\n",
- __func__);
- rc = -EAGAIN;
- goto err;
- }
-
- if (signal_pending(current)) {
- dev_err(dev, "%s: Signal pending on the process\n",
- __func__);
- rc = -ERESTARTSYS;
- goto err;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- schedule();
- spin_lock_irqsave(&ctx->slock, lock_flags);
- }
-
- finish_wait(&ctx->wq, &event_wait);
-
- memset(&event, 0, sizeof(event));
- event.header.process_element = ctx->pe;
- event.header.size = sizeof(struct cxl_event_header);
- if (ctx->pending_irq) {
- esize = sizeof(struct cxl_event_afu_interrupt);
- event.header.size += esize;
- event.header.type = CXL_EVENT_AFU_INTERRUPT;
-
- bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
- clear_bit(bit, &ctx->irq_bitmap);
- event.irq.irq = bit + 1;
- if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
- ctx->pending_irq = false;
- } else if (ctx->pending_fault) {
- event.header.size += sizeof(struct cxl_event_data_storage);
- event.header.type = CXL_EVENT_DATA_STORAGE;
- event.fault.addr = ctx->fault_addr;
- event.fault.dsisr = ctx->fault_dsisr;
- ctx->pending_fault = false;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- if (copy_to_user(buf, &event, event.header.size)) {
- dev_err(dev, "%s: copy_to_user failed\n", __func__);
- rc = -EFAULT;
- goto out;
- }
-
- rc = event.header.size;
-out:
- return rc;
-err:
- finish_wait(&ctx->wq, &event_wait);
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- goto out;
-}
-
-/**
- * afu_release() - release and free the context
- * @inode: File inode pointer.
- * @file: File associated with the context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_release(struct inode *inode, struct file *file)
-{
- struct ocxlflash_context *ctx = file->private_data;
- int i;
-
- /* Unmap and free the interrupts associated with the context */
- for (i = ctx->num_irqs; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
-
- return ocxlflash_release_context(ctx);
-}
-
-/**
- * ocxlflash_mmap_fault() - mmap fault handler
- * @vmf: VM fault associated with current fault.
- *
- * Return: 0 on success, -errno on failure
- */
-static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct ocxlflash_context *ctx = vma->vm_file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- u64 mmio_area, offset;
-
- offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= ctx->psn_size)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n",
- __func__, ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return VM_FAULT_SIGBUS;
- }
- mutex_unlock(&ctx->state_mutex);
-
- mmio_area = ctx->psn_phys;
- mmio_area += offset;
-
- return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
-}
-
-static const struct vm_operations_struct ocxlflash_vmops = {
- .fault = ocxlflash_mmap_fault,
-};
-
-/**
- * afu_mmap() - map the fault handler operations
- * @file: File associated with the context.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct ocxlflash_context *ctx = file->private_data;
-
- if ((vma_pages(vma) + vma->vm_pgoff) >
- (ctx->psn_size >> PAGE_SHIFT))
- return -EINVAL;
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &ocxlflash_vmops;
- return 0;
-}
-
-static const struct file_operations ocxl_afu_fops = {
- .owner = THIS_MODULE,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .mmap = afu_mmap,
-};
-
-#define PATCH_FOPS(NAME) \
- do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
-
-/**
- * ocxlflash_get_fd() - get file descriptor for an adapter context
- * @ctx_cookie: Adapter context.
- * @fops: File operations to be associated.
- * @fd:	File descriptor to be returned.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
- struct file *file;
- int flags, fdtmp;
- int rc = 0;
- char *name = NULL;
-
- /* Only allow one fd per context */
- if (ctx->mapping) {
- dev_err(dev, "%s: Context is already mapped to an fd\n",
- __func__);
- rc = -EEXIST;
- goto err1;
- }
-
- flags = O_RDWR | O_CLOEXEC;
-
- /* This code is similar to anon_inode_getfd() */
- rc = get_unused_fd_flags(flags);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- fdtmp = rc;
-
- /* Patch the file ops that are not defined */
- if (fops) {
- PATCH_FOPS(poll);
- PATCH_FOPS(read);
- PATCH_FOPS(release);
- PATCH_FOPS(mmap);
- } else /* Use default ops */
- fops = (struct file_operations *)&ocxl_afu_fops;
-
- name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
- file = ocxlflash_getfile(dev, name, fops, ctx, flags);
- kfree(name);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx->mapping = file->f_mapping;
- *fd = fdtmp;
-out:
- return file;
-err2:
- put_unused_fd(fdtmp);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_fops_get_context() - get the context associated with the file
- * @file: File associated with the adapter context.
- *
- * Return: pointer to the context
- */
-static void *ocxlflash_fops_get_context(struct file *file)
-{
- return file->private_data;
-}
-
-/**
- * ocxlflash_afu_irq() - interrupt handler for user contexts
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the context.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
-{
- struct ocxlflash_context *ctx = data;
- struct device *dev = ctx->hw_afu->dev;
- int i;
-
- dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
- __func__, ctx->pe, irq);
-
- for (i = 0; i < ctx->num_irqs; i++) {
- if (ctx->irqs[i].virq == irq)
- break;
- }
- if (unlikely(i >= ctx->num_irqs)) {
- dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
- goto out;
- }
-
- spin_lock(&ctx->slock);
- set_bit(i - 1, &ctx->irq_bitmap);
- ctx->pending_irq = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * ocxlflash_start_work() - start a user context
- * @ctx_cookie: Context to be started.
- * @num_irqs: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- char *name;
- int rc = 0;
- int i;
-
- rc = alloc_afu_irqs(ctx, num_irqs);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- for (i = 0; i < num_irqs; i++) {
- name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
- dev_name(dev), ctx->pe, i);
- rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
- kfree(name);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
- __func__, rc);
- goto err;
- }
- }
-
- rc = start_context(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
- goto out;
-};
-
-/**
- * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return afu_mmap(file, vma);
-}
-
-/**
- * ocxlflash_fd_release() - release the context associated with the file
- * @inode: File inode pointer.
- * @file: File associated with the adapter context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return afu_release(inode, file);
-}
-
-/* Backend ops to ocxlflash services */
-const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
- .module = THIS_MODULE,
- .psa_map = ocxlflash_psa_map,
- .psa_unmap = ocxlflash_psa_unmap,
- .process_element = ocxlflash_process_element,
- .map_afu_irq = ocxlflash_map_afu_irq,
- .unmap_afu_irq = ocxlflash_unmap_afu_irq,
- .get_irq_objhndl = ocxlflash_get_irq_objhndl,
- .start_context = ocxlflash_start_context,
- .stop_context = ocxlflash_stop_context,
- .afu_reset = ocxlflash_afu_reset,
- .set_master = ocxlflash_set_master,
- .get_context = ocxlflash_get_context,
- .dev_context_init = ocxlflash_dev_context_init,
- .release_context = ocxlflash_release_context,
- .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
- .read_adapter_vpd = ocxlflash_read_adapter_vpd,
- .allocate_afu_irqs = ocxlflash_allocate_afu_irqs,
- .free_afu_irqs = ocxlflash_free_afu_irqs,
- .create_afu = ocxlflash_create_afu,
- .destroy_afu = ocxlflash_destroy_afu,
- .get_fd = ocxlflash_get_fd,
- .fops_get_context = ocxlflash_fops_get_context,
- .start_work = ocxlflash_start_work,
- .fd_mmap = ocxlflash_fd_mmap,
- .fd_release = ocxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
deleted file mode 100644
index f2fe88816bea..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#define OCXL_MAX_IRQS 4 /* Max interrupts per process */
-
-struct ocxlflash_irqs {
- int hwirq;
- u32 virq;
- void __iomem *vtrig;
-};
-
-/* OCXL hardware AFU associated with the host */
-struct ocxl_hw_afu {
- struct ocxlflash_context *ocxl_ctx; /* Host context */
- struct pci_dev *pdev; /* PCI device */
- struct device *dev; /* Generic device */
- bool perst_same_image; /* Same image loaded on perst */
-
- struct ocxl_fn_config fcfg; /* DVSEC config of the function */
- struct ocxl_afu_config acfg; /* AFU configuration data */
-
- int fn_actag_base; /* Function acTag base */
- int fn_actag_enabled; /* Function acTag number enabled */
- int afu_actag_base; /* AFU acTag base */
- int afu_actag_enabled; /* AFU acTag number enabled */
-
- phys_addr_t ppmmio_phys; /* Per process MMIO space */
- phys_addr_t gmmio_phys; /* Global AFU MMIO space */
- void __iomem *gmmio_virt; /* Global MMIO map */
-
- void *link_token; /* Link token for the SPA */
- struct idr idr; /* IDR to manage contexts */
- int max_pasid; /* Maximum number of contexts */
- bool is_present; /* Function has AFUs defined */
-};
-
-enum ocxlflash_ctx_state {
- CLOSED,
- OPENED,
- STARTED
-};
-
-struct ocxlflash_context {
- struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */
- struct address_space *mapping; /* Mapping for pseudo filesystem */
- bool master; /* Whether this is a master context */
- int pe; /* Process element */
-
- phys_addr_t psn_phys; /* Process mapping */
- u64 psn_size; /* Process mapping size */
-
- spinlock_t slock; /* Protects irq/fault/event updates */
- wait_queue_head_t wq; /* Wait queue for poll and interrupts */
- struct mutex state_mutex; /* Mutex to update context state */
- enum ocxlflash_ctx_state state; /* Context state */
-
- struct ocxlflash_irqs *irqs; /* Pointer to array of structures */
- int num_irqs; /* Number of interrupts */
- bool pending_irq; /* Pending interrupt on the context */
- ulong irq_bitmap; /* Bits indicating pending irq num */
-
- u64 fault_addr; /* Address that triggered the fault */
- u64 fault_dsisr; /* Value of dsisr register at fault */
- bool pending_fault; /* Pending translation fault */
-};
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
deleted file mode 100644
index ab315c59505b..000000000000
--- a/drivers/scsi/cxlflash/sislite.h
+++ /dev/null
@@ -1,560 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _SISLITE_H
-#define _SISLITE_H
-
-#include <linux/types.h>
-
-typedef u16 ctx_hndl_t;
-typedef u32 res_hndl_t;
-
-#define SIZE_4K 4096
-#define SIZE_64K 65536
-
-/*
- * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
- * except for SCSI CDB which remains big endian per SCSI standards.
- */
-struct sisl_ioarcb {
- u16 ctx_id; /* ctx_hndl_t */
- u16 req_flags;
-#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */
-#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U
-
-#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */
-
-#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */
-#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U
-#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U
-#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
-
-#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */
-
-#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */
-
-#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */
-#define SISL_REQ_FLAGS_HOST_READ 0x0000U
-
- union {
- u32 res_hndl; /* res_hndl_t */
- u32 port_sel; /* this is a selection mask:
- * 0x1 -> port#0 can be selected,
- * 0x2 -> port#1 can be selected.
- * Can be bitwise ORed.
- */
- };
- u64 lun_id;
- u32 data_len; /* 4K for read/write */
- u32 ioadl_len;
- union {
- u64 data_ea; /* min 16 byte aligned */
- u64 ioadl_ea;
- };
- u8 msi; /* LISN to send on RRQ write */
-#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */
-#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */
-#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */
-#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */
-
- u8 rrq; /* 0 for a single RRQ */
- u16 timeout; /* in units specified by req_flags */
- u32 rsvd1;
- u8 cdb[16]; /* must be in big endian */
-#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */
-#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */
-#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */
-
-#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */
-#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */
-
- union {
- u64 reserved; /* Reserved for IOARRIN mode */
- struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
- };
-} __packed;
-
-struct sisl_rc {
- u8 flags;
-#define SISL_RC_FLAGS_SENSE_VALID 0x80U
-#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U
-#define SISL_RC_FLAGS_OVERRUN 0x20U
-#define SISL_RC_FLAGS_UNDERRUN 0x10U
-
- u8 afu_rc;
-#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */
-#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
-#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
-#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
-#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
-#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
-#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
-
-#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */
-
- /* NO_CHANNELS means the FC ports selected by dest_port in
- * IOARCB or in the LXT entry are down when the AFU tried to select
- * a FC port. If the port went down on an active IO, it will set
-	 * fc_rc to 0x54 (NOLOGI) or 0x57 (LINKDOWN) instead.
- */
-#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
-#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
- * afu reset/master restart
- */
-#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
-#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
- * may retry if afu_retry is off
- */
-
- u8 scsi_rc; /* SCSI status byte, retry as appropriate */
-#define SISL_SCSI_RC_CHECK 0x02U
-#define SISL_SCSI_RC_BUSY 0x08u
-
- u8 fc_rc; /* retry */
- /*
- * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
- * commands that are in flight when a link goes down or is logged out.
- * If the link is down or logged out before AFU selects the port, either
- * it will choose the other port or we will get afu_rc=0x20 (no_channel)
- * if there is no valid port to use.
- *
- * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
- * would happen if a frame is dropped and something times out.
- * NOLOGI or LINKDOWN can be retried if the other port is up.
- * RESIDERR can be retried as well.
- *
- * ABORTFAIL might indicate that lots of frames are getting CRC errors.
-	 * So it may be retried once and the link reset if it happens again.
- * The link can also be reset on the CRC error threshold interrupt.
- */
-#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
-#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
-#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
-#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
-#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
-#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
-#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
-#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
-#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
-#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
- * reported len, possibly due to dropped
- * frames
- */
-#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
-};
-
-#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
-#define SISL_WWID_DATA_LEN 16 /* WWID data length */
-
-/*
- * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
- * host native endianness
- */
-struct sisl_ioasa {
- union {
- struct sisl_rc rc;
- u32 ioasc;
-#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
- };
-
- union {
- u32 resid;
- u32 lunid_hi;
- };
-
- u8 port;
- u8 afu_extra;
- /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
-	 * afu_extra contains the PSL response code. Useful codes are:
- */
-#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action
- * Enabled N/A
- * Disabled retry
- */
-#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
- * afu_rc Implies
- * 0x04, 0x14 master exit.
- * 0x31 user error.
- */
- /* when afu rc=0x20 (no channels):
- * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
- */
-#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
-#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
-
- u8 scsi_extra;
- u8 fc_extra;
-
- union {
- u8 sense_data[SISL_SENSE_DATA_LEN];
- struct {
- u32 lunid_lo;
- u8 wwid[SISL_WWID_DATA_LEN];
- };
- };
-
- /* These fields are defined by the SISlite architecture for the
- * host to use as they see fit for their implementation.
- */
- union {
- u64 host_use[4];
- u8 host_use_b[32];
- };
-} __packed;
-
-#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */
-
-/* MMIO space is required to support only 64-bit access */
-
-/*
- * This AFU has two mechanisms to deal with endian-ness.
- * One is a global configuration (in the afu_config) register
- * below that specifies the endian-ness of the host.
- * The other is a per context (i.e. application) specification
- * controlled by the endian_ctrl field here. Since the master
- * context is one such application the master context's
- * endian-ness is set to be the same as the host.
- *
- * As per the SISlite spec, the MMIO registers are always
- * big endian.
- */
-#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL
-#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL
-
-#ifdef __BIG_ENDIAN
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE
-#else
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE
-#endif
-
-/* per context host transport MMIO */
-struct sisl_host_map {
- __be64 endian_ctrl; /* Per context Endian Control. The AFU will
-				 * operate using whatever endianness the
-				 * host application's context specifies.
- */
-
- __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
-				 * The only recovery for a PERM_ERR is a context
- * exit since there is no way to tell which
- * command caused the error.
- */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */
-#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
-#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
-#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
-#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
- /* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
- * Same error in data/LXT/RHT access is reported via IOASA.
- */
-#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be
- * generated when AFU
- * auto retry is
- * disabled. If user
- * can determine the
- * command that caused
- * the error, it can
- * be retried.
- */
-#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */
-#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
-
- __be64 intr_clear;
- __be64 intr_mask;
- __be64 ioarrin; /* only write what cmd_room permits */
- __be64 rrq_start; /* start & end are both inclusive */
- __be64 rrq_end; /* write sequence: start followed by end */
- __be64 cmd_room;
- __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
-#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
-#define SISL_CTX_CTRL_LISN_MASK (0xFFULL)
- __be64 mbox_w; /* restricted use */
- __be64 sq_start; /* Submission Queue (R/W): write sequence and */
- __be64 sq_end; /* inclusion semantics are the same as RRQ */
- __be64 sq_head; /* Submission Queue Head (R): for debugging */
- __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */
- __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */
-};
-
-/* per context provisioning & control MMIO */
-struct sisl_ctrl_map {
- __be64 rht_start;
- __be64 rht_cnt_id;
- /* both cnt & ctx_id args must be ULL */
-#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
-
- __be64 ctx_cap; /* afu_rc below is when the capability is violated */
-#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */
-#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
- __be64 mbox_r;
- __be64 lisn_pasid[2];
- /* pasid _a arg must be ULL */
-#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b))
- __be64 lisn_ea[3];
-};
-
-/* single copy global regs */
-struct sisl_global_regs {
- __be64 aintr_status;
- /*
- * In cxlflash, FC port/link are arranged in port pairs, each
- * gets a byte of status:
- *
- * *_OTHER: other err, FC_ERRCAP[31:20]
- * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in
- * *_CRC_T: CRC threshold exceeded
- * *_LOGI_R: login state machine timed out and retrying
- * *_LOGI_F: login failed, FC_ERROR[19:0]
- * *_LOGI_S: login succeeded
- * *_LINK_DN: link online to offline
- * *_LINK_UP: link offline to online
- */
-#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */
-#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */
-#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */
-#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */
-#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */
-#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */
-#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */
-#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */
-
-#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */
-#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */
-#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */
-#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */
-#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */
-#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */
-#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */
-#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */
-
-#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */
-#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */
-#define SISL_ASTATUS_FC0_CRC_T 0x00002000ULL /* b50 */
-#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */
-#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */
-#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */
-#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */
-#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */
-
-#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */
-#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */
-#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */
-#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */
-#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */
-#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */
-#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */
-#define SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */
-
-#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
-#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
-#define SISL_FC_INTERNAL_SHIFT 32
-
-#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL
-#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL
-
-#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL
-#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL
-
-#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */
-#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
-
- __be64 aintr_clear;
- __be64 aintr_mask;
- __be64 afu_ctrl;
- __be64 afu_hb;
- __be64 afu_scratch_pad;
- __be64 afu_port_sel;
-#define SISL_AFUCONF_AR_IOARCB 0x4000ULL
-#define SISL_AFUCONF_AR_LXT 0x2000ULL
-#define SISL_AFUCONF_AR_RHT 0x1000ULL
-#define SISL_AFUCONF_AR_DATA 0x0800ULL
-#define SISL_AFUCONF_AR_RSRC 0x0400ULL
-#define SISL_AFUCONF_AR_IOASA 0x0200ULL
-#define SISL_AFUCONF_AR_RRQ 0x0100ULL
-/* Aggregate all Auto Retry Bits */
-#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
- SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \
- SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
- SISL_AFUCONF_AR_RRQ)
-#ifdef __BIG_ENDIAN
-#define SISL_AFUCONF_ENDIAN 0x0000ULL
-#else
-#define SISL_AFUCONF_ENDIAN 0x0020ULL
-#endif
-#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
- __be64 afu_config;
- __be64 rsvd[0xf8];
- __le64 afu_version;
- __be64 interface_version;
-#define SISL_INTVER_CAP_SHIFT 16
-#define SISL_INTVER_MAJ_SHIFT 8
-#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL
-#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL
-#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL
-#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL
-#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
-#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
-#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
-#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL
-};
-
-#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
-#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */
-#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \
- CXLFLASH_MAX_FC_BANKS)
-#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */
-#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */
-#define CXLFLASH_NUM_REGS 512 /* number of registers per port */
-
-struct fc_port_bank {
- __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS];
- __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS];
-};
-
-struct sisl_global_map {
- union {
- struct sisl_global_regs regs;
- char page0[SIZE_4K]; /* page 0 */
- };
-
- char page1[SIZE_4K]; /* page 1 */
-
- struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */
-
- /* pages 10 - 15 are reserved */
-
-};
-
-/*
- * CXL Flash Memory Map
- *
- * +-------------------------------+
- * | 512 * 64 KB User MMIO |
- * | (per context) |
- * | User Accessible |
- * +-------------------------------+
- * | 512 * 128 B per context |
- * | Provisioning and Control |
- * | Trusted Process accessible |
- * +-------------------------------+
- * | 64 KB Global |
- * | Trusted Process accessible |
- * +-------------------------------+
- */
-struct cxlflash_afu_map {
- union {
- struct sisl_host_map host;
- char harea[SIZE_64K]; /* 64KB each */
- } hosts[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_ctrl_map ctrl;
- char carea[cache_line_size()]; /* 128B each */
- } ctrls[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_global_map global;
- char garea[SIZE_64K]; /* 64KB single block */
- };
-};
-
-/*
- * LXT - LBA Translation Table
- * LXT control blocks
- */
-struct sisl_lxt_entry {
- u64 rlba_base; /* bits 0:47 is base
- * b48:55 is lun index
- * b58:59 is write & read perms
- * (if no perm, afu_rc=0x15)
- * b60:63 is port_sel mask
- */
-};
-
-/*
- * RHT - Resource Handle Table
- * Per the SISlite spec, RHT entries are to be 16-byte aligned
- */
-struct sisl_rht_entry {
- struct sisl_lxt_entry *lxt_start;
- u32 lxt_cnt;
- u16 rsvd;
- u8 fp; /* format & perm nibbles.
- * (if no perm, afu_rc=0x05)
- */
- u8 nmask;
-} __packed __aligned(16);
-
-struct sisl_rht_entry_f1 {
- u64 lun_id;
- union {
- struct {
- u8 valid;
- u8 rsvd[5];
- u8 fp;
- u8 port_sel;
- };
-
- u64 dw;
- };
-} __packed __aligned(16);
-
-/* make the fp byte */
-#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))
-
-/* make the fp byte for a clone from a source fp and clone flags
- * flags must be only 2 LSB bits.
- */
-#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
-
-#define RHT_PERM_READ 0x01U
-#define RHT_PERM_WRITE 0x02U
-#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE)
-
-/* extract the perm bits from a fp */
-#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
-
-#define PORT0 0x01U
-#define PORT1 0x02U
-#define PORT2 0x04U
-#define PORT3 0x08U
-#define PORT_MASK(_n) ((1 << (_n)) - 1)
-
-/* AFU Sync Mode byte */
-#define AFU_LW_SYNC 0x0U
-#define AFU_HW_SYNC 0x1U
-#define AFU_GSYNC 0x2U
-
-/* Special Task Management Function CDB */
-#define TMF_LUN_RESET 0x1U
-#define TMF_CLEAR_ACA 0x2U
-
-#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
deleted file mode 100644
index 97631f48e19d..000000000000
--- a/drivers/scsi/cxlflash/superpipe.c
+++ /dev/null
@@ -1,2218 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_eh.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-struct cxlflash_global global;
-
-/**
- * marshal_rele_to_resize() - translate release to resize structure
- * @release: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = release->hdr;
- resize->context_id = release->context_id;
- resize->rsrc_handle = release->rsrc_handle;
-}
-
-/**
- * marshal_det_to_rele() - translate detach to release structure
- * @detach:	Source structure from which to translate/copy.
- * @release:	Destination structure for the translate/copy.
- */
-static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
- struct dk_cxlflash_release *release)
-{
- release->hdr = detach->hdr;
- release->context_id = detach->context_id;
-}
-
-/**
- * marshal_udir_to_rele() - translate udirect to release structure
- * @udirect: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
- struct dk_cxlflash_release *release)
-{
- release->hdr = udirect->hdr;
- release->context_id = udirect->context_id;
- release->rsrc_handle = udirect->rsrc_handle;
-}
-
-/**
- * cxlflash_free_errpage() - frees resources associated with global error page
- */
-void cxlflash_free_errpage(void)
-{
-
- mutex_lock(&global.mutex);
- if (global.err_page) {
- __free_page(global.err_page);
- global.err_page = NULL;
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
- * @cfg: Internal structure associated with the host.
- *
- * When the host needs to go down, all users must be quiesced and their
- * memory freed. This is accomplished by putting the contexts in error
- * state which will notify the user and let them 'drive' the tear down.
- * Meanwhile, this routine camps until all user contexts have been removed.
- *
- * Note that the main loop in this routine will always execute at least once
- * to flush the reset_waitq.
- */
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int i, found = true;
-
- cxlflash_mark_contexts_error(cfg);
-
- while (true) {
- for (i = 0; i < MAX_CONTEXT; i++)
- if (cfg->ctx_tbl[i]) {
- found = true;
- break;
- }
-
- if (!found && list_empty(&cfg->ctx_err_recovery))
- return;
-
- dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
- __func__);
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- found = false;
- }
-}
-
-/**
- * find_error_context() - locates a context by cookie on the error recovery list
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context by id.
- * @file: Desired context by file.
- *
- * Return: Found context on success, NULL on failure
- */
-static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
- struct file *file)
-{
- struct ctx_info *ctxi;
-
- list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
- if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
- return ctxi;
-
- return NULL;
-}
-
-/**
- * get_context() - obtains a validated and locked context reference
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context (raw, un-decoded format).
- * @arg: LUN information or file associated with request.
- * @ctx_ctrl: Control information to 'steer' desired lookup.
- *
- * NOTE: despite the name pid, in linux, current->pid actually refers
- * to the lightweight process id (tid) and can change if the process is
- * multithreaded. The tgid remains constant for the process and only changes
- * when the process forks. For all intents and purposes, think of tgid
- * as a pid in the traditional sense.
- *
- * Return: Validated context on success, NULL on failure
- */
-struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
- void *arg, enum ctx_ctrl ctx_ctrl)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- struct file *file = NULL;
- struct llun_info *lli = arg;
- u64 ctxid = DECODE_CTXID(rctxid);
- int rc;
- pid_t pid = task_tgid_nr(current), ctxpid = 0;
-
- if (ctx_ctrl & CTX_CTRL_FILE) {
- lli = NULL;
- file = (struct file *)arg;
- }
-
- if (ctx_ctrl & CTX_CTRL_CLONE)
- pid = task_ppid_nr(current);
-
- if (likely(ctxid < MAX_CONTEXT)) {
- while (true) {
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- ctxi = cfg->ctx_tbl[ctxid];
- if (ctxi)
- if ((file && (ctxi->file != file)) ||
- (!file && (ctxi->ctxid != rctxid)))
- ctxi = NULL;
-
- if ((ctx_ctrl & CTX_CTRL_ERR) ||
- (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
- ctxi = find_error_context(cfg, rctxid, file);
- if (!ctxi) {
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- goto out;
- }
-
- /*
- * Need to acquire ownership of the context while still
- * under the table/list lock to serialize with a remove
- * thread. Use the 'try' to avoid stalling the
- * table/list lock for a single context.
- *
- * Note that the lock order is:
- *
- * cfg->ctx_tbl_list_mutex -> ctxi->mutex
- *
- * Therefore release ctx_tbl_list_mutex before retrying.
- */
- rc = mutex_trylock(&ctxi->mutex);
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- if (rc)
- break; /* got the context's lock! */
- }
-
- if (ctxi->unavail)
- goto denied;
-
- ctxpid = ctxi->pid;
- if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
- if (pid != ctxpid)
- goto denied;
-
- if (lli) {
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli)
- goto out;
- goto denied;
- }
- }
-
-out:
- dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
- "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
- ctx_ctrl);
-
- return ctxi;
-
-denied:
- mutex_unlock(&ctxi->mutex);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * put_context() - release a context that was retrieved from get_context()
- * @ctxi: Context to release.
- *
- * For now, releasing the context equates to unlocking its mutex.
- */
-void put_context(struct ctx_info *ctxi)
-{
- mutex_unlock(&ctxi->mutex);
-}
-
-/**
- * afu_attach() - attach a context to the AFU
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to attach.
- *
- * Upon setting the context capabilities, they must be confirmed with
- * a read back operation as the context might have been closed since
- * the mailbox was unlocked. When this occurs, registration is failed.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
-{
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
- int rc = 0;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 val;
- int i;
-
- /* Unlock cap and restrict user to read/write cmds in translated mode */
- readq_be(&ctrl_map->mbox_r);
- val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
- writeq_be(val, &ctrl_map->ctx_cap);
- val = readq_be(&ctrl_map->ctx_cap);
- if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
- dev_err(dev, "%s: ctx may be closed val=%016llx\n",
- __func__, val);
- rc = -EAGAIN;
- goto out;
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each interrupt */
- for (i = 0; i < ctxi->irqs; i++) {
- val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
- writeq_be(val, &ctrl_map->lisn_ea[i]);
- }
-
- /* Use primary HWQ PASID as identifier for all interrupts */
- val = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
- }
-
- /* Set up MMIO registers pointing to the RHT */
- writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
- val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
- writeq_be(val, &ctrl_map->rht_cnt_id);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * read_cap16() - issues a SCSI READ_CAP16 command
- * @sdev: SCSI device associated with LUN.
- * @lli: LUN destined for capacity request.
- *
- * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
- * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
- * the recovery, the handler drains all currently running ioctls, waiting until
- * they have completed before proceeding with a reset. As this routine is used
- * on the ioctl path, this can create a condition where the EEH handler becomes
- * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
- * temporarily unmark this thread as an ioctl thread by releasing the ioctl
- * read semaphore. This will allow the EEH handler to proceed with a recovery
- * while this thread is still running. Once the scsi_execute_cmd() returns,
- * reacquire the ioctl read semaphore and check the adapter state in case it
- * changed while inside of scsi_execute_cmd(). The state check will wait if the
- * adapter is still being recovered or return a failure if the recovery failed.
- * In the event that the adapter reset failed, simply return the failure as the
- * ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = lli->parent;
- struct scsi_sense_hdr sshdr;
- const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
- };
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- int retry_cnt = 0;
- u32 to = CMD_TIMEOUT * HZ;
-
-retry:
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
- scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
- put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
-
- dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
- retry_cnt ? "re" : "", scsi_cmd[0]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
- CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result > 0 && scsi_sense_valid(&sshdr)) {
- if (result & SAM_STAT_CHECK_CONDITION) {
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- result &= ~SAM_STAT_CHECK_CONDITION;
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device capacity changed */
- case 0x3F: /* Report LUNs changed */
- /* Retry the command once more */
- if (retry_cnt++ < 1) {
- kfree(cmd_buf);
- kfree(scsi_cmd);
- goto retry;
- }
- }
- break;
- default:
- break;
- }
- }
- }
-
- if (result) {
- dev_err(dev, "%s: command failed, result=%08x\n",
- __func__, result);
- rc = -EIO;
- goto out;
- }
-
- /*
- * Read cap was successful, grab values from the buffer;
- * note that we don't need to worry about unaligned access
- * as the buffer is allocated on an aligned boundary.
- */
- mutex_lock(&gli->mutex);
- gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
- gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
- mutex_unlock(&gli->mutex);
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
-
- dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
- __func__, gli->max_lba, gli->blk_len, rc);
- return rc;
-}
-
-/**
- * get_rhte() - obtains validated resource handle table entry reference
- * @ctxi: Context owning the resource handle.
- * @rhndl: Resource handle associated with entry.
- * @lli: LUN associated with request.
- *
- * Return: Validated RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
-
- if (unlikely(!ctxi->rht_start)) {
- dev_dbg(dev, "%s: Context does not have allocated RHT\n",
- __func__);
- goto out;
- }
-
- if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
- dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- rhte = &ctxi->rht_start[rhndl];
- if (unlikely(rhte->nmask == 0)) {
- dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
- __func__, rhndl);
- rhte = NULL;
- goto out;
- }
-
-out:
- return rhte;
-}
-
-/**
- * rhte_checkout() - obtains free/empty resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @lli: LUN associated with request.
- *
- * Return: Free RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
- int i;
-
- /* Find a free RHT entry */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi->rht_start[i].nmask == 0) {
- rhte = &ctxi->rht_start[i];
- ctxi->rht_out++;
- break;
- }
-
- if (likely(rhte))
- ctxi->rht_lun[i] = lli;
-
- dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
- return rhte;
-}
-
-/**
- * rhte_checkin() - releases a resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @rhte: RHTE to release.
- */
-void rhte_checkin(struct ctx_info *ctxi,
- struct sisl_rht_entry *rhte)
-{
- u32 rsrc_handle = rhte - ctxi->rht_start;
-
- rhte->nmask = 0;
- rhte->fp = 0;
- ctxi->rht_out--;
- ctxi->rht_lun[rsrc_handle] = NULL;
- ctxi->rht_needs_ws[rsrc_handle] = false;
-}
-
-/**
- * rht_format1() - populates a RHTE for format 1
- * @rhte: RHTE to populate.
- * @lun_id: LUN ID of LUN associated with RHTE.
- * @perm: Desired permissions for RHTE.
- * @port_sel: Port selection mask
- */
-static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
- u32 port_sel)
-{
- /*
- * Populate the Format 1 RHT entry for direct access (physical
- * LUN) using the synchronization sequence defined in the
- * SISLite specification.
- */
- struct sisl_rht_entry_f1 dummy = { 0 };
- struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- memset(rhte_f1, 0, sizeof(*rhte_f1));
- rhte_f1->fp = SISL_RHT_FP(1U, 0);
- dma_wmb(); /* Make setting of format bit visible */
-
- rhte_f1->lun_id = lun_id;
- dma_wmb(); /* Make setting of LUN id visible */
-
- /*
- * Use a dummy RHT Format 1 entry to build the second dword
- * of the entry that must be populated in a single write when
- * enabled (valid bit set to TRUE).
- */
- dummy.valid = 0x80;
- dummy.fp = SISL_RHT_FP(1U, perm);
- dummy.port_sel = port_sel;
- rhte_f1->dw = dummy.dw;
-
- dma_wmb(); /* Make remaining RHT entry fields visible */
-}
-
-/**
- * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
- * @gli: LUN to attach.
- * @mode: Desired mode of the LUN.
- * @locked: Mutex status on current thread.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
-{
- int rc = 0;
-
- if (!locked)
- mutex_lock(&gli->mutex);
-
- if (gli->mode == MODE_NONE)
- gli->mode = mode;
- else if (gli->mode != mode) {
- pr_debug("%s: gli_mode=%d requested_mode=%d\n",
- __func__, gli->mode, mode);
- rc = -EINVAL;
- goto out;
- }
-
- gli->users++;
- WARN_ON(gli->users <= 0);
-out:
- pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
- __func__, rc, gli->mode, gli->users);
- if (!locked)
- mutex_unlock(&gli->mutex);
- return rc;
-}
-
-/**
- * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
- * @gli: LUN to detach.
- *
- * When resetting the mode, terminate block allocation resources as they
- * are no longer required (service is safe to call even when block allocation
- * resources were not present - such as when transitioning from physical mode).
- * These resources will be reallocated when needed (subsequent transition to
- * virtual mode).
- */
-void cxlflash_lun_detach(struct glun_info *gli)
-{
- mutex_lock(&gli->mutex);
- WARN_ON(gli->mode == MODE_NONE);
- if (--gli->users == 0) {
- gli->mode = MODE_NONE;
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- }
- pr_debug("%s: gli->users=%u\n", __func__, gli->users);
- WARN_ON(gli->users < 0);
- mutex_unlock(&gli->mutex);
-}
-
-/**
- * _cxlflash_disk_release() - releases the specified resource entry
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @release: Release ioctl data structure.
- *
- * For LUNs in virtual mode, the virtual LUN associated with the specified
- * resource handle is resized to 0 prior to releasing the RHTE. Note that the
- * AFU sync should _not_ be performed when the context is sitting on the error
- * recovery list. A context on the error recovery list is not known to the AFU
- * due to reset. When the context is recovered, it will be reattached and made
- * known again to the AFU.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_disk_release(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_release *release)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- struct dk_cxlflash_resize size;
- res_hndl_t rhndl = release->rsrc_handle;
-
- int rc = 0;
- int rcr = 0;
- u64 ctxid = DECODE_CTXID(release->context_id),
- rctxid = release->context_id;
-
- struct sisl_rht_entry *rhte;
- struct sisl_rht_entry_f1 *rhte_f1;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
- __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Resize to 0 for virtual LUNS by setting the size
- * to 0. This will clear LXT_START and LXT_CNT fields
- * in the RHT entry and properly sync with the AFU.
- *
- * Afterwards we clear the remaining fields.
- */
- switch (gli->mode) {
- case MODE_VIRTUAL:
- marshal_rele_to_resize(release, &size);
- size.req_size = 0;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
- if (rc) {
- dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
- goto out;
- }
-
- break;
- case MODE_PHYSICAL:
- /*
- * Clear the Format 1 RHT entry for direct access
- * (physical LUN) using the synchronization sequence
- * defined in the SISLite specification.
- */
- rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- rhte_f1->valid = 0;
- dma_wmb(); /* Make revocation of RHT entry visible */
-
- rhte_f1->lun_id = 0;
- dma_wmb(); /* Make clearing of LUN id visible */
-
- rhte_f1->dw = 0;
- dma_wmb(); /* Make RHT entry bottom-half clearing visible */
-
- if (!ctxi->err_recovery_active) {
- rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rcr))
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
- __func__, rcr);
- }
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- goto out;
- }
-
- rhte_checkin(ctxi, rhte);
- cxlflash_lun_detach(gli);
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-int cxlflash_disk_release(struct scsi_device *sdev, void *release)
-{
- return _cxlflash_disk_release(sdev, NULL, release);
-}
-
-/**
- * destroy_context() - releases a context
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to release.
- *
- * This routine is safe to be called with a non-initialized context.
- * Also note that the routine conditionally checks for the existence
- * of the context control map before clearing the RHT registers and
- * context capabilities because it is possible to destroy a context
- * while the context is in the error state (previous mapping was
- * removed [so there is no need to worry about clearing] and context
- * is waiting for a new mapping).
- */
-static void destroy_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi)
-{
- struct afu *afu = cfg->afu;
-
- if (ctxi->initialized) {
- WARN_ON(!list_empty(&ctxi->luns));
-
- /* Clear RHT registers and drop all capabilities for context */
- if (afu->afu_map && ctxi->ctrl_map) {
- writeq_be(0, &ctxi->ctrl_map->rht_start);
- writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
- writeq_be(0, &ctxi->ctrl_map->ctx_cap);
- }
- }
-
- /* Free memory associated with context */
- free_page((ulong)ctxi->rht_start);
- kfree(ctxi->rht_needs_ws);
- kfree(ctxi->rht_lun);
- kfree(ctxi);
-}
-
-/**
- * create_context() - allocates and initializes a context
- * @cfg: Internal structure associated with the host.
- *
- * Return: Allocated context on success, NULL on failure
- */
-static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct llun_info **lli = NULL;
- u8 *ws = NULL;
- struct sisl_rht_entry *rhte;
-
- ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
- lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
- ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
- if (unlikely(!ctxi || !lli || !ws)) {
- dev_err(dev, "%s: Unable to allocate context\n", __func__);
- goto err;
- }
-
- rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
- goto err;
- }
-
- ctxi->rht_lun = lli;
- ctxi->rht_needs_ws = ws;
- ctxi->rht_start = rhte;
-out:
- return ctxi;
-
-err:
- kfree(ws);
- kfree(lli);
- kfree(ctxi);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * init_context() - initializes a previously allocated context
- * @ctxi: Previously allocated context
- * @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained context cookie.
- * @ctxid: Previously obtained process element associated with CXL context.
- * @file: Previously obtained file associated with CXL context.
- * @perms: User-specified permissions.
- * @irqs: User-specified number of interrupts.
- */
-static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- void *ctx, int ctxid, struct file *file, u32 perms,
- u64 irqs)
-{
- struct afu *afu = cfg->afu;
-
- ctxi->rht_perms = perms;
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->irqs = irqs;
- ctxi->pid = task_tgid_nr(current); /* tgid = pid */
- ctxi->ctx = ctx;
- ctxi->cfg = cfg;
- ctxi->file = file;
- ctxi->initialized = true;
- mutex_init(&ctxi->mutex);
- kref_init(&ctxi->kref);
- INIT_LIST_HEAD(&ctxi->luns);
- INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
-}
-
-/**
- * remove_context() - context kref release handler
- * @kref: Kernel reference associated with context to be removed.
- *
- * When a context no longer has any references it can safely be removed
- * from global access and destroyed. Note that it is assumed the thread
- * relinquishing access to the context holds its mutex.
- */
-static void remove_context(struct kref *kref)
-{
- struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
- struct cxlflash_cfg *cfg = ctxi->cfg;
- u64 ctxid = DECODE_CTXID(ctxi->ctxid);
-
- /* Remove context from table/error list */
- WARN_ON(!mutex_is_locked(&ctxi->mutex));
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
-
- if (!list_empty(&ctxi->list))
- list_del(&ctxi->list);
- cfg->ctx_tbl[ctxid] = NULL;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- mutex_unlock(&ctxi->mutex);
-
- /* Context now completely uncoupled/unreachable */
- destroy_context(cfg, ctxi);
-}
-
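The unlock/relock sequence in remove_context() above exists to respect lock ordering: the table/list mutex must always be taken before a context mutex, so the context mutex is dropped and both are reacquired in order. A minimal userspace sketch of that pattern (illustrative only, not driver code; pthread-based):

#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	pthread_mutex_t lock;
	int unavail;
};

/* Called with o->lock held; returns with table_lock and o->lock both held,
 * acquired in table-before-object order. */
static void relock_in_order(struct obj *o)
{
	o->unavail = 1;                          /* keep lookups away while unlocked */
	pthread_mutex_unlock(&o->lock);
	pthread_mutex_lock(&table_lock);
	pthread_mutex_lock(&o->lock);
}

int main(void)
{
	struct obj o;

	pthread_mutex_init(&o.lock, NULL);
	o.unavail = 0;

	pthread_mutex_lock(&o.lock);
	relock_in_order(&o);
	/* ... remove o from the table here ... */
	pthread_mutex_unlock(&table_lock);
	pthread_mutex_unlock(&o.lock);
	return 0;
}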
-/**
- * _cxlflash_disk_detach() - detaches a LUN from a context
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @detach: Detach ioctl data structure.
- *
- * As part of the detach, all per-context resources associated with the LUN
- * are cleaned up. When detaching the last LUN for a context, the context
- * itself is cleaned up and released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int _cxlflash_disk_detach(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_detach *detach)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct lun_access *lun_access, *t;
- struct dk_cxlflash_release rel;
- bool put_ctx = false;
-
- int i;
- int rc = 0;
- u64 ctxid = DECODE_CTXID(detach->context_id),
- rctxid = detach->context_id;
-
- dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- /* Cleanup outstanding resources tied to this LUN */
- if (ctxi->rht_out) {
- marshal_det_to_rele(detach, &rel);
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi->rht_lun[i] == lli) {
- rel.rsrc_handle = i;
- _cxlflash_disk_release(sdev, ctxi, &rel);
- }
-
- /* No need to loop further if we're done */
- if (ctxi->rht_out == 0)
- break;
- }
- }
-
- /* Take our LUN out of context, free the node */
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- list_del(&lun_access->list);
- kfree(lun_access);
- lun_access = NULL;
- break;
- }
-
- /*
- * Release the context reference and the sdev reference that
- * bound this LUN to the context.
- */
- if (kref_put(&ctxi->kref, remove_context))
- put_ctx = false;
- scsi_device_put(sdev);
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
-{
- return _cxlflash_disk_detach(sdev, NULL, detach);
-}
-
-/**
- * cxlflash_cxl_release() - release handler for adapter file descriptor
- * @inode: File-system inode associated with fd.
- * @file: File installed with adapter file descriptor.
- *
- * This routine is the release handler for the fops registered with
- * the CXL services on an initial attach for a context. It is called
- * when a close (explicitly by the user or as part of a process tear
- * down) is performed on the adapter file descriptor returned to the
- * user. The user should be aware that explicitly performing a close
- * is considered catastrophic and subsequent usage of the superpipe API
- * with previously saved off tokens will fail.
- *
- * This routine derives the context reference and calls detach for
- * each LUN associated with the context. The final detach operation
- * causes the context itself to be freed. With exception to when the
- * CXL process element (context id) lookup fails (a case that should
- * theoretically never occur), every call into this routine results
- * in a complete freeing of a context.
- *
- * Detaching the LUN is typically an ioctl() operation and the underlying
- * code assumes that ioctl_rwsem has been acquired as a reader. To support
- * that design point, the semaphore is acquired and released around detach.
- *
- * Return: 0 on success
- */
-static int cxlflash_cxl_release(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct dk_cxlflash_detach detach = { { 0 }, 0 };
- struct lun_access *lun_access, *t;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
- if (!ctxi) {
- dev_dbg(dev, "%s: ctxid=%d already free\n",
- __func__, ctxid);
- goto out_release;
- }
-
- dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
- __func__, ctxid);
- put_context(ctxi);
- goto out;
- }
-
- dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
-
- down_read(&cfg->ioctl_rwsem);
- detach.context_id = ctxi->ctxid;
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
- up_read(&cfg->ioctl_rwsem);
-out_release:
- cfg->ops->fd_release(inode, file);
-out:
- dev_dbg(dev, "%s: returning\n", __func__);
- return 0;
-}
-
-/**
- * unmap_context() - clears a previously established mapping
- * @ctxi: Context owning the mapping.
- *
- * This routine is used to switch between the error notification page
- * (dummy page of all 1's) and the real mapping (established by the CXL
- * fault handler).
- */
-static void unmap_context(struct ctx_info *ctxi)
-{
- unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
-}
-
-/**
- * get_err_page() - obtains and allocates the error notification page
- * @cfg: Internal structure associated with the host.
- *
- * Return: error notification page on success, NULL on failure
- */
-static struct page *get_err_page(struct cxlflash_cfg *cfg)
-{
- struct page *err_page = global.err_page;
- struct device *dev = &cfg->dev->dev;
-
- if (unlikely(!err_page)) {
- err_page = alloc_page(GFP_KERNEL);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Unable to allocate err_page\n",
- __func__);
- goto out;
- }
-
- memset(page_address(err_page), -1, PAGE_SIZE);
-
- /* Serialize update w/ other threads to avoid a leak */
- mutex_lock(&global.mutex);
- if (likely(!global.err_page))
- global.err_page = err_page;
- else {
- __free_page(err_page);
- err_page = global.err_page;
- }
- mutex_unlock(&global.mutex);
- }
-
-out:
- dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
- return err_page;
-}
-
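get_err_page() above populates the single global error page with an allocate-then-publish scheme: the page is allocated outside the global mutex, and under the mutex the thread either installs its page or frees it and adopts the one another thread won with. A small userspace analog (illustrative only, not driver code; the unlocked fast-path read mirrors the driver):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static void *global_err_page;

static void *get_shared_err_page(size_t size)
{
	void *page = global_err_page;

	if (!page) {
		page = malloc(size);            /* allocate without holding the lock */
		if (!page)
			return NULL;
		memset(page, 0xFF, size);       /* all 1s, like the driver's error page */

		pthread_mutex_lock(&global_lock);
		if (!global_err_page) {
			global_err_page = page; /* we won the race: publish ours */
		} else {
			free(page);             /* lost the race: discard the duplicate */
			page = global_err_page;
		}
		pthread_mutex_unlock(&global_lock);
	}
	return page;
}

int main(void)
{
	return get_shared_err_page(4096) ? 0 : 1;
}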
-/**
- * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
- * @vmf: VM fault associated with current fault.
- *
- * To support error notification via MMIO, faults are 'caught' by this routine
- * that was inserted before passing back the adapter file descriptor on attach.
- * When a fault occurs, this routine evaluates if error recovery is active and
- * if so, installs the error page to 'notify' the user about the error state.
- * During normal operation, the fault is simply handled by the original fault
- * handler that was installed by CXL services as part of initializing the
- * adapter file descriptor. The VMA's page protection bits are toggled to
- * indicate cached/not-cached depending on the memory backing the fault.
- *
- * Return: 0 on success, VM_FAULT_SIGBUS on failure
- */
-static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct file *file = vma->vm_file;
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct page *err_page = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- vm_fault_t rc = 0;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto err;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- goto err;
- }
-
- dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
-
- if (likely(!ctxi->err_recovery_active)) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- rc = ctxi->cxl_mmap_vmops->fault(vmf);
- } else {
- dev_dbg(dev, "%s: err recovery active, use err_page\n",
- __func__);
-
- err_page = get_err_page(cfg);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Could not get err_page\n", __func__);
- rc = VM_FAULT_RETRY;
- goto out;
- }
-
- get_page(err_page);
- vmf->page = err_page;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
- return rc;
-
-err:
- rc = VM_FAULT_SIGBUS;
- goto out;
-}
-
-/*
- * Local MMAP vmops to 'catch' faults
- */
-static const struct vm_operations_struct cxlflash_mmap_vmops = {
- .fault = cxlflash_mmap_fault,
-};
-
-/**
- * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Installs local mmap vmops to 'catch' faults for error notification support.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
- int rc = 0;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- rc = -EIO;
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- rc = -EIO;
- goto out;
- }
-
- dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
-
- rc = cfg->ops->fd_mmap(file, vma);
- if (likely(!rc)) {
- /* Insert ourself in the mmap fault handler path */
- ctxi->cxl_mmap_vmops = vma->vm_ops;
- vma->vm_ops = &cxlflash_mmap_vmops;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- return rc;
-}
-
-const struct file_operations cxlflash_cxl_fops = {
- .owner = THIS_MODULE,
- .mmap = cxlflash_cxl_mmap,
- .release = cxlflash_cxl_release,
-};
-
-/**
- * cxlflash_mark_contexts_error() - move contexts to error state and list
- * @cfg: Internal structure associated with the host.
- *
- * A context is only moved over to the error list when there are no outstanding
- * references to it. This ensures that a running operation has completed.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
-{
- int i, rc = 0;
- struct ctx_info *ctxi = NULL;
-
- mutex_lock(&cfg->ctx_tbl_list_mutex);
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctxi = cfg->ctx_tbl[i];
- if (ctxi) {
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[i] = NULL;
- list_add(&ctxi->list, &cfg->ctx_err_recovery);
- ctxi->err_recovery_active = true;
- ctxi->ctrl_map = NULL;
- unmap_context(ctxi);
- mutex_unlock(&ctxi->mutex);
- }
- }
-
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- return rc;
-}
-
-/*
- * Dummy NULL fops
- */
-static const struct file_operations null_fops = {
- .owner = THIS_MODULE,
-};
-
-/**
- * check_state() - checks and responds to the current adapter state
- * @cfg: Internal structure associated with the host.
- *
- * This routine can block and should only be used in process context.
- * It assumes that the caller is an ioctl thread and holding the ioctl
- * read semaphore. This is temporarily let up across the wait to allow
- * for draining actively running ioctls. Also note that when waking up
- * from waiting in reset, the state is unknown and must be checked again
- * before proceeding.
- *
- * Return: 0 on success, -errno on failure
- */
-int check_state(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
-retry:
- switch (cfg->state) {
- case STATE_RESET:
- dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
- up_read(&cfg->ioctl_rwsem);
- rc = wait_event_interruptible(cfg->reset_waitq,
- cfg->state != STATE_RESET);
- down_read(&cfg->ioctl_rwsem);
- if (unlikely(rc))
- break;
- goto retry;
- case STATE_FAILTERM:
- dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
- rc = -ENODEV;
- break;
- default:
- break;
- }
-
- return rc;
-}
-
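check_state() gives up its reader slot on the ioctl semaphore before sleeping so that a reset thread can drain readers, then retakes it and rechecks the state from the top. A rough userspace analog of that wait loop (illustrative only, not driver code; pthread rwlock plus a condition variable and C11 atomics stand in for the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>

enum { STATE_NORMAL, STATE_RESET, STATE_FAILTERM };

static pthread_rwlock_t ioctl_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t state_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t state_cv = PTHREAD_COND_INITIALIZER;
static atomic_int state = STATE_NORMAL;

/* Called with ioctl_rwsem held for reading; returns holding it the same way. */
static int wait_out_reset(void)
{
retry:
	switch (atomic_load(&state)) {
	case STATE_RESET:
		pthread_rwlock_unlock(&ioctl_rwsem);    /* let a reset thread drain readers */
		pthread_mutex_lock(&state_mtx);
		while (atomic_load(&state) == STATE_RESET)
			pthread_cond_wait(&state_cv, &state_mtx);
		pthread_mutex_unlock(&state_mtx);
		pthread_rwlock_rdlock(&ioctl_rwsem);    /* rejoin the ioctl population */
		goto retry;                             /* state is unknown again */
	case STATE_FAILTERM:
		return -1;
	default:
		return 0;
	}
}

int main(void)
{
	pthread_rwlock_rdlock(&ioctl_rwsem);
	int rc = wait_out_reset();
	pthread_rwlock_unlock(&ioctl_rwsem);
	return rc ? 1 : 0;
}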
-/**
- * cxlflash_disk_attach() - attach a LUN to a context
- * @sdev: SCSI device associated with LUN.
- * @arg: Attach ioctl data structure.
- *
- * Creates a context and attaches LUN to it. A LUN can only be attached
- * one time to a context (subsequent attaches for the same context/LUN pair
- * are not supported). Additional LUNs can be attached to a context by
- * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_attach(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_attach *attach = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- int rc = 0;
- u32 perms;
- int ctxid = -1;
- u64 irqs = attach->num_interrupts;
- u64 flags = 0UL;
- u64 rctxid = 0UL;
- struct file *file = NULL;
-
- void *ctx = NULL;
-
- int fd = -1;
-
- if (irqs > 4) {
- dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, irqs);
- rc = -EINVAL;
- goto out;
- }
-
- if (gli->max_lba == 0) {
- dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
- __func__, lli->lun_id[sdev->channel]);
- rc = read_cap16(sdev, lli);
- if (rc) {
- dev_err(dev, "%s: Invalid device rc=%d\n",
- __func__, rc);
- rc = -ENODEV;
- goto out;
- }
- dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
- dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
- }
-
- if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
- rctxid = attach->context_id;
- ctxi = get_context(cfg, rctxid, NULL, 0);
- if (!ctxi) {
- dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
- __func__, rctxid);
- rc = -EINVAL;
- goto out;
- }
-
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- dev_dbg(dev, "%s: Already attached\n",
- __func__);
- rc = -EINVAL;
- goto out;
- }
- }
-
- rc = scsi_device_get(sdev);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
- goto out;
- }
-
- lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
- if (unlikely(!lun_access)) {
- dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
- rc = -ENOMEM;
- goto err;
- }
-
- lun_access->lli = lli;
- lun_access->sdev = sdev;
-
- /* Non-NULL context indicates reuse (another context reference) */
- if (ctxi) {
- dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
- __func__, rctxid);
- kref_get(&ctxi->kref);
- list_add(&lun_access->list, &ctxi->luns);
- goto out_attach;
- }
-
- ctxi = create_context(cfg);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Failed to create context ctxid=%d\n",
- __func__, ctxid);
- rc = -ENOMEM;
- goto err;
- }
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto err;
- }
-
- rc = cfg->ops->start_work(ctx, irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err;
- }
-
- /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
- perms = SISL_RHT_PERM(attach->hdr.flags + 1);
-
- /* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
-
- rc = afu_attach(cfg, ctxi);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- * There is no need to worry about a deadlock here because no one
- * knows about us yet; we can be the only one holding our mutex.
- */
- list_add(&lun_access->list, &ctxi->luns);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
-
-out_attach:
- if (fd != -1)
- flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- attach->hdr.return_flags = flags;
- attach->context_id = ctxi->ctxid;
- attach->block_size = gli->blk_len;
- attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- attach->last_lba = gli->max_lba;
- attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
- attach->max_xfer /= gli->blk_len;
-
-out:
- attach->adap_fd = fd;
-
- if (ctxi)
- put_context(ctxi);
-
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
- __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
- return rc;
-
-err:
- /* Cleanup CXL context; okay to 'stop' even if it was not started */
- if (!IS_ERR_OR_NULL(ctx)) {
- cfg->ops->stop_context(ctx);
- cfg->ops->release_context(ctx);
- ctx = NULL;
- }
-
- /*
- * Here, we're overriding the fops with a dummy all-NULL fops because
- * fput() calls the release fop, which will cause us to mistakenly
- * call into the CXL code. Rather than try to add yet more complexity
- * to that routine (cxlflash_cxl_release) we should try to fix the
- * issue here.
- */
- if (fd > 0) {
- file->f_op = &null_fops;
- fput(file);
- put_unused_fd(fd);
- fd = -1;
- file = NULL;
- }
-
- /* Cleanup our context */
- if (ctxi) {
- destroy_context(cfg, ctxi);
- ctxi = NULL;
- }
-
- kfree(lun_access);
- scsi_device_put(sdev);
- goto out;
-}
-
-/**
- * recover_context() - recovers a context in error
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to recover.
- * @adap_fd: Adapter file descriptor associated with new/recovered context.
- *
- * Reestablishes the state for a context-in-error.
- *
- * Return: 0 on success, -errno on failure
- */
-static int recover_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi,
- int *adap_fd)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- int fd = -1;
- int ctxid = -1;
- struct file *file;
- void *ctx;
- struct afu *afu = cfg->afu;
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto out;
- }
-
- rc = cfg->ops->start_work(ctx, ctxi->irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err2;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err2;
- }
-
- /* Update with new MMIO area based on updated context id */
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
-
- rc = afu_attach(cfg, ctxi);
- if (rc) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err3;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- */
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->ctx = ctx;
- ctxi->file = file;
-
- /*
- * Put context back in table (note the reinit of the context list);
- * we must first drop the context's mutex and then acquire it in
- * order with the table/list mutex to avoid a deadlock - safe to do
- * here because no one can find us at this moment in time.
- */
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- list_del_init(&ctxi->list);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
- *adap_fd = fd;
-out:
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
- __func__, ctxid, fd, rc);
- return rc;
-
-err3:
- fput(file);
- put_unused_fd(fd);
-err2:
- cfg->ops->stop_context(ctx);
-err1:
- cfg->ops->release_context(ctx);
- goto out;
-}
-
-/**
- * cxlflash_afu_recover() - initiates AFU recovery
- * @sdev: SCSI device associated with LUN.
- * @arg: Recover ioctl data structure.
- *
- * Only a single recovery is allowed at a time to avoid exhausting CXL
- * resources (leading to recovery failure) in the event that we're up
- * against the maximum number of contexts limit. For similar reasons,
- * a context recovery is retried if there are multiple recoveries taking
- * place at the same time and the failure was due to CXL services being
- * unable to keep up.
- *
- * As this routine is called on ioctl context, it holds the ioctl r/w
- * semaphore that is used to drain ioctls in recovery scenarios. The
- * implementation to achieve the pacing described above (a local mutex)
- * requires that the ioctl r/w semaphore be dropped and reacquired to
- * avoid a 3-way deadlock when multiple process recoveries operate in
- * parallel.
- *
- * Because a user can detect an error condition before the kernel, it is
- * quite possible for this routine to act as the kernel's EEH detection
- * source (MMIO read of mbox_r). Because of this, there is a window of
- * time where an EEH might have been detected but not yet 'serviced'
- * (callback invoked, causing the device to enter reset state). To avoid
- * looping in this routine during that window, a 1 second sleep is in place
- * between the time the MMIO failure is detected and the time a wait on the
- * reset wait queue is attempted via check_state().
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_recover(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_recover_afu *recover = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct afu *afu = cfg->afu;
- struct ctx_info *ctxi = NULL;
- struct mutex *mutex = &cfg->ctx_recovery_mutex;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 flags;
- u64 ctxid = DECODE_CTXID(recover->context_id),
- rctxid = recover->context_id;
- long reg;
- bool locked = true;
- int lretry = 20; /* up to 2 seconds */
- int new_adap_fd = -1;
- int rc = 0;
-
- atomic_inc(&cfg->recovery_threads);
- up_read(&cfg->ioctl_rwsem);
- rc = mutex_lock_interruptible(mutex);
- down_read(&cfg->ioctl_rwsem);
- if (rc) {
- locked = false;
- goto out;
- }
-
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
- __func__, recover->reason, rctxid);
-
-retry:
- /* Ensure that this process is attached to the context */
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- if (ctxi->err_recovery_active) {
-retry_recover:
- rc = recover_context(cfg, ctxi, &new_adap_fd);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
- __func__, ctxid, rc);
- if ((rc == -ENODEV) &&
- ((atomic_read(&cfg->recovery_threads) > 1) ||
- (lretry--))) {
- dev_dbg(dev, "%s: Going to try again\n",
- __func__);
- mutex_unlock(mutex);
- msleep(100);
- rc = mutex_lock_interruptible(mutex);
- if (rc) {
- locked = false;
- goto out;
- }
- goto retry_recover;
- }
-
- goto out;
- }
-
- ctxi->err_recovery_active = false;
-
- flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
- DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- recover->hdr.return_flags = flags;
- recover->context_id = ctxi->ctxid;
- recover->adap_fd = new_adap_fd;
- recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- goto out;
- }
-
- /* Test if in error state */
- reg = readq_be(&hwq->ctrl_map->mbox_r);
- if (reg == -1) {
- dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
-
- /*
- * Before checking the state, put back the context obtained with
- * get_context() as it is no longer needed and sleep for a short
- * period of time (see prolog notes).
- */
- put_context(ctxi);
- ctxi = NULL;
- ssleep(1);
- rc = check_state(cfg);
- if (unlikely(rc))
- goto out;
- goto retry;
- }
-
- dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
-out:
- if (likely(ctxi))
- put_context(ctxi);
- if (locked)
- mutex_unlock(mutex);
- atomic_dec_if_positive(&cfg->recovery_threads);
- return rc;
-}
-
-/**
- * process_sense() - evaluates and processes sense data
- * @sdev: SCSI device associated with LUN.
- * @verify: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int process_sense(struct scsi_device *sdev,
- struct dk_cxlflash_verify *verify)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- u64 prev_lba = gli->max_lba;
- struct scsi_sense_hdr sshdr = { 0 };
- int rc = 0;
-
- rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
- DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
- if (!rc) {
- dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device settings/capacity changed */
- rc = read_cap16(sdev, lli);
- if (rc) {
- rc = -ENODEV;
- break;
- }
- if (prev_lba != gli->max_lba)
- dev_dbg(dev, "%s: Capacity changed old=%lld "
- "new=%lld\n", __func__, prev_lba,
- gli->max_lba);
- break;
- case 0x3F: /* Report LUNs changed, Rescan. */
- scsi_scan_host(cfg->host);
- break;
- default:
- rc = -EIO;
- break;
- }
- break;
- default:
- rc = -EIO;
- break;
- }
-out:
- dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
- sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
- return rc;
-}
-
-/**
- * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
- * @sdev: SCSI device associated with LUN.
- * @arg: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_verify(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_verify *verify = arg;
- int rc = 0;
- struct ctx_info *ctxi = NULL;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct sisl_rht_entry *rhte = NULL;
- res_hndl_t rhndl = verify->rsrc_handle;
- u64 ctxid = DECODE_CTXID(verify->context_id),
- rctxid = verify->context_id;
- u64 last_lba = 0;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
- "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
- verify->hint, verify->hdr.flags);
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Look at the hint/sense to see if it requires us to redrive
- * inquiry (i.e. the Unit attention is due to the WWN changing).
- */
- if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
- /* Can't hold mutex across process_sense/read_cap16,
- * since we could have an intervening EEH event.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- rc = process_sense(sdev, verify);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed to validate sense data (%d)\n",
- __func__, rc);
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- goto out;
- }
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- switch (gli->mode) {
- case MODE_PHYSICAL:
- last_lba = gli->max_lba;
- break;
- case MODE_VIRTUAL:
- /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
- last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
- last_lba /= CXLFLASH_BLOCK_SIZE;
- last_lba--;
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- }
-
- verify->last_lba = last_lba;
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
- __func__, rc, verify->last_lba);
- return rc;
-}
-
-/**
- * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
- * @cmd: The ioctl command to decode.
- *
- * Return: A string identifying the decoded ioctl.
- */
-static char *decode_ioctl(unsigned int cmd)
-{
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- return __stringify_1(DK_CXLFLASH_ATTACH);
- case DK_CXLFLASH_USER_DIRECT:
- return __stringify_1(DK_CXLFLASH_USER_DIRECT);
- case DK_CXLFLASH_USER_VIRTUAL:
- return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
- case DK_CXLFLASH_VLUN_RESIZE:
- return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
- case DK_CXLFLASH_RELEASE:
- return __stringify_1(DK_CXLFLASH_RELEASE);
- case DK_CXLFLASH_DETACH:
- return __stringify_1(DK_CXLFLASH_DETACH);
- case DK_CXLFLASH_VERIFY:
- return __stringify_1(DK_CXLFLASH_VERIFY);
- case DK_CXLFLASH_VLUN_CLONE:
- return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
- case DK_CXLFLASH_RECOVER_AFU:
- return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
- case DK_CXLFLASH_MANAGE_LUN:
- return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
- }
-
- return "UNKNOWN";
-}
-
-/**
- * cxlflash_disk_direct_open() - opens a direct (physical) disk
- * @sdev: SCSI device associated with LUN.
- * @arg: UDirect ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the direct lun and the size (in blocks) of
- * the direct lun in last LBA format.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct dk_cxlflash_release rel = { { 0 }, 0 };
-
- struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
-
- u64 ctxid = DECODE_CTXID(pphys->context_id),
- rctxid = pphys->context_id;
- u64 lun_size = 0;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
- u32 port = CHAN2PORTMASK(sdev->channel);
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
- rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
- goto out;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
-
- last_lba = gli->max_lba;
- pphys->hdr.return_flags = 0;
- pphys->last_lba = last_lba;
- pphys->rsrc_handle = rsrc_handle;
-
- rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- marshal_udir_to_rele(pphys, &rel);
- _cxlflash_disk_release(sdev, ctxi, &rel);
- goto out;
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-}
-
-/**
- * ioctl_common() - common IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- *
- * Handles common fencing operations that are valid for multiple ioctls. Always
- * allow through ioctls that are cleanup oriented in nature, even when operating
- * in a failed/terminating state.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- int rc = 0;
-
- if (unlikely(!lli)) {
- dev_dbg(dev, "%s: Unknown LUN\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = check_state(cfg);
- if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
- switch (cmd) {
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- dev_dbg(dev, "%s: Command override rc=%d\n",
- __func__, rc);
- rc = 0;
- break;
- }
- }
-out:
- return rc;
-}
-
-/**
- * cxlflash_ioctl() - IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
-{
- typedef int (*sioctl) (struct scsi_device *, void *);
-
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_hdr *hdr;
- char buf[sizeof(union cxlflash_ioctls)];
- size_t size = 0;
- bool known_ioctl = false;
- int idx;
- int rc = 0;
- struct Scsi_Host *shost = sdev->host;
- sioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- sioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- {sizeof(struct dk_cxlflash_attach), cxlflash_disk_attach},
- {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
- {sizeof(struct dk_cxlflash_release), cxlflash_disk_release},
- {sizeof(struct dk_cxlflash_detach), cxlflash_disk_detach},
- {sizeof(struct dk_cxlflash_verify), cxlflash_disk_verify},
- {sizeof(struct dk_cxlflash_recover_afu), cxlflash_afu_recover},
- {sizeof(struct dk_cxlflash_manage_lun), cxlflash_manage_lun},
- {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
- {sizeof(struct dk_cxlflash_resize), cxlflash_vlun_resize},
- {sizeof(struct dk_cxlflash_clone), cxlflash_disk_clone},
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- /* Restrict command set to physical support only for internal LUN */
- if (afu->internal_lun)
- switch (cmd) {
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
- __func__, decode_ioctl(cmd), afu->internal_lun);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- case DK_CXLFLASH_USER_DIRECT:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- case DK_CXLFLASH_VERIFY:
- case DK_CXLFLASH_RECOVER_AFU:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
- __func__, decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun);
- rc = ioctl_common(sdev, cmd);
- if (unlikely(rc))
- goto cxlflash_ioctl_exit;
-
- fallthrough;
-
- case DK_CXLFLASH_MANAGE_LUN:
- known_ioctl = true;
- idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (unlikely(copy_from_user(&buf, arg, size))) {
- dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- goto cxlflash_ioctl_exit;
- }
-
- hdr = (struct dk_cxlflash_hdr *)&buf;
- if (hdr->version != DK_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_ioctl(cmd));
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- rc = do_ioctl(sdev, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(arg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-cxlflash_ioctl_exit:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__,
- decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__, decode_ioctl(cmd),
- cmd, shost->host_no, sdev->channel, sdev->id,
- sdev->lun, rc);
- return rc;
-}
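The dispatch in cxlflash_ioctl() above relies on the DK_CXLFLASH_* command numbers being consecutive, so the table index is simply the _IOC_NR() distance from DK_CXLFLASH_ATTACH. A standalone illustration of that index calculation, using hypothetical command values rather than the real DK_CXLFLASH_* numbers:

#include <stdio.h>
#include <linux/ioctl.h>

#define DEMO_MAGIC      0xCA
#define DEMO_CMD_FIRST  _IO(DEMO_MAGIC, 0x80)   /* hypothetical first command in the set */
#define DEMO_CMD_THIRD  _IO(DEMO_MAGIC, 0x82)   /* hypothetical third command in the set */

int main(void)
{
	unsigned int idx = _IOC_NR(DEMO_CMD_THIRD) - _IOC_NR(DEMO_CMD_FIRST);

	printf("table index = %u\n", idx);       /* prints 2 */
	return 0;
}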
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
deleted file mode 100644
index fe8c975d13d7..000000000000
--- a/drivers/scsi/cxlflash/superpipe.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_SUPERPIPE_H
-#define _CXLFLASH_SUPERPIPE_H
-
-extern struct cxlflash_global global;
-
-/*
- * Terminology: use afu (and not adapter) to refer to the HW.
- * Adapter is the entire slot and includes PSL out of which
- * only the AFU is visible to user space.
- */
-
-/* Chunk size parms: note sislite minimum chunk size is
- * 0x10000 LBAs, corresponding to an NMASK of 16.
- */
-#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
-
-#define CMD_TIMEOUT 30 /* 30 secs */
-#define CMD_RETRIES 5 /* 5 retries for scsi_execute */
-
-#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */
-
-enum lun_mode {
- MODE_NONE = 0,
- MODE_VIRTUAL,
- MODE_PHYSICAL
-};
-
-/* Global (entire driver, spans adapters) lun_info structure */
-struct glun_info {
- u64 max_lba; /* from read cap(16) */
- u32 blk_len; /* from read cap(16) */
- enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */
- int users; /* Number of users w/ references to LUN */
-
- u8 wwid[16];
-
- struct mutex mutex;
-
- struct blka blka;
- struct list_head list;
-};
-
-/* Local (per-adapter) lun_info structure */
-struct llun_info {
- u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */
- u32 lun_index; /* Index in the LUN table */
- u32 host_no; /* host_no from Scsi_host */
- u32 port_sel; /* What port to use for this LUN */
- bool in_table; /* Whether a LUN table entry was created */
-
- u8 wwid[16]; /* Keep a duplicate copy here? */
-
- struct glun_info *parent; /* Pointer to entry in global LUN structure */
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-struct lun_access {
- struct llun_info *lli;
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-enum ctx_ctrl {
- CTX_CTRL_CLONE = (1 << 1),
- CTX_CTRL_ERR = (1 << 2),
- CTX_CTRL_ERR_FALLBACK = (1 << 3),
- CTX_CTRL_NOPID = (1 << 4),
- CTX_CTRL_FILE = (1 << 5)
-};
-
-#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
-#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
-
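The two macros above pack a token whose low 32 bits are the raw context id and whose upper bits are taken from the ctx_info pointer. A standalone round-trip example (illustrative only, not driver code; assumes a 64-bit build):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
#define DECODE_CTXID(_val)      (_val & 0xFFFFFFFF)

int main(void)
{
	int dummy;                                  /* stand-in for a struct ctx_info */
	u64 ctx = (u64)(uintptr_t)&dummy;
	u64 id = 7;                                 /* process element from the CXL layer */
	u64 token = ENCODE_CTXID(ctx, id);

	printf("token=%#llx decoded id=%llu\n",
	       (unsigned long long)token,
	       (unsigned long long)DECODE_CTXID(token));
	return 0;
}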
-struct ctx_info {
- struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */
- struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
- * alloc/free on attach/detach
- */
- u32 rht_out; /* Number of checked out RHT entries */
- u32 rht_perms; /* User-defined permissions for RHT entries */
- struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
- u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
-
- u64 ctxid;
- u64 irqs; /* Number of interrupts requested for context */
- pid_t pid;
- bool initialized;
- bool unavail;
- bool err_recovery_active;
- struct mutex mutex; /* Context protection */
- struct kref kref;
- void *ctx;
- struct cxlflash_cfg *cfg;
- struct list_head luns; /* LUNs attached to this context */
- const struct vm_operations_struct *cxl_mmap_vmops;
- struct file *file;
- struct list_head list; /* Link contexts in error recovery */
-};
-
-struct cxlflash_global {
- struct mutex mutex;
- struct list_head gluns; /* list of glun_info structs */
- struct page *err_page; /* One page of all 1s (0xFF) for error notification */
-};
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize);
-int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize);
-
-int cxlflash_disk_release(struct scsi_device *sdev,
- void *release);
-int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_release *release);
-
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg);
-
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg);
-
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked);
-void cxlflash_lun_detach(struct glun_info *gli);
-
- struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid, void *arg,
- enum ctx_ctrl ctrl);
-void put_context(struct ctx_info *ctxi);
-
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli);
-
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli);
-void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte);
-
-void cxlflash_ba_terminate(struct ba_lun *ba_lun);
-
-int cxlflash_manage_lun(struct scsi_device *sdev, void *manage);
-
-int check_state(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
deleted file mode 100644
index 32e807703377..000000000000
--- a/drivers/scsi/cxlflash/vlun.c
+++ /dev/null
@@ -1,1336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-#include <asm/bitsperlong.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * marshal_virt_to_resize() - translate uvirtual to resize structure
- * @virt: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = virt->hdr;
- resize->context_id = virt->context_id;
- resize->rsrc_handle = virt->rsrc_handle;
- resize->req_size = virt->lun_size;
- resize->last_lba = virt->last_lba;
-}
-
-/**
- * marshal_clone_to_rele() - translate clone to release structure
- * @clone: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
- struct dk_cxlflash_release *release)
-{
- release->hdr = clone->hdr;
- release->context_id = clone->context_id_dst;
-}
-
-/**
- * ba_init() - initializes a block allocator
- * @ba_lun: Block allocator to initialize.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ba_init(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = NULL;
- int lun_size_au = 0, i = 0;
- int last_word_underflow = 0;
- u64 *lam;
-
- pr_debug("%s: Initializing LUN: lun_id=%016llx "
- "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
- __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
-
- /* Calculate bit map size */
- lun_size_au = ba_lun->lsize / ba_lun->au_size;
- if (lun_size_au == 0) {
- pr_debug("%s: Requested LUN size of 0!\n", __func__);
- return -EINVAL;
- }
-
- /* Allocate lun information container */
- bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
- if (unlikely(!bali)) {
- pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -ENOMEM;
- }
-
- bali->total_aus = lun_size_au;
- bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
-
- if (lun_size_au % BITS_PER_LONG)
- bali->lun_bmap_size++;
-
- /* Allocate bitmap space */
- bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
- GFP_KERNEL);
- if (unlikely(!bali->lun_alloc_map)) {
- pr_err("%s: Failed to allocate lun allocation map: "
- "lun_id=%016llx\n", __func__, ba_lun->lun_id);
- kfree(bali);
- return -ENOMEM;
- }
-
- /* Initialize the free AU count and set all bits to '1' */
- bali->free_aun_cnt = lun_size_au;
-
- for (i = 0; i < bali->lun_bmap_size; i++)
- bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
-
- /* If the last word is not fully utilized, mark extra bits as allocated */
- last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
- last_word_underflow -= bali->free_aun_cnt;
- if (last_word_underflow > 0) {
- lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
- for (i = (HIBIT - last_word_underflow + 1);
- i < BITS_PER_LONG;
- i++)
- clear_bit(i, (ulong *)lam);
- }
-
- /* Initialize high elevator index, low/curr already at 0 from kzalloc */
- bali->free_high_idx = bali->lun_bmap_size;
-
- /* Allocate clone map */
- bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
- GFP_KERNEL);
- if (unlikely(!bali->aun_clone_map)) {
- pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- return -ENOMEM;
- }
-
- /* Pass the allocated LUN info as a handle to the user */
- ba_lun->ba_lun_handle = bali;
-
- pr_debug("%s: Successfully initialized the LUN: "
- "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->lun_bmap_size,
- bali->free_aun_cnt);
- return 0;
-}
-
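ba_init() above sizes the allocation bitmap as one bit per allocation unit, rounded up to whole 64-bit words, with the unused tail bits of the last word then marked as allocated. The sizing arithmetic in isolation (standalone sketch with a hypothetical AU count, not driver code):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64

int main(void)
{
	uint64_t lun_size_au = 100;                    /* hypothetical AU count */
	uint64_t words = lun_size_au / BITS_PER_WORD;
	uint64_t tail = lun_size_au % BITS_PER_WORD;

	if (tail)
		words++;                               /* round up to a full word */

	printf("%llu AUs -> %llu map words, %llu usable bits in the last word\n",
	       (unsigned long long)lun_size_au,
	       (unsigned long long)words,
	       (unsigned long long)(tail ? tail : BITS_PER_WORD));
	return 0;
}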
-/**
- * find_free_range() - locates a free bit within the block allocator
- * @low: First word in block allocator to start search.
- * @high: Last word in block allocator to search.
- * @bali: LUN information structure owning the block allocator to search.
- * @bit_word: Passes back the word in the block allocator owning the free bit.
- *
- * Return: The bit position within the passed back word, -1 on failure
- */
-static int find_free_range(u32 low,
- u32 high,
- struct ba_lun_info *bali, int *bit_word)
-{
- int i;
- u64 bit_pos = -1;
- ulong *lam, num_bits;
-
- for (i = low; i < high; i++)
- if (bali->lun_alloc_map[i] != 0) {
- lam = (ulong *)&bali->lun_alloc_map[i];
- num_bits = (sizeof(*lam) * BITS_PER_BYTE);
- bit_pos = find_first_bit(lam, num_bits);
-
- pr_devel("%s: Found free bit %llu in LUN "
- "map entry %016llx at bitmap index = %d\n",
- __func__, bit_pos, bali->lun_alloc_map[i], i);
-
- *bit_word = i;
- bali->free_aun_cnt--;
- clear_bit(bit_pos, lam);
- break;
- }
-
- return bit_pos;
-}
-
-/**
- * ba_alloc() - allocates a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- *
- * Return: The allocated block, -1 on failure
- */
-static u64 ba_alloc(struct ba_lun *ba_lun)
-{
- u64 bit_pos = -1;
- int bit_word = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- pr_debug("%s: Received block allocation request: "
- "lun_id=%016llx free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->free_aun_cnt);
-
- if (bali->free_aun_cnt == 0) {
- pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -1ULL;
- }
-
- /* Search to find a free entry, curr->high then low->curr */
- bit_pos = find_free_range(bali->free_curr_idx,
- bali->free_high_idx, bali, &bit_word);
- if (bit_pos == -1) {
- bit_pos = find_free_range(bali->free_low_idx,
- bali->free_curr_idx,
- bali, &bit_word);
- if (bit_pos == -1) {
- pr_debug("%s: Could not find an allocation unit on LUN:"
- " lun_id=%016llx\n", __func__, ba_lun->lun_id);
- return -1ULL;
- }
- }
-
- /* Update the free_curr_idx */
- if (bit_pos == HIBIT)
- bali->free_curr_idx = bit_word + 1;
- else
- bali->free_curr_idx = bit_word;
-
- pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__,
- ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
- bali->free_aun_cnt);
-
- return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
-}
-
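ba_alloc() above searches in two passes: from the current index up to the top of the map, then from the bottom back up to the current index. A standalone sketch of that search order (hypothetical map contents, not driver code):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64

/* Scan words [low, high) for a set (free) bit; -1 if none. */
static long find_free(const uint64_t *map, unsigned int low, unsigned int high)
{
	for (unsigned int w = low; w < high; w++)
		for (unsigned int b = 0; b < BITS_PER_WORD; b++)
			if (map[w] & ((uint64_t)1 << b))
				return (long)w * BITS_PER_WORD + b;
	return -1;
}

int main(void)
{
	uint64_t map[4] = { 0, 0, 0x10, 0 };       /* hypothetical map, one free AU */
	unsigned int curr = 3, nwords = 4;
	long pos;

	pos = find_free(map, curr, nwords);        /* first pass: curr -> high */
	if (pos < 0)
		pos = find_free(map, 0, curr);     /* second pass: low -> curr */

	printf("allocated AU %ld\n", pos);         /* prints 132 */
	return 0;
}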
-/**
- * validate_alloc() - validates the specified block has been allocated
- * @bali: LUN info owning the block allocator.
- * @aun: Block to validate.
- *
- * Return: 0 on success, -1 on failure
- */
-static int validate_alloc(struct ba_lun_info *bali, u64 aun)
-{
- int idx = 0, bit_pos = 0;
-
- idx = aun / BITS_PER_LONG;
- bit_pos = aun % BITS_PER_LONG;
-
- if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
- return -1;
-
- return 0;
-}
-
-/**
- * ba_free() - frees a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_free: Block to free.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_free(struct ba_lun *ba_lun, u64 to_free)
-{
- int idx = 0, bit_pos = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_free)) {
- pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
- __func__, to_free, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
- bali->free_aun_cnt);
-
- if (bali->aun_clone_map[to_free] > 0) {
- pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
- __func__, to_free, ba_lun->lun_id,
- bali->aun_clone_map[to_free]);
- bali->aun_clone_map[to_free]--;
- return 0;
- }
-
- idx = to_free / BITS_PER_LONG;
- bit_pos = to_free % BITS_PER_LONG;
-
- set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
- bali->free_aun_cnt++;
-
- if (idx < bali->free_low_idx)
- bali->free_low_idx = idx;
- else if (idx > bali->free_high_idx)
- bali->free_high_idx = idx;
-
- pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
- "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
- ba_lun->lun_id, bali->free_aun_cnt);
-
- return 0;
-}
-
-/**
- * ba_clone() - Clone a chunk of the block allocation table
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_clone: Block to clone.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_clone)) {
- pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
-
- if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
- pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- bali->aun_clone_map[to_clone]++;
-
- return 0;
-}
-
-/**
- * ba_space() - returns the amount of free space left in the block allocator
- * @ba_lun: Block allocator.
- *
- * Return: Amount of free space in block allocator
- */
-static u64 ba_space(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- return bali->free_aun_cnt;
-}
-
-/**
- * cxlflash_ba_terminate() - frees resources associated with the block allocator
- * @ba_lun: Block allocator.
- *
- * Safe to call in a partially allocated state.
- */
-void cxlflash_ba_terminate(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (bali) {
- kfree(bali->aun_clone_map);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- ba_lun->ba_lun_handle = NULL;
- }
-}
-
-/**
- * init_vlun() - initializes a LUN for virtual use
- * @lli: LUN information structure that owns the block allocator.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_vlun(struct llun_info *lli)
-{
- int rc = 0;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
-
- memset(blka, 0, sizeof(*blka));
- mutex_init(&blka->mutex);
-
- /* LUN IDs are unique per port, save the index instead */
- blka->ba_lun.lun_id = lli->lun_index;
- blka->ba_lun.lsize = gli->max_lba + 1;
- blka->ba_lun.lba_size = gli->blk_len;
-
- blka->ba_lun.au_size = MC_CHUNK_SIZE;
- blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
-
- rc = ba_init(&blka->ba_lun);
- if (unlikely(rc))
- pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
-
- pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
- return rc;
-}
-
-/**
- * write_same16() - sends a SCSI WRITE_SAME(16) command of zeros to the specified LUN
- * @sdev: SCSI device associated with LUN.
- * @lba: Logical block address to start write same.
- * @nblks: Number of logical blocks to write same.
- *
- * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
- * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As
- * part of the recovery, the handler drains all currently running ioctls,
- * waiting until they have completed before proceeding with a reset. As this
- * routine is used on the ioctl path, this can create a condition where the
- * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To
- * avoid this behavior, temporarily unmark this thread as an ioctl thread by
- * releasing the ioctl read semaphore. This will allow the EEH handler to
- * proceed with a recovery while this thread is still running. Once the
- * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the
- * adapter state in case it changed while inside of scsi_execute_cmd(). The
- * state check will wait if the adapter is still being recovered or return a
- * failure if the recovery failed. In the event that the adapter reset failed,
- * simply return the failure as the ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int write_same16(struct scsi_device *sdev,
- u64 lba,
- u32 nblks)
-{
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- u64 offset = lba;
- int left = nblks;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- const u32 s = ilog2(sdev->sector_size) - 9;
- const u32 to = sdev->request_queue->rq_timeout;
- const u32 ws_limit =
- sdev->request_queue->limits.max_write_zeroes_sectors >> s;
-
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- while (left > 0) {
-
- scsi_cmd[0] = WRITE_SAME_16;
- scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
- put_unaligned_be64(offset, &scsi_cmd[2]);
- put_unaligned_be32(ws_limit < left ? ws_limit : left,
- &scsi_cmd[10]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT,
- cmd_buf, CMD_BUFSIZE, to,
- CMD_RETRIES, NULL);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result) {
- dev_err_ratelimited(dev, "%s: command failed for "
- "offset=%lld result=%08x\n",
- __func__, offset, result);
- rc = -EIO;
- goto out;
- }
- left -= ws_limit;
- offset += ws_limit;
- }
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * grow_lxt() - expands the translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @new_size: Number of translation entries associated with RHTE.
- *
- * By design, this routine employs a 'best attempt' allocation and will
- * truncate the requested size down if there is not sufficient space in
- * the block allocator to satisfy the request but there does exist some
- * amount of space. The user is made aware of this by returning the size
- * allocated.
- *
- * Return: 0 on success, -errno on failure
- */
-static int grow_lxt(struct afu *afu,
- struct scsi_device *sdev,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- u32 av_size;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = *new_size - rhte->lxt_cnt;
- u64 my_new_size;
- int i, rc = 0;
-
- /*
- * Check what is available in the block allocator before re-allocating
- * LXT array. This is done up front under the mutex which must not be
- * released until after allocation is complete.
- */
- mutex_lock(&blka->mutex);
- av_size = ba_space(&blka->ba_lun);
- if (unlikely(av_size <= 0)) {
- dev_dbg(dev, "%s: ba_space error av_size=%d\n",
- __func__, av_size);
- mutex_unlock(&blka->mutex);
- rc = -ENOSPC;
- goto out;
- }
-
- if (av_size < delta)
- delta = av_size;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
-
- if (ngrps != ngrps_old) {
- /* reallocate to fit new size */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- mutex_unlock(&blka->mutex);
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over all old entries */
- memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
- } else
- lxt = lxt_old;
-
- /* nothing can fail from now on */
- my_new_size = rhte->lxt_cnt + delta;
-
- /* add new entries to the end */
- for (i = rhte->lxt_cnt; i < my_new_size; i++) {
- /*
- * Due to the earlier check of available space, ba_alloc
-		 * cannot fail here. If it did due to an internal error,
-		 * leave an rlba_base of -1u, which will likely be an
-		 * invalid LUN (too large).
- */
- aun = ba_alloc(&blka->ba_lun);
- if ((aun == -1ULL) || (aun >= blka->nchunk))
- dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
- "max=%llu\n", __func__, aun, blka->nchunk - 1);
-
- /* select both ports, use r/w perms from RHT */
- lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
- (lli->lun_index << LXT_LUNIDX_SHIFT) |
- (RHT_PERM_RW << LXT_PERM_SHIFT |
- lli->port_sel));
- }
-
- mutex_unlock(&blka->mutex);
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
-	dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
-
- /* free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * shrink_lxt() - reduces translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @ctxi: Context owning resources.
- * @new_size: Number of translation entries associated with RHTE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int shrink_lxt(struct afu *afu,
- struct scsi_device *sdev,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct ctx_info *ctxi,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt, *lxt_old;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
- bool needs_ws = ctxi->rht_needs_ws[rhndl];
- bool needs_sync = !ctxi->err_recovery_active;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = rhte->lxt_cnt - *new_size;
- u64 my_new_size;
- int i, rc = 0;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
-
- if (ngrps != ngrps_old) {
- /* Reallocate to fit new size unless new size is 0 */
- if (ngrps) {
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Copy over old entries that will remain */
- memcpy(lxt, lxt_old,
- (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
- } else
- lxt = NULL;
- } else
- lxt = lxt_old;
-
- /* Nothing can fail from now on */
- my_new_size = rhte->lxt_cnt - delta;
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when removing LXT entries.
- */
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- if (needs_sync) {
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
- }
-
- if (needs_ws) {
- /*
- * Mark the context as unavailable, so that we can release
- * the mutex safely.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- }
-
- /* Free LBAs allocated to freed chunks */
- mutex_lock(&blka->mutex);
- for (i = delta - 1; i >= 0; i--) {
- aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
- if (needs_ws)
- write_same16(sdev, aun, MC_CHUNK_SIZE);
- ba_free(&blka->ba_lun, aun);
- }
- mutex_unlock(&blka->mutex);
-
- if (needs_ws) {
- /* Make the context visible again */
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- /* Free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * _cxlflash_vlun_resize() - changes the size of a virtual LUN
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @ctxi: Context owning resources.
- * @resize: Resize ioctl data structure.
- *
- * On successful return, the user is informed of the new size (in blocks)
- * of the virtual LUN in last LBA format. When the size of the virtual
- * LUN is zero, the last LBA is reflected as -1. See comment in the
- * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
- * on the error recovery list.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_vlun_resize(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- res_hndl_t rhndl = resize->rsrc_handle;
- u64 new_size;
- u64 nsectors;
- u64 ctxid = DECODE_CTXID(resize->context_id),
- rctxid = resize->context_id;
-
- struct sisl_rht_entry *rhte;
-
- int rc = 0;
-
- /*
- * The requested size (req_size) is always assumed to be in 4k blocks,
- * so we have to convert it here from 4k to chunk size.
- */
- nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
- new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
- __func__, ctxid, resize->rsrc_handle, resize->req_size,
- new_size);
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
- __func__, gli->mode);
- rc = -EINVAL;
- goto out;
-
- }
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- if (new_size > rhte->lxt_cnt)
- rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
- else if (new_size < rhte->lxt_cnt)
- rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
- else {
- /*
- * Rare case where there is already sufficient space, just
- * need to perform a translation sync with the AFU. This
- * scenario likely follows a previous sync failure during
- * a resize operation. Accordingly, perform the heavyweight
- * form of translation sync as it is unknown which type of
- * resize failed previously.
- */
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto out;
- }
- }
-
- resize->hdr.return_flags = 0;
- resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
- resize->last_lba /= CXLFLASH_BLOCK_SIZE;
- resize->last_lba--;
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
- __func__, resize->last_lba, rc);
- return rc;
-}
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize)
-{
- return _cxlflash_vlun_resize(sdev, NULL, resize);
-}
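
For reference, a minimal sketch (not driver code) of the unit conversions performed by _cxlflash_vlun_resize() above: the ioctl size arrives in 4K blocks, is rounded up to whole chunks for the LXT, and the result is reported back as a last LBA in 4K blocks. The helper names below are invented for the example; CXLFLASH_BLOCK_SIZE, MC_CHUNK_SIZE and DIV_ROUND_UP are the driver's and kernel's own definitions.

/* Illustrative only -- mirrors the conversion in _cxlflash_vlun_resize() */
static u64 req_size_to_chunks(u64 req_size, u32 blk_len)
{
	/* req_size is expressed in 4K (CXLFLASH_BLOCK_SIZE) blocks */
	u64 nsectors = (req_size * CXLFLASH_BLOCK_SIZE) / blk_len;

	/* round up to whole chunks, the allocation granularity of the LXT */
	return DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
}

static u64 chunks_to_last_lba(u64 nchunks, u32 blk_len)
{
	/* report the size back as a last LBA in 4K blocks (-1 when empty) */
	return (nchunks * MC_CHUNK_SIZE * blk_len) / CXLFLASH_BLOCK_SIZE - 1;
}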
-
-/**
- * cxlflash_restore_luntable() - Restore LUN table to prior state
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
- u32 lind;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- if (!lli->in_table)
- continue;
-
- lind = lli->lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++)
- if (lli->port_sel & (1 << k)) {
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
- }
-
- mutex_unlock(&global.mutex);
-}
-
-/**
- * get_num_ports() - compute number of ports from port selection mask
- * @psm: Port selection mask.
- *
- * Return: Population count of port selection mask
- */
-static inline u8 get_num_ports(u32 psm)
-{
- static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
- 1, 2, 2, 3, 2, 3, 3, 4 };
-
- return bits[psm & 0xf];
-}
-
-/**
- * init_luntable() - write an entry in the LUN table
- * @cfg: Internal structure associated with the host.
- * @lli: Per adapter LUN information structure.
- *
- * On successful return, a LUN table entry is created:
- * - at the top for LUNs visible on multiple ports.
- * - at the bottom for LUNs visible only on one port.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
-{
- u32 chan;
- u32 lind;
- u32 nports;
- int rc = 0;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- if (lli->in_table)
- goto out;
-
- nports = get_num_ports(lli->port_sel);
- if (nports == 0 || nports > cfg->num_fc_ports) {
- WARN(1, "Unsupported port configuration nports=%u", nports);
- rc = -EIO;
- goto out;
- }
-
- if (nports > 1) {
- /*
- * When LUN is visible from multiple ports, we will put
- * it in the top half of the LUN table.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
- rc = -ENOSPC;
- goto out;
- }
- }
-
- lind = lli->lun_index = cfg->promote_lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
-
- cfg->promote_lun_index++;
- } else {
- /*
- * When LUN is visible only from one port, we will put
- * it in the bottom half of the LUN table.
- */
- chan = PORTMASK2CHAN(lli->port_sel);
- if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
- rc = -ENOSPC;
- goto out;
- }
-
- lind = lli->lun_index = cfg->last_lun_index[chan];
- fc_port_luns = get_fc_port_luns(cfg, chan);
- writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
- cfg->last_lun_index[chan]--;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
- __func__, lind, chan, lli->lun_id[chan]);
- }
-
- lli->in_table = true;
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
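
As a rough illustration of the placement policy in init_luntable() above: multi-port LUNs take slots from promote_lun_index, which advances from the start of the table, while single-port LUNs take slots from last_lun_index[port], which retreats from the end; a port has no free slot left once its two cursors meet. The sketch below is hypothetical (the structure and helpers do not exist in the driver) and assumes two FC ports.

/* Illustrative only: two-cursor slot allocation as used by init_luntable() */
struct lun_cursors {
	u32 promote;	/* next slot handed to multi-port LUNs  */
	u32 last[2];	/* next slot handed to single-port LUNs */
};

static int take_multiport_slot(struct lun_cursors *c, int nports, u32 *slot)
{
	int k;

	/* every port the LUN is visible on must still have room */
	for (k = 0; k < nports; k++)
		if (c->promote == c->last[k])
			return -ENOSPC;

	*slot = c->promote++;
	return 0;
}

static int take_singleport_slot(struct lun_cursors *c, int port, u32 *slot)
{
	if (c->promote == c->last[port])
		return -ENOSPC;

	*slot = c->last[port]--;
	return 0;
}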
-
-/**
- * cxlflash_disk_virtual_open() - open a virtual disk of specified size
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: UVirtual ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the virtual LUN and the size (in blocks) of
- * the virtual LUN in last LBA format. When the size of the virtual LUN
- * is zero, the last LBA is reflected as -1.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
-
- struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
- struct dk_cxlflash_resize resize;
-
- u64 ctxid = DECODE_CTXID(virt->context_id),
- rctxid = virt->context_id;
- u64 lun_size = virt->lun_size;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
- /* Setup the LUNs block allocator on first call */
- mutex_lock(&gli->mutex);
- if (gli->mode == MODE_NONE) {
- rc = init_vlun(lli);
- if (rc) {
- dev_err(dev, "%s: init_vlun failed rc=%d\n",
- __func__, rc);
- rc = -ENOMEM;
- goto err0;
- }
- }
-
- rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
- goto err0;
- }
- mutex_unlock(&gli->mutex);
-
- rc = init_luntable(cfg, lli);
- if (rc) {
- dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: too many opens ctxid=%llu\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- /* Populate RHT format 0 */
- rhte->nmask = MC_RHT_NMASK;
- rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
-
- /* Resize even if requested size is 0 */
- marshal_virt_to_resize(virt, &resize);
- resize.rsrc_handle = rsrc_handle;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
- if (rc) {
- dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
- goto err2;
- }
- last_lba = resize.last_lba;
-
- if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
- ctxi->rht_needs_ws[rsrc_handle] = true;
-
- virt->hdr.return_flags = 0;
- virt->last_lba = last_lba;
- virt->rsrc_handle = rsrc_handle;
-
- if (get_num_ports(lli->port_sel) > 1)
- virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- rhte_checkin(ctxi, rhte);
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-err0:
- /* Special common cleanup prior to successful LUN attach */
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- mutex_unlock(&gli->mutex);
- goto out;
-}
-
-/**
- * clone_lxt() - copies translation tables from source to destination RHTE
- * @afu: AFU associated with the host.
- * @blka: Block allocator associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Destination resource handle entry (RHTE).
- * @rhte_src: Source resource handle entry (RHTE).
- *
- * Return: 0 on success, -errno on failure
- */
-static int clone_lxt(struct afu *afu,
- struct blka *blka,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct sisl_rht_entry *rhte_src)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL;
- bool locked = false;
- u32 ngrps;
- u64 aun; /* chunk# allocated by block allocator */
- int j;
- int i = 0;
- int rc = 0;
-
- ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
-
- if (ngrps) {
- /* allocate new LXTs for clone */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over */
- memcpy(lxt, rhte_src->lxt_start,
- (sizeof(*lxt) * rhte_src->lxt_cnt));
-
-		/* clone the LBAs in the block allocator via ref_cnt; note that the
- * block allocator mutex must be held until it is established
- * that this routine will complete without the need for a
- * cleanup.
- */
- mutex_lock(&blka->mutex);
- locked = true;
- for (i = 0; i < rhte_src->lxt_cnt; i++) {
- aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
- if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
- rc = -EIO;
- goto err;
- }
- }
- }
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
-	dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = rhte_src->lxt_cnt;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto err2;
- }
-
-out:
- if (locked)
- mutex_unlock(&blka->mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- /* Reset the RHTE */
- rhte->lxt_cnt = 0;
- dma_wmb();
- rhte->lxt_start = NULL;
- dma_wmb();
-err:
- /* free the clones already made */
- for (j = 0; j < i; j++) {
- aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
- ba_free(&blka->ba_lun, aun);
- }
- kfree(lxt);
- goto out;
-}
-
-/**
- * cxlflash_disk_clone() - clone a context by making snapshot of another
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: Clone ioctl data structure.
- *
- * This routine effectively performs a cxlflash_disk_open operation for each
- * in-use virtual resource in the source context. Note that the destination
- * context must be in pristine state and cannot have any resource handles
- * open at the time of the clone.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_clone *clone = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_release release = { { 0 }, 0 };
-
- struct ctx_info *ctxi_src = NULL,
- *ctxi_dst = NULL;
- struct lun_access *lun_access_src, *lun_access_dst;
- u32 perms;
- u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
- ctxid_dst = DECODE_CTXID(clone->context_id_dst),
- rctxid_src = clone->context_id_src,
- rctxid_dst = clone->context_id_dst;
- int i, j;
- int rc = 0;
- bool found;
- LIST_HEAD(sidecar);
-
- dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
-
- /* Do not clone yourself */
- if (unlikely(rctxid_src == rctxid_dst)) {
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- rc = -EINVAL;
- dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
- __func__, gli->mode);
- goto out;
- }
-
- ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
- ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
- if (unlikely(!ctxi_src || !ctxi_dst)) {
- dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
- rc = -EINVAL;
- goto out;
- }
-
- /* Verify there is no open resource handle in the destination context */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi_dst->rht_start[i].nmask != 0) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Clone LUN access list */
- list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
- found = false;
- list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
- if (lun_access_dst->sdev == lun_access_src->sdev) {
- found = true;
- break;
- }
-
- if (!found) {
- lun_access_dst = kzalloc(sizeof(*lun_access_dst),
- GFP_KERNEL);
- if (unlikely(!lun_access_dst)) {
- dev_err(dev, "%s: lun_access allocation fail\n",
- __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- *lun_access_dst = *lun_access_src;
- list_add(&lun_access_dst->list, &sidecar);
- }
- }
-
- if (unlikely(!ctxi_src->rht_out)) {
- dev_dbg(dev, "%s: Nothing to clone\n", __func__);
- goto out_success;
- }
-
- /* User specified permission on attach */
- perms = ctxi_dst->rht_perms;
-
- /*
- * Copy over checked-out RHT (and their associated LXT) entries by
- * hand, stopping after we've copied all outstanding entries and
- * cleaning up if the clone fails.
- *
- * Note: This loop is equivalent to performing cxlflash_disk_open and
- * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
- * account by attaching after each successful RHT entry clone. In the
- * event that a clone failure is experienced, the LUN detach is handled
- * via the cleanup performed by _cxlflash_disk_release.
- */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi_src->rht_out == ctxi_dst->rht_out)
- break;
- if (ctxi_src->rht_start[i].nmask == 0)
- continue;
-
- /* Consume a destination RHT entry */
- ctxi_dst->rht_out++;
- ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
- ctxi_dst->rht_start[i].fp =
- SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
- ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
-
- rc = clone_lxt(afu, blka, ctxid_dst, i,
- &ctxi_dst->rht_start[i],
- &ctxi_src->rht_start[i]);
- if (rc) {
- marshal_clone_to_rele(clone, &release);
- for (j = 0; j < i; j++) {
- release.rsrc_handle = j;
- _cxlflash_disk_release(sdev, ctxi_dst,
- &release);
- }
-
- /* Put back the one we failed on */
- rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
- goto err;
- }
-
- cxlflash_lun_attach(gli, gli->mode, false);
- }
-
-out_success:
- list_splice(&sidecar, &ctxi_dst->luns);
-
- /* fall through */
-out:
- if (ctxi_src)
- put_context(ctxi_src);
- if (ctxi_dst)
- put_context(ctxi_dst);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err:
- list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
- kfree(lun_access_src);
- goto out;
-}
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
deleted file mode 100644
index 68e3ea52fe80..000000000000
--- a/drivers/scsi/cxlflash/vlun.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_VLUN_H
-#define _CXLFLASH_VLUN_H
-
-/* RHT - Resource Handle Table */
-#define MC_RHT_NMASK 16 /* in bits */
-#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */
-
-#define HIBIT (BITS_PER_LONG - 1)
-
-#define MAX_AUN_CLONE_CNT 0xFF
-
-/*
- * LXT - LBA Translation Table
- *
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- * | RLBA_BASE |LUN_IDX| P |SEL|
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- *
- * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
- * AFU ORes the low order bits from the virtual LBA (offset into the chunk)
- * with RLBA_BASE. The result is the physical LBA to be sent to storage.
- * The LXT Entry also contains an index to a LUN TBL and a bitmask of which
- * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
- * with a global port select bit-mask maintained by the driver.
- * In addition, it has permission bits that are ANDed with the
- * RHT permissions to arrive at the final permissions for the chunk.
- *
- * LXT tables are allocated dynamically in groups. This is done to avoid
- * a malloc/free overhead each time the LXT has to grow or shrink.
- *
- * Based on the current lxt_cnt (used), it is always possible to know
- * how many are allocated (used+free). The number of allocated entries is
- * not stored anywhere.
- *
- * The LXT table is re-allocated whenever it needs to cross into another group.
- */
-#define LXT_GROUP_SIZE 8
-#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */
-#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */
-#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */
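
For illustration, the packing implied by the layout above, as performed by grow_lxt() in vlun.c. The helper below is made up for the example (it is not part of the driver); RHT_PERM_RW and MC_CHUNK_SHIFT come from the other cxlflash headers.

/* Illustrative only: compose one LXT entry from its fields */
static inline u64 lxt_entry_pack(u64 aun, u32 lun_idx, u8 perm, u8 port_sel)
{
	return (aun << MC_CHUNK_SHIFT) |		/* RLBA_BASE */
	       ((u64)lun_idx << LXT_LUNIDX_SHIFT) |	/* LUN_IDX   */
	       ((u64)perm << LXT_PERM_SHIFT) |		/* P         */
	       port_sel;				/* SEL       */
}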
-
-struct ba_lun_info {
- u64 *lun_alloc_map;
- u32 lun_bmap_size;
- u32 total_aus;
- u64 free_aun_cnt;
-
- /* indices to be used for elevator lookup of free map */
- u32 free_low_idx;
- u32 free_curr_idx;
- u32 free_high_idx;
-
- u8 *aun_clone_map;
-};
-
-struct ba_lun {
- u64 lun_id;
- u64 wwpn;
- size_t lsize; /* LUN size in number of LBAs */
- size_t lba_size; /* LBA size in number of bytes */
- size_t au_size; /* Allocation Unit size in number of LBAs */
- struct ba_lun_info *ba_lun_handle;
-};
-
-/* Block Allocator */
-struct blka {
- struct ba_lun ba_lun;
- u64 nchunk; /* number of chunks */
- struct mutex mutex;
-};
-
-#endif /* ifndef _CXLFLASH_VLUN_H */
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index 8469c156ab33..59f277593785 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -735,7 +735,7 @@ efct_pci_io_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, efct_pci_table);
-static struct pci_error_handlers efct_pci_err_handler = {
+static const struct pci_error_handlers efct_pci_err_handler = {
.error_detected = efct_pci_io_error_detected,
.slot_reset = efct_pci_io_slot_reset,
.resume = efct_pci_io_resume,
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
index 11211c469583..4c6bbf417a9a 100644
--- a/drivers/scsi/fnic/fdls_disc.c
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -308,43 +308,29 @@ void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
struct fnic *fnic = iport->fnic;
struct reclaim_entry_s *reclaim_entry;
unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ unsigned long flags;
int idx;
- spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
-
for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"Schedule oxid free. oxid idx: %d\n", idx);
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
- reclaim_entry = (struct reclaim_entry_s *)
- kzalloc(sizeof(struct reclaim_entry_s), GFP_KERNEL);
- spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
-
+ reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL);
if (!reclaim_entry) {
- FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
- "Failed to allocate memory for reclaim struct for oxid idx: 0x%x\n",
- idx);
-
schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
return;
}
- if (test_and_clear_bit(idx, oxid_pool->pending_schedule_free)) {
- reclaim_entry->oxid_idx = idx;
- reclaim_entry->expires = round_jiffies(jiffies + delay_j);
- list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
- schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
- } else {
- /* unlikely scenario, free the allocated memory and continue */
- kfree(reclaim_entry);
- }
-}
-
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ clear_bit(idx, oxid_pool->pending_schedule_free);
+ reclaim_entry->oxid_idx = idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ }
}
static bool fdls_is_oxid_fabric_req(uint16_t oxid)
@@ -1567,9 +1553,9 @@ void fdls_send_fabric_logo(struct fnic_iport_s *iport)
iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
- FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
- iport->fcid, oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
fnic_send_fcoe_frame(iport, frame, frame_size);
@@ -1898,7 +1884,6 @@ static void fdls_fdmi_register_hba(struct fnic_iport_s *iport)
if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN)
fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1;
strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN);
- data[FNIC_FDMI_MODEL_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN,
data, &attr_off_bytes);
@@ -2061,7 +2046,6 @@ static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d",
fnic->host->host_no);
strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN);
- data[FNIC_FDMI_OS_NAME_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME,
FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes);
@@ -2071,7 +2055,6 @@ static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename);
strscpy_pad(data, fc_host_system_hostname(fnic->host),
FNIC_FDMI_HN_LEN);
- data[FNIC_FDMI_HN_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME,
FNIC_FDMI_HN_LEN, data, &attr_off_bytes);
@@ -4659,13 +4642,13 @@ fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
d_id = ntoh24(fchdr->fh_d_id);
/* some common validation */
- if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
- if ((iport->fcid != d_id) || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
- FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "invalid frame received. Dropping frame");
- return -1;
- }
+ if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
+ if (iport->fcid != d_id || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid frame received. Dropping frame");
+ return -1;
}
+ }
/* BLS ABTS response */
if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC)
@@ -4682,7 +4665,7 @@ fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
"Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame",
oxid, s_id);
return -1;
- }
+ }
return FNIC_FABRIC_BLS_ABTS_RSP;
} else if (fdls_is_oxid_fdmi_req(oxid)) {
return FNIC_FDMI_BLS_ABTS_RSP;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 0b20ac8c3f46..3dd06376e97b 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1365,10 +1365,9 @@ static void __exit fnic_cleanup_module(void)
if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
destroy_workqueue(reset_fnic_work_queue);
- if (fnic_fip_queue) {
- flush_workqueue(fnic_fip_queue);
+ if (fnic_fip_queue)
destroy_workqueue(fnic_fip_queue);
- }
+
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2d438d722d0b..e17f5d8226bf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -633,8 +633,7 @@ extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
- int direction);
+extern u8 hisi_sas_get_ata_protocol(struct sas_task *task);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index da4a2ed8ee86..3596414d970b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -21,8 +21,32 @@ struct hisi_sas_internal_abort_data {
bool rst_ha_timeout; /* reset the HA for timeout */
};
-u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
+static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc)
{
+ if (!qc)
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NODATA:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ case ATA_PROT_PIO:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ case ATA_PROT_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+ case ATA_PROT_NCQ_NODATA:
+ case ATA_PROT_NCQ:
+ return HISI_SAS_SATA_PROTOCOL_FPDMA;
+ default:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ }
+}
+
+u8 hisi_sas_get_ata_protocol(struct sas_task *task)
+{
+ struct host_to_dev_fis *fis = &task->ata_task.fis;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ int direction = task->data_dir;
+
switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
@@ -93,7 +117,7 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
- return HISI_SAS_SATA_PROTOCOL_PIO;
+ return hisi_sas_get_ata_protocol_from_tf(qc);
}
}
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index bb78e53c66e2..6621d633b2cc 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1806,7 +1806,7 @@ static struct platform_driver hisi_sas_v1_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v1_of_match,
- .acpi_match_table = ACPI_PTR(sas_v1_acpi_match),
+ .acpi_match_table = sas_v1_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 71cd5b4450c2..04ee02797ca3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2538,9 +2538,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -3653,7 +3651,7 @@ static struct platform_driver hisi_sas_v2_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v2_of_match,
- .acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
+ .acpi_match_table = sas_v2_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 48b95d9a7927..095bbf80c34e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1456,9 +1456,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 84d8de07b7ae..c73a71ac3c29 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -453,17 +453,13 @@ static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int status, len;
+ int status;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &status) != 1)
+ if (kstrtoint(buf, 10, &status))
return -EINVAL;
h = shost_to_hba(shost);
h->acciopath_status = !!status;
@@ -477,17 +473,13 @@ static ssize_t host_store_raid_offload_debug(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int debug_level, len;
+ int debug_level;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ if (kstrtoint(buf, 10, &debug_level))
return -EINVAL;
if (debug_level < 0)
debug_level = 0;
@@ -7238,8 +7230,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
static void init_driver_version(char *driver_version, int len)
{
- memset(driver_version, 0, len);
- strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
+ strscpy_pad(driver_version, HPSA " " HPSA_DRIVER_VERSION, len);
}
static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 16d085d56e9d..9e42230e42b8 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2922,9 +2922,7 @@ static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
struct timer_cb *p_timer;
p_timer = &vscsi->rsp_q_timer;
- hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
- p_timer->timer.function = ibmvscsis_service_wait_q;
+ hrtimer_setup(&p_timer->timer, ibmvscsis_service_wait_q, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
p_timer->started = false;
p_timer->timer_pops = 0;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index cce6c6b409ad..94adb6ac02a4 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3631,8 +3631,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
break;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
scb->scsi_cmd->result = DID_OK << 16;
break;
@@ -3899,8 +3899,8 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
case WRITE_6:
case READ_10:
case WRITE_10:
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
break;
case MODE_SENSE:
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 73085d2f5c43..acf0c2038d20 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -91,31 +91,31 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 2;
+static unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
-u16 ssp_max_occ_to = 20;
+static u16 ssp_max_occ_to = 20;
module_param(ssp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
-u16 stp_max_occ_to = 5;
+static u16 stp_max_occ_to = 5;
module_param(stp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
-u16 ssp_inactive_to = 5;
+static u16 ssp_inactive_to = 5;
module_param(ssp_inactive_to, ushort, 0);
MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
-u16 stp_inactive_to = 5;
+static u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
+static unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
-unsigned char max_concurr_spinup;
+static unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 4e6b1decbca7..f6a8fe206415 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -473,13 +473,6 @@ static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
dest[word_cnt] = swab32(src[word_cnt]);
}
-extern unsigned char no_outbound_task_to;
-extern u16 ssp_max_occ_to;
-extern u16 stp_max_occ_to;
-extern u16 ssp_inactive_to;
-extern u16 stp_inactive_to;
-extern unsigned char phy_gen;
-extern unsigned char max_concurr_spinup;
extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 27ae45332704..561ae3f2cbbd 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -198,7 +198,7 @@ enum sci_status sci_remote_device_reset(
 * device. When there is no active IO for the device it is in this
* state.
*
- * @SCI_STP_DEV_CMD: This is the command state for for the STP remote
+ * @SCI_STP_DEV_CMD: This is the command state for the STP remote
* device. This state is entered when the device is processing a
* non-NCQ command. The device object will fail any new start IO
* requests until this command is complete.
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index e81f60985193..7b4fe0e6afb2 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -17,7 +17,6 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
@@ -468,8 +467,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
* sufficient room.
*/
if (conn->hdrdgst_en) {
- iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
- hdr + hdrlen);
+ iscsi_tcp_dgst_header(hdr, hdrlen, hdr + hdrlen);
hdrlen += ISCSI_DIGEST_SIZE;
}
@@ -494,7 +492,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -507,11 +505,10 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
- sg, count, offset, len,
- NULL, tx_hash);
+ sg, count, offset, len, NULL, tx_crcp);
}
static void
@@ -520,7 +517,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -532,10 +529,10 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
- data, len, NULL, tx_hash);
+ data, len, NULL, tx_crcp);
}
static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
@@ -583,7 +580,6 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
- struct crypto_ahash *tfm;
cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
conn_idx);
@@ -596,37 +592,9 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
mutex_init(&tcp_sw_conn->sock_lock);
-
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- goto free_conn;
-
- tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->tx_hash)
- goto free_tfm;
- ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
-
- tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->rx_hash)
- goto free_tx_hash;
- ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
-
- tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
+ tcp_conn->rx_crcp = &tcp_sw_conn->rx_crc;
return cls_conn;
-
-free_tx_hash:
- ahash_request_free(tcp_sw_conn->tx_hash);
-free_tfm:
- crypto_free_ahash(tfm);
-free_conn:
- iscsi_conn_printk(KERN_ERR, conn,
- "Could not create connection due to crc32c "
- "loading error. Make sure the crc32c "
- "module is built as a module or into the "
- "kernel\n");
- iscsi_tcp_conn_teardown(cls_conn);
- return NULL;
}
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
@@ -664,20 +632,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
iscsi_sw_tcp_release_conn(conn);
-
- ahash_request_free(tcp_sw_conn->rx_hash);
- if (tcp_sw_conn->tx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
- ahash_request_free(tcp_sw_conn->tx_hash);
- crypto_free_ahash(tfm);
- }
-
iscsi_tcp_conn_teardown(cls_conn);
}
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 89a6fc552f0b..c3e5d9fa6add 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -41,8 +41,8 @@ struct iscsi_sw_tcp_conn {
void (*old_write_space)(struct sock *);
/* data and header digests */
- struct ahash_request *tx_hash; /* CRC32C (Tx) */
- struct ahash_request *rx_hash; /* CRC32C (Rx) */
+ u32 tx_crc; /* CRC32C (Tx) */
+ u32 rx_crc; /* CRC32C (Rx) */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index c182aa83f2c9..e90805ba868f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -15,7 +15,7 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
@@ -168,7 +168,7 @@ iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
segment->size = ISCSI_DIGEST_SIZE;
segment->copied = 0;
segment->sg = NULL;
- segment->hash = NULL;
+ segment->crcp = NULL;
}
/**
@@ -191,29 +191,27 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
struct iscsi_segment *segment, int recv,
unsigned copied)
{
- struct scatterlist sg;
unsigned int pad;
ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
segment->copied, copied, segment->size,
recv ? "recv" : "xmit");
- if (segment->hash && copied) {
- /*
- * If a segment is kmapd we must unmap it before sending
- * to the crypto layer since that will try to kmap it again.
- */
- iscsi_tcp_segment_unmap(segment);
-
- if (!segment->data) {
- sg_init_table(&sg, 1);
- sg_set_page(&sg, sg_page(segment->sg), copied,
- segment->copied + segment->sg_offset +
- segment->sg->offset);
- } else
- sg_init_one(&sg, segment->data + segment->copied,
- copied);
- ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
- crypto_ahash_update(segment->hash);
+ if (segment->crcp && copied) {
+ if (segment->data) {
+ *segment->crcp = crc32c(*segment->crcp,
+ segment->data + segment->copied,
+ copied);
+ } else {
+ const void *data;
+
+ data = kmap_local_page(sg_page(segment->sg));
+ *segment->crcp = crc32c(*segment->crcp,
+ data + segment->copied +
+ segment->sg_offset +
+ segment->sg->offset,
+ copied);
+ kunmap_local(data);
+ }
}
segment->copied += copied;
@@ -258,10 +256,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
* Set us up for transferring the data digest. hdr digest
* is completely handled in hdr done function.
*/
- if (segment->hash) {
- ahash_request_set_crypt(segment->hash, NULL,
- segment->digest, 0);
- crypto_ahash_final(segment->hash);
+ if (segment->crcp) {
+ put_unaligned_le32(~*segment->crcp, segment->digest);
iscsi_tcp_segment_splice_digest(segment,
recv ? segment->recv_digest : segment->digest);
return 0;
@@ -282,8 +278,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
* given buffer, and returns the number of bytes
* consumed, which can actually be less than @len.
*
- * If hash digest is enabled, the function will update the
- * hash while copying.
+ * If CRC is enabled, the function will update the CRC while copying.
* Combining these two operations doesn't buy us a lot (yet),
* but in the future we could implement combined copy+crc,
 * just the way we do for network layer checksums.
@@ -311,14 +306,10 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
}
inline void
-iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
- size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(const void *hdr, size_t hdrlen,
+ unsigned char digest[ISCSI_DIGEST_SIZE])
{
- struct scatterlist sg;
-
- sg_init_one(&sg, hdr, hdrlen);
- ahash_request_set_crypt(hash, &sg, digest, hdrlen);
- crypto_ahash_digest(hash);
+ put_unaligned_le32(~crc32c(~0, hdr, hdrlen), digest);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
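
A minimal sketch (assuming only the kernel's crc32c() and put_unaligned_le32(), which the new code already uses) of the digest convention adopted here: the running CRC is seeded with ~0 in __iscsi_segment_init(), updated for each chunk of data in iscsi_tcp_segment_done(), and finalized by bit-inversion before being stored little-endian, so the incremental path produces the same digest as the one-shot helper above.

/* Illustrative only: incremental CRC32C matches the one-shot digest */
static void iscsi_crc32c_demo(const u8 *buf, size_t a, size_t b,
			      u8 digest[ISCSI_DIGEST_SIZE])
{
	u32 crc = ~0;			/* seed, as in __iscsi_segment_init() */

	crc = crc32c(crc, buf, a);	/* first data chunk */
	crc = crc32c(crc, buf + a, b);	/* second data chunk */

	/* finalize: invert and store LE, as in iscsi_tcp_segment_done() */
	put_unaligned_le32(~crc, digest);
}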
@@ -343,24 +334,23 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
*/
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
- iscsi_segment_done_fn_t *done, struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
memset(segment, 0, sizeof(*segment));
segment->total_size = size;
segment->done = done;
- if (hash) {
- segment->hash = hash;
- crypto_ahash_init(hash);
+ if (crcp) {
+ segment->crcp = crcp;
+ *crcp = ~0;
}
}
inline void
iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
- size_t size, iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ size_t size, iscsi_segment_done_fn_t *done, u32 *crcp)
{
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
segment->data = data;
segment->size = size;
}
@@ -370,13 +360,12 @@ inline int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
struct scatterlist *sg_list, unsigned int sg_count,
unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
struct scatterlist *sg;
unsigned int i;
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
for_each_sg(sg_list, sg, sg_count, i) {
if (offset < sg->length) {
iscsi_tcp_segment_init_sg(segment, sg, offset);
@@ -393,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
* iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
* @tcp_conn: iscsi connection to prep for
*
- * This function always passes NULL for the hash argument, because when this
+ * This function always passes NULL for the crcp argument, because when this
* function is called we do not yet know the final size of the header and want
* to delay the digest processing until we know that.
*/
@@ -434,15 +423,15 @@ static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
iscsi_segment_init_linear(&tcp_conn->in.segment,
conn->data, tcp_conn->in.datalen,
- iscsi_tcp_data_recv_done, rx_hash);
+ iscsi_tcp_data_recv_done, rx_crcp);
}
/**
@@ -730,7 +719,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (tcp_conn->in.datalen) {
struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
struct scsi_data_buffer *sdb = &task->sc->sdb;
/*
@@ -743,7 +732,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
*/
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
"offset=%d, datalen=%d)\n",
@@ -756,7 +745,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
tcp_task->data_offset,
tcp_conn->in.datalen,
iscsi_tcp_process_data_in,
- rx_hash);
+ rx_crcp);
spin_unlock(&conn->session->back_lock);
return rc;
}
@@ -878,7 +867,7 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
return 0;
}
- iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
+ iscsi_tcp_dgst_header(hdr,
segment->total_copied - ISCSI_DIGEST_SIZE,
segment->digest);
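
The digest path above now computes CRC32C directly instead of driving a crypto_ahash transform. A minimal sketch of the equivalent calculation, assuming the in-kernel crc32c() and put_unaligned_le32() helpers; the function name below is illustrative and not part of the patch:

#include <linux/crc32c.h>
#include <linux/unaligned.h>	/* asm/unaligned.h on older kernels */

/* iSCSI header digest: CRC32C seeded with all ones, inverted, stored LE. */
static void example_hdr_digest(const void *hdr, size_t hdrlen, unsigned char *digest)
{
	u32 crc = crc32c(~0, hdr, hdrlen);

	put_unaligned_le32(~crc, digest);
}
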
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5a9c5a323f8..fe4fb67eb50c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -74,8 +74,7 @@ struct lpfc_sli2_slim;
* queue depths when there are driver resource error or Firmware
* resource error.
*/
-/* 1 Second */
-#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
+#define QUEUE_RAMP_DOWN_INTERVAL (secs_to_jiffies(1))
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -1715,18 +1714,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
-static inline unsigned int
+static __always_inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
- unsigned int cpu_it;
-
- for_each_cpu_wrap(cpu_it, mask, start) {
- if (cpu_online(cpu_it))
- break;
- }
-
- return cpu_it;
+ return cpumask_next_and_wrap(start, mask, cpu_online_mask);
}
+
/**
* lpfc_next_present_cpu - Finds next present CPU after n
* @n: the cpu prior to search
@@ -1734,16 +1727,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
* Note: If no next present cpu, then fallback to first present cpu.
*
**/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
{
- unsigned int cpu;
-
- cpu = cpumask_next(n, cpu_present_mask);
-
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
- return cpu;
+ return cpumask_next_wrap(n, cpu_present_mask);
}
/**
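
Many of the lpfc changes in this series are a mechanical conversion from msecs_to_jiffies(1000 * secs) to the dedicated secs_to_jiffies() helper. A minimal sketch of the resulting timer arming, assuming a seconds-based timeout; the function name is illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

/*
 * Arm a timer "tmo_secs" seconds from now; same expiry as
 * msecs_to_jiffies(1000 * tmo_secs), without the manual scaling.
 */
static void example_arm_timer(struct timer_list *t, unsigned int tmo_secs)
{
	mod_timer(t, jiffies + secs_to_jiffies(tmo_secs));
}
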
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1d7db49a8fe4..e08b48b1b655 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -8045,8 +8045,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies +
- msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
return 0;
}
@@ -8081,7 +8080,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies + msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
@@ -9511,7 +9510,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (!list_empty(&pring->txcmplq))
if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
}
/**
@@ -9569,18 +9568,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->vport != vport)
+ continue;
+
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2243 iotag = 0x%x cmd_flag = 0x%x "
- "ulp_command = 0x%x this_vport %x "
- "sli_flag = 0x%x\n",
+ "ulp_command = 0x%x sli_flag = 0x%x\n",
piocb->iotag, piocb->cmd_flag,
get_job_cmnd(phba, piocb),
- (piocb->vport == vport),
phba->sli.sli_flag);
- if (piocb->vport != vport)
- continue;
-
if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) {
if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
@@ -10899,7 +10896,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
"3334 Delay fc port discovery for %d secs\n",
phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
- jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+ jiffies + secs_to_jiffies(phba->fc_ratov));
return;
}
@@ -11156,7 +11153,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
if (!ndlp)
return;
- mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 36e66df36a18..8ca590e8469b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -228,10 +228,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- /* check for recovered fabric node */
- if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- ndlp->nlp_DID == Fabric_DID)
+ /* Ignore callback for a mismatched (stale) rport */
+ if (ndlp->rport != rport) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
+ "6788 fc rport mismatch: d_id x%06x ndlp x%px "
+ "fc rport x%px node rport x%px state x%x "
+ "refcnt %u\n",
+ ndlp->nlp_DID, ndlp, rport, ndlp->rport,
+ ndlp->nlp_state, kref_read(&ndlp->kref));
return;
+ }
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -3518,7 +3524,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
if (phba->fc_topology &&
phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3314 Toplogy changed was 0x%x is 0x%x\n",
+ "3314 Topology changed was 0x%x is 0x%x\n",
phba->fc_topology,
bf_get(lpfc_mbx_read_top_topology, la));
phba->fc_topology_changed = 1;
@@ -4973,7 +4979,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
tmo, vport->port_state, vport->fc_flag);
}
- mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
+ mod_timer(&vport->fc_disctmo, jiffies + secs_to_jiffies(tmo));
set_bit(FC_DISC_TMO, &vport->fc_flag);
/* Start Discovery Timer state <hba_state> */
@@ -5564,6 +5570,7 @@ static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *np = NULL;
uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
@@ -5578,14 +5585,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
- return ndlp;
+
+ /* Check for new or potentially stale node */
+ if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
+ return ndlp;
+ np = ndlp;
}
}
- /* FIND node did <did> NOT FOUND */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0932 FIND node did x%x NOT FOUND.\n", did);
- return NULL;
+ if (!np)
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0932 FIND node did x%x NOT FOUND.\n", did);
+
+ return np;
}
struct lpfc_nodelist *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bcadf11414c8..7238608ca49f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -595,7 +595,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up ring-0 (ELS) timer */
timeout = phba->fc_ratov * 2;
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
@@ -604,7 +604,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -3361,8 +3361,8 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -6909,7 +6909,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
@@ -7952,11 +7952,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/* CMF congestion timer */
- hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_timer.function = lpfc_cmf_timer;
+ hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* CMF 1 minute stats collection timer */
- hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+ hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Control structure for handling external multi-buffer mailbox
@@ -12873,7 +12872,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
if (offline) {
/* Find next online CPU on original mask */
- cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_next = cpumask_next_wrap(cpu, orig_mask);
cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
@@ -13170,6 +13169,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
eqhdl = lpfc_get_eq_hdl(0);
rc = pci_irq_vector(phba->pcidev, 0);
if (rc < 0) {
+ free_irq(phba->pcidev->irq, phba);
pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0496 MSI pci_irq_vec failed (%d)\n", rc);
@@ -13250,6 +13250,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
eqhdl = lpfc_get_eq_hdl(0);
retval = pci_irq_vector(phba->pcidev, 0);
if (retval < 0) {
+ free_irq(phba->pcidev->irq, phba);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0502 INTR pci_irq_vec failed (%d)\n",
retval);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 055ed632c14d..f0158fc00f78 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5645,9 +5645,8 @@ wait_for_cmpl:
* cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
* for abort to complete.
*/
- wait_event_timeout(waitq,
- (lpfc_cmd->pCmd != cmnd),
- msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd),
+ secs_to_jiffies(2*vport->cfg_devloss_tmo));
spin_lock(&lpfc_cmd->buf_lock);
@@ -5911,7 +5910,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
*/
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies)) {
if (!pnode)
return FAILED;
@@ -5957,7 +5956,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
lpfc_sli_abort_taskmgmt(vport,
&phba->sli.sli3_ring[LPFC_FCP_RING],
tgt_id, lun_id, context);
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
@@ -6137,8 +6136,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
wait_event_timeout(waitq,
!test_bit(NLP_WAIT_FOR_LOGO,
&pnode->save_flags),
- msecs_to_jiffies(dev_loss_tmo *
- 1000));
+ secs_to_jiffies(dev_loss_tmo));
if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
&pnode->save_flags))
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 3fd9723cd271..4e0d48fcb204 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1025,7 +1025,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
LIST_HEAD(send_rrq);
clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
- next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
spin_lock_irqsave(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
@@ -1208,8 +1208,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
else
rrq->send_rrq = 0;
rrq->xritag = xritag;
- rrq->rrq_stop_time = jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
@@ -1736,8 +1735,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
BUG_ON(!piocb->vport);
if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
mod_timer(&piocb->vport->els_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
+ jiffies + secs_to_jiffies(phba->fc_ratov << 1));
}
return 0;
@@ -2923,6 +2921,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else {
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
}
/* The unreg_login mailbox is complete and had a
@@ -3956,8 +3956,7 @@ void lpfc_poll_eratt(struct timer_list *t)
else
/* Restart the timer for next eratt poll */
mod_timer(&phba->eratt_poll,
- jiffies +
- msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
return;
}
@@ -9008,7 +9007,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
+ jiffies + secs_to_jiffies(phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
@@ -9027,7 +9026,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
/*
* The port is ready, set the host's link state to LINK_DOWN
@@ -9504,8 +9503,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
/* timeout active mbox command */
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000);
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox));
mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
@@ -9629,8 +9627,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
drvr_flag);
goto out_not_finished;
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies;
i = 0;
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
@@ -9756,9 +9753,8 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Make sure the mailbox is really active */
@@ -9881,8 +9877,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
- * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -10230,7 +10225,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
+ secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -13159,7 +13154,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
- timeout_req = msecs_to_jiffies(timeout * 1000);
+ timeout_req = secs_to_jiffies(timeout);
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
@@ -13275,8 +13270,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
- wait_for_completion_timeout(&mbox_done,
- msecs_to_jiffies(timeout * 1000));
+ wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout));
spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->ctx_u.mbox_wait = NULL;
@@ -13336,9 +13330,8 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Enable softirqs again, done with phba->hbalock */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c35f7225058e..638b50f35287 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.7"
+#define LPFC_DRIVER_VERSION "14.4.0.8"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 3d70cc517573..cc56a7334319 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -246,7 +246,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
* fabric RA_TOV value and dev_loss tmo. The driver's
* devloss_tmo is 10 giving this loop a 3x multiplier minimally.
*/
- wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+ wait_time_max = secs_to_jiffies((phba->fc_ratov * 3) + 3);
wait_time_max += jiffies;
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index adab151663dd..2006094af418 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -855,8 +855,8 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
@@ -875,7 +875,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
}
scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
- scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
+ scb->raw_mbox[2] = *cmd->cmnd == RESERVE_6 ?
MEGA_RESERVE_LD : MEGA_RELEASE_LD;
scb->raw_mbox[3] = ldrv_num;
@@ -1618,8 +1618,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
* failed or the input parameter is invalid
*/
if( status == 1 &&
- (cmd->cmnd[0] == RESERVE ||
- cmd->cmnd[0] == RELEASE) ) {
+ (cmd->cmnd[0] == RESERVE_6 ||
+ cmd->cmnd[0] == RELEASE_6) ) {
cmd->result |= (DID_ERROR << 16) |
SAM_STAT_RESERVATION_CONFLICT;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 60cc3372991f..3ba837b3093f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1725,8 +1725,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return scb;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
*/
@@ -1748,7 +1748,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
scb->dev_channel = 0xFF;
scb->dev_target = target;
ccb->raw_mbox[0] = CLUSTER_CMD;
- ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
+ ccb->raw_mbox[2] = scp->cmnd[0] == RESERVE_6 ?
RESERVE_LD : RELEASE_LD;
ccb->raw_mbox[3] = target;
@@ -2334,8 +2334,8 @@ megaraid_mbox_dpc(unsigned long devp)
* Error code returned is 1 if Reserve or Release
* failed or the input parameter is invalid
*/
- if (status == 1 && (scp->cmnd[0] == RESERVE ||
- scp->cmnd[0] == RELEASE)) {
+ if (status == 1 && (scp->cmnd[0] == RESERVE_6 ||
+ scp->cmnd[0] == RELEASE_6)) {
scp->result = DID_ERROR << 16 |
SAM_STAT_RESERVATION_CONFLICT;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d85f990aec88..28c75865967a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -93,7 +93,7 @@ static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
-int perf_mode = -1;
+static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
@@ -105,15 +105,15 @@ MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:
"default mode is 'balanced'"
);
-int event_log_level = MFI_EVT_CLASS_CRITICAL;
+static int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
-unsigned int enable_sdev_max_qd;
+static unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
-int poll_queues;
+static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
@@ -122,7 +122,7 @@ MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode
"High iops queues are not allocated &\n\t\t"
);
-int host_tagset_enable = 1;
+static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 00cd18edfad6..96401eb7e231 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -19,6 +19,7 @@
#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_SHIFT (4)
#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
@@ -29,10 +30,13 @@
#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_SHIFT (28)
#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_DEVICE_PGAD_HANDLE_SHIFT (0)
#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_SHIFT (28)
#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 2c6e548cbd0f..8d824107a678 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -66,7 +66,12 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_MASK (0x00000300)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_SHIFT (8)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_MASK (0x000000c0)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_SHIFT (6)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_SHIFT (4)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
@@ -214,11 +219,13 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_SHIFT (5)
#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_SHIFT (0)
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
@@ -236,6 +243,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c)
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d)
#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_SHIFT (0)
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index af86d12c8e49..bbef5bac92ed 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -38,23 +38,31 @@ struct mpi3_scsi_io_request {
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_MASK (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_SHIFT (29)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_MASK (0x18000000)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_SHIFT (27)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SHIFT (24)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_SHIFT (18)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
-#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_SHIFT (16)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_SHIFT (4)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
@@ -99,6 +107,7 @@ struct mpi3_scsi_io_reply {
#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_SHIFT (0)
#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index c374867f9ba0..b42933fcd423 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -30,6 +30,7 @@ struct mpi3_ioc_init_request {
#define MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED (0x08)
#define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SHIFT (0)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
@@ -40,6 +41,7 @@ struct mpi3_ioc_init_request {
#define MPI3_WHOINIT_MANUFACTURER (0x04)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_SHIFT (0)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
@@ -111,9 +113,11 @@ struct mpi3_ioc_facts_data {
__le32 diag_tty_size;
};
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_SHIFT (31)
#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_SHIFT (9)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_SUPPORTED (0x00000100)
@@ -134,6 +138,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_SHIFT (8)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
@@ -149,6 +154,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SHIFT (0)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
@@ -161,10 +167,12 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_SHIFT (4)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_SHIFT (0)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
@@ -204,6 +212,7 @@ struct mpi3_create_request_queue_request {
};
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
@@ -237,10 +246,12 @@ struct mpi3_create_reply_queue_request {
};
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_SHIFT (0)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
@@ -326,9 +337,11 @@ struct mpi3_event_notification_reply {
};
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_SHIFT (0)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_SHIFT (1)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
struct mpi3_event_data_gpio_interrupt {
@@ -487,6 +500,7 @@ struct mpi3_event_sas_topo_phy_entry {
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_SHIFT (0)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
@@ -566,6 +580,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_SHIFT (4)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
@@ -573,6 +588,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_SHIFT (0)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
@@ -881,6 +897,7 @@ struct mpi3_pel_req_action_acknowledge {
};
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_SHIFT (0)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
@@ -924,6 +941,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SHIFT (0)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
@@ -953,6 +971,7 @@ struct mpi3_ci_download_reply {
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_SHIFT (1)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
@@ -976,9 +995,11 @@ struct mpi3_ci_upload_request {
};
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SHIFT (0)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_SHIFT (1)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
index 3b960893870f..50a65b16a818 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
@@ -9,6 +9,7 @@
#define MPI3_DIAG_BUFFER_TYPE_FW (0x02)
#define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01)
+#define MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED (0x01)
struct mpi3_diag_buffer_post_request {
__le16 host_tag;
u8 ioc_use_only02;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index b2ab25a1cfeb..5c522e2531c3 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (34)
+#define MPI3_VERSION_UNIT (35)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
@@ -80,6 +80,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_SHIFT (14)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
@@ -97,6 +98,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_SHIFT (0)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
@@ -106,6 +108,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_SHIFT (30)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
@@ -124,6 +127,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_SHIFT (0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
@@ -133,6 +137,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SHIFT (8)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
@@ -151,6 +156,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_SHIFT (0)
#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
@@ -176,17 +182,20 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_SHIFT (4)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
@@ -207,7 +216,9 @@ struct mpi3_default_reply_descriptor {
};
#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_SHIFT (0)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SHIFT (12)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
@@ -301,6 +312,7 @@ union mpi3_sge_union {
};
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SHIFT (4)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
@@ -309,6 +321,7 @@ union mpi3_sge_union {
#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SHIFT (0)
#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
@@ -322,15 +335,18 @@ union mpi3_sge_union {
#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_SHIFT (6)
#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_SHIFT (4)
#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_SHIFT (0)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
@@ -403,6 +419,7 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_STATUS_SHIFT (0)
#define MPI3_IOCSTATUS_SUCCESS (0x0000)
#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
#define MPI3_IOCSTATUS_BUSY (0x0002)
@@ -469,4 +486,5 @@ struct mpi3_default_reply {
#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#define MPI3_IOCLOGINFO_LOG_DATA_SHIFT (0)
#endif
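
The MPI 3.0 headers above gain a *_SHIFT companion for each existing *_MASK define, so field extraction no longer needs hard-coded shift amounts. A minimal sketch of the intended usage, based on the reply-descriptor defines shown above; the helper name is illustrative:

#include <linux/types.h>

/* flags is the descriptor's flags word after le16_to_cpu(). */
static inline u8 example_reply_desc_type(u16 flags)
{
	return (flags & MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK) >>
	       MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SHIFT;
}
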
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0d72b5f1b69d..9bbc7cb98ca3 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.12.0.3.50"
-#define MPI3MR_DRIVER_RELDATE "11-November-2024"
+#define MPI3MR_DRIVER_VERSION "8.13.0.5.50"
+#define MPI3MR_DRIVER_RELDATE "20-February-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -80,13 +80,14 @@ extern atomic64_t event_counter;
/* Admin queue management definitions */
#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
-#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD2K 2048
#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
@@ -328,6 +329,7 @@ enum mpi3mr_reset_reason {
#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
+
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
@@ -387,6 +389,7 @@ struct mpi3mr_ioc_facts {
u16 max_msix_vectors;
u8 personality;
u8 dma_mask;
+ bool max_req_limit;
u8 protocol_flags;
u8 sge_mod_mask;
u8 sge_mod_value;
@@ -456,6 +459,8 @@ struct op_req_qinfo {
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
* @qtype: Type of queue (types defined in enum queue_type)
+ * @qfull_watermark: Watermark defined in reply queue to avoid
+ * reply queue full
*/
struct op_reply_qinfo {
u16 ci;
@@ -471,6 +476,7 @@ struct op_reply_qinfo {
bool enable_irq_poll;
atomic_t in_use;
enum queue_type qtype;
+ u16 qfull_watermark;
};
/**
@@ -928,6 +934,8 @@ struct trigger_event_data {
* @size: Buffer size
* @addr: Virtual address
* @dma_addr: Buffer DMA address
+ * @is_segmented: The buffer is segmented or not
+ * @disabled_after_reset: The buffer is disabled after reset
*/
struct diag_buffer_desc {
u8 type;
@@ -937,6 +945,8 @@ struct diag_buffer_desc {
u32 size;
void *addr;
dma_addr_t dma_addr;
+ bool is_segmented;
+ bool disabled_after_reset;
};
/**
@@ -1022,6 +1032,8 @@ struct scmd_priv {
* @admin_reply_base: Admin reply queue base virtual address
* @admin_reply_dma: Admin reply queue base dma address
* @admin_reply_q_in_use: Queue is handled by poll/ISR
+ * @admin_pend_isr: Count of unprocessed admin ISR/poll calls
+ * due to another thread processing replies
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
@@ -1090,6 +1102,7 @@ struct scmd_priv {
* @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @io_admin_reset_sync: Manage state of I/O ops during an admin reset process
* @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
@@ -1153,6 +1166,12 @@ struct scmd_priv {
* @snapdump_trigger_active: Snapdump trigger active flag
* @pci_err_recovery: PCI error recovery in progress
* @block_on_pci_err: Block IO during PCI error recovery
+ * @reply_qfull_count: Occurrences of reply queue full avoidance kicking in
+ * @prevent_reply_qfull: Enable reply queue prevention
+ * @seg_tb_support: Segmented trace buffer support
+ * @num_tb_segs: Number of Segments in Trace buffer
+ * @trace_buf_pool: DMA pool for Segmented trace buffer segments
+ * @trace_buf: Trace buffer segments memory descriptor
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1189,6 +1208,7 @@ struct mpi3mr_ioc {
void *admin_reply_base;
dma_addr_t admin_reply_dma;
atomic_t admin_reply_q_in_use;
+ atomic_t admin_pend_isr;
u32 ready_timeout;
@@ -1276,6 +1296,7 @@ struct mpi3mr_ioc {
u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
+ u8 io_admin_reset_sync;
int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
@@ -1351,6 +1372,13 @@ struct mpi3mr_ioc {
bool fw_release_trigger_active;
bool pci_err_recovery;
bool block_on_pci_err;
+ atomic_t reply_qfull_count;
+ bool prevent_reply_qfull;
+ bool seg_tb_support;
+ u32 num_tb_segs;
+ struct dma_pool *trace_buf_pool;
+ struct segments *trace_buf;
+
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 7589f48aebc8..f36663613950 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -12,23 +12,98 @@
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
/**
- * mpi3mr_alloc_trace_buffer: Allocate trace buffer
+ * mpi3mr_alloc_trace_buffer: Allocate segmented trace buffer
* @mrioc: Adapter instance reference
* @trace_size: Trace buffer size
*
- * Allocate trace buffer
+ * Allocate either segmented memory pools or contiguous buffer
+ * based on the controller capability for the host trace
+ * buffer.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+ int i, sz;
+ u64 *diag_buffer_list = NULL;
+ dma_addr_t diag_buffer_list_dma;
+ u32 seg_count;
+
+ if (mrioc->seg_tb_support) {
+ seg_count = (trace_size) / MPI3MR_PAGE_SIZE_4K;
+ trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
+
+ diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * seg_count,
+ &diag_buffer_list_dma, GFP_KERNEL);
+ if (!diag_buffer_list)
+ return -1;
+
+ mrioc->num_tb_segs = seg_count;
+
+ sz = sizeof(struct segments) * seg_count;
+ mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->trace_buf)
+ goto trace_buf_failed;
+
+ mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
+ &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
+ 0);
+ if (!mrioc->trace_buf_pool) {
+ ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
+ goto trace_buf_pool_failed;
+ }
- diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
- trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
- if (diag_buffer->addr) {
- dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ for (i = 0; i < seg_count; i++) {
+ mrioc->trace_buf[i].segment =
+ dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
+ &mrioc->trace_buf[i].segment_dma);
+ diag_buffer_list[i] =
+ (u64) mrioc->trace_buf[i].segment_dma;
+ if (!diag_buffer_list[i])
+ goto tb_seg_alloc_failed;
+ }
+
+ diag_buffer->addr = diag_buffer_list;
+ diag_buffer->dma_addr = diag_buffer_list_dma;
+ diag_buffer->is_segmented = true;
+
+ dprint_init(mrioc, "segmented trace diag buffer\n"
+ "is allocated successfully seg_count:%d\n", seg_count);
return 0;
+ } else {
+ diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+ if (diag_buffer->addr) {
+ dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ return 0;
+ }
+ return -1;
}
+
+tb_seg_alloc_failed:
+ if (mrioc->trace_buf_pool) {
+ for (i = 0; i < mrioc->num_tb_segs; i++) {
+ if (mrioc->trace_buf[i].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[i].segment,
+ mrioc->trace_buf[i].segment_dma);
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+trace_buf_pool_failed:
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+trace_buf_failed:
+ if (diag_buffer_list)
+ dma_free_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * mrioc->num_tb_segs,
+ diag_buffer_list, diag_buffer_list_dma);
return -1;
}
@@ -100,8 +175,9 @@ retry_trace:
dprint_init(mrioc,
"trying to allocate trace diag buffer of size = %dKB\n",
trace_size / 1024);
- if (get_order(trace_size) > MAX_PAGE_ORDER ||
+ if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
+
retry = true;
trace_size -= trace_dec_size;
dprint_init(mrioc, "trace diag buffer allocation failed\n"
@@ -161,6 +237,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
u8 prev_status;
int retval = 0;
+ if (diag_buffer->disabled_after_reset) {
+ dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
+ "as it is disabled after reset\n", __func__);
+ return -1;
+ }
+
memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -177,8 +259,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
- dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
- diag_buffer->type);
+ if (diag_buffer->is_segmented)
+ diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
+
+ dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
+ diag_buffer->type, diag_buffer->is_segmented);
+
prev_status = diag_buffer->status;
diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
init_completion(&mrioc->init_cmds.done);
@@ -2339,6 +2425,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
}
if (!mrioc->ioctl_sges_allocated) {
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
__func__);
return -ENOMEM;
@@ -3061,6 +3148,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(reply_queue_count);
/**
+ * reply_qfull_count_show - Show reply qfull count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Retrieves the current value of the reply_qfull_count from the mrioc structure and
+ * formats it as a string for display.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
+}
+
+static DEVICE_ATTR_RO(reply_qfull_count);
+
+/**
* logging_level_show - Show controller debug level
* @dev: class device
* @attr: Device attributes
@@ -3152,6 +3262,7 @@ static struct attribute *mpi3mr_host_attrs[] = {
&dev_attr_fw_queue_depth.attr,
&dev_attr_op_req_q_count.attr,
&dev_attr_reply_queue_count.attr,
+ &dev_attr_reply_qfull_count.attr,
&dev_attr_logging_level.attr,
&dev_attr_adp_state.attr,
NULL,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5ed31fe57474..3fcb1ad3b070 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
-
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
@@ -446,8 +446,10 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u16 threshold_comps = 0;
struct mpi3_default_reply_descriptor *reply_desc;
- if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
+ atomic_inc(&mrioc->admin_pend_isr);
return 0;
+ }
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
@@ -459,7 +461,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
@@ -554,7 +556,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
@@ -1302,7 +1304,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
retval = 0;
- ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
return retval;
}
@@ -1355,6 +1357,19 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
"\tcontroller while sas transport support is enabled at the\n"
"\tdriver, please reboot the system or reload the driver\n");
+ if (mrioc->seg_tb_support) {
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
+ ioc_err(mrioc,
+ "critical error: previously enabled segmented trace\n"
+ " buffer capability is disabled after reset. Please\n"
+ " update the firmware or reboot the system or\n"
+ " reload the driver to enable trace diag buffer\n");
+ mrioc->diag_buffers[0].disabled_after_reset = true;
+ } else
+ mrioc->diag_buffers[0].disabled_after_reset = false;
+ }
+
if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
GFP_KERNEL);
@@ -1717,7 +1732,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
(!retval)?"successful":"failed", ioc_status,
ioc_config);
if (retval)
@@ -2104,15 +2119,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
}
reply_qid = qidx + 1;
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
- if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
- !mrioc->pdev->revision)
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+
+ if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
+ if (mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ } else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
+
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
atomic_set(&op_reply_q->in_use, 0);
op_reply_q->enable_irq_poll = false;
+ op_reply_q->qfull_watermark =
+ op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -2416,8 +2438,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
void *segment_base_addr;
u16 req_sz = mrioc->facts.op_req_sz;
struct segments *segments = op_req_q->q_segments;
+ struct op_reply_qinfo *op_reply_q = NULL;
reply_qidx = op_req_q->reply_qid - 1;
+ op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
if (mrioc->unrecoverable)
return -EFAULT;
@@ -2448,6 +2472,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
goto out;
}
+ /* Reply queue is nearing full, push back IOs to SML */
+ if ((mrioc->prevent_reply_qfull == true) &&
+ (atomic_read(&op_reply_q->pend_ios) >
+ (op_reply_q->qfull_watermark))) {
+ atomic_inc(&mrioc->reply_qfull_count);
+ retval = -EAGAIN;
+ goto out;
+ }
+
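The watermark set in mpi3mr_create_op_reply_q() and checked here keeps a margin of two threshold batches below the reply queue depth. A hedged arithmetic sketch (2048 and 100 are illustrative stand-ins for MPI3MR_OP_REP_Q_QD2K and MPI3MR_THRESHOLD_REPLY_COUNT, whose actual values are not shown in this hunk):

static unsigned int qfull_watermark_sketch(void)
{
	unsigned int num_replies = 2048;	/* assumed queue depth */
	unsigned int threshold = 100;		/* assumed threshold   */

	/* above this many pending I/Os the post path returns -EAGAIN
	 * and the SCSI midlayer requeues the command */
	return num_replies - threshold * 2;	/* 1848 */
}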
segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
req_entry = (u8 *)segment_base_addr +
((pi % op_req_q->segment_qd) * req_sz);
@@ -2726,7 +2759,16 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
- if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
+ if (atomic_read(&mrioc->admin_pend_isr)) {
+ ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
+ "flush admin replies\n");
+ mpi3mr_process_admin_reply_q(mrioc);
+ }
+
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
+ (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
+
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
@@ -3091,6 +3133,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
@@ -4214,6 +4259,13 @@ retry_init:
mrioc->shost->transportt = mpi3mr_transport_template;
}
+ if (mrioc->facts.max_req_limit)
+ mrioc->prevent_reply_qfull = true;
+
+ if (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
+ mrioc->seg_tb_support = true;
+
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -4370,6 +4422,7 @@ retry_init:
goto out_failed_noretry;
}
+ mrioc->io_admin_reset_sync = 0;
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
@@ -4671,7 +4724,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*/
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
- u16 i;
+ u16 i, j;
struct mpi3mr_intr_info *intr_info;
struct diag_buffer_desc *diag_buffer;
@@ -4806,6 +4859,26 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
diag_buffer = &mrioc->diag_buffers[i];
+ if ((i == 0) && mrioc->seg_tb_support) {
+ if (mrioc->trace_buf_pool) {
+ for (j = 0; j < mrioc->num_tb_segs; j++) {
+ if (mrioc->trace_buf[j].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[j].segment,
+ mrioc->trace_buf[j].segment_dma);
+ mrioc->trace_buf[j].segment = NULL;
+ }
+
+ mrioc->trace_buf[j].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+ diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
+ }
if (diag_buffer->addr) {
dma_free_coherent(&mrioc->pdev->dev,
diag_buffer->size, diag_buffer->addr,
@@ -4883,7 +4956,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
}
ioc_info(mrioc,
- "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status,
ioc_config);
}
@@ -5229,6 +5302,55 @@ cleanup_drv_cmd:
}
/**
+ * mpi3mr_check_op_admin_proc - Check if reply queue processing is idle
+ * @mrioc: Adapter instance reference
+ *
+ * Check if any of the operational reply queues
+ * or the admin reply queue are currently in use.
+ * If any queue is in use, this function waits for
+ * a maximum of 10 seconds for them to become available.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
+{
+
+ u16 timeout = 10 * 10; /* 100 polls of 100 ms = 10 seconds */
+ u16 elapsed_time = 0;
+ bool op_admin_in_use = false;
+
+ do {
+ op_admin_in_use = false;
+
+ /* Check admin_reply queue first to exit early */
+ if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
+ op_admin_in_use = true;
+ else {
+ /* Check op_reply queues */
+ int i;
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
+ op_admin_in_use = true;
+ break;
+ }
+ }
+ }
+
+ if (!op_admin_in_use)
+ break;
+
+ msleep(100);
+
+ } while (++elapsed_time < timeout);
+
+ if (op_admin_in_use)
+ return 1;
+
+ return 0;
+}
+
+/**
* mpi3mr_soft_reset_handler - Reset the controller
* @mrioc: Adapter instance reference
* @reset_reason: Reset reason code
@@ -5308,6 +5430,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
mpi3mr_ioc_disable_intr(mrioc);
+ mrioc->io_admin_reset_sync = 1;
if (snapdump) {
mpi3mr_set_diagsave(mrioc);
@@ -5335,6 +5458,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+
+ retval = mpi3mr_check_op_admin_proc(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
+ "thread still processing replies even after a 10 second\n"
+ "timeout. Marking the controller as unrecoverable!\n");
+
+ goto out;
+ }
+
if (mrioc->num_io_throttle_group !=
mrioc->facts.max_io_throttle_group) {
ioc_err(mrioc,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index b9a51d3f2024..c186b892150f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3839,6 +3839,18 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
if (scmd) {
+ if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv)
+ goto out_unlock;
+
+ struct op_req_qinfo *op_req_q;
+
+ op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
+ tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
+ tm_req.task_request_queue_id =
+ cpu_to_le16(op_req_q->qid);
+ }
sdev = scmd->device;
sdev_priv_data = sdev->hostdata;
scsi_tgt_priv_data = ((sdev_priv_data) ?
@@ -4388,6 +4400,92 @@ out:
}
/**
+ * mpi3mr_eh_abort - Callback function for abort error handling
+ * @scmd: SCSI command reference
+ *
+ * Issues Abort Task Management if the command is in LLD scope
+ * and verifies if it is aborted successfully, and returns status
+ * accordingly.
+ *
+ * Return: SUCCESS if the abort was successful, otherwise FAILED
+ */
+static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *cmd_priv;
+ u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
+ unsigned long scmd_age_sec = scmd_age_ms / 1000;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
+ mrioc->name, scmd, scmd_age_sec, scmd_age_ms % 1000, rq->timeout / HZ,
+ scmd->retries, scmd->allowed);
+
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device not available, Skip issuing abort task\n",
+ mrioc->name);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv->in_lld_scope ||
+ cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
+ mrioc->name, scmd);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+
+ ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (cmd_priv->in_lld_scope) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort task failed. scmd (0x%p) was not terminated\n",
+ mrioc->name, scmd);
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
* mpi3mr_scan_start - Scan start callback handler
* @shost: SCSI host reference
*
@@ -5069,6 +5167,7 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.scan_finished = mpi3mr_scan_finished,
.scan_start = mpi3mr_scan_start,
.change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_abort_handler = mpi3mr_eh_abort,
.eh_device_reset_handler = mpi3mr_eh_dev_reset,
.eh_target_reset_handler = mpi3mr_eh_target_reset,
.eh_bus_reset_handler = mpi3mr_eh_bus_reset,
@@ -5803,7 +5902,7 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
-static struct pci_error_handlers mpi3mr_err_handler = {
+static const struct pci_error_handlers mpi3mr_err_handler = {
.error_detected = mpi3mr_pcierr_error_detected,
.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
.slot_reset = mpi3mr_pcierr_slot_reset,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 0ba9e6a6a13c..c8d6ced5640e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -105,10 +105,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 6de35b32223c..b181b113fc80 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -125,6 +125,12 @@
* 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT
* 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT
* 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT
+ * 07-20-20 02.00.58 Bumped MPI2_HEADER_VERSION_UNIT
+ * 03-30-21 02.00.59 Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-03-22 02.00.60 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-20-23 02.00.61 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-13-24 02.00.62 Bumped MPI2_HEADER_VERSION_UNIT
+ * Added MPI2_FUNCTION_MCTP_PASSTHROUGH
* --------------------------------------------------------------------------
*/
@@ -165,7 +171,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x39)
+#define MPI2_HEADER_VERSION_UNIT (0x3E)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -669,6 +675,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
+#define MPI2_FUNCTION_MCTP_PASSTHROUGH (0x34)
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 587f7d248219..02bf26ca976e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -251,6 +251,7 @@
* 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
* 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
* Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
+ * 09-13-24 02.00.50 Added PCIe 32 GT/s link rate
*/
#ifndef MPI2_CNFG_H
@@ -606,7 +607,7 @@ typedef struct _MPI2_CONFIG_REPLY {
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U8 ChipName[16]; /*0x04 */
+ U8 ChipName[16] __nonstring; /*0x04 */
U8 ChipRevision[8]; /*0x14 */
U8 BoardName[16]; /*0x1C */
U8 BoardAssembly[16]; /*0x2C */
@@ -1121,6 +1122,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_32_0_GBPS (0x04)
/*defines for IO Unit Page 7 ProcessorState field */
#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
@@ -2301,6 +2303,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_PROD_SPECIFIC_1 (0x8000)
#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
@@ -3591,6 +3594,7 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI26_PCIE_NEG_LINK_RATE_32_0 (0x06)
/****************************************************************************
@@ -3700,6 +3704,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_32_0 (0x60)
/*values for PCIe IO Unit Page 1 DMDReportPCIe */
#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index d92852591134..1a279c6e1a9f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -179,6 +179,7 @@
* Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
* Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
* Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
+ * 09-13-24 02.00.39 Added MPI26_MCTP_PASSTHROUGH messages
* --------------------------------------------------------------------------
*/
@@ -382,6 +383,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU (0x00800000)
#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000)
#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
@@ -1798,5 +1800,57 @@ typedef struct _MPI26_IOUNIT_CONTROL_REPLY {
Mpi26IoUnitControlReply_t,
*pMpi26IoUnitControlReply_t;
+/****************************************************************************
+ * MCTP Passthrough messages (MPI v2.6 and later only.)
+ ****************************************************************************/
+
+/* MCTP Passthrough Request Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REQUEST {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1[2]; /* 0x01 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ U8 Flags; /* 0x10 */
+ U8 Reserved5[3]; /* 0x11 */
+ U32 Reserved6; /* 0x14 */
+ U32 H2DLength; /* 0x18 */
+ U32 D2HLength; /* 0x1C */
+ MPI25_SGE_IO_UNION H2DSGL; /* 0x20 */
+ MPI25_SGE_IO_UNION D2HSGL; /* 0x30 */
+} MPI26_MCTP_PASSTHROUGH_REQUEST,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REQUEST,
+ Mpi26MctpPassthroughRequest_t,
+ *pMpi26MctpPassthroughRequest_t;
+
+/* values for the MsgContext field */
+#define MPI26_MCTP_MSG_CONEXT_UNUSED (0x00)
+
+/* values for the Flags field */
+#define MPI26_MCTP_FLAGS_MSG_FORMAT_MPT (0x01)
+
+/* MCTP Passthrough Reply Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REPLY {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ResponseDataLength; /* 0x14 */
+} MPI26_MCTP_PASSTHROUGH_REPLY,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REPLY,
+ Mpi26MctpPassthroughReply_t,
+ *pMpi26MctpPassthroughReply_t;
#endif
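As a hedged cross-check of the byte offsets annotated in the struct comments above, the following standalone sketch asserts the layout; it assumes the MPI headers' usual 4-byte-aligned U64 typedef and a 16-byte MPI25_SGE_IO_UNION.

#include <stddef.h>

_Static_assert(offsetof(MPI26_MCTP_PASSTHROUGH_REQUEST, Flags) == 0x10, "Flags");
_Static_assert(offsetof(MPI26_MCTP_PASSTHROUGH_REQUEST, H2DLength) == 0x18, "H2DLength");
_Static_assert(offsetof(MPI26_MCTP_PASSTHROUGH_REQUEST, H2DSGL) == 0x20, "H2DSGL");
_Static_assert(offsetof(MPI26_MCTP_PASSTHROUGH_REQUEST, D2HSGL) == 0x30, "D2HSGL");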
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index dc43cfa83088..bd3efa5b46c7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1202,6 +1202,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
ioc->sge_size;
func_str = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi26MctpPassthroughRequest_t) +
+ ioc->sge_size;
+ func_str = "mctp_passthru";
+ break;
default:
frame_sz = 32;
func_str = "unknown";
@@ -4874,6 +4879,12 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
i++;
}
+ if (ioc->facts.IOCCapabilities &
+ MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ pr_cont("%sMCTP Passthru", i ? "," : "");
+ i++;
+ }
+
iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
pr_cont("%sNCQ", i ? "," : "");
@@ -8018,7 +8029,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->hostdiag_unlock_mutex);
if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
- goto out;
+ goto unlock;
hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
@@ -8038,7 +8049,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc,
"Invalid host diagnostic register value\n");
_base_dump_reg_set(ioc);
- goto out;
+ goto unlock;
}
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
@@ -8074,17 +8085,19 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
__func__, ioc_state);
_base_dump_reg_set(ioc);
- goto out;
+ goto fail;
}
pci_cfg_access_unlock(ioc->pdev);
ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
- out:
+unlock:
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+
+fail:
pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
- mutex_unlock(&ioc->hostdiag_unlock_mutex);
return -EFAULT;
}
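The relabelled error paths above drop hostdiag_unlock_mutex only on the paths that still hold it; later failures (after the success path has presumably released the mutex elsewhere, not shown in this hunk) jump past the unlock. A minimal hedged sketch of the two-stage pattern, with hypothetical helpers:

#include <linux/errno.h>
#include <linux/mutex.h>

extern int step_that_needs_lock(void);	/* hypothetical */
extern int later_step(void);		/* hypothetical */

static int two_stage_unwind_sketch(struct mutex *m)
{
	mutex_lock(m);
	if (step_that_needs_lock())	/* fails while the mutex is still held */
		goto unlock;
	mutex_unlock(m);
	if (later_step())		/* fails after the mutex is already gone */
		goto fail;
	return 0;
unlock:
	mutex_unlock(m);
fail:
	return -EFAULT;
}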
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index d8d1a64b4764..939141cde3ca 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "51.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 51
+#define MPT3SAS_DRIVER_VERSION "52.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 52
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 00
#define MPT3SAS_RELEASE_VERSION 00
@@ -1858,9 +1858,6 @@ int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
-int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz);
int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply,
struct Mpi2ManufacturingPage10_t *config_page);
@@ -1887,9 +1884,6 @@ int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
u32 form, u32 handle);
-int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle);
int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
u32 form, u32 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 2e88f456fc34..45ac853e1289 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -577,44 +577,6 @@ mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @sz: size of buffer passed in config_page
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 7;
- mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -1214,47 +1176,6 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @form: GET_NEXT_HANDLE or HANDLE
- * @handle: device handle
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
- mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
- mpi_request.Header.PageNumber = 1;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 87784c96249a..063b10dd8251 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -186,6 +186,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
desc = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ desc = "mctp_passthrough";
+ break;
}
if (!desc)
@@ -653,6 +656,40 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
}
/**
+ * _ctl_send_mctp_passthru_req - Send an MCTP passthru request
+ * @ioc: per adapter object
+ * @mctp_passthru_req: MPI MCTP passthru request from caller
+ * @psge: pointer to the H2DSGL
+ * @data_out_dma: DMA buffer for H2D SGL
+ * @data_out_sz: H2D length
+ * @data_in_dma: DMA buffer for D2H SGL
+ * @data_in_sz: D2H length
+ * @smid: SMID to submit the request
+ *
+ */
+static void
+_ctl_send_mctp_passthru_req(
+ struct MPT3SAS_ADAPTER *ioc,
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req, void *psge,
+ dma_addr_t data_out_dma, int data_out_sz,
+ dma_addr_t data_in_dma, int data_in_sz,
+ u16 smid)
+{
+ mctp_passthru_req->H2DLength = data_out_sz;
+ mctp_passthru_req->D2HLength = data_in_sz;
+
+ /* Build the H2D SGL from the data out buffer */
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 0, 0);
+
+ psge += ioc->sge_size_ieee;
+
+ /* Build the D2H SGL for the data in buffer */
+ ioc->build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz);
+
+ ioc->put_smid_default(ioc, smid);
+}
+
+/**
* _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
* @ioc: per adapter object
* @karg: (struct mpt3_ioctl_command)
@@ -679,6 +716,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ int tm_ret;
issue_reset = 0;
@@ -792,6 +830,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ {
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req =
+ (Mpi26MctpPassthroughRequest_t *)request;
+
+ if (!(ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)) {
+ ioc_err(ioc, "%s: MCTP Passthrough request not supported\n",
+ __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+ break;
+ }
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
@@ -1120,18 +1175,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(
pcie_device->device_info))))
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
+
+ if (tm_ret != SUCCESS) {
+ ioc_info(ioc,
+ "target reset failed, issue hard reset: handle (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
@@ -1200,6 +1262,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ karg.driver_capability |= MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU;
+
if (copy_to_user(arg, &karg, sizeof(karg))) {
pr_err("failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -2787,6 +2851,217 @@ out_unlock_pciaccess:
}
/**
+ * _ctl_get_mpt_mctp_passthru_adapter - Traverse the IOC list and return the IOC at
+ * dev_index position that supports MCTP passthru
+ * @dev_index: position in the mpt3sas_ioc_list to search for
+ *
+ * Return: pointer to the IOC on success, NULL if no matching device is found
+ */
+static struct MPT3SAS_ADAPTER *
+_ctl_get_mpt_mctp_passthru_adapter(int dev_index)
+{
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+ int count = 0;
+
+ spin_lock(&gioc_lock);
+ /* Walk the ioc list and return the IOC at dev_index position that supports MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ if (count == dev_index) {
+ spin_unlock(&gioc_lock);
+ return ioc;
+ }
+ count++;
+ }
+ }
+ spin_unlock(&gioc_lock);
+
+ return NULL;
+}
+
+/**
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int
+mpt3sas_get_device_count(void)
+{
+ int count = 0;
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+
+ spin_lock(&gioc_lock);
+ /* Traverse ioc list and return number of IOC that support MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)
+ count++;
+
+ spin_unlock(&gioc_lock);
+
+ return count;
+}
+EXPORT_SYMBOL(mpt3sas_get_device_count);
+
+/**
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req;
+ u16 smid;
+ unsigned long timeout;
+ u8 issue_reset = 0;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+
+ /* Retrieve ioc from dev_index */
+ ioc = _ctl_get_mpt_mctp_passthru_adapter(command->dev_index);
+ if (!ioc)
+ return -ENODEV;
+
+ mutex_lock(&ioc->pci_access_mutex);
+ if (ioc->shost_recovery ||
+ ioc->pci_error_recovery || ioc->is_driver_loading ||
+ ioc->remove_host) {
+ ret = -EAGAIN;
+ goto unlock_pci_access;
+ }
+
+ /* Lock the ctl_cmds mutex to ensure a single ctl cmd is pending */
+ if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
+ ret = -ERESTARTSYS;
+ goto unlock_pci_access;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
+ ret = -EAGAIN;
+ goto unlock_ctl_cmds;
+ }
+
+ ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (ret)
+ goto unlock_ctl_cmds;
+
+ mpi_request = (MPI2RequestHeader_t *)command->mpi_request;
+ if (mpi_request->Function != MPI2_FUNCTION_MCTP_PASSTHROUGH) {
+ ioc_err(ioc, "%s: Invalid request received, Function 0x%x\n",
+ __func__, mpi_request->Function);
+ ret = -EINVAL;
+ goto unlock_ctl_cmds;
+ }
+
+ /* Use first reserved smid for passthrough commands */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
+ memcpy(request, command->mpi_request, sizeof(Mpi26MctpPassthroughRequest_t));
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = command->data_out_size;
+ data_in_sz = command->data_in_size;
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+ &data_out_dma, GFP_ATOMIC);
+ if (!data_out) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ memcpy(data_out, command->data_out_buf_ptr, data_out_sz);
+
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+ &data_in_dma, GFP_ATOMIC);
+ if (!data_in) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ psge = &((Mpi26MctpPassthroughRequest_t *)request)->H2DSGL;
+
+ init_completion(&ioc->ctl_cmds.done);
+
+ mctp_passthru_req = (Mpi26MctpPassthroughRequest_t *)request;
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+
+ timeout = command->timeout;
+ if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+
+ wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi26MctpPassthroughRequest_t) / 4, issue_reset);
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+
+ /* copy out xdata to user */
+ if (data_in_sz)
+ memcpy(command->data_in_buf_ptr, data_in, data_in_sz);
+
+ /* copy out reply message frame to user */
+ if (command->max_reply_bytes) {
+ sz = min_t(u32, command->max_reply_bytes, ioc->reply_sz);
+ memcpy(command->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz);
+ }
+
+issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
+
+out:
+ /* free memory associated with sg buffers */
+ if (data_in)
+ dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
+ data_out_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+
+unlock_ctl_cmds:
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+
+unlock_pci_access:
+ mutex_unlock(&ioc->pci_access_mutex);
+ return ret;
+
+}
+EXPORT_SYMBOL(mpt3sas_send_mctp_passthru_req);
+
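A hedged in-kernel caller sketch for the two exports above (a hypothetical consumer such as an MCTP transport driver; the buffer sizes and zeroed payload are illustrative assumptions, and the request layout comes from the mpi2_ioc.h hunk earlier in this patch):

static int demo_mctp_query(void)
{
	Mpi26MctpPassthroughRequest_t req = { };
	struct mpt3_passthru_command cmd = { };
	static u8 reply[64], data_in[512], data_out[64];

	if (!mpt3sas_get_device_count())
		return -ENODEV;

	req.Function = MPI2_FUNCTION_MCTP_PASSTHROUGH;
	req.Flags = MPI26_MCTP_FLAGS_MSG_FORMAT_MPT;

	cmd.dev_index = 0;			/* first MCTP-capable IOC  */
	cmd.timeout = 0;			/* 0 => driver default     */
	cmd.mpi_request = &req;
	cmd.data_out_buf_ptr = data_out;	/* H2D payload (assumed)   */
	cmd.data_out_size = sizeof(data_out);
	cmd.data_in_buf_ptr = data_in;		/* D2H response buffer     */
	cmd.data_in_size = sizeof(data_in);
	cmd.reply_frame_buf_ptr = reply;
	cmd.max_reply_bytes = sizeof(reply);

	/* H2DLength/D2HLength are filled in by the driver from the sizes above */
	return mpt3sas_send_mctp_passthru_req(&cmd);
}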
+/**
* _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
* @file: (struct file)
* @cmd: ioctl opcode
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 171709e91006..483e0549c02f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -160,6 +160,9 @@ struct mpt3_ioctl_pci_info {
#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
+/* Bits set for mpt3_ioctl_iocinfo.driver_cap */
+#define MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU 0x1
+
/**
* struct mpt3_ioctl_iocinfo - generic controller info
* @hdr - generic header
@@ -175,6 +178,7 @@ struct mpt3_ioctl_pci_info {
* @driver_version - driver version - 32 ASCII characters
* @rsvd1 - reserved
* @scsi_id - scsi id of adapter 0
+ * @driver_capability - driver capabilities
* @rsvd2 - reserved
* @pci_information - pci info (2nd revision)
*/
@@ -192,7 +196,8 @@ struct mpt3_ioctl_iocinfo {
uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
uint8_t rsvd1;
uint8_t scsi_id;
- uint16_t rsvd2;
+ uint8_t driver_capability;
+ uint8_t rsvd2;
struct mpt3_ioctl_pci_info pci_information;
};
@@ -458,4 +463,46 @@ struct mpt3_enable_diag_sbr_reload {
struct mpt3_ioctl_header hdr;
};
+/**
+ * struct mpt3_passthru_command - generic mpt firmware passthru command
+ * @dev_index - device index
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - MPI reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number bytes for data transfer in (read)
+ * @data_out_size - number bytes for data transfer out (write)
+ * @mpi_request - request frame
+ */
+struct mpt3_passthru_command {
+ u8 dev_index;
+ uint32_t timeout;
+ void *reply_frame_buf_ptr;
+ void *data_in_buf_ptr;
+ void *data_out_buf_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ Mpi26MctpPassthroughRequest_t *mpi_request;
+};
+
+/*
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int mpt3sas_get_device_count(void);
+
+/*
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command);
+
#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a456e5ec74d8..508861e88d9f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2703,7 +2703,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
ssp_target = 1;
if (sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SEP) {
- sdev_printk(KERN_WARNING, sdev,
+ sdev_printk(KERN_INFO, sdev,
"set ignore_delay_remove for handle(0x%04x)\n",
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->ignore_delay_remove = 1;
@@ -12710,7 +12710,7 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
-static struct pci_error_handlers _mpt3sas_err_handler = {
+static const struct pci_error_handlers _mpt3sas_err_handler = {
.error_detected = scsih_pci_error_detected,
.mmio_enabled = scsih_pci_mmio_enabled,
.slot_reset = scsih_pci_slot_reset,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d84413b77d84..dc74ebc6405a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -328,10 +328,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 1444b1f1c4c8..c4592de4fefc 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -151,16 +151,6 @@ static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
-{
- u32 no;
- for_each_phy(phy_mask, phy_mask, no) {
- if (!(phy_mask & 1))
- continue;
- MVS_CHIP_DISP->phy_reset(mvi, no, hard);
- }
-}
-
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 19b01f7c4767..09ce3f2241f2 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -425,7 +425,6 @@ struct mvs_task_exec_info {
void mvs_get_sas_addr(void *buf, u32 buflen);
void mvs_iounmap(void __iomem *regs);
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index c9539897048a..e87885cc701c 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2876,7 +2876,7 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
-static struct pci_error_handlers qedi_err_handler = {
+static const struct pci_error_handlers qedi_err_handler = {
.error_detected = qedi_io_error_detected,
};
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4f63aff333db..3a2bd953a976 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -282,8 +282,8 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t model_num[16];
- uint8_t model_description[80];
+ uint8_t model_num[16] __nonstring;
+ uint8_t model_description[80] __nonstring;
uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 6d16546e1729..9e7a407ba1b9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2136,8 +2136,8 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
* @flash_id: Flash ID
*
* This function polls the device until bit 7 of what is read matches data
- * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed
- * out (a fatal error). The flash book recommeds reading bit 7 again after
+ * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
+ * out (a fatal error). The flash book recommends reading bit 7 again after
* reading bit 5 as a 1.
*
* Returns 0 on success, else non-zero.
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a77e0499b738..53daf923ad8e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -510,22 +510,34 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
- if (vpd_buf->data[i] == 0x0)
+ switch (vpd_buf->data[i]) {
+ case 0x0:
scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
- if (vpd_buf->data[i] == 0x80)
+ break;
+ case 0x80:
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
- if (vpd_buf->data[i] == 0x83)
+ break;
+ case 0x83:
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
- if (vpd_buf->data[i] == 0x89)
+ break;
+ case 0x89:
scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
- if (vpd_buf->data[i] == 0xb0)
+ break;
+ case 0xb0:
scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
- if (vpd_buf->data[i] == 0xb1)
+ break;
+ case 0xb1:
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
- if (vpd_buf->data[i] == 0xb2)
+ break;
+ case 0xb2:
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
- if (vpd_buf->data[i] == 0xb7)
+ break;
+ case 0xb7:
scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
+ break;
+ default:
+ break;
+ }
}
kfree(vpd_buf);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5ceaa4665e5d..f0eec4708ddd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -71,6 +71,10 @@ static const char *sdebug_version_date = "20210520";
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
+#define FILEMARK_DETECTED_ASCQ 0x1
+#define EOP_EOM_DETECTED_ASCQ 0x2
+#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
+#define EOD_DETECTED_ASCQ 0x5
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
@@ -80,8 +84,10 @@ static const char *sdebug_version_date = "20210520";
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
+#define UA_READY_ASC 0x28
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
+#define TOO_MANY_IN_PARTITION_ASC 0x3b
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
@@ -173,6 +179,37 @@ static const char *sdebug_version_date = "20210520";
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1
+/* Default parameters for tape drives */
+#define TAPE_DEF_DENSITY 0x0
+#define TAPE_BAD_DENSITY 0x65
+#define TAPE_DEF_BLKSIZE 0
+#define TAPE_MIN_BLKSIZE 512
+#define TAPE_MAX_BLKSIZE 1048576
+#define TAPE_EW 20
+#define TAPE_MAX_PARTITIONS 2
+#define TAPE_UNITS 10000
+#define TAPE_PARTITION_1_UNITS 1000
+
+/* The tape block data definitions */
+#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30)
+#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30)
+#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
+#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
+#define IS_TAPE_BLOCK_FM(a) ((a & TAPE_BLOCK_FM_FLAG) != 0)
+#define IS_TAPE_BLOCK_EOD(a) ((a & TAPE_BLOCK_EOD_FLAG) != 0)
+
+struct tape_block {
+ u32 fl_size;
+ unsigned char data[4];
+};
+
+/* Flags for sense data */
+#define SENSE_FLAG_FILEMARK 0x80
+#define SENSE_FLAG_EOM 0x40
+#define SENSE_FLAG_ILI 0x20
+
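A short hedged sketch of how the per-block fl_size word combines a mark flag with a data length under the masks above (the 512-byte record is an illustrative value):

static void tape_block_demo(void)
{
	u32 data_blk = 512;			/* plain 512-byte record      */
	u32 filemark = TAPE_BLOCK_FM_FLAG;	/* filemark, zero data length */
	u32 eod = TAPE_BLOCK_EOD_FLAG;		/* end-of-data marker         */

	pr_info("record size %u\n", TAPE_BLOCK_SIZE(data_blk));	/* 512 */
	pr_info("fm? %d eod? %d\n", IS_TAPE_BLOCK_FM(filemark),
		IS_TAPE_BLOCK_EOD(eod));			/* 1, 1 */
}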
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -216,7 +253,8 @@ static const char *sdebug_version_date = "20210520";
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
-#define SDEBUG_NUM_UAS 8
+#define SDEBUG_UA_NOT_READY_TO_READY 8
+#define SDEBUG_NUM_UAS 9
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
@@ -262,11 +300,6 @@ static const char *sdebug_version_date = "20210520";
#define SDEB_XA_NOT_IN_USE XA_MARK_1
-static struct kmem_cache *queued_cmd_cache;
-
-#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
-#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
-
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZTYPE_CNV = 0x1,
@@ -363,6 +396,19 @@ struct sdebug_dev_info {
ktime_t create_ts; /* time since bootup that this device was created */
struct sdeb_zone_state *zstate;
+ /* For tapes */
+ unsigned int tape_blksize;
+ unsigned int tape_density;
+ unsigned char tape_partition;
+ unsigned char tape_nbr_partitions;
+ unsigned char tape_pending_nbr_partitions;
+ unsigned int tape_pending_part_0_size;
+ unsigned int tape_pending_part_1_size;
+ unsigned char tape_dce;
+ unsigned int tape_location[TAPE_MAX_PARTITIONS];
+ unsigned int tape_eop[TAPE_MAX_PARTITIONS];
+ struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];
+
struct dentry *debugfs_entry;
struct spinlock list_lock;
struct list_head inject_err_list;
@@ -409,24 +455,9 @@ struct sdebug_defer {
enum sdeb_defer_type defer_t;
};
-struct sdebug_device_access_info {
- bool atomic_write;
- u64 lba;
- u32 num;
- struct scsi_cmnd *self;
-};
-
-struct sdebug_queued_cmd {
- /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
- * instance indicates this slot is in use.
- */
- struct sdebug_defer sd_dp;
- struct scsi_cmnd *scmd;
- struct sdebug_device_access_info *i;
-};
-
struct sdebug_scsi_cmd {
spinlock_t lock;
+ struct sdebug_defer sd_dp;
};
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
@@ -483,22 +514,27 @@ enum sdeb_opcode_index {
SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
SDEB_I_ATOMIC_WRITE_16 = 32,
- SDEB_I_LAST_ELEM_P1 = 33, /* keep this last (previous + 1) */
+ SDEB_I_READ_BLOCK_LIMITS = 33,
+ SDEB_I_LOCATE = 34,
+ SDEB_I_WRITE_FILEMARKS = 35,
+ SDEB_I_SPACE = 36,
+ SDEB_I_FORMAT_MEDIUM = 37,
+ SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
- 0, 0, 0, 0,
+ SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
- SDEB_I_RELEASE,
+ SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
+ SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
- SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
@@ -573,6 +609,12 @@ static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
@@ -581,8 +623,6 @@ static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
-static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
-
/*
* The following are overflow arrays for cdbs that "hit" the same index in
* the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -773,7 +813,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 20 */
{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
{6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
+ {0, 0x1, 0, 0, resp_rewind, NULL,
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -800,6 +840,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_pre_fetch, pre_fetch_iarr,
{10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* PRE-FETCH (10) */
+ /* READ POSITION (10) */
/* 30 */
{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
@@ -810,11 +851,23 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
-/* 31 */
+/* 32 */
{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
+ {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
+ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
+ {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} },
+ {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
+ {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */
+ {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
+ {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+/* 38 */
/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -1331,6 +1384,30 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
my_name, key, asc, asq);
}
+/* Sense data that has information fields for tapes */
+static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
+ unsigned int information, unsigned char tape_flags)
+{
+ if (!scp->sense_buffer) {
+ sdev_printk(KERN_ERR, scp->device,
+ "%s: sense_buffer is NULL\n", __func__);
+ return;
+ }
+ memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+ scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
+ /* only fixed format so far */
+
+ scp->sense_buffer[0] |= 0x80; /* valid */
+ scp->sense_buffer[2] |= tape_flags;
+ put_unaligned_be32(information, &scp->sense_buffer[3]);
+
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
+ my_name, key, asc, asq);
+}
+
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
@@ -1493,6 +1570,12 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (sdebug_verbose)
cp = "reported luns data has changed";
break;
+ case SDEBUG_UA_NOT_READY_TO_READY:
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
+ 0);
+ if (sdebug_verbose)
+ cp = "not ready to ready transition/media change";
+ break;
default:
pr_warn("unexpected unit attention code=%d\n", k);
if (sdebug_verbose)
@@ -2196,6 +2279,14 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
changing = (stopped_state != want_stop);
if (changing)
atomic_xchg(&devip->stopped, want_stop);
+ if (sdebug_ptype == TYPE_TAPE && !want_stop) {
+ int i;
+
+ set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_partition = 0;
+ }
if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
return SDEG_RES_IMMED_MASK;
else
@@ -2728,6 +2819,76 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
+static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
+ 0xff, 0xff, 0x00, 0x00};
+
+static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
+{ /* Partition page for mode_sense (tape) */
+ memcpy(p, partition_pg, sizeof(partition_pg));
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(partition_pg) - 2);
+ return sizeof(partition_pg);
+}
+
+static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
+ unsigned char *new, int pg_len)
+{
+ int new_nbr, p0_size, p1_size;
+
+ if ((new[4] & 0x80) != 0) { /* FDP */
+ partition_pg[4] |= 0x80;
+ devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
+ devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
+ devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
+ } else {
+ new_nbr = new[3] + 1;
+ if (new_nbr > TAPE_MAX_PARTITIONS)
+ return 3;
+ if ((new[4] & 0x40) != 0) { /* SDP */
+ p1_size = TAPE_PARTITION_1_UNITS;
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100)
+ return 4;
+ } else if ((new[4] & 0x20) != 0) { /* IDP */
+ if (new_nbr > 1) {
+ p0_size = get_unaligned_be16(new + 8);
+ p1_size = get_unaligned_be16(new + 10);
+ if (p1_size == 0xFFFF)
+ p1_size = TAPE_UNITS - p0_size;
+ else if (p0_size == 0xFFFF)
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100 || p1_size < 100)
+ return 8;
+ } else {
+ p0_size = TAPE_UNITS;
+ p1_size = 0;
+ }
+ } else
+ return 6;
+ devip->tape_pending_nbr_partitions = new_nbr;
+ devip->tape_pending_part_0_size = p0_size;
+ devip->tape_pending_part_1_size = p1_size;
+ partition_pg[3] = new_nbr;
+ }
+
+ return 0;
+}
+
+static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
+ unsigned char dce)
+{ /* Compression page for mode_sense (tape) */
+ unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0};
+
+ memcpy(p, compression_pg, sizeof(compression_pg));
+ if (dce)
+ p[2] |= 0x80;
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(compression_pg) - 2);
+ return sizeof(compression_pg);
+}
+
/* PAGE_SIZE is more than necessary but provides room for future expansion. */
#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
@@ -2742,7 +2903,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
if (!arr)
@@ -2755,7 +2916,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
is_zbc = devip->zoned;
- if ((is_disk || is_zbc) && !dbd)
+ is_tape = (sdebug_ptype == TYPE_TAPE);
+ if ((is_disk || is_zbc || is_tape) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2793,15 +2955,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
put_unaligned_be32(0xffffffff, ap + 0);
else
put_unaligned_be32(sdebug_capacity, ap + 0);
- put_unaligned_be16(sdebug_sector_size, ap + 6);
+ if (is_tape) {
+ ap[0] = devip->tape_density;
+ put_unaligned_be16(devip->tape_blksize, ap + 6);
+ } else
+ put_unaligned_be16(sdebug_sector_size, ap + 6);
offset += bd_len;
ap = arr + offset;
} else if (16 == bd_len) {
+ if (is_tape) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
+ return check_condition_result;
+ }
put_unaligned_be64((u64)sdebug_capacity, ap + 0);
put_unaligned_be32(sdebug_sector_size, ap + 12);
offset += bd_len;
ap = arr + offset;
}
+ if (cmd[2] == 0)
+ goto only_bd; /* Only block descriptor requested */
/*
* N.B. If len>0 before resp_*_pg() call, then form of that call should be:
@@ -2857,6 +3029,18 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
offset += len;
break;
+ case 0xf: /* Compression Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
+ offset += len;
+ break;
+ case 0x11: /* Partition Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_partition_m_pg(ap, pcontrol, target);
+ offset += len;
+ break;
case 0x19: /* if spc==1 then sas phy, control+discover */
if (subpcode > 0x2 && subpcode < 0xff)
goto bad_subpcode;
@@ -2902,6 +3086,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
default:
goto bad_pcode;
}
+only_bd:
if (msense_6)
arr[0] = offset - 1;
else
@@ -2945,8 +3130,34 @@ static int resp_mode_select(struct scsi_cmnd *scp,
__func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
- off = bd_len + (mselect6 ? 4 : 8);
- if (md_len > 2 || off >= res) {
+ off = (mselect6 ? 4 : 8);
+ if (sdebug_ptype == TYPE_TAPE) {
+ int blksize;
+
+ if (bd_len != 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA,
+ mselect6 ? 3 : 6, -1);
+ return check_condition_result;
+ }
+ if (arr[off] == TAPE_BAD_DENSITY) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
+ return check_condition_result;
+ }
+ blksize = get_unaligned_be16(arr + off + 6);
+ if (blksize != 0 &&
+ (blksize < TAPE_MIN_BLKSIZE ||
+ blksize > TAPE_MAX_BLKSIZE ||
+ (blksize % 4) != 0)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
+ return check_condition_result;
+ }
+ devip->tape_density = arr[off];
+ devip->tape_blksize = blksize;
+ }
+ off += bd_len;
+ if (off >= res)
+ return 0; /* No page written, just descriptors */
+ if (md_len > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
}
@@ -2984,6 +3195,25 @@ static int resp_mode_select(struct scsi_cmnd *scp,
goto set_mode_changed_ua;
}
break;
+ case 0xf: /* Compression mode page */
+ if (sdebug_ptype != TYPE_TAPE)
+ goto bad_pcode;
+ if ((arr[off + 2] & 0x40) != 0) {
+ devip->tape_dce = (arr[off + 2] & 0x80) != 0;
+ return 0;
+ }
+ break;
+ case 0x11: /* Medium Partition Mode Page (tape) */
+ if (sdebug_ptype == TYPE_TAPE) {
+ int fld;
+
+ fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
+ if (fld == 0)
+ return 0;
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
+ return check_condition_result;
+ }
+ break;
case 0x1c: /* Informational Exceptions Mode page */
if (iec_m_pg[1] == arr[off + 1]) {
memcpy(iec_m_pg + 2, arr + off + 2,
@@ -2999,6 +3229,10 @@ static int resp_mode_select(struct scsi_cmnd *scp,
set_mode_changed_ua:
set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
return 0;
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
}
static int resp_temp_l_pg(unsigned char *arr)
@@ -3138,6 +3372,265 @@ static int resp_log_sense(struct scsi_cmnd *scp,
min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
+enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
+static int resp_read_blklimits(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
+
+ arr[0] = 4;
+ put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
+ put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
+ return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
+}
+
+static int resp_locate(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, pos;
+ struct tape_block *blp;
+ int partition;
+
+ if ((cmd[1] & 0x02) != 0) {
+ if (cmd[8] >= devip->tape_nbr_partitions) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_partition = cmd[8];
+ }
+ pos = get_unaligned_be32(cmd + 3);
+ partition = devip->tape_partition;
+
+ for (i = 0, blp = devip->tape_blocks[partition];
+ i < pos && i < devip->tape_eop[partition]; i++, blp++)
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ break;
+ if (i < pos) {
+ devip->tape_location[partition] = i;
+ mk_sense_buffer(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE, EOD_DETECTED_ASCQ);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_write_filemarks(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, count, pos;
+ u32 data;
+ int partition = devip->tape_partition;
+
+ if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+ count = get_unaligned_be24(cmd + 2);
+ data = TAPE_BLOCK_FM_FLAG;
+ for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
+ if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size = data;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size =
+ TAPE_BLOCK_EOD_FLAG;
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_space(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd, code;
+ int i = 0, pos, count;
+ struct tape_block *blp;
+ int partition = devip->tape_partition;
+
+ count = get_unaligned_be24(cmd + 2);
+ if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
+ count |= 0xff000000;
+ code = cmd[1] & 0x0f;
+
+ pos = devip->tape_location[partition];
+ if (code == 0) { /* blocks */
+ if (count < 0) {
+ count = (-count);
+ pos -= 1;
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++) {
+ if (pos < 0)
+ goto is_bop;
+ else if (IS_TAPE_BLOCK_FM(blp->fl_size))
+ goto is_fm;
+ if (i > 0) {
+ pos--;
+ blp--;
+ }
+ }
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++, pos++, blp++) {
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ pos += 1;
+ goto is_fm;
+ }
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 1) { /* filemarks */
+ if (count < 0) {
+ count = (-count);
+ if (pos == 0)
+ goto is_bop;
+ else {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count && pos >= 0; i++, pos--, blp--) {
+ for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ pos >= 0; pos--, blp--)
+ ; /* empty */
+ if (pos < 0)
+ goto is_bop;
+ }
+ }
+ pos += 1;
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count; i++, pos++, blp++) {
+ for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
+ pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 3) { /* EOD */
+ for (blp = devip->tape_blocks[partition] + pos;
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ } else {
+ /* sequential filemarks not supported */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+ return 0;
+
+is_fm:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_FILEMARK);
+ return check_condition_result;
+
+is_eod:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, count - i,
+ 0);
+ return check_condition_result;
+
+is_bop:
+ devip->tape_location[partition] = 0;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+
+is_eop:
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+}
+
+static int resp_rewind(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ devip->tape_location[devip->tape_partition] = 0;
+
+ return 0;
+}
+
+static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
+ int part_0_size, int part_1_size)
+{
+ int i;
+
+ if (part_0_size + part_1_size > TAPE_UNITS)
+ return -1;
+ devip->tape_eop[0] = part_0_size;
+ devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
+ devip->tape_eop[1] = part_1_size;
+ devip->tape_blocks[1] = devip->tape_blocks[0] +
+ devip->tape_eop[0];
+ devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
+
+ for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+
+ devip->tape_nbr_partitions = nbr_partitions;
+ devip->tape_partition = 0;
+
+ partition_pg[3] = nbr_partitions - 1;
+ put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
+ put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
+
+ return nbr_partitions;
+}
+
+static int resp_format_medium(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ unsigned char *cmd = scp->cmnd;
+
+ if (sdebug_ptype != TYPE_TAPE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] > 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] != 0) {
+ if (devip->tape_pending_nbr_partitions > 0) {
+ res = partition_tape(devip,
+ devip->tape_pending_nbr_partitions,
+ devip->tape_pending_part_0_size,
+ devip->tape_pending_part_1_size);
+ } else
+ res = partition_tape(devip, devip->tape_nbr_partitions,
+ devip->tape_eop[0], devip->tape_eop[1]);
+ } else
+ res = partition_tape(devip, 1, TAPE_UNITS, 0);
+ if (res < 0)
+ return -EINVAL;
+
+ devip->tape_pending_nbr_partitions = -1;
+
+ return 0;
+}
+
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
return devip->nr_zones != 0;
@@ -3871,6 +4364,98 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
return ret;
}
+static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ u32 pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, sili;
+
+ if (cmd[0] != READ_6) { /* Only Read(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ fixed = (cmd[1] & 0x1) != 0;
+ sili = (cmd[1] & 0x2) != 0;
+ if (fixed && sili) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < num && pos < devip->tape_eop[partition];
+ i++, pos++, blp++) {
+ devip->tape_location[partition] = pos + 1;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_FILEMARK);
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ /* Assume no REW */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, fixed ? num - i : size,
+ 0);
+ devip->tape_location[partition] = pos;
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
+ size, i * size);
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, false);
+ if (fixed) {
+ if (blp->fl_size != devip->tape_blksize) {
+ scsi_set_resid(scp, (num - i) * size);
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, num - i,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ } else {
+ if (blp->fl_size != size) {
+ if (blp->fl_size < size)
+ scsi_set_resid(scp, size - blp->fl_size);
+ if (!sili) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, size - blp->fl_size,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ }
+ }
+ }
+ if (pos >= devip->tape_eop[partition]) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_EOM);
+ devip->tape_location[partition] = pos - 1;
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -3882,6 +4467,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_read_tape(scp, devip);
+
switch (cmd[0]) {
case READ_16:
ei_lba = 0;
@@ -4178,6 +4766,67 @@ static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
}
}
+static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size, written = 0;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ int pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, ew;
+
+ if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+
+ fixed = (cmd[1] & 1) != 0;
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+ scsi_set_resid(scp, num * size);
+ for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
+ i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
+ blp->fl_size = size;
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, true);
+ written += size;
+ scsi_set_resid(scp, num * size - written);
+ ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
+ }
+
+ devip->tape_location[partition] = pos;
+ blp->fl_size = TAPE_BLOCK_EOD_FLAG;
+ if (pos >= devip->tape_eop[partition] - 1) {
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ if (ew) { /* early warning */
+ mk_sense_info_tape(scp, NO_SENSE,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+
+ return 0;
+}
+
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -4190,6 +4839,9 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_write_tape(scp, devip);
+
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -4918,7 +5570,10 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
* a GOOD status otherwise. Model a disk with a big cache and yield
* CONDITION MET. Actually tries to bring range in main memory into the
* cache associated with the CPU(s).
+ *
+ * Opcode 0x34 is also used for READ POSITION by tape devices.
*/
+enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -4930,6 +5585,31 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *fsp = sip->storep;
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */
+ int all_length;
+ unsigned char arr[20];
+ unsigned int pos;
+
+ all_length = get_unaligned_be16(cmd + 7);
+ if ((cmd[1] & 0xfe) != 0 ||
+ all_length != 0) { /* only short form */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB,
+ all_length ? 7 : 1, 0);
+ return check_condition_result;
+ }
+ memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
+ arr[1] = devip->tape_partition;
+ pos = devip->tape_location[devip->tape_partition];
+ put_unaligned_be32(pos, arr + 4);
+ put_unaligned_be32(pos, arr + 8);
+ return fill_from_dev_buffer(scp, arr,
+ SDEBUG_READ_POSITION_ARR_SZ);
+ }
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
lba = get_unaligned_be32(cmd + 2);
nblks = get_unaligned_be16(cmd + 7);
@@ -5638,10 +6318,10 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
- struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+ struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
+ typeof(*sdsc), sd_dp);
+ struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
unsigned long flags;
- struct scsi_cmnd *scp = sqcp->scmd;
- struct sdebug_scsi_cmd *sdsc;
bool aborted;
if (sdebug_statistics) {
@@ -5652,27 +6332,23 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
if (!scp) {
pr_err("scmd=NULL\n");
- goto out;
+ return;
}
- sdsc = scsi_cmd_priv(scp);
spin_lock_irqsave(&sdsc->lock, flags);
aborted = sd_dp->aborted;
if (unlikely(aborted))
sd_dp->aborted = false;
- ASSIGN_QUEUED_CMD(scp, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (aborted) {
pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
blk_abort_request(scsi_cmd_to_rq(scp));
- goto out;
+ return;
}
scsi_done(scp); /* callback to mid level */
-out:
- sdebug_free_queued_cmd(sqcp);
}
/* When high resolution timer goes off this function is called. */
@@ -5835,6 +6511,10 @@ static struct sdebug_dev_info *sdebug_device_create(
} else {
devip->zoned = false;
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ }
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
spin_lock_init(&devip->list_lock);
@@ -5905,6 +6585,21 @@ static int scsi_debug_sdev_configure(struct scsi_device *sdp,
if (devip == NULL)
return 1; /* no resources, will be marked offline */
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (!devip->tape_blocks[0]) {
+ devip->tape_blocks[0] =
+ kcalloc(TAPE_UNITS, sizeof(struct tape_block),
+ GFP_KERNEL);
+ if (!devip->tape_blocks[0])
+ return 1;
+ }
+ devip->tape_pending_nbr_partitions = -1;
+ if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ return 1;
+ }
+ }
sdp->hostdata = devip;
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
@@ -5950,31 +6645,41 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
debugfs_remove(devip->debugfs_entry);
+ if (sdebug_ptype == TYPE_TAPE) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ }
+
/* make this slot available for re-use */
devip->used = false;
sdp->hostdata = NULL;
}
-/* Returns true if we require the queued memory to be freed by the caller. */
-static bool stop_qc_helper(struct sdebug_defer *sd_dp,
- enum sdeb_defer_type defer_t)
+/* Returns true if the command was cancelled or its callback is not currently running. */
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+ enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
+
+ lockdep_assert_held(&sdsc->lock);
+
if (defer_t == SDEB_DEFER_HRT) {
int res = hrtimer_try_to_cancel(&sd_dp->hrt);
switch (res) {
- case 0: /* Not active, it must have already run */
case -1: /* -1 It's executing the CB */
return false;
+ case 0: /* Not active, it must have already run */
case 1: /* Was active, we've now cancelled */
default:
return true;
}
} else if (defer_t == SDEB_DEFER_WQ) {
/* Cancel if pending */
- if (cancel_work_sync(&sd_dp->ew.work))
+ if (cancel_work(&sd_dp->ew.work))
return true;
- /* Was not pending, so it must have run */
+ /* callback may be running, so return false */
return false;
} else if (defer_t == SDEB_DEFER_POLL) {
return true;
@@ -5983,28 +6688,6 @@ static bool stop_qc_helper(struct sdebug_defer *sd_dp,
return false;
}
-
-static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
-{
- enum sdeb_defer_type l_defer_t;
- struct sdebug_defer *sd_dp;
- struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
- struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
-
- lockdep_assert_held(&sdsc->lock);
-
- if (!sqcp)
- return false;
- sd_dp = &sqcp->sd_dp;
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- ASSIGN_QUEUED_CMD(cmnd, NULL);
-
- if (stop_qc_helper(sd_dp, l_defer_t))
- sdebug_free_queued_cmd(sqcp);
-
- return true;
-}
-
/*
* Called from scsi_debug_abort() only, which is for timed-out cmd.
*/
@@ -6076,7 +6759,7 @@ static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- bool ok = scsi_debug_abort_cmnd(SCpnt);
+ bool aborted = scsi_debug_abort_cmnd(SCpnt);
u8 *cmd = SCpnt->cmnd;
u8 opcode = cmd[0];
@@ -6085,7 +6768,8 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: command%s found\n", __func__,
- ok ? "" : " not");
+ aborted ? "" : " not");
+
if (sdebug_fail_abort(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
@@ -6093,6 +6777,9 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
return FAILED;
}
+ if (!aborted)
+ return FAILED;
+
return SUCCESS;
}
@@ -6144,6 +6831,22 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
return 0;
}
+static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
+{
+ if (sdebug_ptype == TYPE_TAPE) {
+ int i;
+
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_partition = 0;
+ devip->tape_dce = 0;
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_pending_nbr_partitions = -1;
+ /* Don't reset partitioning? */
+ }
+}
+
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
@@ -6157,8 +6860,10 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
scsi_debug_stop_all_queued(sdp);
- if (devip)
+ if (devip) {
set_bit(SDEBUG_UA_POR, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
+ }
if (sdebug_fail_lun_reset(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
@@ -6196,6 +6901,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if (devip->target == sdp->id) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6227,6 +6933,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
@@ -6250,6 +6957,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6366,33 +7074,6 @@ static bool inject_on_this_cmd(void)
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
-
-void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
-{
- if (sqcp)
- kmem_cache_free(queued_cmd_cache, sqcp);
-}
-
-static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
-{
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_defer *sd_dp;
-
- sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
- if (!sqcp)
- return NULL;
-
- sd_dp = &sqcp->sd_dp;
-
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-
- sqcp->scmd = scmd;
-
- return sqcp;
-}
-
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -6409,7 +7090,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
unsigned long flags;
u64 ns_from_boot = 0;
- struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
@@ -6441,12 +7121,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
}
- sqcp = sdebug_alloc_queued_cmd(cmnd);
- if (!sqcp) {
- pr_err("%s no alloc\n", __func__);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
ns_from_boot = ktime_get_boottime_ns();
@@ -6494,7 +7169,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (kt <= d) { /* elapsed duration >= kt */
/* call scsi_done() from this thread */
- sdebug_free_queued_cmd(sqcp);
scsi_done(cmnd);
return 0;
}
@@ -6507,13 +7181,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
/* schedule the invocation of scsi_done() for a later time */
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
/*
@@ -6537,13 +7209,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
spin_unlock_irqrestore(&sdsc->lock, flags);
@@ -6835,7 +7505,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
&data);
if (f >= 0) {
- seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
+ seq_printf(m, " BUSY: %s: %d,%d\n",
"first,last bits", f, l);
}
}
@@ -7910,12 +8580,6 @@ static int __init scsi_debug_init(void)
hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
- if (!queued_cmd_cache) {
- ret = -ENOMEM;
- goto driver_unreg;
- }
-
sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
if (IS_ERR_OR_NULL(sdebug_debugfs_root))
pr_info("%s: failed to create initial debugfs directory\n", __func__);
@@ -7942,8 +8606,6 @@ static int __init scsi_debug_init(void)
return 0;
-driver_unreg:
- driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
@@ -7959,7 +8621,6 @@ static void __exit scsi_debug_exit(void)
for (; k; k--)
sdebug_do_remove_host(true);
- kmem_cache_destroy(queued_cmd_cache);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -8343,7 +9004,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
struct sdebug_defer *sd_dp;
u32 unique_tag = blk_mq_unique_tag(rq);
u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
- struct sdebug_queued_cmd *sqcp;
unsigned long flags;
int queue_num = data->queue_num;
ktime_t time;
@@ -8359,13 +9019,7 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
time = ktime_get_boottime();
spin_lock_irqsave(&sdsc->lock, flags);
- sqcp = TO_QUEUED_CMD(cmd);
- if (!sqcp) {
- spin_unlock_irqrestore(&sdsc->lock, flags);
- return true;
- }
-
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
@@ -8375,8 +9029,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
}
-
- ASSIGN_QUEUED_CMD(cmd, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (sdebug_statistics) {
@@ -8385,8 +9037,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
atomic_inc(&sdebug_miss_cpus);
}
- sdebug_free_queued_cmd(sqcp);
-
scsi_done(cmd); /* callback to mid level */
(*data->num_entries)++;
return true;
@@ -8701,8 +9351,12 @@ err_out:
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
spin_lock_init(&sdsc->lock);
+ hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
return 0;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 815e7d63f3e2..376b8897ab90 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -547,6 +547,18 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
scsi_report_sense(sdev, &sshdr);
+ if (sshdr.sense_key == UNIT_ATTENTION) {
+ /*
+ * Increment the counters for Power on/Reset or New Media so
+ * that all ULDs interested in these can see that those have
+ * happened, even if someone else gets the sense data.
+ */
+ if (sshdr.asc == 0x28)
+ scmd->device->ua_new_media_ctr++;
+ else if (sshdr.asc == 0x29)
+ scmd->device->ua_por_ctr++;
+ }
+
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
@@ -711,6 +723,13 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
return SUCCESS;
case COMPLETED:
+ /*
+ * A command using command duration limits (CDL) with a
+ * descriptor set with policy 0xD may be completed with success
+ * and the sense data DATA CURRENTLY UNAVAILABLE, indicating
+ * that the command was in fact aborted because it exceeded its
+ * duration limit. Never retry these commands.
+ */
if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) {
set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
req->cmd_flags |= REQ_FAILFAST_DEV;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f1cfe0bb89b2..0d29470e86b0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1149,7 +1149,7 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
- count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+ count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
unsigned int pad_len =
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 96d7e1a9a7c7..4833b8fe251b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -151,8 +151,9 @@ int scsi_complete_async_scans(void)
struct async_scan_data *data;
do {
- if (list_empty(&scanning_hosts))
- return 0;
+ scoped_guard(spinlock, &async_scan_lock)
+ if (list_empty(&scanning_hosts))
+ return 0;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index be4aef0f4f99..055a03a83ad6 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -17,7 +17,9 @@ static const struct ctl_table scsi_table[] = {
.data = &scsi_logging_level,
.maxlen = sizeof(scsi_logging_level),
.mode = 0644,
- .proc_handler = proc_dointvec },
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX },
};
static struct ctl_table_header *scsi_table_header;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ebbd50ec0cda..74a6830b7ed8 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -163,9 +163,11 @@ static const char *st_formats[] = {
static int debugging = DEBUG;
+/* Setting these non-zero may risk resets not being recognized */
#define MAX_RETRIES 0
#define MAX_WRITE_RETRIES 0
#define MAX_READY_RETRIES 0
+
#define NO_TAPE NOT_READY
#define ST_TIMEOUT (900 * HZ)
@@ -357,10 +359,18 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
{
int result = SRpnt->result;
u8 scode;
+ unsigned int ctr;
DEB(const char *stp;)
char *name = STp->name;
struct st_cmdstatus *cmdstatp;
+ ctr = scsi_get_ua_por_ctr(STp->device);
+ if (ctr != STp->por_ctr) {
+ STp->por_ctr = ctr;
+ STp->pos_unknown = 1; /* ASC => power on / reset */
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.");
+ }
+
if (!result)
return 0;
@@ -413,10 +423,11 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
- if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
+ if (cmdstatp->have_sense && scode == UNIT_ATTENTION &&
+ cmdstatp->sense_hdr.asc == 0x29 && !STp->pos_unknown) {
STp->pos_unknown = 1; /* ASC => power on / reset */
-
- STp->pos_unknown |= STp->device->was_reset;
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.");
+ }
if (cmdstatp->have_sense &&
scode == RECOVERED_ERROR
@@ -952,7 +963,6 @@ static void reset_state(struct scsi_tape *STp)
STp->partition = find_partition(STp);
if (STp->partition < 0)
STp->partition = 0;
- STp->new_partition = STp->partition;
}
}
@@ -969,6 +979,7 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
{
int attentions, waits, max_wait, scode;
int retval = CHKRES_READY, new_session = 0;
+ unsigned int ctr;
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt = NULL;
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -1025,6 +1036,13 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
}
}
+ ctr = scsi_get_ua_new_media_ctr(STp->device);
+ if (ctr != STp->new_media_ctr) {
+ STp->new_media_ctr = ctr;
+ new_session = 1;
+ DEBC_printk(STp, "New tape session.");
+ }
+
retval = (STp->buffer)->syscall_result;
if (!retval)
retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
@@ -2897,7 +2915,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
timeout = STp->long_timeout * 8;
DEBC_printk(STp, "Erasing tape.\n");
- fileno = blkno = at_sm = 0;
break;
case MTSETBLK: /* Set block length */
case MTSETDENSITY: /* Set tape density */
@@ -2930,14 +2947,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
if (cmd_in == MTSETDENSITY) {
(STp->buffer)->b_data[4] = arg;
STp->density_changed = 1; /* At least we tried ;-) */
+ STp->changed_density = arg;
} else if (cmd_in == SET_DENS_AND_BLK)
(STp->buffer)->b_data[4] = arg >> 24;
else
(STp->buffer)->b_data[4] = STp->density;
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
ltmp = arg & MT_ST_BLKSIZE_MASK;
- if (cmd_in == MTSETBLK)
+ if (cmd_in == MTSETBLK) {
STp->blksize_changed = 1; /* At least we tried ;-) */
+ STp->changed_blksize = arg;
+ }
} else
ltmp = STp->block_size;
(STp->buffer)->b_data[9] = (ltmp >> 16);
@@ -3084,7 +3104,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd_in == MTSETDRVBUFFER ||
cmd_in == SET_DENS_AND_BLK) {
if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
- !(STp->use_pf & PF_TESTED)) {
+ cmdstatp->sense_hdr.asc == 0x24 &&
+ (STp->device)->scsi_level <= SCSI_2 &&
+ !(STp->use_pf & PF_TESTED)) {
/* Try the other possible state of Page Format if not
already tried */
STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
@@ -3636,9 +3658,23 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
retval = (-EIO);
goto out;
}
- reset_state(STp);
- /* remove this when the midlevel properly clears was_reset */
- STp->device->was_reset = 0;
+ reset_state(STp); /* Clears pos_unknown */
+
+ /* Fix the device settings after reset, ignore errors */
+ if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK ||
+ mtc.mt_op == MTEOM) {
+ if (STp->can_partitions) {
+ /* STp->new_partition contains the
+ * latest partition set
+ */
+ STp->partition = 0;
+ switch_partition(STp);
+ }
+ if (STp->density_changed)
+ st_int_ioctl(STp, MTSETDENSITY, STp->changed_density);
+ if (STp->blksize_changed)
+ st_int_ioctl(STp, MTSETBLK, STp->changed_blksize);
+ }
}
if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
@@ -4122,7 +4158,7 @@ static void validate_options(void)
*/
static int __init st_setup(char *str)
{
- int i, len, ints[5];
+ int i, len, ints[ARRAY_SIZE(parms) + 1];
char *stp;
stp = get_options(str, ARRAY_SIZE(ints), ints);
@@ -4384,6 +4420,9 @@ static int st_probe(struct device *dev)
goto out_idr_remove;
}
+ tpnt->new_media_ctr = scsi_get_ua_new_media_ctr(SDp);
+ tpnt->por_ctr = scsi_get_ua_por_ctr(SDp);
+
dev_set_drvdata(dev, tpnt);
@@ -4665,6 +4704,24 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(options);
+/**
+ * position_lost_in_reset_show - Value 1 indicates that reads, writes, etc.
+ * are blocked because a device reset has occurred and no operation positioning
+ * the tape has been issued.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t position_lost_in_reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ struct scsi_tape *STp = STm->tape;
+
+ return sprintf(buf, "%d", STp->pos_unknown);
+}
+static DEVICE_ATTR_RO(position_lost_in_reset);
+
/* Support for tape stats */
/**
@@ -4849,6 +4906,7 @@ static struct attribute *st_dev_attrs[] = {
&dev_attr_default_density.attr,
&dev_attr_default_compression.attr,
&dev_attr_options.attr,
+ &dev_attr_position_lost_in_reset.attr,
NULL,
};
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 1aaaf5369a40..0d7c4b8c2c8a 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -165,6 +165,7 @@ struct scsi_tape {
unsigned char compression_changed;
unsigned char drv_buffer;
unsigned char density;
+ unsigned char changed_density;
unsigned char door_locked;
unsigned char autorew_dev; /* auto-rewind device */
unsigned char rew_at_close; /* rewind necessary at close */
@@ -172,11 +173,16 @@ struct scsi_tape {
unsigned char cleaning_req; /* cleaning requested? */
unsigned char first_tur; /* first TEST UNIT READY */
int block_size;
+ int changed_blksize;
int min_block;
int max_block;
int recover_count; /* From tape opening */
int recover_reg; /* From last status call */
+ /* The saved values of midlevel counters */
+ unsigned int new_media_ctr;
+ unsigned int por_ctr;
+
#if DEBUG
unsigned char write_pending;
int nbr_finished;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a8614e54544e..35db061ae3ec 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -776,7 +776,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0) {
- dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+ dev_err(dev, "Failed to create sub-channel: op=%d, host=0x%x\n",
vstor_packet->operation, vstor_packet->status);
return;
}
@@ -1183,7 +1183,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
storvsc_log_ratelimited(device, loglevel,
- "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x host 0x%x\n",
scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,