author | Davidlohr Bueso <dave@stgolabs.net> | 2023-05-23 10:09:27 -0700
---|---|---
committer | Dan Williams <dan.j.williams@intel.com> | 2023-05-23 12:55:12 -0700
commit | ccadf1310fb0bc8d2cbcd14f94a6279c12ea9bee (patch) |
tree | 71d097162ca95f1faf2aea4651fe0f27f0f88715 /drivers/cxl/pci.c |
parent | 9f7a320d167cd7f310114cf25009ceedf6a323ed (diff) |
cxl/mbox: Add background cmd handling machinery
This adds support for handling background operations, as defined in
the CXL 3.0 spec. Commands that would take too long (over ~2 seconds)
can instead run asynchronously in the background, from the hardware's
point of view.
The driver handles such commands synchronously, blocking all other
incoming commands for a caller-specified period of time. This allows
the operation to be time-sliced: the caller can send incremental
requests instead of monopolizing the driver/device. If the driver and
hardware fall out of sync (a timeout), the mailbox state is simply
treated as indeterminate until the next successful submission. Such
timeouts are expected to be rare, indicating either a real device
problem or a driver issue where the background operation must be
shrunk to fit within the timeout.
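
As a purely illustrative, hypothetical caller-side sketch (not part of this
patch): the opcode and timing values below are made up, but poll_interval_ms
and poll_count are the struct cxl_mbox_cmd fields consumed by
__cxl_pci_mbox_send_cmd() in the diff further down, and together they bound
how long the submitter is willing to block.

/* Hypothetical example; only the field names are taken from the patch. */
static int send_long_running_cmd(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_cmd cmd = {
		.opcode = 0x4400,		/* illustrative long-running opcode */
		.poll_interval_ms = 1000,	/* re-check completion every second... */
		.poll_count = 30,		/* ...for at most ~30 seconds overall */
	};

	/* blocks until the background operation finishes or -ETIMEDOUT */
	return cxlds->mbox_send(cxlds, &cmd);
}

The worst-case block time is therefore poll_interval_ms * poll_count, which
is exactly what the timeout message in the second hunk below reports.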
On devices where mbox interrupts are supported, the driver still uses
a poller that wakes up at the specified wait intervals. The irq
handler simply wakes the blocked command, which is safe even against a
task that is concurrently waking up (timing out) or has already been
awoken. Similarly, any irq setup error during probing falls back to
polling, avoiding an unnecessary probe failure.
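
The wake-up/timeout interplay relies on the kernel's rcuwait primitives; the
fragment below is a minimal, CXL-agnostic sketch (assuming only
<linux/rcuwait.h> and <linux/sched.h>, and using the non-timeout wait variant)
of why a late or racing wake-up is harmless: the waiter re-evaluates its
condition on every wake-up, and waking a task that is no longer sleeping is a
no-op.

/* Minimal sketch of the rcuwait idiom; illustrative, not CXL code. */
#include <linux/rcuwait.h>
#include <linux/sched.h>

static struct rcuwait example_wait;
static bool example_done;

static void example_init(void)
{
	rcuwait_init(&example_wait);
}

static void example_waiter(void)
{
	/* sleeps until the condition holds; re-checked on every wake-up */
	rcuwait_wait_event(&example_wait, READ_ONCE(example_done),
			   TASK_UNINTERRUPTIBLE);
}

/* may run from irq context, as cxl_pci_mbox_irq() does below */
static void example_waker(void)
{
	WRITE_ONCE(example_done, true);
	/* harmless if the waiter timed out or already woke up */
	rcuwait_wake_up(&example_wait);
}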
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Link: https://lore.kernel.org/r/20230523170927.20685-5-dave@stgolabs.net
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/cxl/pci.c')
-rw-r--r-- | drivers/cxl/pci.c | 89 |
1 file changed, 89 insertions, 0 deletions
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 18b8f3ce680c..a78e40e6d0e0 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -105,6 +105,26 @@ static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
 					 NULL, dev_id);
 }
 
+static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
+{
+	u64 reg;
+
+	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+	return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100;
+}
+
+static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
+{
+	struct cxl_dev_id *dev_id = id;
+	struct cxl_dev_state *cxlds = dev_id->cxlds;
+
+	/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
+	if (cxl_mbox_background_complete(cxlds))
+		rcuwait_wake_up(&cxlds->mbox_wait);
+
+	return IRQ_HANDLED;
+}
+
 /**
  * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
  * @cxlds: The device state to communicate with.
@@ -198,6 +218,50 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
 	mbox_cmd->return_code =
 		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
 
+	/*
+	 * Handle the background command in a synchronous manner.
+	 *
+	 * All other mailbox commands will serialize/queue on the mbox_mutex,
+	 * which we currently hold. Furthermore this also guarantees that
+	 * cxl_mbox_background_complete() checks are safe amongst each other,
+	 * in that no new bg operation can occur in between.
+	 *
+	 * Background operations are timesliced in accordance with the nature
+	 * of the command. In the event of timeout, the mailbox state is
+	 * indeterminate until the next successful command submission and the
+	 * driver can get back in sync with the hardware state.
+	 */
+	if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
+		u64 bg_status_reg;
+		int i, timeout = mbox_cmd->poll_interval_ms;
+
+		dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
+			mbox_cmd->opcode);
+
+		for (i = 0; i < mbox_cmd->poll_count; i++) {
+			if (rcuwait_wait_event_timeout(&cxlds->mbox_wait,
+				       cxl_mbox_background_complete(cxlds),
+				       TASK_UNINTERRUPTIBLE,
+				       msecs_to_jiffies(timeout)) > 0)
+				break;
+		}
+
+		if (!cxl_mbox_background_complete(cxlds)) {
+			dev_err(dev, "timeout waiting for background (%d ms)\n",
+				timeout * mbox_cmd->poll_count);
+			return -ETIMEDOUT;
+		}
+
+		bg_status_reg = readq(cxlds->regs.mbox +
+				      CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
+		mbox_cmd->return_code =
+			FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK,
+				  bg_status_reg);
+		dev_dbg(dev,
+			"Mailbox background operation (0x%04x) completed\n",
+			mbox_cmd->opcode);
+	}
+
 	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
 		dev_dbg(dev, "Mailbox operation had an error: %s\n",
 			cxl_mbox_cmd_rc2str(mbox_cmd));
@@ -292,6 +356,31 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
 	dev_dbg(cxlds->dev, "Mailbox payload sized %zu", cxlds->payload_size);
 
+	rcuwait_init(&cxlds->mbox_wait);
+
+	if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
+		u32 ctrl;
+		int irq, msgnum;
+		struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+
+		msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+		irq = pci_irq_vector(pdev, msgnum);
+		if (irq < 0)
+			goto mbox_poll;
+
+		if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
+			goto mbox_poll;
+
+		/* enable background command mbox irq support */
+		ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+		ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+		writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+
+		return 0;
+	}
+
+mbox_poll:
+	dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
+
 	return 0;
 }