author:    Jacob Keller <jacob.e.keller@intel.com>    2020-07-23 17:22:03 -0700
committer: David S. Miller <davem@davemloft.net>      2020-07-28 17:07:06 -0700
commit:    d69ea414c9b498064d4cd3ebbe2fd4b8298568bc
tree:      1fab4c91bfe14e1f4f75e1ae699fbb154859d272 /drivers/net/ethernet/intel/ice/ice_main.c
parent:    2ab560a78e3b2f6b19626dcd47790f35b28cc5db
ice: implement device flash update via devlink
Use the newly added pldmfw library to implement device flash update for
the Intel ice networking device driver. This support uses the devlink
flash update interface.
The main parts of the flash include the Option ROM, the netlist module,
and the main NVM data. The PLDM firmware file contains modules for each
of these components.
The driver uses the pldmfw library to scan the provided firmware file
for the three major components: "fw.undi" for the Option ROM, "fw.mgmt"
for the main NVM module containing the primary device firmware, and
"fw.netlist" for the netlist module.
The flash is divided into two banks: the active bank, which contains
the running firmware, and the inactive bank, which is used for the
update. Each module is updated in a staged process. First, the inactive bank is
erased, preparing the device for update. Second, the contents of the
component are copied to the inactive portion of the flash. After all
components are updated, the driver signals the device to switch the
active bank during the next EMP reset (which would usually occur during
the next reboot).
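In rough pseudocode, the staged flow for a single module looks like the
following. The helpers erase_bank(), write_block(), and activate_banks()
are hypothetical stand-ins, not the actual ice AdminQ wrappers:

/* Illustrative staging sequence for one flash module. erase_bank(),
 * write_block(), and activate_banks() are hypothetical stand-ins for
 * the driver's real AdminQ wrappers. */
static int stage_module(struct ice_hw *hw, u16 module,
			const u8 *data, u32 size)
{
	u32 offset;
	int err;

	/* Step 1: erase the module's inactive bank */
	err = erase_bank(hw, module);
	if (err)
		return err;

	/* Step 2: copy the new contents, one 4k block at a time */
	for (offset = 0; offset < size; offset += SZ_4K) {
		u32 len = min_t(u32, size - offset, SZ_4K);

		err = write_block(hw, module, offset, len, data + offset);
		if (err)
			return err;
	}

	return 0;
}

/* Step 3, after every module has been staged: activate_banks() asks
 * firmware to switch to the inactive bank on the next EMP reset. */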
Although the firmware AdminQ interface does report an immediate status
for each command, the NVM erase and NVM write commands complete
asynchronously, and the driver must not continue writing until previous
erase and write commands have finished. The real status of these
commands is returned over the receive AdminQ. Implement a simple
interface that uses a wait queue so that the main update thread can
sleep until the completion status is reported by firmware. This matters
most when erasing the inactive banks, which can take quite a while in
practice.
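A caller in the update path can then pair each slow command with a wait
on its completion, along these lines (the opcode constant and the five
minute timeout here are illustrative values, not necessarily what the
driver uses):

/* Sleep until firmware reports completion of an NVM erase on the
 * receive AdminQ. The timeout value is illustrative. */
struct ice_rq_event_info event = { 0 };
int err;

err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 5 * 60 * HZ,
			    &event);
if (err)
	dev_err(ice_pf_to_dev(pf), "Timed out waiting for erase\n");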
To help the devlink application, and other applications based on the
devlink netlink interface, visualize the process, status is reported
via devlink_flash_update_status_notify(). While status is reported
after each 4k block when writing, there is no meaningful progress to
report while erasing; the driver simply must wait for the complete
module erasure to finish.
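Each such progress report is a single call of this shape (the status
and component strings shown are example values):

/* Report that `offset` of `size` bytes of a component have been
 * written; the strings here are examples. */
devlink_flash_update_status_notify(devlink, "Flashing",
				   "fw.mgmt", offset, size);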
With this implementation, basic flash update for the ice hardware is
supported.
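From userspace, the update can then be driven with the standard devlink
command, for example (the PCI address and firmware file name are
placeholders; the file is looked up relative to /lib/firmware):

  $ devlink dev flash pci/0000:af:00.0 file fw_update.bin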
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_main.c   154
1 file changed, 154 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 16a4096bb780..646c2cbc6904 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -914,6 +914,151 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
 	return status;
 }
 
+enum ice_aq_task_state {
+	ICE_AQ_TASK_WAITING = 0,
+	ICE_AQ_TASK_COMPLETE,
+	ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+	struct hlist_node entry;
+
+	u16 opcode;
+	struct ice_rq_event_info *event;
+	enum ice_aq_task_state state;
+};
+
+/**
+ * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode to wait for
+ * @timeout: how long to wait, in jiffies
+ * @event: storage for the event info
+ *
+ * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
+ * current thread will be put to sleep until the specified event occurs or
+ * until the given timeout is reached.
+ *
+ * To obtain only the descriptor contents, pass an event without an allocated
+ * msg_buf. If the complete data buffer is desired, allocate the
+ * event->msg_buf with enough space ahead of time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+			  struct ice_rq_event_info *event)
+{
+	struct ice_aq_task *task;
+	long ret;
+	int err;
+
+	task = kzalloc(sizeof(*task), GFP_KERNEL);
+	if (!task)
+		return -ENOMEM;
+
+	INIT_HLIST_NODE(&task->entry);
+	task->opcode = opcode;
+	task->event = event;
+	task->state = ICE_AQ_TASK_WAITING;
+
+	spin_lock_bh(&pf->aq_wait_lock);
+	hlist_add_head(&task->entry, &pf->aq_wait_list);
+	spin_unlock_bh(&pf->aq_wait_lock);
+
+	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+					       timeout);
+	switch (task->state) {
+	case ICE_AQ_TASK_WAITING:
+		err = ret < 0 ? ret : -ETIMEDOUT;
+		break;
+	case ICE_AQ_TASK_CANCELED:
+		err = ret < 0 ? ret : -ECANCELED;
+		break;
+	case ICE_AQ_TASK_COMPLETE:
+		err = ret < 0 ? ret : 0;
+		break;
+	default:
+		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+		err = -EINVAL;
+		break;
+	}
+
+	spin_lock_bh(&pf->aq_wait_lock);
+	hlist_del(&task->entry);
+	spin_unlock_bh(&pf->aq_wait_lock);
+	kfree(task);
+
+	return err;
+}
+
+/**
+ * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
+ * @pf: pointer to the PF private structure
+ * @opcode: the opcode of the event
+ * @event: the event to check
+ *
+ * Loops over the current list of pending threads waiting for an AdminQ event.
+ * For each matching task, copy the contents of the event into the task
+ * structure and wake up the thread.
+ *
+ * If multiple threads wait for the same opcode, they will all be woken up.
+ *
+ * Note that event->msg_buf will only be duplicated if the event has a buffer
+ * with enough space already allocated. Otherwise, only the descriptor and
+ * message length will be copied.
+ */
+static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
+				struct ice_rq_event_info *event)
+{
+	struct ice_aq_task *task;
+	bool found = false;
+
+	spin_lock_bh(&pf->aq_wait_lock);
+	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
+		if (task->state || task->opcode != opcode)
+			continue;
+
+		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
+		task->event->msg_len = event->msg_len;
+
+		/* Only copy the data buffer if a destination was set */
+		if (task->event->msg_buf &&
+		    task->event->buf_len > event->buf_len) {
+			memcpy(task->event->msg_buf, event->msg_buf,
+			       event->buf_len);
+			task->event->buf_len = event->buf_len;
+		}
+
+		task->state = ICE_AQ_TASK_COMPLETE;
+		found = true;
+	}
+	spin_unlock_bh(&pf->aq_wait_lock);
+
+	if (found)
+		wake_up(&pf->aq_wait_queue);
+}
+
+/**
+ * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
+ * @pf: the PF private structure
+ *
+ * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
+ * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
+ */
+static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
+{
+	struct ice_aq_task *task;
+
+	spin_lock_bh(&pf->aq_wait_lock);
+	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
+		task->state = ICE_AQ_TASK_CANCELED;
+	spin_unlock_bh(&pf->aq_wait_lock);
+
+	wake_up(&pf->aq_wait_queue);
+}
+
 /**
  * __ice_clean_ctrlq - helper function to clean controlq rings
  * @pf: ptr to struct ice_pf
@@ -1010,6 +1155,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 
 		opcode = le16_to_cpu(event.desc.opcode);
 
+		/* Notify any thread that might be waiting for this event */
+		ice_aq_check_events(pf, opcode, &event);
+
 		switch (opcode) {
 		case ice_aqc_opc_get_link_status:
 			if (ice_handle_link_event(pf, &event))
@@ -3089,6 +3237,10 @@ static int ice_init_pf(struct ice_pf *pf)
 	mutex_init(&pf->sw_mutex);
 	mutex_init(&pf->tc_mutex);
 
+	INIT_HLIST_HEAD(&pf->aq_wait_list);
+	spin_lock_init(&pf->aq_wait_lock);
+	init_waitqueue_head(&pf->aq_wait_queue);
+
 	/* setup service timer and periodic service task */
 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
 	pf->serv_tmr_period = HZ;
@@ -4017,6 +4169,8 @@ static void ice_remove(struct pci_dev *pdev)
 	set_bit(__ICE_DOWN, pf->state);
 	ice_service_task_stop(pf);
 
+	ice_aq_cancel_waiting_tasks(pf);
+
 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
 	if (!ice_is_safe_mode(pf))
 		ice_remove_arfs(pf);