Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c | 334
1 file changed, 105 insertions, 229 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index e92972fd6ecf..21a8a672fbb2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -37,47 +37,12 @@
 #include "iwl-agn-hw.h"
 #include "iwl-op-mode.h"
 #include "iwl-trans-pcie-int.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "iwl-commands.h"

 #define IWL_TX_CRC_SIZE 4
 #define IWL_TX_DELIMITER_SIZE 4

-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- *	VO	0
- *	VI	1
- *	BE	2
- *	BK	3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific code), the AC->hw
- * queue mapping is the identity mapping.
- */
-
-static const u8 tid_to_ac[] = {
-	IEEE80211_AC_BE,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BE,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VO,
-	IEEE80211_AC_VO
-};
-
-
 /**
  * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
@@ -95,7 +60,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;
+		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

@@ -136,13 +101,15 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
 	if (txq->need_update == 0)
 		return;

-	if (cfg(trans)->base_params->shadow_reg_enable) {
+	if (trans->cfg->base_params->shadow_reg_enable) {
 		/* shadow register enabled */
 		iwl_write32(trans, HBUS_TARG_WRPTR,
 			    txq->q.write_ptr | (txq_id << 8));
 	} else {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
 		/* if we're trying to save power */
-		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
 			/* wake up nic if it's powered down ...
 			 * uCode will wake up, and interrupt us again, so next
 			 * time we'll skip this part. */
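
Throughout this patch, accesses like txq->entries[idx].cmd replace the three parallel per-slot arrays the transport used to carry (txq->cmd[], txq->meta[] and txq->skbs[]) with a single array of per-slot records, so one index reaches the command, its metadata and the frame together. A minimal sketch of that consolidation; the struct and field names below are inferred from the hunks in this patch (the real definition lives in iwl-trans-pcie-int.h and may differ in detail):

	/* Sketch only: one record per TFD slot, replacing cmd[]/meta[]/skbs[]. */
	struct iwl_pcie_txq_entry {
		struct iwl_device_cmd *cmd;	/* host command for this slot */
		struct sk_buff *skb;		/* data frame; NULL on the command queue */
		struct iwl_cmd_meta meta;	/* flags and DMA-unmap bookkeeping */
	};

	struct iwl_tx_queue {
		/* ... queue pointers, TFD ring, lock, stuck_timer, ... */
		struct iwl_pcie_txq_entry *entries;	/* one per slot */
	};

One allocation and one index then cover what used to take three, which is what lets hunks below test txq->entries where the old code tested txq->skbs.
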
@@ -256,13 +223,14 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,

 	lockdep_assert_held(&txq->lock);

-	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
+	iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
+			 &tfd_tmp[index], dma_dir);

 	/* free SKB */
-	if (txq->skbs) {
+	if (txq->entries) {
 		struct sk_buff *skb;

-		skb = txq->skbs[index];
+		skb = txq->entries[index].skb;

 		/* Can be called from irqs-disabled context
 		 * If skb is not NULL, it means that the whole queue is being
@@ -270,7 +238,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 		 */
 		if (skb) {
 			iwl_op_mode_free_skb(trans->op_mode, skb);
-			txq->skbs[index] = NULL;
+			txq->entries[index].skb = NULL;
 		}
 	}
 }
@@ -393,7 +361,7 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 	u8 sta_id = 0;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;
+		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

@@ -448,20 +416,17 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)

 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index)
 {
-	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
+	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
 			(index & 0xff) | (txq_id << 8));
 	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
 }

 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
-					struct iwl_tx_queue *txq,
-					int tx_fifo_id, int scd_retry)
+				   struct iwl_tx_queue *txq,
+				   int tx_fifo_id, bool active)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int txq_id = txq->q.id;
-	int active =
-		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

 	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
 			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -469,77 +434,22 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
 			SCD_QUEUE_STTS_REG_MSK);

-	txq->sched_retry = scd_retry;
-
 	if (active)
-		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
-			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+		IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
+				    txq_id, tx_fifo_id);
 	else
-		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
-			scd_retry ? "BA" : "AC/CMD", txq_id);
"BA" : "AC/CMD", txq_id); -} - -static inline int get_ac_from_tid(u16 tid) -{ - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return tid_to_ac[tid]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; -} - -static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie, - u8 ctx, u16 tid) -{ - const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx]; - if (likely(tid < ARRAY_SIZE(tid_to_ac))) - return ac_to_fifo[tid_to_ac[tid]]; - - /* no support for TIDs 8-15 yet */ - return -EINVAL; + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } -static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id) +void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo, + int sta_id, int tid, int frame_limit, u16 ssn) { - if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE) - return false; - return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues); -} - -void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, - enum iwl_rxon_context_id ctx, int sta_id, - int tid, int frame_limit, u16 ssn) -{ - int tx_fifo, txq_id; - u16 ra_tid; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; + u16 ra_tid = BUILD_RAxTID(sta_id, tid); - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - if (WARN_ON(sta_id == IWL_INVALID_STATION)) - return; - if (WARN_ON(tid >= IWL_MAX_TID_COUNT)) - return; - - tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid); - if (WARN_ON(tx_fifo < 0)) { - IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo); - return; - } - - txq_id = trans_pcie->agg_txq[sta_id][tid]; - if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) { - IWL_ERR(trans, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWLAGN_FIRST_AMPDU_QUEUE, - IWLAGN_FIRST_AMPDU_QUEUE + - hw_params(trans).num_ampdu_queues - 1); - return; - } - - ra_tid = BUILD_RAxTID(sta_id, tid); + if (test_and_set_bit(txq_id, trans_pcie->queue_used)) + WARN_ONCE(1, "queue %d already used - expect issues", txq_id); spin_lock_irqsave(&trans_pcie->irq_lock, flags); @@ -550,10 +460,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); /* Set this queue as a chain-building queue */ - iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id)); + iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); /* enable aggregations for the queue */ - iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id)); + iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); /* Place first TFD at index corresponding to start sequence number. 
@@ -563,92 +473,42 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,

 	/* Set up Tx window size and frame limit for this queue */
 	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
-			sizeof(u32),
-			((frame_limit <<
-			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
-			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-			((frame_limit <<
-			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

 	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

 	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
 	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
-					tx_fifo, 1);
-
-	trans_pcie->txq[txq_id].sta_id = sta_id;
-	trans_pcie->txq[txq_id].tid = tid;
+				      fifo, true);

 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }

-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int txq_id;
-
-	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
-	     txq_id++)
-		if (!test_and_set_bit(txq_id,
-					&trans_pcie->txq_ctx_active_msk))
-			return txq_id;
-	return -1;
-}
-
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
-				int sta_id, int tid)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int txq_id;
-
-	txq_id = iwlagn_txq_ctx_activate_free(trans);
-	if (txq_id == -1) {
-		IWL_ERR(trans, "No free aggregation queue available\n");
-		return -ENXIO;
-	}
-
-	trans_pcie->agg_txq[sta_id][tid] = txq_id;
-	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
-
-	return 0;
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

-	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
-		IWL_ERR(trans,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
-			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(trans).num_ampdu_queues - 1);
-		return -EINVAL;
+	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+		WARN_ONCE(1, "queue %d not used", txq_id);
+		return;
 	}

 	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

-	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));
+	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

-	trans_pcie->agg_txq[sta_id][tid] = 0;
 	trans_pcie->txq[txq_id].q.read_ptr = 0;
 	trans_pcie->txq[txq_id].q.write_ptr = 0;
-	/* supposes that ssn_idx is valid (!= 0xFFF) */
 	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

-	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
-	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
-	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-	return 0;
+	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
+
+	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+				      0, false);
 }

 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
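
With agg_txq[sta_id][tid], txq_ctx_active_msk and the queue-allocation helpers gone, queue ownership in the two functions above reduces to one atomic bitmap: tx_agg_setup() claims the caller-chosen queue with test_and_set_bit() and tx_agg_disable() releases it with test_and_clear_bit(), each warning once on a double claim or double free. A minimal self-contained sketch of that claim/release pattern (the bitmap size is an assumption; in the driver, queue_used lives in struct iwl_trans_pcie):

	#include <linux/bitops.h>
	#include <linux/bitmap.h>

	#define MAX_HW_QUEUES	32	/* assumption for the sketch */

	static DECLARE_BITMAP(queue_used, MAX_HW_QUEUES);

	/* Claim: true if the queue was free and now belongs to the caller. */
	static bool claim_queue(int txq_id)
	{
		return !test_and_set_bit(txq_id, queue_used);
	}

	/* Release: true if the queue was actually in use. */
	static bool release_queue(int txq_id)
	{
		return test_and_clear_bit(txq_id, queue_used);
	}

Because both operations are atomic read-modify-writes, the setup and teardown paths need no extra lock just to agree on who owns a queue.
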
@@ -681,11 +541,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	int trace_idx;
 #endif

-	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-		IWL_WARN(trans, "fw recovery, no hcmd send\n");
-		return -EIO;
-	}
-
 	copy_size = sizeof(out_cmd->hdr);
 	cmd_size = sizeof(out_cmd->hdr);

@@ -726,8 +581,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	}

 	idx = get_cmd_index(q, q->write_ptr);
-	out_cmd = txq->cmd[idx];
-	out_meta = &txq->meta[idx];
+	out_cmd = txq->entries[idx].cmd;
+	out_meta = &txq->entries[idx].meta;

 	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
 	if (cmd->flags & CMD_WANT_SKB)
@@ -753,12 +608,11 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 		cmd_dest += cmd->len[i];
 	}

-	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
-			"%d bytes at %d[%d]:%d\n",
-			get_cmd_string(out_cmd->hdr.cmd),
-			out_cmd->hdr.cmd,
-			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
-			q->write_ptr, idx, trans_pcie->cmd_queue);
+	IWL_DEBUG_HC(trans,
+		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+		     q->write_ptr, idx, trans_pcie->cmd_queue);

 	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
 				DMA_BIDIRECTIONAL);
@@ -816,6 +670,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 			       trace_bufs[2], trace_lens[2]);
 #endif

+	/* start timer if queue currently empty */
+	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	iwl_txq_update_write_ptr(trans, txq);
@@ -825,6 +683,22 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	return idx;
 }

+static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
+				      struct iwl_tx_queue *txq)
+{
+	if (!trans_pcie->wd_timeout)
+		return;
+
+	/*
+	 * if empty delete timer, otherwise move timer forward
+	 * since we're making progress on this queue
+	 */
+	if (txq->q.read_ptr == txq->q.write_ptr)
+		del_timer(&txq->stuck_timer);
+	else
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+}
+
 /**
  * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
  *
@@ -859,6 +733,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
 		}

 	}
+
+	iwl_queue_progress(trans_pcie, txq);
 }

 /**
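
The stuck_timer arm in iwl_enqueue_hcmd() and the new iwl_queue_progress() helper above form a per-queue watchdog: arm the timer when a first entry lands on an empty queue, push the deadline out on every reclaim that leaves work pending, and delete it once the queue drains. The timer therefore only fires if a queue sits non-empty with no progress for wd_timeout. A condensed sketch of the full lifecycle; the init call and expiry handler are assumptions, since this patch shows only the arm and kick sites (setup_timer() and the unsigned-long handler argument match the kernel generation this patch targets):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	/* Assumed expiry handler: runs only when the queue made no progress. */
	static void queue_stuck(unsigned long data)
	{
		struct iwl_tx_queue *txq = (struct iwl_tx_queue *)data;
		/* e.g. dump txq->q.read_ptr/write_ptr and restart the firmware */
	}

	/* Once, at queue init (assumed): */
	/*	setup_timer(&txq->stuck_timer, queue_stuck, (unsigned long)txq); */

	/* On enqueue: arm only on the empty -> non-empty transition. */
	static void watchdog_arm_on_enqueue(struct iwl_tx_queue *txq,
					    unsigned long wd_timeout)
	{
		if (txq->q.read_ptr == txq->q.write_ptr && wd_timeout)
			mod_timer(&txq->stuck_timer, jiffies + wd_timeout);
	}

	/* On reclaim: disarm when drained, otherwise push the deadline out. */
	static void watchdog_note_progress(struct iwl_tx_queue *txq,
					   unsigned long wd_timeout)
	{
		if (txq->q.read_ptr == txq->q.write_ptr)
			del_timer(&txq->stuck_timer);
		else
			mod_timer(&txq->stuck_timer, jiffies + wd_timeout);
	}
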
@@ -899,10 +775,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
 	spin_lock(&txq->lock);

 	cmd_index = get_cmd_index(&txq->q, index);
-	cmd = txq->cmd[cmd_index];
-	meta = &txq->meta[cmd_index];
-
-	txq->time_stamp = jiffies;
+	cmd = txq->entries[cmd_index].cmd;
+	meta = &txq->entries[cmd_index].meta;

 	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
 			 DMA_BIDIRECTIONAL);
@@ -913,21 +787,23 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
 		meta->source->resp_pkt = pkt;
 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
-		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
+		meta->source->_rx_page_order = trans_pcie->rx_page_order;
 		meta->source->handler_status = handler_status;
 	}

 	iwl_hcmd_queue_reclaim(trans, txq_id, index);

 	if (!(meta->flags & CMD_ASYNC)) {
-		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
 			IWL_WARN(trans,
 				 "HCMD_ACTIVE already clear for command %s\n",
-				 get_cmd_string(cmd->hdr.cmd));
+				 trans_pcie_get_cmd_string(trans_pcie,
+							   cmd->hdr.cmd));
 		}
-		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
-			       get_cmd_string(cmd->hdr.cmd));
+			       trans_pcie_get_cmd_string(trans_pcie,
+							 cmd->hdr.cmd));
 		wake_up(&trans->wait_command_queue);
 	}
@@ -940,6 +816,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,

 static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;

 	/* An asynchronous command can not expect an SKB to be set. */
@@ -951,7 +828,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	if (ret < 0) {
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			get_cmd_string(cmd->id), ret);
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 	return 0;
@@ -964,55 +841,51 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	int ret;

 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
-		       get_cmd_string(cmd->id));
-
-	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-		IWL_ERR(trans, "Command %s failed: FW Error\n",
-			get_cmd_string(cmd->id));
-		return -EIO;
-	}
+		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

 	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
-				     &trans->shrd->status))) {
+				     &trans_pcie->status))) {
 		IWL_ERR(trans, "Command %s: a command is already active!\n",
-			get_cmd_string(cmd->id));
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
 		return -EIO;
 	}

 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
-		       get_cmd_string(cmd->id));
+		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

 	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
-		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			get_cmd_string(cmd->id), ret);
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}

 	ret = wait_event_timeout(trans->wait_command_queue,
-			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
+			!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
 			HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
-		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
 			struct iwl_tx_queue *txq =
 				&trans_pcie->txq[trans_pcie->cmd_queue];
 			struct iwl_queue *q = &txq->q;

 			IWL_ERR(trans,
 				"Error sending %s: time out after %dms.\n",
-				get_cmd_string(cmd->id),
+				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
 				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

 			IWL_ERR(trans,
 				"Current CMD queue read_ptr %d write_ptr %d\n",
 				q->read_ptr, q->write_ptr);

-			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
-			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
-				       "%s\n", get_cmd_string(cmd->id));
+			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+			IWL_DEBUG_INFO(trans,
+				       "Clearing HCMD_ACTIVE for command %s\n",
+				       trans_pcie_get_cmd_string(trans_pcie,
+								 cmd->id));
 			ret = -ETIMEDOUT;
 			goto cancel;
 		}
@@ -1020,7 +893,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)

 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
-			get_cmd_string(cmd->id));
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
 		ret = -EIO;
 		goto cancel;
 	}
@@ -1035,8 +908,8 @@ cancel:
 		 * in later, it will possibly set an invalid
 		 * address (cmd->meta.source).
 		 */
-		trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
-							~CMD_WANT_SKB;
+		trans_pcie->txq[trans_pcie->cmd_queue].
+			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
 	}

 	if (cmd->resp_pkt) {
@@ -1091,17 +964,20 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 	     q->read_ptr != index;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

-		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
+		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
 			continue;

-		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
+		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

-		txq->skbs[txq->q.read_ptr] = NULL;
+		txq->entries[txq->q.read_ptr].skb = NULL;

 		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

 		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
 		freed++;
 	}
+
+	iwl_queue_progress(trans_pcie, txq);
+
 	return freed;
 }
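
Most of iwl_send_cmd_sync() above hangs off a single status bit that this patch moves from the shared trans->shrd->status into the transport's own trans_pcie->status: set STATUS_HCMD_ACTIVE, enqueue, sleep in wait_event_timeout(), and let the completion path (iwl_tx_cmd_complete()) clear the bit and wake the waiter. A stripped-down, self-contained sketch of that handshake under assumed names; the real code adds the timeout diagnostics and the CMD_WANT_SKB cancellation shown above:

	#include <linux/wait.h>
	#include <linux/bitops.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	#define STATUS_HCMD_ACTIVE	0	 /* bit number: assumption for the sketch */
	#define CMD_TIMEOUT		(2 * HZ) /* stand-in for HOST_COMPLETE_TIMEOUT */

	static unsigned long status;
	static DECLARE_WAIT_QUEUE_HEAD(wait_command_queue);

	/* Submitter: flag busy, enqueue, sleep until completion clears the bit. */
	static int send_cmd_sync(void)
	{
		if (test_and_set_bit(STATUS_HCMD_ACTIVE, &status))
			return -EIO;	/* a sync command is already in flight */

		/* ... enqueue the command and bump the write pointer here ... */

		if (!wait_event_timeout(wait_command_queue,
					!test_bit(STATUS_HCMD_ACTIVE, &status),
					CMD_TIMEOUT) &&
		    test_bit(STATUS_HCMD_ACTIVE, &status)) {
			/* re-check mirrors the driver: completion may race the timeout */
			clear_bit(STATUS_HCMD_ACTIVE, &status);
			return -ETIMEDOUT;
		}
		return 0;
	}

	/* Completion path (runs from rx of the command response). */
	static void on_cmd_complete(void)
	{
		clear_bit(STATUS_HCMD_ACTIVE, &status);
		wake_up(&wait_command_queue);
	}

Keeping the bit in trans_pcie->status rather than in shared state is what the bulk of the trans->shrd->status to trans_pcie->status churn in this file is about: the transport no longer reaches into the layers above it.
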