Diffstat (limited to 'drivers/net/ethernet/xilinx/xilinx_axienet_main.c')
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 337
1 file changed, 269 insertions(+), 68 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..054abf283ab3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -223,26 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
+static u64 axienet_dma_rate(struct axienet_local *lp)
+{
+ if (lp->axi_clk)
+ return clk_get_rate(lp->axi_clk);
+ return 125000000; /* arbitrary guess if no clock rate set */
+}
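+/* The 125 MHz fallback matches the old hard-coded default; it is a
+ * common SG clock rate for 1G designs, so the timer math stays
+ * reasonable even when no clock was obtained at probe time.
+ */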
+
/**
- * axienet_usec_to_timer - Calculate IRQ delay timer value
- * @lp: Pointer to the axienet_local structure
- * @coalesce_usec: Microseconds to convert into timer value
+ * axienet_calc_cr() - Calculate control register value
+ * @lp: Device private data
+ * @count: Number of completions before an interrupt
+ * @usec: Microseconds after the last completion before an interrupt
+ *
+ * Calculate a control register value based on the coalescing settings. The
+ * run/stop bit is not set.
*/
-static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
- u32 result;
- u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+ u32 cr;
- if (lp->axi_clk)
- clk_rate = clk_get_rate(lp->axi_clk);
+ cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (count > 1) {
+ u64 clk_rate = axienet_dma_rate(lp);
+ u32 timer;
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
+ XAXIDMA_DELAY_SCALE);
- /* 1 Timeout Interval = 125 * (clock period of SG clock) */
- result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- (u64)125000000);
- if (result > 255)
- result = 255;
+ timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ return cr;
+}
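+/* Worked example for the delay field, assuming XAXIDMA_DELAY_SCALE is
+ * the old hard-coded divisor of 125 * USEC_PER_SEC: with a 125 MHz SG
+ * clock and usec = 50, timer = round(50 * 125000000 / 125000000) = 50,
+ * i.e. one 125-cycle timeout interval per microsecond at that rate.
+ */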
+
+/**
+ * axienet_coalesce_params() - Extract coalesce parameters from the CR
+ * @lp: Device private data
+ * @cr: The control register to parse
+ * @count: Number of packets before an interrupt
+ * @usec: Idle time (in usec) before an interrupt
+ */
+static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
+ u32 *count, u32 *usec)
+{
+ u64 clk_rate = axienet_dma_rate(lp);
+ u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
- return result;
+ *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
+ *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
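+/* This is the inverse of axienet_calc_cr(): a DELAY field of 50 at a
+ * 125 MHz clock decodes back to 50 * 125000000 / 125000000 = 50 us, so
+ * get_coalesce reports what set_coalesce stored, up to rounding and
+ * the clamp to FIELD_MAX(XAXIDMA_DELAY_MASK).
+ */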
/**
@@ -251,30 +287,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
+ spin_lock_irq(&lp->rx_cr_lock);
+
/* Start updating the Rx channel control register */
- lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_rx > 1)
- lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
+ lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
- /* Start updating the Tx channel control register */
- lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_tx > 1)
- lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
-
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
@@ -283,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ lp->rx_dma_started = true;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -291,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ lp->tx_dma_started = true;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
}
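+/* rx_cr_lock is dropped before tx_cr_lock is taken, so the two CR
+ * locks are never nested and have no ordering requirement between
+ * them.
+ */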
/**
@@ -626,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
int count;
u32 cr, sr;
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->rx_cr_lock);
+
+ cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ lp->rx_dma_started = false;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
synchronize_irq(lp->rx_irq);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ lp->tx_dma_started = false;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
synchronize_irq(lp->tx_irq);
/* Give DMAs a chance to halt gracefully */
@@ -965,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
&size, budget);
if (packets) {
+ netdev_completed_queue(ndev, packets, size);
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -982,7 +1020,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
* cause an immediate interrupt if any TX packets are
* already pending.
*/
+ spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ spin_unlock_irq(&lp->tx_cr_lock);
}
return packets;
}
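+/* BQL bookkeeping: the bytes/packets reported to
+ * netdev_completed_queue() above pair with netdev_sent_queue() in
+ * axienet_start_xmit(); netdev_reset_queue() rebalances the two after
+ * a stop or a DMA error reset.
+ */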
@@ -1086,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++new_tail_ptr >= lp->tx_bd_num)
new_tail_ptr = 0;
WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+ netdev_sent_queue(ndev, skb->len);
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
@@ -1244,11 +1285,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) {
+ if (READ_ONCE(lp->rx_dim_enabled)) {
+ struct dim_sample sample = {
+ .time = ktime_get(),
+ /* Safe because we are the only writer */
+ .pkt_ctr = u64_stats_read(&lp->rx_packets),
+ .byte_ctr = u64_stats_read(&lp->rx_bytes),
+ .event_ctr = READ_ONCE(lp->rx_irqs),
+ };
+
+ net_dim(&lp->rx_dim, &sample);
+ }
+
/* Re-enable RX completion interrupts. This should
* cause an immediate interrupt if any RX packets are
* already pending.
*/
+ spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ spin_unlock_irq(&lp->rx_cr_lock);
}
return packets;
}
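+/* net_dim() compares successive samples' packet, byte and interrupt
+ * rates; when it decides to move to a new profile it schedules
+ * rx_dim.work, and axienet_rx_dim_work() then reprograms the frame
+ * count.
+ */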
@@ -1286,11 +1341,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Disable further TX completion interrupts and schedule
* NAPI to handle the completions.
*/
- u32 cr = lp->tx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
if (napi_schedule_prep(&lp->napi_tx)) {
+ u32 cr;
+
+ spin_lock(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ spin_unlock(&lp->tx_cr_lock);
__napi_schedule(&lp->napi_tx);
}
}
@@ -1331,11 +1389,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* Disable further RX completion interrupts and schedule
* NAPI receive.
*/
- u32 cr = lp->rx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
if (napi_schedule_prep(&lp->napi_rx)) {
+ u32 cr;
+
+ spin_lock(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ spin_unlock(&lp->rx_cr_lock);
+
__napi_schedule(&lp->napi_rx);
}
}
@@ -1628,6 +1691,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1657,6 +1721,7 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
@@ -1688,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev)
dma_release_channel(lp->tx_chan);
}
+ netdev_reset_queue(ndev);
axienet_iow(lp, XAE_IE_OFFSET, 0);
if (lp->eth_irq > 0)
@@ -2002,6 +2068,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
}
/**
+ * axienet_update_coalesce_rx() - Set RX CR
+ * @lp: Device private data
+ * @cr: Value to write to the RX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->rx_cr_lock);
+ lp->rx_dma_cr &= ~mask;
+ lp->rx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->rx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->rx_dma_cr;
+ else
+ cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->rx_cr_lock);
+}
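+/* The IRQ handlers mask the IRQ bits in the hardware CR without
+ * updating the cached copy, so the live register (not lp->rx_dma_cr)
+ * is checked above to avoid re-enabling interrupts behind NAPI's
+ * back.
+ */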
+
+/**
+ * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
+ * @lp: Device private data
+ */
+static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
+{
+ return min(1 << (lp->rx_dim.profile_ix << 1), 255);
+}
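+/* 1 << (profile_ix << 1) is 4^profile_ix, so the default five DIM
+ * profile levels map to frame counts of 1, 4, 16, 64 and 255 (256
+ * clamped to fit the 8-bit COALESCE field).
+ */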
+
+/**
+ * axienet_rx_dim_work() - Adjust RX DIM settings
+ * @work: The work struct
+ */
+static void axienet_rx_dim_work(struct work_struct *work)
+{
+ struct axienet_local *lp =
+ container_of(work, struct axienet_local, rx_dim.work);
+ u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
+ u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ lp->rx_dim.state = DIM_START_MEASURE;
+}
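+/* The mask excludes the DELAY timer field, so DIM only retunes the
+ * frame count and the user-configured rx-usecs value is preserved.
+ */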
+
+/**
+ * axienet_update_coalesce_tx() - Set TX CR
+ * @lp: Device private data
+ * @cr: Value to write to the TX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->tx_cr_lock);
+ lp->tx_dma_cr &= ~mask;
+ lp->tx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->tx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->tx_dma_cr;
+ else
+ cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->tx_cr_lock);
+}
+
+/**
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
@@ -2021,11 +2168,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
- ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
- ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
- ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
+ u32 cr;
+
+ ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
+
+ spin_lock_irq(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ spin_unlock_irq(&lp->rx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->rx_max_coalesced_frames,
+ &ecoalesce->rx_coalesce_usecs);
+
+ spin_lock_irq(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ spin_unlock_irq(&lp->tx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->tx_max_coalesced_frames,
+ &ecoalesce->tx_coalesce_usecs);
return 0;
}
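+/* Because the reported values are decoded from the cached CRs, reads
+ * reflect whatever frame count DIM last programmed, not only the
+ * user's most recent ethtool -C setting.
+ */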
@@ -2049,22 +2208,56 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
+ bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
+ bool old_dim = lp->rx_dim_enabled;
+ u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
- if (netif_running(ndev)) {
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
+ if (!ecoalesce->rx_max_coalesced_frames ||
+ !ecoalesce->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG(extack, "frames must be non-zero");
+ return -EINVAL;
+ }
+
+ if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
+ !ecoalesce->rx_coalesce_usecs) ||
+ (ecoalesce->tx_max_coalesced_frames > 1 &&
+ !ecoalesce->tx_coalesce_usecs)) {
NL_SET_ERR_MSG(extack,
- "Please stop netif before applying configuration");
- return -EBUSY;
+ "usecs must be non-zero when frames is greater than one");
+ return -EINVAL;
+ }
+
+ if (new_dim && !old_dim) {
+ cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ ecoalesce->rx_coalesce_usecs);
+ } else if (!new_dim) {
+ if (old_dim) {
+ WRITE_ONCE(lp->rx_dim_enabled, false);
+ napi_synchronize(&lp->napi_rx);
+ flush_work(&lp->rx_dim.work);
+ }
+
+ cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
+ ecoalesce->rx_coalesce_usecs);
+ } else {
+ /* Dummy value for count just to calculate timer */
+ cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
+ mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
}
- if (ecoalesce->rx_max_coalesced_frames)
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- if (ecoalesce->rx_coalesce_usecs)
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- if (ecoalesce->tx_max_coalesced_frames)
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- if (ecoalesce->tx_coalesce_usecs)
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+ axienet_update_coalesce_rx(lp, cr, mask);
+ if (new_dim && !old_dim)
+ WRITE_ONCE(lp->rx_dim_enabled, true);
+ cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
+ ecoalesce->tx_coalesce_usecs);
+ axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
return 0;
}
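+/* Example usage from userspace (hypothetical interface name):
+ *   ethtool -C eth0 adaptive-rx off rx-frames 32 rx-usecs 50
+ *   ethtool -C eth0 adaptive-rx on
+ */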
@@ -2302,7 +2495,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USECS,
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2331,11 +2525,12 @@ static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
}
static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_get_state(pcs_phy, state);
+ phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}
static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
@@ -2484,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_dma_stop(lp);
+ netdev_reset_queue(ndev);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
@@ -2843,10 +3039,15 @@ static int axienet_probe(struct platform_device *pdev)
axienet_set_mac_address(ndev, NULL);
}
- lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
- lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+ spin_lock_init(&lp->rx_cr_lock);
+ spin_lock_init(&lp->tx_cr_lock);
+ INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
+ lp->rx_dim_enabled = true;
+ lp->rx_dim.profile_ix = 1;
+ lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ XAXIDMA_DFT_RX_USEC);
+ lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
+ XAXIDMA_DFT_TX_USEC);
ret = axienet_mdio_setup(lp);
if (ret)
@@ -2876,12 +3077,12 @@ static int axienet_probe(struct platform_device *pdev)
}
of_node_put(np);
lp->pcs.ops = &axienet_pcs_ops;
- lp->pcs.neg_mode = true;
lp->pcs.poll = true;
}
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;