Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r-- | drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 159
1 file changed, 144 insertions, 15 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index da8306f60730..4e05c1d92935 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -518,14 +518,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
         return true;
 }
 
-static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
-{
-        /* Correct the clk domain crossing(CDC) error */
-        if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
-                return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
-        return 0;
-}
-
 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
  * @priv: driver private structure
  * @p : descriptor pointer
@@ -557,7 +549,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
         }
 
         if (found) {
-                ns -= stmmac_cdc_adjust(priv);
+                ns -= priv->plat->cdc_error_adj;
 
                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -594,7 +586,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
 
-                ns -= stmmac_cdc_adjust(priv);
+                ns -= priv->plat->cdc_error_adj;
 
                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                 shhwtstamp = skb_hwtstamps(skb);
@@ -2390,7 +2382,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
         bool work_done = true;
 
         /* Avoids TX time-out as we are sharing with slow path */
-        nq->trans_start = jiffies;
+        txq_trans_cond_update(nq);
 
         budget = min(budget, stmmac_tx_avail(priv, queue));
 
@@ -3673,7 +3665,7 @@ static int stmmac_request_irq(struct net_device *dev)
  *  0 on success and an appropriate (-)ve integer as defined in errno.h
  *  file on failure.
  */
-int stmmac_open(struct net_device *dev)
+static int stmmac_open(struct net_device *dev)
 {
         struct stmmac_priv *priv = netdev_priv(dev);
         int mode = priv->plat->phy_interface;
@@ -3797,7 +3789,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
  *  Description:
  *  This is the stop entry point of the driver.
  */
-int stmmac_release(struct net_device *dev)
+static int stmmac_release(struct net_device *dev)
 {
         struct stmmac_priv *priv = netdev_priv(dev);
         u32 chan;
@@ -4689,7 +4681,7 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
         __netif_tx_lock(nq, cpu);
 
         /* Avoids TX time-out as we are sharing with slow path */
-        nq->trans_start = jiffies;
+        txq_trans_cond_update(nq);
 
         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
         if (res == STMMAC_XDP_TX)
@@ -6327,7 +6319,7 @@ static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
         __netif_tx_lock(nq, cpu);
 
         /* Avoids TX time-out as we are sharing with slow path */
-        nq->trans_start = jiffies;
+        txq_trans_cond_update(nq);
 
         for (i = 0; i < num_frames; i++) {
                 int res;
@@ -6463,6 +6455,140 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
         spin_unlock_irqrestore(&ch->lock, flags);
 }
 
+void stmmac_xdp_release(struct net_device *dev)
+{
+        struct stmmac_priv *priv = netdev_priv(dev);
+        u32 chan;
+
+        /* Disable NAPI process */
+        stmmac_disable_all_queues(priv);
+
+        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+        /* Free the IRQ lines */
+        stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+        /* Stop TX/RX DMA channels */
+        stmmac_stop_all_dma(priv);
+
+        /* Release and free the Rx/Tx resources */
+        free_dma_desc_resources(priv);
+
+        /* Disable the MAC Rx/Tx */
+        stmmac_mac_set(priv, priv->ioaddr, false);
+
+        /* set trans_start so we don't get spurious
+         * watchdogs during reset
+         */
+        netif_trans_update(dev);
+        netif_carrier_off(dev);
+}
+
+int stmmac_xdp_open(struct net_device *dev)
+{
+        struct stmmac_priv *priv = netdev_priv(dev);
+        u32 rx_cnt = priv->plat->rx_queues_to_use;
+        u32 tx_cnt = priv->plat->tx_queues_to_use;
+        u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+        struct stmmac_rx_queue *rx_q;
+        struct stmmac_tx_queue *tx_q;
+        u32 buf_size;
+        bool sph_en;
+        u32 chan;
+        int ret;
+
+        ret = alloc_dma_desc_resources(priv);
+        if (ret < 0) {
+                netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+                           __func__);
+                goto dma_desc_error;
+        }
+
+        ret = init_dma_desc_rings(dev, GFP_KERNEL);
+        if (ret < 0) {
+                netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+                           __func__);
+                goto init_error;
+        }
+
+        /* DMA CSR Channel configuration */
+        for (chan = 0; chan < dma_csr_ch; chan++)
+                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+        /* Adjust Split header */
+        sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+        /* DMA RX Channel Configuration */
+        for (chan = 0; chan < rx_cnt; chan++) {
+                rx_q = &priv->rx_queue[chan];
+
+                stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                    rx_q->dma_rx_phy, chan);
+
+                rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+                                     (rx_q->buf_alloc_num *
+                                      sizeof(struct dma_desc));
+                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+                                       rx_q->rx_tail_addr, chan);
+
+                if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+                        buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+                        stmmac_set_dma_bfsize(priv, priv->ioaddr,
+                                              buf_size,
+                                              rx_q->queue_index);
+                } else {
+                        stmmac_set_dma_bfsize(priv, priv->ioaddr,
+                                              priv->dma_buf_sz,
+                                              rx_q->queue_index);
+                }
+
+                stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+        }
+
+        /* DMA TX Channel Configuration */
+        for (chan = 0; chan < tx_cnt; chan++) {
+                tx_q = &priv->tx_queue[chan];
+
+                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                    tx_q->dma_tx_phy, chan);
+
+                tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+                                       tx_q->tx_tail_addr, chan);
+
+                hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+                tx_q->txtimer.function = stmmac_tx_timer;
+        }
+
+        /* Enable the MAC Rx/Tx */
+        stmmac_mac_set(priv, priv->ioaddr, true);
+
+        /* Start Rx & Tx DMA Channels */
+        stmmac_start_all_dma(priv);
+
+        ret = stmmac_request_irq(dev);
+        if (ret)
+                goto irq_error;
+
+        /* Enable NAPI process*/
+        stmmac_enable_all_queues(priv);
+        netif_carrier_on(dev);
+        netif_tx_start_all_queues(dev);
+
+        return 0;
+
+irq_error:
+        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+        stmmac_hw_teardown(dev);
+init_error:
+        free_dma_desc_resources(priv);
+dma_desc_error:
+        return ret;
+}
+
 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
 {
         struct stmmac_priv *priv = netdev_priv(dev);
@@ -7086,6 +7212,9 @@ int stmmac_dvr_probe(struct device *device,
         stmmac_init_fs(ndev);
 #endif
 
+        if (priv->plat->dump_debug_regs)
+                priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
         /* Let pm_runtime_put() disable the clocks.
          * If CONFIG_PM is not enabled, the clocks will stay powered.
          */
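
The timestamp hunks replace a per-packet division with the precomputed priv->plat->cdc_error_adj field: the clock-domain-crossing (CDC) error depends only on the PTP clock rate, so it can be computed once instead of on every TX/RX timestamp. Below is a minimal sketch of that one-time setup; the formula and the gmac4/clk_ptp_rate condition come directly from the removed stmmac_cdc_adjust() helper, but where the driver actually runs this (e.g. during PTP init) is an assumption, not something this diff shows.

/* Sketch: precompute the CDC error once, reusing the formula from the
 * removed stmmac_cdc_adjust(). The call site is assumed.
 */
static void stmmac_precompute_cdc_error(struct stmmac_priv *priv)
{
        /* Correct the clk domain crossing (CDC) error */
        if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
                priv->plat->cdc_error_adj =
                        (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
        else
                priv->plat->cdc_error_adj = 0;
}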
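
The three "nq->trans_start = jiffies;" hunks switch to txq_trans_cond_update(), the netdev_queue helper that stores jiffies only when the value has actually changed, avoiding a redundant store (and cache-line dirtying) on every XDP transmit. The shape below is paraphrased from include/linux/netdevice.h; treat the exact body as approximate and check the tree this diff targets.

/* Approximate shape of the helper in include/linux/netdevice.h */
static inline void txq_trans_cond_update(struct netdev_queue *txq)
{
        unsigned long now = jiffies;

        if (READ_ONCE(txq->trans_start) != now)
                WRITE_ONCE(txq->trans_start, now);
}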
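
stmmac_xdp_open()/stmmac_xdp_release() give the XDP setup path a full quiesce-and-restart cycle without going through the now-static stmmac_open()/stmmac_release(). A hedged sketch of how a program-swap caller (stmmac_xdp.c-style) would be expected to pair them; everything here other than the two functions added above is illustrative.

/* Illustrative caller: swap the XDP program on a running interface.
 * Only stmmac_xdp_release()/stmmac_xdp_open() come from this diff;
 * the rest is an assumed, simplified flow.
 */
static int stmmac_xdp_set_prog_sketch(struct net_device *dev,
                                      struct bpf_prog *prog)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        bool if_running = netif_running(dev);
        struct bpf_prog *old_prog;

        if (if_running)
                stmmac_xdp_release(dev);        /* stop NAPI, IRQs, DMA, free rings */

        old_prog = xchg(&priv->xdp_prog, prog);
        if (old_prog)
                bpf_prog_put(old_prog);

        if (if_running)
                return stmmac_xdp_open(dev);    /* reallocate rings, restart */

        return 0;
}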