From 30b6c28d2aca4669f2e609ad5d77ea2a6cf0dd3a Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Fri, 26 May 2006 17:44:45 -0700
Subject: [TG3]: Add 5786 PCI ID

Add PCI ID for BCM5786 which is a variant of 5787.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/tg3.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 862c226dbbe2..cb0ebf83c843 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -229,6 +229,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
--
cgit v1.2.3


From df3e6548186f0baa727cd6d3a492891854bd31f2 Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Fri, 26 May 2006 17:48:07 -0700
Subject: [TG3]: Add recovery logic when MMIOs are re-ordered

Add recovery logic when we suspect that the system is re-ordering
MMIOs.  Re-ordered MMIOs to the send mailbox can cause bogus tx
completions and hit BUG_ON() in the tx completion path.  tg3 already
has logic to handle re-ordered MMIOs by flushing the MMIOs that must
be strictly ordered (such as the send mailbox).  Determining when to
enable the flush is currently a manual process of adding known
chipsets to a list.

The new code replaces the BUG_ON() in the tx completion path with a
call to tg3_tx_recover().  It will set the TG3_FLAG_MBOX_WRITE_REORDER
flag and reset the chip later in the workqueue to recover and start
flushing MMIOs to the mailbox.  A message to report the problem will
be printed.  We will then decide whether or not to add the host bridge
to the list of chipsets that do re-ordering.

We may add some additional code later to print the host bridge's ID so
that the user can report it more easily.

The assumption that re-ordering can only happen on x86 systems is also
removed.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/tg3.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 48 insertions(+), 5 deletions(-)

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index cb0ebf83c843..9e61df607413 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -2967,6 +2967,29 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 	return err;
 }
 
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
+	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
+	       "mapped I/O cycles to the network device, attempting to "
+	       "recover. Please report the problem to the driver maintainer "
+	       "and include system chipset information.\n", tp->dev->name);
+
+	spin_lock(&tp->lock);
+	spin_lock(&tp->tx_lock);
+	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
+	spin_unlock(&tp->tx_lock);
+	spin_unlock(&tp->lock);
+}
+
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -2979,9 +3002,13 @@ static void tg3_tx(struct tg3 *tp)
 	while (sw_idx != hw_idx) {
 		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
 		struct sk_buff *skb = ri->skb;
-		int i;
+		int i, tx_bug = 0;
+
+		if (unlikely(skb == NULL)) {
+			tg3_tx_recover(tp);
+			return;
+		}
 
-		BUG_ON(skb == NULL);
 		pci_unmap_single(tp->pdev,
 				 pci_unmap_addr(ri, mapping),
 				 skb_headlen(skb),
@@ -2992,10 +3019,9 @@ static void tg3_tx(struct tg3 *tp)
 		sw_idx = NEXT_TX(sw_idx);
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			BUG_ON(sw_idx == hw_idx);
-
 			ri = &tp->tx_buffers[sw_idx];
-			BUG_ON(ri->skb != NULL);
+			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+				tx_bug = 1;
 
 			pci_unmap_page(tp->pdev,
 				       pci_unmap_addr(ri, mapping),
@@ -3006,6 +3032,11 @@ static void tg3_tx(struct tg3 *tp)
 		}
 
 		dev_kfree_skb(skb);
+
+		if (unlikely(tx_bug)) {
+			tg3_tx_recover(tp);
+			return;
+		}
 	}
 
 	tp->tx_cons = sw_idx;
@@ -3333,6 +3364,11 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	/* run TX completion thread */
 	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
 		tg3_tx(tp);
+		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
+			netif_rx_complete(netdev);
+			schedule_work(&tp->reset_task);
+			return 0;
+		}
 	}
 
 	/* run RX thread, within the bounds set by NAPI.
@@ -3581,6 +3617,13 @@ static void tg3_reset_task(void *_data)
 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
 
+	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
+		tp->write32_tx_mbox = tg3_write32_tx_mbox;
+		tp->write32_rx_mbox = tg3_write_flush_reg32;
+		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
+	}
+
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 	tg3_init_hw(tp, 1);
--
cgit v1.2.3
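
A note on the flush mentioned in the commit message above: the existing
workaround for re-ordered MMIOs (flushing writes that must be strictly
ordered, such as the send mailbox) is not itself part of this diff.  As a
rough illustration only, and not the driver's actual code, a flushed
mailbox write generally pairs the write with a read-back of the same
register; the read cannot complete until the posted write has reached the
device, so later MMIOs cannot pass it.  The helper name below is made up
for this sketch:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical sketch of a write-with-flush helper (not tg3's code).
     * Reading the register back after writing it forces the posted write
     * out to the device before any subsequent MMIO is issued.
     */
    static void example_write_flush_mbox(void __iomem *mbox, u32 val)
    {
    	writel(val, mbox);	/* posted MMIO write */
    	readl(mbox);		/* read-back flushes and orders the write */
    }

This is the kind of ordering that the TG3_FLAG_MBOX_WRITE_REORDER path in
tg3_reset_task() switches on for the mailbox writes after a suspected
re-ordering event.
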

From 9cb3528cdbffc513eb9fb8faa45d41e397355830 Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Sat, 17 Jun 2006 21:28:28 -0700
Subject: [TG3]: update version and reldate

Update version to 3.60.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/tg3.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 9e61df607413..3c9b13b3cf9b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.59"
-#define DRV_MODULE_RELDATE	"June 8, 2006"
+#define DRV_MODULE_VERSION	"3.60"
+#define DRV_MODULE_RELDATE	"June 17, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
--
cgit v1.2.3


From c71302d61f844f766a44e1b04258086cc41f624e Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Sat, 17 Jun 2006 21:55:55 -0700
Subject: [TG3]: Remove unnecessary tx_lock

Remove tx_lock where it is unnecessary.  tg3 runs lockless and so it
requires interrupts to be disabled and sync'ed, netif_queue and NAPI
poll to be stopped before the device can be reconfigured.  After
stopping everything, it is no longer necessary to get the tx_lock.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/tg3.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3c9b13b3cf9b..542d4c3a10e5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -2984,9 +2984,7 @@ static void tg3_tx_recover(struct tg3 *tp)
 	       "and include system chipset information.\n", tp->dev->name);
 
 	spin_lock(&tp->lock);
-	spin_lock(&tp->tx_lock);
 	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
-	spin_unlock(&tp->tx_lock);
 	spin_unlock(&tp->lock);
 }
 
@@ -3429,12 +3427,10 @@ static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
 	if (irq_sync)
 		tg3_irq_quiesce(tp);
 	spin_lock_bh(&tp->lock);
-	spin_lock(&tp->tx_lock);
 }
 
 static inline void tg3_full_unlock(struct tg3 *tp)
 {
-	spin_unlock(&tp->tx_lock);
 	spin_unlock_bh(&tp->lock);
 }
--
cgit v1.2.3


From 00b7050426da8e7e58c889c5c80a19920d2d41b3 Mon Sep 17 00:00:00 2001
From: Michael Chan
Date: Sat, 17 Jun 2006 21:58:45 -0700
Subject: [TG3]: Convert to non-LLTX

Herbert Xu pointed out that it is unsafe to call netif_tx_disable()
from LLTX drivers because it uses dev->xmit_lock to synchronize whereas
LLTX drivers use private locks.

Convert tg3 to non-LLTX to fix this issue.  tg3 is a lockless driver
where hard_start_xmit and tx completion handling can run concurrently
under normal conditions.  A tx_lock is only needed to prevent
netif_stop_queue and netif_wake_queue race conditions when the queue
is full.

So whether we use LLTX or non-LLTX, it makes practically no difference.

Signed-off-by: Michael Chan
Signed-off-by: David S. Miller
---
 drivers/net/tg3.c | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 542d4c3a10e5..b2ddd4522a87 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3759,14 +3759,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here.  We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3775,7 +3772,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
@@ -3858,15 +3854,16 @@
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
@@ -3885,14 +3882,11 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here.  We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3901,7 +3895,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
@@ -4039,15 +4032,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
@@ -11284,7 +11278,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	dev->vlan_rx_register = tg3_vlan_rx_register;
--
cgit v1.2.3
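
For reference, the race that tx_lock still guards against after the
non-LLTX conversion is the classic stop/wake race between the xmit path
and the tx completion path.  A minimal sketch, using the driver's own
TX_BUFFS_AVAIL() and TG3_TX_WAKEUP_THRESH macros as they appear in the
hunks above (the helper name itself is made up for illustration, and this
is not the actual tg3 code):

    /* Hypothetical sketch.  Both the xmit path (stopping the queue when
     * the ring is almost full) and the tx completion path (waking it when
     * space frees up) take tx_lock, so a wake-up cannot slip in between
     * the "ring full" test and netif_stop_queue() and be lost, which
     * would leave the queue stopped forever.
     */
    static void example_stop_queue_if_full(struct tg3 *tp,
    				       struct net_device *dev)
    {
    	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
    		spin_lock(&tp->tx_lock);
    		netif_stop_queue(dev);
    		/* Re-check under the lock: the completion path may have
    		 * freed descriptors after the test above.
    		 */
    		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
    			netif_wake_queue(dev);
    		spin_unlock(&tp->tx_lock);
    	}
    }

Since hard_start_xmit() already runs with BH disabled and IRQ handling is
lockless, this slow-path lock is the only tx-side locking the driver
needs, which is why dropping NETIF_F_LLTX makes practically no difference.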