0067-net-mediatek-fix-TX-locking.patch

From 6f152b2bdb295d86beb746494ef6fddf17986f8e Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 29 Mar 2016 17:20:01 +0200
Subject: [PATCH 067/102] net: mediatek: fix TX locking

Inside the TX path there is a lock inside the tx_map function. This is
however too late. The patch moves the lock to the start of the xmit
function right before the free count check of the DMA ring happens.
If we do not do this, the code becomes racy leading to TX stalls and
dropped packets. This happens as there are 2 netdevs running on the
same physical DMA ring.

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *sk
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_buf *tx_buf;
-	unsigned long flags;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *sk
 	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
 		return -ENOMEM;
 
-	/* normally we can rely on the stack not calling this more than once,
-	 * however we have 2 queues running ont he same ring so we need to lock
-	 * the ring access
-	 */
-	spin_lock_irqsave(&eth->page_lock, flags);
 	WRITE_ONCE(itxd->txd1, mapped_addr);
 	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -632,8 +626,6 @@ static int mtk_tx_map(struct sk_buff *sk
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
 				(!nr_frags * TX_DMA_LS0)));
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
 
@@ -661,8 +653,6 @@ err_dma:
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
 	} while (itxd != txd);
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	return -ENOMEM;
 }
 
@@ -712,14 +702,22 @@ static int mtk_start_xmit(struct sk_buff
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct net_device_stats *stats = &dev->stats;
+	unsigned long flags;
 	bool gso = false;
 	int tx_num;
 
+	/* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running ont he same ring so we need to lock
+	 * the ring access
+	 */
+	spin_lock_irqsave(&eth->page_lock, flags);
+
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
 		mtk_stop_queue(eth);
 		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
+		spin_unlock_irqrestore(&eth->page_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -747,10 +745,12 @@ static int mtk_start_xmit(struct sk_buff
 		     ring->thresh))
 			mtk_wake_queue(eth);
 	}
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
 
 drop:
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 	stats->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
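
Editor's note: the commit message above hinges on one point, namely that when two netdevs share a single physical DMA ring, the free-count check and the descriptor reservation must happen under the same lock, otherwise both transmitters can pass the check and over-commit the ring. The following is a minimal user-space sketch of that locking pattern only. It is not the driver code: the pthread mutex, the refill-free ring, the thread names and the packet counts are illustrative stand-ins; the mutex plays the role of eth->page_lock and the counter plays the role of ring->free_count. Build with: cc -pthread sketch.c

/*
 * Two "netdev" threads transmit onto one shared ring. The check of
 * free_count and the reservation of descriptors sit under one lock,
 * mirroring how mtk_start_xmit() now takes eth->page_lock before
 * reading ring->free_count. If the lock were taken only inside the
 * reservation step (as in the old mtk_tx_map()), both threads could
 * pass the check first and the count could go negative.
 */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 8

struct tx_ring {
	pthread_mutex_t lock;	/* stand-in for eth->page_lock */
	int free_count;		/* stand-in for ring->free_count */
};

/* one transmit attempt from one of the two netdevs sharing the ring */
static int xmit(struct tx_ring *ring, int tx_num)
{
	pthread_mutex_lock(&ring->lock);	/* lock BEFORE the check */

	if (ring->free_count <= tx_num) {	/* ring full: back off */
		pthread_mutex_unlock(&ring->lock);
		return -1;			/* NETDEV_TX_BUSY analogue */
	}

	ring->free_count -= tx_num;		/* "map" the descriptors */

	pthread_mutex_unlock(&ring->lock);
	return 0;
}

static void *netdev_thread(void *arg)
{
	struct tx_ring *ring = arg;
	int sent = 0, i;

	for (i = 0; i < 1000; i++)
		if (xmit(ring, 2) == 0)
			sent++;

	printf("sent %d packets\n", sent);
	return NULL;
}

int main(void)
{
	struct tx_ring ring = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free_count = RING_SIZE,
	};
	pthread_t gmac1, gmac2;

	/* two netdevs, one physical DMA ring, as in the commit message */
	pthread_create(&gmac1, NULL, netdev_thread, &ring);
	pthread_create(&gmac2, NULL, netdev_thread, &ring);
	pthread_join(gmac1, NULL);
	pthread_join(gmac2, NULL);

	/* with the lock covering the check, this can never go negative */
	printf("free_count at exit: %d\n", ring.free_count);
	return 0;
}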