0096-net-next-mediatek-add-support-for-IRQ-grouping.patch

From 190df1a9dbf4d8809b7f991194ce60e47f2290a2 Mon Sep 17 00:00:00 2001
From: John Crispin <john@phrozen.org>
Date: Wed, 23 Mar 2016 18:31:48 +0100
Subject: [PATCH 096/102] net-next: mediatek: add support for IRQ grouping

The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
to separate TX and RX IRQs, which allows us to service them on separate
cores. This patch splits the IRQ handler into 2 separate functions, one for
TX and another for RX. The TX housekeeping is split out into its own NAPI
handler.

Signed-off-by: John Crispin <john@phrozen.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 156 +++++++++++++++++----------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  15 ++-
 2 files changed, 111 insertions(+), 60 deletions(-)
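
A note on the pattern (illustrative, not part of the patch): the diff below
adopts the usual split-IRQ NAPI scheme, in which each hard-IRQ handler masks
only its own interrupt cause and schedules a dedicated NAPI context, and each
poll function re-arms that cause only once it completes under budget. A
minimal sketch of the scheme, with hypothetical example_* names standing in
for the driver's helpers:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_ctx {
        struct napi_struct napi;
        /* per-direction ring state would live here */
};

/* hypothetical stubs: mask/unmask one interrupt cause, clean one ring */
static void example_mask_cause(struct example_ctx *ctx) { }
static void example_unmask_cause(struct example_ctx *ctx) { }
static int example_clean_ring(struct example_ctx *ctx, int budget)
{
        return 0;       /* number of descriptors completed */
}

/* hard IRQ: defer the work to softirq, masking only this line's cause */
static irqreturn_t example_irq(int irq, void *data)
{
        struct example_ctx *ctx = data;

        if (likely(napi_schedule_prep(&ctx->napi))) {
                __napi_schedule(&ctx->napi);
                example_mask_cause(ctx);
        }
        return IRQ_HANDLED;
}

/* NAPI poll: re-arm the cause only once the ring is fully drained */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_ctx *ctx = container_of(napi, struct example_ctx, napi);
        int done = example_clean_ring(ctx, budget);

        if (done == budget)
                return budget;  /* more work pending, stay scheduled */

        napi_complete(napi);
        example_unmask_cause(ctx);
        return done;
}

With one such context per IRQ line, TX and RX housekeeping can run
concurrently on different cores, which is what the TX/RX split below sets up.
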
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -905,14 +905,13 @@ release_desc:
 	return done;
 }
 
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 {
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct mtk_tx_dma *desc;
 	struct sk_buff *skb;
 	struct mtk_tx_buf *tx_buf;
-	int total = 0, done = 0;
-	unsigned int bytes = 0;
+	unsigned int bytes = 0, done = 0;
 	u32 cpu, dma;
 	static int condition;
 	int i;
@@ -964,63 +963,82 @@ static int mtk_poll_tx(struct mtk_eth *e
 		netdev_completed_queue(eth->netdev[i], done, bytes);
 	}
 
-	/* read hw index again make sure no new tx packet */
-	if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
-		*tx_again = true;
-	else
-		mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
-
-	if (!total)
-		return 0;
-
 	if (mtk_queue_stopped(eth) &&
 	    (atomic_read(&ring->free_count) > ring->thresh))
 		mtk_wake_queue(eth);
 
-	return total;
+	return done;
 }
 
-static int mtk_poll(struct napi_struct *napi, int budget)
+static void mtk_handle_status_irq(struct mtk_eth *eth)
 {
-	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
-	u32 status, status2, mask;
-	int tx_done, rx_done;
-	bool tx_again = false;
-
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	status2 = mtk_r32(eth, MTK_INT_STATUS2);
-	tx_done = 0;
-	rx_done = 0;
-	tx_again = 0;
-
-	if (status & MTK_TX_DONE_INT)
-		tx_done = mtk_poll_tx(eth, budget, &tx_again);
-
-	if (status & MTK_RX_DONE_INT)
-		rx_done = mtk_poll_rx(napi, budget, eth);
+	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
 
 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
 		mtk_stats_update(eth);
 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
 	}
+}
+
+static int mtk_napi_tx(struct napi_struct *napi, int budget)
+{
+	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+	u32 status, mask;
+	int tx_done = 0;
+
+	mtk_handle_status_irq(eth);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+	tx_done = mtk_poll_tx(eth, budget);
+
 	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
-		netdev_info(eth->netdev[0],
-			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
-			    tx_done, rx_done, status, mask);
+		dev_info(eth->dev,
+			 "done tx %d, intr 0x%08x/0x%x\n",
+			 tx_done, status, mask);
 	}
 
-	if (tx_again || rx_done == budget)
+	if (tx_done == budget)
 		return budget;
 
 	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	if (status & (tx_intr | rx_intr))
+	if (status & MTK_TX_DONE_INT)
 		return budget;
 
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_RX_DONE_INT | MTK_RX_DONE_INT);
+	mtk_irq_enable(eth, MTK_TX_DONE_INT);
+
+	return tx_done;
+}
+
+static int mtk_napi_rx(struct napi_struct *napi, int budget)
+{
+	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+	u32 status, mask;
+	int rx_done = 0;
+
+	mtk_handle_status_irq(eth);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
+	rx_done = mtk_poll_rx(napi, budget, eth);
+
+	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+		dev_info(eth->dev,
+			 "done rx %d, intr 0x%08x/0x%x\n",
+			 rx_done, status, mask);
+	}
+
+	if (rx_done == budget)
+		return budget;
+
+	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+	if (status & MTK_RX_DONE_INT)
+		return budget;
+
+	napi_complete(napi);
+	mtk_irq_enable(eth, MTK_RX_DONE_INT);
 
 	return rx_done;
 }
@@ -1256,22 +1274,26 @@ static void mtk_tx_timeout(struct net_de
 	schedule_work(&eth->pending_work);
 }
 
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
 {
 	struct mtk_eth *eth = _eth;
-	u32 status;
 
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	if (unlikely(!status))
-		return IRQ_NONE;
+	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+		__napi_schedule(&eth->rx_napi);
+		mtk_irq_disable(eth, MTK_RX_DONE_INT);
+	}
 
-	if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
-		if (likely(napi_schedule_prep(&eth->rx_napi)))
-			__napi_schedule(&eth->rx_napi);
-	} else {
-		mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if (likely(napi_schedule_prep(&eth->tx_napi))) {
+		__napi_schedule(&eth->tx_napi);
+		mtk_irq_disable(eth, MTK_TX_DONE_INT);
 	}
-	mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
 
 	return IRQ_HANDLED;
 }
@@ -1284,7 +1306,7 @@ static void mtk_poll_controller(struct n
 	u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
 
 	mtk_irq_disable(eth, int_mask);
-	mtk_handle_irq(dev->irq, dev);
+	mtk_handle_irq_rx(eth->irq[2], dev);
 	mtk_irq_enable(eth, int_mask);
 }
 #endif
@@ -1320,6 +1342,7 @@ static int mtk_open(struct net_device *d
 		if (err)
 			return err;
 
+		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
 		mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
 	}
@@ -1368,6 +1391,7 @@ static int mtk_stop(struct net_device *d
 		return 0;
 
 	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);
 	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
 
@@ -1405,7 +1429,11 @@ static int __init mtk_hw_init(struct mtk
 	/* Enable RX VLan Offloading */
 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
 
-	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
+	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+			       dev_name(eth->dev), eth);
+	if (err)
+		return err;
+	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
 			       dev_name(eth->dev), eth);
 	if (err)
 		return err;
@@ -1421,7 +1449,11 @@ static int __init mtk_hw_init(struct mtk
 	mtk_w32(eth, 0, MTK_RST_GL);
 
 	/* FE int grouping */
-	mtk_w32(eth, 0, MTK_FE_INT_GRP);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
 
 	for (i = 0; i < 2; i++) {
 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
@@ -1469,7 +1501,9 @@ static void mtk_uninit(struct net_device
 	phy_disconnect(mac->phy_dev);
 	mtk_mdio_cleanup(eth);
 	mtk_irq_disable(eth, ~0);
-	free_irq(dev->irq, dev);
+	free_irq(eth->irq[0], dev);
+	free_irq(eth->irq[1], dev);
+	free_irq(eth->irq[2], dev);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1744,10 +1778,10 @@ static int mtk_add_mac(struct mtk_eth *e
 		dev_err(eth->dev, "error bringing up device\n");
 		goto free_netdev;
 	}
-	eth->netdev[id]->irq = eth->irq;
+	eth->netdev[id]->irq = eth->irq[0];
 	netif_info(eth, probe, eth->netdev[id],
 		   "mediatek frame engine at 0x%08lx, irq %d\n",
-		   eth->netdev[id]->base_addr, eth->netdev[id]->irq);
+		   eth->netdev[id]->base_addr, eth->irq[0]);
 
 	return 0;
 
@@ -1764,6 +1798,7 @@ static int mtk_probe(struct platform_dev
 	struct mtk_soc_data *soc;
 	struct mtk_eth *eth;
 	int err;
+	int i;
 
 	match = of_match_device(of_mtk_match, &pdev->dev);
 	soc = (struct mtk_soc_data *)match->data;
@@ -1799,10 +1834,12 @@ static int mtk_probe(struct platform_dev
 		return PTR_ERR(eth->rstc);
 	}
 
-	eth->irq = platform_get_irq(pdev, 0);
-	if (eth->irq < 0) {
-		dev_err(&pdev->dev, "no IRQ resource found\n");
-		return -ENXIO;
+	for (i = 0; i < 3; i++) {
+		eth->irq[i] = platform_get_irq(pdev, i);
+		if (eth->irq[i] < 0) {
+			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+			return -ENXIO;
+		}
 	}
 
 	eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
@@ -1843,7 +1880,9 @@ static int mtk_probe(struct platform_dev
 	 * for NAPI to work
 	 */
 	init_dummy_netdev(&eth->dummy_dev);
-	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
+	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+		       MTK_NAPI_WEIGHT);
+	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
 		       MTK_NAPI_WEIGHT);
 
 	platform_set_drvdata(pdev, eth);
@@ -1864,6 +1903,7 @@ static int mtk_remove(struct platform_de
 	clk_disable_unprepare(eth->clk_gp1);
 	clk_disable_unprepare(eth->clk_gp2);
 
+	netif_napi_del(&eth->tx_napi);
 	netif_napi_del(&eth->rx_napi);
 	mtk_cleanup(eth);
 	platform_set_drvdata(pdev, NULL);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -68,6 +68,10 @@
 /* Unicast Filter MAC Address Register - High */
 #define MTK_GDMA_MAC_ADRH(x)	(0x50C + (x * 0x1000))
 
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1	0xa50
+#define MTK_PDMA_INT_GRP2	0xa54
+
 /* QDMA TX Queue Configuration Registers */
 #define MTK_QTX_CFG(x)		(0x1800 + (x * 0x10))
 #define QDMA_RES_THRES		4
@@ -125,6 +129,11 @@
 #define MTK_TX_DONE_INT		(MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
				 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
 
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1	0x1a20
+#define MTK_QDMA_INT_GRP2	0x1a24
+#define MTK_RLS_DONE_INT	BIT(0)
+
 /* QDMA Interrupt Status Register */
 #define MTK_QDMA_INT_MASK	0x1A1C
 
@@ -356,7 +365,8 @@ struct mtk_rx_ring {
  * @dma_refcnt:		track how many netdevs are using the DMA engine
  * @tx_ring:		Pointer to the memory holding info about the TX ring
  * @rx_ring:		Pointer to the memory holding info about the RX ring
- * @rx_napi:		The NAPI struct
+ * @tx_napi:		The TX NAPI struct
+ * @rx_napi:		The RX NAPI struct
  * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
  * @phy_scratch_ring:	physical address of scratch_ring
  * @scratch_head:	The scratch memory that scratch_ring points to.
@@ -377,7 +387,7 @@ struct mtk_eth {
 	struct net_device	dummy_dev;
 	struct net_device	*netdev[MTK_MAX_DEVS];
 	struct mtk_mac		*mac[MTK_MAX_DEVS];
-	int			irq;
+	int			irq[3];
 	u32			msg_enable;
 	unsigned long		sysclk;
 	struct regmap		*ethsys;
@@ -385,6 +395,7 @@ struct mtk_eth {
 	atomic_t		dma_refcnt;
 	struct mtk_tx_ring	tx_ring;
 	struct mtk_rx_ring	rx_ring;
+	struct napi_struct	tx_napi;
 	struct napi_struct	rx_napi;
 	struct mtk_tx_dma	*scratch_ring;
 	dma_addr_t		phy_scratch_ring;
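
A closing note on the grouping writes in mtk_hw_init() above (a reading of
the patch, not authoritative on the hardware): TX-done causes are steered
into interrupt group 1 and RX-done causes into group 2, for both the PDMA
and the QDMA engine, and the final MTK_FE_INT_GRP write maps those groups
onto the SoC's separate IRQ lines, so that eth->irq[1] services TX and
eth->irq[2] services RX, matching the devm_request_irq() calls. An
annotated paraphrase of those writes:

	/* steer TX-done into group 1, RX-done into group 2 (both engines) */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	/* assign the groups to the chip's IRQ lines; the bitfield layout
	 * of this magic value is not documented in the patch itself
	 */
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);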