033-net-mvneta-Associate-RX-queues-with-each-CPU.patch

From: Gregory CLEMENT <gregory.clement@free-electrons.com>
Date: Wed, 9 Dec 2015 18:23:49 +0100
Subject: [PATCH] net: mvneta: Associate RX queues with each CPU

We enable the per-CPU interrupt for all the CPUs and we just associate a
CPU with a few queues at the neta level. The mapping between the CPUs
and the queues is static. The queues are associated with the CPUs modulo
the number of CPUs. However, currently we only use one RX queue for a
given Ethernet port.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
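A minimal stand-alone sketch (not part of the diff below) of the static mapping
described above: queue q is served by CPU (q % nr_cpus), and the CPU owning the
default RX queue is also given all the TX queues, mirroring the rxq_map/txq_map
computation added in mvneta_defaults_set(). The nr_cpus, rxq_number and rxq_def
values are illustrative assumptions, and CPU_TXQ_ACCESS_ALL_MASK is a local
stand-in for MVNETA_CPU_TXQ_ACCESS_ALL_MASK.

#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00  /* stand-in for the driver macro */

int main(void)
{
	const int nr_cpus = 2;     /* illustrative: e.g. a dual-core Armada SoC */
	const int rxq_number = 8;  /* illustrative: RX queues per port */
	const int rxq_def = 0;     /* illustrative: default RX queue */

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		unsigned int rxq_map = 0, txq_map = 0;

		/* each CPU gets the RX queues whose index equals it modulo nr_cpus */
		for (int rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % nr_cpus) == cpu)
				rxq_map |= BIT(rxq);

		/* the CPU serving the default RX queue gets every TX queue */
		if (cpu == (rxq_def % nr_cpus))
			txq_map = CPU_TXQ_ACCESS_ALL_MASK;

		printf("cpu%d: queue access mask = 0x%08x\n",
		       cpu, rxq_map | txq_map);
	}

	return 0;
}

With these values the sketch prints 0x0000ff55 for cpu0 (even RX queues plus
all TX queues) and 0x000000aa for cpu1 (odd RX queues only).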
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -110,9 +110,16 @@
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
+#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
-/* Exception Interrupt Port/Queue Cause register */
+/* Exception Interrupt Port/Queue Cause register
+ *
+ * Their behavior depends on the mapping done using the PCPX2Q
+ * registers. For a given CPU, if the bit associated with a queue is
+ * not set, then a read of this register from that CPU will always
+ * return 0 and a write won't do anything.
+ */
#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4
@@ -820,7 +827,13 @@ static void mvneta_port_up(struct mvneta
mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
/* Enable all initialized RXQs. */
- mvreg_write(pp, MVNETA_RXQ_CMD, BIT(pp->rxq_def));
+ for (queue = 0; queue < rxq_number; queue++) {
+ struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+ if (rxq->descs != NULL)
+ q_map |= (1 << queue);
+ }
+ mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
/* Stop the Ethernet port activity */
@@ -1026,6 +1039,7 @@ static void mvneta_defaults_set(struct m
int cpu;
int queue;
u32 val;
+ int max_cpu = num_present_cpus();
/* Clear all Cause registers */
mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
@@ -1041,13 +1055,23 @@ static void mvneta_defaults_set(struct m
/* Enable MBUS Retry bit16 */
mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
- /* Set CPU queue access map - all CPUs have access to all RX
- * queues and to all TX queues
+ /* Set CPU queue access map. CPUs are assigned to the RX
+ * queues modulo their number and all the TX queues are
+ * assigned to the CPU associated to the default RX queue.
*/
- for_each_present_cpu(cpu)
- mvreg_write(pp, MVNETA_CPU_MAP(cpu),
- (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
- MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+ for_each_present_cpu(cpu) {
+ int rxq_map = 0, txq_map = 0;
+ int rxq;
+
+ for (rxq = 0; rxq < rxq_number; rxq++)
+ if ((rxq % max_cpu) == cpu)
+ rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+ if (cpu == rxq_def)
+ txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
+ mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+ }
/* Reset RX and TX DMAs */
mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
@@ -2174,6 +2198,7 @@ static int mvneta_poll(struct napi_struc
{
int rx_done = 0;
u32 cause_rx_tx;
+ int rx_queue;
struct mvneta_port *pp = netdev_priv(napi->dev);
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
@@ -2205,8 +2230,15 @@ static int mvneta_poll(struct napi_struc
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
+ rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+
cause_rx_tx |= port->cause_rx_tx;
- rx_done = mvneta_rx(pp, budget, &pp->rxqs[pp->rxq_def]);
+
+ if (rx_queue) {
+ rx_queue = rx_queue - 1;
+ rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
+ }
+
budget -= rx_done;
if (budget > 0) {
@@ -2419,19 +2451,27 @@ static void mvneta_cleanup_txqs(struct m
/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
- mvneta_rxq_deinit(pp, &pp->rxqs[pp->rxq_def]);
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++)
+ mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
- int err = mvneta_rxq_init(pp, &pp->rxqs[pp->rxq_def]);
- if (err) {
- netdev_err(pp->dev, "%s: can't create rxq=%d\n",
- __func__, pp->rxq_def);
- mvneta_cleanup_rxqs(pp);
- return err;
+ int queue;
+
+ for (queue = 0; queue < rxq_number; queue++) {
+ int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+ __func__, queue);
+ mvneta_cleanup_rxqs(pp);
+ return err;
+ }
}
return 0;
@@ -2455,6 +2495,19 @@ static int mvneta_setup_txqs(struct mvne
return 0;
}
+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ /* All the queues are unmasked, but actually only the ones
+ * mapped to this CPU will be unmasked
+ */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK_ALL |
+ MVNETA_TX_INTR_MASK_ALL |
+ MVNETA_MISCINTR_INTR_MASK);
+}
+
static void mvneta_start_dev(struct mvneta_port *pp)
{
unsigned int cpu;
@@ -2472,11 +2525,10 @@ static void mvneta_start_dev(struct mvne
napi_enable(&port->napi);
}
- /* Unmask interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number) |
- MVNETA_TX_INTR_MASK(txq_number) |
- MVNETA_MISCINTR_INTR_MASK);
+ /* Unmask interrupts. It has to be done from each CPU */
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+ pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
MVNETA_CAUSE_LINK_CHANGE |
@@ -2752,22 +2804,35 @@ static void mvneta_percpu_disable(void *
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
- int online_cpu_idx, cpu, i = 0;
+ int online_cpu_idx, max_cpu, cpu, i = 0;
online_cpu_idx = pp->rxq_def % num_online_cpus();
+ max_cpu = num_present_cpus();
for_each_online_cpu(cpu) {
- if (i == online_cpu_idx)
- /* Enable per-CPU interrupt on the one CPU we
- * just elected
+ int rxq_map = 0, txq_map = 0;
+ int rxq;
+
+ for (rxq = 0; rxq < rxq_number; rxq++)
+ if ((rxq % max_cpu) == cpu)
+ rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+ if (i == online_cpu_idx) {
+ /* Map the default receive queue and transmit
+ * queue to the elected CPU
*/
- smp_call_function_single(cpu, mvneta_percpu_enable,
- pp, true);
- else
- /* Disable per-CPU interrupt on all the other CPU */
- smp_call_function_single(cpu, mvneta_percpu_disable,
- pp, true);
+ rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
+ txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+ }
+ mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+
+ /* Update the interrupt mask on each CPU according to the
+ * new mapping
+ */
+ smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+ pp, true);
i++;
+
}
};
@@ -2802,12 +2867,22 @@ static int mvneta_percpu_notifier(struct
mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
napi_enable(&port->napi);
+
+ /* Enable per-CPU interrupts on the CPU that is
+ * brought up.
+ */
+ smp_call_function_single(cpu, mvneta_percpu_enable,
+ pp, true);
+
/* Enable per-CPU interrupt on the one CPU we care
* about.
*/
mvneta_percpu_elect(pp);
- /* Unmask all ethernet port interrupts */
+ /* Unmask all ethernet port interrupts; as this
+ * notifier is called for each CPU, the CPU to
+ * queue mapping is applied
+ */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
MVNETA_RX_INTR_MASK(rxq_number) |
MVNETA_TX_INTR_MASK(txq_number) |
@@ -2858,7 +2933,7 @@ static int mvneta_percpu_notifier(struct
static int mvneta_open(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- int ret;
+ int ret, cpu;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -2888,8 +2963,13 @@ static int mvneta_open(struct net_device
*/
mvneta_percpu_disable(pp);
- /* Elect a CPU to handle our RX queue interrupt */
- mvneta_percpu_elect(pp);
+ /* Enable per-CPU interrupt on all the CPUs to handle our RX
+ * queue interrupts
+ */
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, mvneta_percpu_enable,
+ pp, true);
+
/* Register a CPU notifier to handle the case where our CPU
* might be taken offline.