0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch 4.6 KB

From 58078a30038b578c26c532545448fe3746648390 Mon Sep 17 00:00:00 2001
From: Hauke Mehrtens <hauke@hauke-m.de>
Date: Thu, 29 Dec 2016 21:02:57 +0100
Subject: [PATCH] MIPS: lantiq: lock DMA register accesses for SMP

The DMA controller channel and port configuration is changed by
selecting the port or channel in one register and then updating the
configuration in other registers. This has to be done as one atomic
operation. Previously only the local interrupts were deactivated, which
works for single-CPU systems. If the system supports SMP, stronger
locking is needed; use spinlocks instead.

On more recent SoCs (at least xrx200 and later) there are two memory
regions for changing the configuration; there we could use one area
per CPU and would not have to synchronize between the CPUs any more.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
---
 arch/mips/lantiq/xway/dma.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -59,16 +60,17 @@
 						ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
 
@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_chann
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
 
@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
 
@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-	ltq_dma_enable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
 
@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-	ltq_dma_disable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
 
@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
 			    &ch->phys, GFP_ATOMIC);
 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
 		;
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
 
@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
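
The idiom the patch serializes is worth spelling out for anyone porting a
similar driver. The controller exposes an indexed register window: the
channel number written to LTQ_DMA_CS selects which channel the following
LTQ_DMA_CCTRL/LTQ_DMA_CIE/LTQ_DMA_CIS/LTQ_DMA_IRNEN accesses operate on.
local_irq_save() keeps the local CPU's interrupt handlers from repointing
the window mid-sequence, but does nothing against the second core of an
SMP system. The sketch below shows the select-then-program sequence under
a spinlock; the my_dma_* names and register offsets are hypothetical
stand-ins, not taken from the driver.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical offsets; the real driver wraps its own in ltq_dma_w32(). */
#define MY_DMA_CS	0x18	/* window index: selects the channel        */
#define MY_DMA_CCTRL	0x1c	/* control register of the selected channel */

static void __iomem *my_dma_membase;

/* One lock covers the select register and every register it windows. */
static DEFINE_SPINLOCK(my_dma_lock);

static void my_dma_setup_chan(int nr, u32 cctrl)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() excludes the other CPUs, which
	 * local_irq_save() cannot, and still masks local interrupts so
	 * an IRQ handler on this CPU cannot repoint MY_DMA_CS between
	 * the two writes.
	 */
	spin_lock_irqsave(&my_dma_lock, flags);
	writel(nr, my_dma_membase + MY_DMA_CS);		/* select the window   */
	writel(cctrl, my_dma_membase + MY_DMA_CCTRL);	/* program the channel */
	spin_unlock_irqrestore(&my_dma_lock, flags);
}

Two details of the diff follow from the same idiom. The _irqsave variant
is needed because helpers such as ltq_dma_ack_irq() can run in interrupt
context, and a plain spin_lock() re-taken in an IRQ on the owning CPU
would deadlock. Likewise, ltq_dma_open() and ltq_dma_close() stop calling
ltq_dma_enable_irq()/ltq_dma_disable_irq() and write LTQ_DMA_IRNEN
directly, because those helpers now take ltq_dma_lock themselves and
Linux spinlocks are not recursive. The commit message's closing remark
points at a lock-free alternative on newer SoCs: with two configuration
windows, each CPU could own one window and never contend with the other.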