713-spi-qup-Fix-block-mode-to-work-correctly.patch 9.3 KB

  1. From 148f77310a9ddf4db5036066458d7aed92cea9ae Mon Sep 17 00:00:00 2001
  2. From: Andy Gross <andy.gross@linaro.org>
  3. Date: Sun, 31 Jan 2016 21:28:13 -0600
  4. Subject: [PATCH] spi: qup: Fix block mode to work correctly
  5. This patch corrects the behavior of the BLOCK
  6. transactions. During block transactions, the controller
  7. must be read/written to in block size transactions.
  8. Signed-off-by: Andy Gross <andy.gross@linaro.org>
  9. Change-Id: I4b4f4d25be57e6e8148f6f0d24bed376eb287ecf
  10. ---
  11. drivers/spi/spi-qup.c | 181 +++++++++++++++++++++++++++++++++++++++-----------
  12. 1 file changed, 141 insertions(+), 40 deletions(-)
  13. --- a/drivers/spi/spi-qup.c
  14. +++ b/drivers/spi/spi-qup.c
  15. @@ -83,6 +83,8 @@
  16. #define QUP_IO_M_MODE_BAM 3
  17. /* QUP_OPERATIONAL fields */
  18. +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
  19. +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
  20. #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
  21. #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
  22. #define QUP_OP_IN_SERVICE_FLAG BIT(9)
  23. @@ -156,6 +158,12 @@ struct spi_qup {
  24. struct dma_slave_config tx_conf;
  25. };
  26. +static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
  27. +{
  28. + u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
  29. +
  30. + return opflag & flag;
  31. +}
  32. static inline bool spi_qup_is_dma_xfer(int mode)
  33. {
  34. @@ -217,29 +225,26 @@ static int spi_qup_set_state(struct spi_
  35. return 0;
  36. }
  37. -static void spi_qup_fifo_read(struct spi_qup *controller,
  38. - struct spi_transfer *xfer)
  39. +static void spi_qup_read_from_fifo(struct spi_qup *controller,
  40. + struct spi_transfer *xfer, u32 num_words)
  41. {
  42. u8 *rx_buf = xfer->rx_buf;
  43. - u32 word, state;
  44. - int idx, shift, w_size;
  45. -
  46. - w_size = controller->w_size;
  47. + int i, shift, num_bytes;
  48. + u32 word;
  49. - while (controller->rx_bytes < xfer->len) {
  50. -
  51. - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
  52. - if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
  53. - break;
  54. + for (; num_words; num_words--) {
  55. word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
  56. + num_bytes = min_t(int, xfer->len - controller->rx_bytes,
  57. + controller->w_size);
  58. +
  59. if (!rx_buf) {
  60. - controller->rx_bytes += w_size;
  61. + controller->rx_bytes += num_bytes;
  62. continue;
  63. }
  64. - for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
  65. + for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
  66. /*
  67. * The data format depends on bytes per SPI word:
  68. * 4 bytes: 0x12345678
  69. @@ -247,38 +252,80 @@ static void spi_qup_fifo_read(struct spi
  70. * 1 byte : 0x00000012
  71. */
  72. shift = BITS_PER_BYTE;
  73. - shift *= (w_size - idx - 1);
  74. + shift *= (controller->w_size - i - 1);
  75. rx_buf[controller->rx_bytes] = word >> shift;
  76. }
  77. }
  78. }
  79. -static void spi_qup_fifo_write(struct spi_qup *controller,
  80. +static void spi_qup_read(struct spi_qup *controller,
  81. struct spi_transfer *xfer)
  82. {
  83. - const u8 *tx_buf = xfer->tx_buf;
  84. - u32 word, state, data;
  85. - int idx, w_size;
  86. + u32 remainder, words_per_block, num_words;
  87. + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
  88. +
  89. + remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes,
  90. + controller->w_size);
  91. + words_per_block = controller->in_blk_sz >> 2;
  92. +
  93. + do {
  94. + /* ACK by clearing service flag */
  95. + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
  96. + controller->base + QUP_OPERATIONAL);
  97. +
  98. + if (is_block_mode) {
  99. + num_words = (remainder > words_per_block) ?
  100. + words_per_block : remainder;
  101. + } else {
  102. + if (!spi_qup_is_flag_set(controller,
  103. + QUP_OP_IN_FIFO_NOT_EMPTY))
  104. + break;
  105. - w_size = controller->w_size;
  106. + num_words = 1;
  107. + }
  108. +
  109. + /* read up to the maximum transfer size available */
  110. + spi_qup_read_from_fifo(controller, xfer, num_words);
  111. - while (controller->tx_bytes < xfer->len) {
  112. + remainder -= num_words;
  113. - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
  114. - if (state & QUP_OP_OUT_FIFO_FULL)
  115. + /* if block mode, check to see if next block is available */
  116. + if (is_block_mode && !spi_qup_is_flag_set(controller,
  117. + QUP_OP_IN_BLOCK_READ_REQ))
  118. break;
  119. + } while (remainder);
  120. +
  121. + /*
  122. + * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
  123. + * mode reads, it has to be cleared again at the very end
  124. + */
  125. + if (is_block_mode && spi_qup_is_flag_set(controller,
  126. + QUP_OP_MAX_INPUT_DONE_FLAG))
  127. + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
  128. + controller->base + QUP_OPERATIONAL);
  129. +
  130. +}
  131. +
  132. +static void spi_qup_write_to_fifo(struct spi_qup *controller,
  133. + struct spi_transfer *xfer, u32 num_words)
  134. +{
  135. + const u8 *tx_buf = xfer->tx_buf;
  136. + int i, num_bytes;
  137. + u32 word, data;
  138. +
  139. + for (; num_words; num_words--) {
  140. word = 0;
  141. - for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
  142. - if (!tx_buf) {
  143. - controller->tx_bytes += w_size;
  144. - break;
  145. + num_bytes = min_t(int, xfer->len - controller->tx_bytes,
  146. + controller->w_size);
  147. + if (tx_buf)
  148. + for (i = 0; i < num_bytes; i++) {
  149. + data = tx_buf[controller->tx_bytes + i];
  150. + word |= data << (BITS_PER_BYTE * (3 - i));
  151. }
  152. - data = tx_buf[controller->tx_bytes];
  153. - word |= data << (BITS_PER_BYTE * (3 - idx));
  154. - }
  155. + controller->tx_bytes += num_bytes;
  156. writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
  157. }
  158. @@ -291,6 +338,44 @@ static void spi_qup_dma_done(void *data)
  159. complete(done);
  160. }
  161. +static void spi_qup_write(struct spi_qup *controller,
  162. + struct spi_transfer *xfer)
  163. +{
  164. + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
  165. + u32 remainder, words_per_block, num_words;
  166. +
  167. + remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes,
  168. + controller->w_size);
  169. + words_per_block = controller->out_blk_sz >> 2;
  170. +
  171. + do {
  172. + /* ACK by clearing service flag */
  173. + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
  174. + controller->base + QUP_OPERATIONAL);
  175. +
  176. + if (is_block_mode) {
  177. + num_words = (remainder > words_per_block) ?
  178. + words_per_block : remainder;
  179. + } else {
  180. + if (spi_qup_is_flag_set(controller,
  181. + QUP_OP_OUT_FIFO_FULL))
  182. + break;
  183. +
  184. + num_words = 1;
  185. + }
  186. +
  187. + spi_qup_write_to_fifo(controller, xfer, num_words);
  188. +
  189. + remainder -= num_words;
  190. +
  191. + /* if block mode, check to see if next block is available */
  192. + if (is_block_mode && !spi_qup_is_flag_set(controller,
  193. + QUP_OP_OUT_BLOCK_WRITE_REQ))
  194. + break;
  195. +
  196. + } while (remainder);
  197. +}
  198. +
  199. static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
  200. enum dma_transfer_direction dir,
  201. dma_async_tx_callback callback,
  202. @@ -348,11 +433,13 @@ unsigned long timeout)
  203. return ret;
  204. }
  205. - if (xfer->rx_buf)
  206. - rx_done = spi_qup_dma_done;
  207. + if (!qup->qup_v1) {
  208. + if (xfer->rx_buf)
  209. + rx_done = spi_qup_dma_done;
  210. - if (xfer->tx_buf)
  211. - tx_done = spi_qup_dma_done;
  212. + if (xfer->tx_buf)
  213. + tx_done = spi_qup_dma_done;
  214. + }
  215. if (xfer->rx_buf) {
  216. ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
  217. @@ -401,7 +488,7 @@ static int spi_qup_do_pio(struct spi_mas
  218. }
  219. if (qup->mode == QUP_IO_M_MODE_FIFO)
  220. - spi_qup_fifo_write(qup, xfer);
  221. + spi_qup_write(qup, xfer);
  222. ret = spi_qup_set_state(qup, QUP_STATE_RUN);
  223. if (ret) {
  224. @@ -434,10 +521,11 @@ static irqreturn_t spi_qup_qup_irq(int i
  225. writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
  226. writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
  227. - writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
  228. if (!xfer) {
  229. - dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
  230. + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
  231. + dev_err_ratelimited(controller->dev,
  232. + "unexpected irq %08x %08x %08x\n",
  233. qup_err, spi_err, opflags);
  234. return IRQ_HANDLED;
  235. }
  236. @@ -463,12 +551,20 @@ static irqreturn_t spi_qup_qup_irq(int i
  237. error = -EIO;
  238. }
  239. - if (!spi_qup_is_dma_xfer(controller->mode)) {
  240. + if (spi_qup_is_dma_xfer(controller->mode)) {
  241. + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
  242. + if (opflags & QUP_OP_IN_SERVICE_FLAG &&
  243. + opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
  244. + complete(&controller->done);
  245. + if (opflags & QUP_OP_OUT_SERVICE_FLAG &&
  246. + opflags & QUP_OP_MAX_OUTPUT_DONE_FLAG)
  247. + complete(&controller->dma_tx_done);
  248. + } else {
  249. if (opflags & QUP_OP_IN_SERVICE_FLAG)
  250. - spi_qup_fifo_read(controller, xfer);
  251. + spi_qup_read(controller, xfer);
  252. if (opflags & QUP_OP_OUT_SERVICE_FLAG)
  253. - spi_qup_fifo_write(controller, xfer);
  254. + spi_qup_write(controller, xfer);
  255. }
  256. spin_lock_irqsave(&controller->lock, flags);
  257. @@ -476,6 +572,9 @@ static irqreturn_t spi_qup_qup_irq(int i
  258. controller->xfer = xfer;
  259. spin_unlock_irqrestore(&controller->lock, flags);
  260. + /* re-read opflags as flags may have changed due to actions above */
  261. + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
  262. +
  263. if ((controller->rx_bytes == xfer->len &&
  264. (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
  265. complete(&controller->done);
  266. @@ -519,11 +618,13 @@ static int spi_qup_io_config(struct spi_
  267. /* must be zero for FIFO */
  268. writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
  269. writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
  270. - controller->use_dma = 0;
  271. } else if (spi->master->can_dma &&
  272. spi->master->can_dma(spi->master, spi, xfer) &&
  273. spi->master->cur_msg_mapped) {
  274. controller->mode = QUP_IO_M_MODE_BAM;
  275. + writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
  276. + writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
  277. + /* must be zero for BLOCK and BAM */
  278. writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
  279. writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);