002-v3-spi-qup-Fix-incorrect-block-transfers.patch

Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [v3] spi: qup: Fix incorrect block transfers
From: Andy Gross <agross@codeaurora.org>
X-Patchwork-Id: 5007321
Message-Id: <1412112088-25928-1-git-send-email-agross@codeaurora.org>
To: Mark Brown <broonie@kernel.org>
Cc: linux-spi@vger.kernel.org, linux-kernel@vger.kernel.org,
    linux-arm-kernel@lists.infradead.org, linux-arm-msm@vger.kernel.org,
    "Ivan T. Ivanov" <iivanov@mm-sol.com>,
    Bjorn Andersson <bjorn.andersson@sonymobile.com>,
    Kumar Gala <galak@codeaurora.org>, Andy Gross <agross@codeaurora.org>
Date: Tue, 30 Sep 2014 16:21:28 -0500

This patch fixes a number of errors with the QUP block transfer mode. The
errors manifested themselves as input underruns, output overruns, and
timed out transactions.

Block mode does not require the priming that occurs in FIFO mode. At the
moment the QUP is placed into the RUN state, it will immediately raise an
interrupt if the request is a write. Therefore, there is no need to prime
the pump.

In addition, block transfers require that whole blocks of data are
read/written at a time. The last block of data that completes a
transaction may contain less than a full block's worth of data.

Each block of data results in an input/output service interrupt,
accompanied by the input/output block flag being set. Additional block
reads/writes require clearing of the service flag. It is fine to check for
additional blocks of data in the ISR, but every block transferred must be
acked. Imbalanced acks cause a transaction to complete early while
interrupts that still need to be acked are left pending, and those
leftover interrupts can affect the next transaction.

Transactions are deemed complete when the MAX_INPUT or MAX_OUTPUT flag is
set.
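
As a condensed sketch of the resulting read path (this mirrors
spi_qup_block_read() in the diff below; the write path is symmetric using
the OUT flags, and the extra clearing of the sticky IN_SERVICE flag at the
end is omitted here):

  do {
          /* ack the service interrupt before transferring a block */
          writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
                  controller->base + QUP_OPERATIONAL);

          /* move at most one block's worth of words */
          for (i = 0; num_words && i < reads_per_blk; i++, num_words--) {
                  data = readl_relaxed(controller->base + QUP_INPUT_FIFO);
                  spi_qup_fill_read_buffer(controller, xfer, data);
          }

          /* only continue if the QUP has raised the next block request */
          if (!(readl_relaxed(controller->base + QUP_OPERATIONAL) &
                QUP_OP_IN_BLOCK_READ_REQ))
                  break;
  } while (num_words);
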
Changes from v2:
- Added an additional completion check so that transaction done is not
  signaled prematurely.
- Fixed various review comments.

Changes from v1:
- Split out the read/write block functions.
- Removed extraneous checks for transfer length.

Signed-off-by: Andy Gross <agross@codeaurora.org>
---
 drivers/spi/spi-qup.c | 201 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 148 insertions(+), 53 deletions(-)

--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -82,6 +82,8 @@
 #define QUP_IO_M_MODE_BAM               3

 /* QUP_OPERATIONAL fields */
+#define QUP_OP_IN_BLOCK_READ_REQ        BIT(13)
+#define QUP_OP_OUT_BLOCK_WRITE_REQ      BIT(12)
 #define QUP_OP_MAX_INPUT_DONE_FLAG      BIT(11)
 #define QUP_OP_MAX_OUTPUT_DONE_FLAG     BIT(10)
 #define QUP_OP_IN_SERVICE_FLAG          BIT(9)
@@ -147,6 +149,7 @@ struct spi_qup {
         int             tx_bytes;
         int             rx_bytes;
         int             qup_v1;
+        int             mode;

         int             use_dma;
@@ -213,30 +216,14 @@ static int spi_qup_set_state(struct spi_
         return 0;
 }

-
-static void spi_qup_fifo_read(struct spi_qup *controller,
-                              struct spi_transfer *xfer)
+static void spi_qup_fill_read_buffer(struct spi_qup *controller,
+        struct spi_transfer *xfer, u32 data)
 {
         u8 *rx_buf = xfer->rx_buf;
-        u32 word, state;
-        int idx, shift, w_size;
-
-        w_size = controller->w_size;
-
-        while (controller->rx_bytes < xfer->len) {
-
-                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
-                if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
-                        break;
-
-                word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
-
-                if (!rx_buf) {
-                        controller->rx_bytes += w_size;
-                        continue;
-                }
+        int idx, shift;

-                for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
+        if (rx_buf)
+                for (idx = 0; idx < controller->w_size; idx++) {
                         /*
                          * The data format depends on bytes per SPI word:
                          * 4 bytes: 0x12345678
@@ -244,41 +231,139 @@ static void spi_qup_fifo_read(struct spi
                          * 1 byte : 0x00000012
                          */
                         shift = BITS_PER_BYTE;
-                        shift *= (w_size - idx - 1);
-                        rx_buf[controller->rx_bytes] = word >> shift;
+                        shift *= (controller->w_size - idx - 1);
+                        rx_buf[controller->rx_bytes + idx] = data >> shift;
+                }
+
+        controller->rx_bytes += controller->w_size;
+}
+
+static void spi_qup_prepare_write_data(struct spi_qup *controller,
+        struct spi_transfer *xfer, u32 *data)
+{
+        const u8 *tx_buf = xfer->tx_buf;
+        u32 val;
+        int idx;
+
+        *data = 0;
+
+        if (tx_buf)
+                for (idx = 0; idx < controller->w_size; idx++) {
+                        val = tx_buf[controller->tx_bytes + idx];
+                        *data |= val << (BITS_PER_BYTE * (3 - idx));
                 }
+
+        controller->tx_bytes += controller->w_size;
+}
+
+static void spi_qup_fifo_read(struct spi_qup *controller,
+        struct spi_transfer *xfer)
+{
+        u32 data;
+
+        /* clear service request */
+        writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
+                controller->base + QUP_OPERATIONAL);
+
+        while (controller->rx_bytes < xfer->len) {
+                if (!(readl_relaxed(controller->base + QUP_OPERATIONAL) &
+                      QUP_OP_IN_FIFO_NOT_EMPTY))
+                        break;
+
+                data = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+
+                spi_qup_fill_read_buffer(controller, xfer, data);
         }
 }

 static void spi_qup_fifo_write(struct spi_qup *controller,
-                               struct spi_transfer *xfer)
+        struct spi_transfer *xfer)
 {
-        const u8 *tx_buf = xfer->tx_buf;
-        u32 word, state, data;
-        int idx, w_size;
+        u32 data;

-        w_size = controller->w_size;
+        /* clear service request */
+        writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
+                controller->base + QUP_OPERATIONAL);

         while (controller->tx_bytes < xfer->len) {
-                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
-                if (state & QUP_OP_OUT_FIFO_FULL)
+                if (readl_relaxed(controller->base + QUP_OPERATIONAL) &
+                      QUP_OP_OUT_FIFO_FULL)
                         break;

-                word = 0;
-                for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
+                spi_qup_prepare_write_data(controller, xfer, &data);
+                writel_relaxed(data, controller->base + QUP_OUTPUT_FIFO);

-                        if (!tx_buf) {
-                                controller->tx_bytes += w_size;
-                                break;
-                        }
+        }
+}

-                        data = tx_buf[controller->tx_bytes];
-                        word |= data << (BITS_PER_BYTE * (3 - idx));
-                }
+static void spi_qup_block_read(struct spi_qup *controller,
+        struct spi_transfer *xfer)
+{
+        u32 data;
+        u32 reads_per_blk = controller->in_blk_sz >> 2;
+        u32 num_words = (xfer->len - controller->rx_bytes) / controller->w_size;
+        int i;
+
+        do {
+                /* ACK by clearing service flag */
+                writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
+                        controller->base + QUP_OPERATIONAL);
+
+                /* transfer up to a block size of data in a single pass */
+                for (i = 0; num_words && i < reads_per_blk; i++, num_words--) {
+
+                        /* read data and fill up rx buffer */
+                        data = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+                        spi_qup_fill_read_buffer(controller, xfer, data);
+                }
+
+                /* check to see if next block is ready */
+                if (!(readl_relaxed(controller->base + QUP_OPERATIONAL) &
+                      QUP_OP_IN_BLOCK_READ_REQ))
+                        break;

-                writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
-        }
+        } while (num_words);
+
+        /*
+         * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
+         * reads, it has to be cleared again at the very end
+         */
+        if (readl_relaxed(controller->base + QUP_OPERATIONAL) &
+              QUP_OP_MAX_INPUT_DONE_FLAG)
+                writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
+                        controller->base + QUP_OPERATIONAL);
+
+}
+
+static void spi_qup_block_write(struct spi_qup *controller,
+        struct spi_transfer *xfer)
+{
+        u32 data;
+        u32 writes_per_blk = controller->out_blk_sz >> 2;
+        u32 num_words = (xfer->len - controller->tx_bytes) / controller->w_size;
+        int i;
+
+        do {
+                /* ACK by clearing service flag */
+                writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
+                        controller->base + QUP_OPERATIONAL);
+
+                /* transfer up to a block size of data in a single pass */
+                for (i = 0; num_words && i < writes_per_blk; i++, num_words--) {
+
+                        /* swizzle the bytes for output and write out */
+                        spi_qup_prepare_write_data(controller, xfer, &data);
+                        writel_relaxed(data,
+                                controller->base + QUP_OUTPUT_FIFO);
+                }
+
+                /* check to see if next block is ready */
+                if (!(readl_relaxed(controller->base + QUP_OPERATIONAL) &
+                      QUP_OP_OUT_BLOCK_WRITE_REQ))
+                        break;
+
+        } while (num_words);
 }

 static void qup_dma_callback(void *data)
@@ -515,9 +600,9 @@ static irqreturn_t spi_qup_qup_irq(int i
         writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
         writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
-        writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

         if (!xfer) {
+                writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
                 dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
                                     qup_err, spi_err, opflags);
                 return IRQ_HANDLED;
@@ -546,11 +631,19 @@ static irqreturn_t spi_qup_qup_irq(int i
         }

         if (!controller->use_dma) {
-                if (opflags & QUP_OP_IN_SERVICE_FLAG)
-                        spi_qup_fifo_read(controller, xfer);
+                if (opflags & QUP_OP_IN_SERVICE_FLAG) {
+                        if (opflags & QUP_OP_IN_BLOCK_READ_REQ)
+                                spi_qup_block_read(controller, xfer);
+                        else
+                                spi_qup_fifo_read(controller, xfer);
+                }

-                if (opflags & QUP_OP_OUT_SERVICE_FLAG)
-                        spi_qup_fifo_write(controller, xfer);
+                if (opflags & QUP_OP_OUT_SERVICE_FLAG) {
+                        if (opflags & QUP_OP_OUT_BLOCK_WRITE_REQ)
+                                spi_qup_block_write(controller, xfer);
+                        else
+                                spi_qup_fifo_write(controller, xfer);
+                }
         }

         spin_lock_irqsave(&controller->lock, flags);
@@ -558,7 +651,8 @@ static irqreturn_t spi_qup_qup_irq(int i
         controller->xfer = xfer;
         spin_unlock_irqrestore(&controller->lock, flags);

-        if (controller->rx_bytes == xfer->len || error)
+        if ((controller->rx_bytes == xfer->len &&
+            (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
                 complete(&controller->done);

         return IRQ_HANDLED;
@@ -569,7 +663,7 @@ static irqreturn_t spi_qup_qup_irq(int i
 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 {
         struct spi_qup *controller = spi_master_get_devdata(spi->master);
-        u32 config, iomode, mode;
+        u32 config, iomode;
         int ret, n_words, w_size;
         size_t dma_align = dma_get_cache_alignment();
         u32 dma_available = 0;
@@ -607,7 +701,7 @@ static int spi_qup_io_config(struct spi_
                 dma_available = 1;

         if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
-                mode = QUP_IO_M_MODE_FIFO;
+                controller->mode = QUP_IO_M_MODE_FIFO;
                 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
                 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
                 /* must be zero for FIFO */
@@ -615,7 +709,7 @@ static int spi_qup_io_config(struct spi_
                 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
                 controller->use_dma = 0;
         } else if (!dma_available) {
-                mode = QUP_IO_M_MODE_BLOCK;
+                controller->mode = QUP_IO_M_MODE_BLOCK;
                 writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
                 writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
                 /* must be zero for BLOCK and BAM */
@@ -623,7 +717,7 @@ static int spi_qup_io_config(struct spi_
                 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
                 controller->use_dma = 0;
         } else {
-                mode = QUP_IO_M_MODE_DMOV;
+                controller->mode = QUP_IO_M_MODE_DMOV;
                 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
                 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
                 controller->use_dma = 1;
@@ -638,8 +732,8 @@ static int spi_qup_io_config(struct spi_
         else
                 iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

-        iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
-        iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
+        iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
+        iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

         writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
@@ -724,7 +818,8 @@ static int spi_qup_transfer_one(struct s
                 goto exit;
         }

-        spi_qup_fifo_write(controller, xfer);
+        if (controller->mode == QUP_IO_M_MODE_FIFO)
+                spi_qup_fifo_write(controller, xfer);

         if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                 dev_warn(controller->dev, "cannot set EXECUTE state\n");
@@ -741,6 +836,7 @@ exit:
         if (!ret)
                 ret = controller->error;
         spin_unlock_irqrestore(&controller->lock, flags);
+
         return ret;
 }