001-spi-qup-Add-DMA-capabilities.patch

Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: spi: qup: Add DMA capabilities
From: Andy Gross <agross@codeaurora.org>
X-Patchwork-Id: 4432401
Message-Id: <1403816781-31008-1-git-send-email-agross@codeaurora.org>
To: Mark Brown <broonie@kernel.org>
Cc: linux-spi@vger.kernel.org, Sagar Dharia <sdharia@codeaurora.org>,
	Daniel Sneddon <dsneddon@codeaurora.org>,
	Bjorn Andersson <bjorn.andersson@sonymobile.com>,
	"Ivan T. Ivanov" <iivanov@mm-sol.com>,
	linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	linux-arm-msm@vger.kernel.org, Andy Gross <agross@codeaurora.org>
Date: Thu, 26 Jun 2014 16:06:21 -0500
This patch adds DMA capabilities to the spi-qup driver. If DMA channels are
present, the QUP will use DMA instead of block mode for any transfer to or
from an SPI peripheral that is larger than one block.
Signed-off-by: Andy Gross <agross@codeaurora.org>
---
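Note for reviewers: the DMA path is transparent to SPI clients. Any transfer
whose word count no longer fits the FIFO is routed through DMA automatically,
as long as both buffers meet master->dma_alignment. Below is a minimal sketch
of such a client transfer; the function name and length are illustrative only
and are not part of this patch:

/* Hypothetical client issuing one large read that takes the DMA path. */
#include <linux/slab.h>
#include <linux/spi/spi.h>

static int example_read_block(struct spi_device *spi)
{
	struct spi_transfer t = { };
	struct spi_message m;
	void *buf;
	int ret;

	/* kmalloc() buffers are cache-line aligned, satisfying dma_alignment */
	buf = kmalloc(8192, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	t.rx_buf = buf;
	t.len = 8192;		/* larger than the QUP FIFO, so DMA is used */

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	ret = spi_sync(spi, &m);

	kfree(buf);
	return ret;
}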
 .../devicetree/bindings/spi/qcom,spi-qup.txt |  10 +
 drivers/spi/spi-qup.c                        | 361 ++++++++++++++++++--
 2 files changed, 350 insertions(+), 21 deletions(-)
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
@@ -27,6 +27,11 @@ Optional properties:
 - spi-max-frequency: Specifies maximum SPI clock frequency,
                      Units - Hz. Definition as per
                      Documentation/devicetree/bindings/spi/spi-bus.txt
+- dmas:       Two DMA channel specifiers following the convention outlined
+              in bindings/dma/dma.txt
+- dma-names:  Names for the dma channels, if present. There must be one
+              channel named "tx" for transmit and one named "rx" for
+              receive.
 - num-cs: total number of chipselects
 - cs-gpios: should specify GPIOs used for chipselects.
             The gpios will be referred to as reg = <index> in the SPI child
@@ -51,6 +56,10 @@ Example:
 		clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
 		clock-names = "core", "iface";
+		dmas = <&blsp2_bam 2>,
+		       <&blsp2_bam 3>;
+		dma-names = "rx", "tx";
+
 		pinctrl-names = "default";
 		pinctrl-0 = <&spi8_default>;
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -22,6 +22,8 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 
 #define QUP_CONFIG			0x0000
 #define QUP_STATE			0x0004
@@ -116,6 +118,8 @@
 #define SPI_NUM_CHIPSELECTS		4
 
+#define SPI_MAX_XFER			(SZ_64K - 64)
+
 /* high speed mode is when bus rate is greater then 26MHz */
 #define SPI_HS_MIN_RATE			26000000
 #define SPI_MAX_RATE			50000000
@@ -143,6 +147,17 @@ struct spi_qup {
 	int			tx_bytes;
 	int			rx_bytes;
 	int			qup_v1;
+
+	int			use_dma;
+
+	struct dma_chan		*rx_chan;
+	struct dma_slave_config	rx_conf;
+	struct dma_chan		*tx_chan;
+	struct dma_slave_config	tx_conf;
+	dma_addr_t		rx_dma;
+	dma_addr_t		tx_dma;
+	void			*dummy;
+	atomic_t		dma_outstanding;
 };
@@ -266,6 +281,221 @@ static void spi_qup_fifo_write(struct sp
 	}
 }
 
+static void qup_dma_callback(void *data)
+{
+	struct spi_qup *controller = data;
+
+	if (atomic_dec_and_test(&controller->dma_outstanding))
+		complete(&controller->done);
+}
+
+static int spi_qup_do_dma(struct spi_qup *controller, struct spi_transfer *xfer)
+{
+	struct dma_async_tx_descriptor *rxd, *txd;
+	dma_cookie_t rx_cookie, tx_cookie;
+	u32 xfer_len, rx_align = 0, tx_align = 0, n_words;
+	struct scatterlist tx_sg[2], rx_sg[2];
+	int ret = 0;
+	u32 bytes_to_xfer = xfer->len;
+	u32 offset = 0;
+	u32 rx_nents = 0, tx_nents = 0;
+	dma_addr_t rx_dma = 0, tx_dma = 0, rx_dummy_dma = 0, tx_dummy_dma = 0;
+
+	if (xfer->rx_buf) {
+		rx_dma = dma_map_single(controller->dev, xfer->rx_buf,
+					xfer->len, DMA_FROM_DEVICE);
+
+		if (dma_mapping_error(controller->dev, rx_dma)) {
+			ret = -ENOMEM;
+			return ret;
+		}
+
+		/* check to see if we need dummy buffer for leftover bytes */
+		rx_align = xfer->len % controller->in_blk_sz;
+		if (rx_align) {
+			rx_dummy_dma = dma_map_single(controller->dev,
+					controller->dummy, controller->in_fifo_sz,
+					DMA_FROM_DEVICE);
+
+			if (dma_mapping_error(controller->dev, rx_dummy_dma)) {
+				ret = -ENOMEM;
+				goto err_map_rx_dummy;
+			}
+		}
+	}
+
+	if (xfer->tx_buf) {
+		tx_dma = dma_map_single(controller->dev,
+				(void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE);
+
+		if (dma_mapping_error(controller->dev, tx_dma)) {
+			ret = -ENOMEM;
+			goto err_map_tx;
+		}
+
+		/* check to see if we need dummy buffer for leftover bytes */
+		tx_align = xfer->len % controller->out_blk_sz;
+		if (tx_align) {
+			memcpy(controller->dummy + SZ_1K,
+			       xfer->tx_buf + xfer->len - tx_align,
+			       tx_align);
+			memset(controller->dummy + SZ_1K + tx_align, 0,
+			       controller->out_blk_sz - tx_align);
+
+			tx_dummy_dma = dma_map_single(controller->dev,
+					controller->dummy + SZ_1K,
+					controller->out_blk_sz, DMA_TO_DEVICE);
+
+			if (dma_mapping_error(controller->dev, tx_dummy_dma)) {
+				ret = -ENOMEM;
+				goto err_map_tx_dummy;
+			}
+		}
+	}
+
+	atomic_set(&controller->dma_outstanding, 0);
+
+	while (bytes_to_xfer > 0) {
+		xfer_len = min_t(u32, bytes_to_xfer, SPI_MAX_XFER);
+		n_words = DIV_ROUND_UP(xfer_len, controller->w_size);
+
+		/* write out current word count to controller */
+		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
+		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
+
+		reinit_completion(&controller->done);
+
+		if (xfer->tx_buf) {
+			/* recalc align for each transaction */
+			tx_align = xfer_len % controller->out_blk_sz;
+
+			if (tx_align)
+				tx_nents = 2;
+			else
+				tx_nents = 1;
+
+			/* initialize scatterlists */
+			sg_init_table(tx_sg, tx_nents);
+			sg_dma_len(&tx_sg[0]) = xfer_len - tx_align;
+			sg_dma_address(&tx_sg[0]) = tx_dma + offset;
+
+			/* account for non block size transfer */
+			if (tx_align) {
+				sg_dma_len(&tx_sg[1]) = controller->out_blk_sz;
+				sg_dma_address(&tx_sg[1]) = tx_dummy_dma;
+			}
+
+			txd = dmaengine_prep_slave_sg(controller->tx_chan,
+					tx_sg, tx_nents, DMA_MEM_TO_DEV, 0);
+			if (!txd) {
+				ret = -ENOMEM;
+				goto err_unmap;
+			}
+
+			atomic_inc(&controller->dma_outstanding);
+
+			txd->callback = qup_dma_callback;
+			txd->callback_param = controller;
+
+			tx_cookie = dmaengine_submit(txd);
+
+			dma_async_issue_pending(controller->tx_chan);
+		}
+
+		if (xfer->rx_buf) {
+			/* recalc align for each transaction */
+			rx_align = xfer_len % controller->in_blk_sz;
+
+			if (rx_align)
+				rx_nents = 2;
+			else
+				rx_nents = 1;
+
+			/* initialize scatterlists */
+			sg_init_table(rx_sg, rx_nents);
+			sg_dma_address(&rx_sg[0]) = rx_dma + offset;
+			sg_dma_len(&rx_sg[0]) = xfer_len - rx_align;
+
+			/* account for non block size transfer */
+			if (rx_align) {
+				sg_dma_len(&rx_sg[1]) = controller->in_blk_sz;
+				sg_dma_address(&rx_sg[1]) = rx_dummy_dma;
+			}
+
+			rxd = dmaengine_prep_slave_sg(controller->rx_chan,
+					rx_sg, rx_nents, DMA_DEV_TO_MEM, 0);
+			if (!rxd) {
+				ret = -ENOMEM;
+				goto err_unmap;
+			}
+
+			atomic_inc(&controller->dma_outstanding);
+
+			rxd->callback = qup_dma_callback;
+			rxd->callback_param = controller;
+
+			rx_cookie = dmaengine_submit(rxd);
+
+			dma_async_issue_pending(controller->rx_chan);
+		}
+
+		ret = spi_qup_set_state(controller, QUP_STATE_RUN);
+		if (ret) {
+			dev_warn(controller->dev, "cannot set EXECUTE state\n");
+			goto err_unmap;
+		}
+
+		if (!wait_for_completion_timeout(&controller->done,
+						 msecs_to_jiffies(1000))) {
+			ret = -ETIMEDOUT;
+
+			/* clear out all the DMA transactions */
+			if (xfer->tx_buf)
+				dmaengine_terminate_all(controller->tx_chan);
+			if (xfer->rx_buf)
+				dmaengine_terminate_all(controller->rx_chan);
+
+			goto err_unmap;
+		}
+
+		if (rx_align)
+			memcpy(xfer->rx_buf + offset + xfer_len - rx_align,
+			       controller->dummy, rx_align);
+
+		/* adjust remaining bytes to transfer */
+		bytes_to_xfer -= xfer_len;
+		offset += xfer_len;
+
+		/* reset mini-core state so we can program next transaction */
+		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+		if (ret) {
+			dev_err(controller->dev, "cannot set RESET state\n");
+			goto err_unmap;
+		}
+	}
+
+	ret = 0;
+
+err_unmap:
+	if (tx_align)
+		dma_unmap_single(controller->dev, tx_dummy_dma,
+				 controller->out_blk_sz, DMA_TO_DEVICE);
+err_map_tx_dummy:
+	if (xfer->tx_buf)
+		dma_unmap_single(controller->dev, tx_dma, xfer->len,
+				 DMA_TO_DEVICE);
+err_map_tx:
+	if (rx_align)
+		dma_unmap_single(controller->dev, rx_dummy_dma,
+				 controller->in_fifo_sz, DMA_FROM_DEVICE);
+err_map_rx_dummy:
+	if (xfer->rx_buf)
+		dma_unmap_single(controller->dev, rx_dma, xfer->len,
+				 DMA_FROM_DEVICE);
+
+	return ret;
+}
+
 static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 {
 	struct spi_qup *controller = dev_id;
@@ -315,11 +545,13 @@ static irqreturn_t spi_qup_qup_irq(int i
 		error = -EIO;
 	}
 
-	if (opflags & QUP_OP_IN_SERVICE_FLAG)
-		spi_qup_fifo_read(controller, xfer);
+	if (!controller->use_dma) {
+		if (opflags & QUP_OP_IN_SERVICE_FLAG)
+			spi_qup_fifo_read(controller, xfer);
 
-	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
-		spi_qup_fifo_write(controller, xfer);
+		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+			spi_qup_fifo_write(controller, xfer);
+	}
 
 	spin_lock_irqsave(&controller->lock, flags);
 	controller->error = error;
@@ -339,6 +571,8 @@ static int spi_qup_io_config(struct spi_
 	struct spi_qup *controller = spi_master_get_devdata(spi->master);
 	u32 config, iomode, mode;
 	int ret, n_words, w_size;
+	size_t dma_align = dma_get_cache_alignment();
+	u32 dma_available = 0;
 
 	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
 		dev_err(controller->dev, "too big size for loopback %d > %d\n",
@@ -367,6 +601,11 @@ static int spi_qup_io_config(struct spi_
 	n_words = xfer->len / w_size;
 	controller->w_size = w_size;
 
+	if (controller->rx_chan &&
+	    IS_ALIGNED((size_t)xfer->tx_buf, dma_align) &&
+	    IS_ALIGNED((size_t)xfer->rx_buf, dma_align))
+		dma_available = 1;
+
 	if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
 		mode = QUP_IO_M_MODE_FIFO;
 		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
@@ -374,19 +613,31 @@ static int spi_qup_io_config(struct spi_
 		/* must be zero for FIFO */
 		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
 		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
-	} else {
+		controller->use_dma = 0;
+	} else if (!dma_available) {
 		mode = QUP_IO_M_MODE_BLOCK;
 		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
 		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
 		/* must be zero for BLOCK and BAM */
 		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+		controller->use_dma = 0;
+	} else {
+		mode = QUP_IO_M_MODE_DMOV;
+		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+		controller->use_dma = 1;
 	}
 
 	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
 	/* Set input and output transfer mode */
 	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
-	iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+
+	if (!controller->use_dma)
+		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+	else
+		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
+
 	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
 	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
@@ -419,6 +670,14 @@ static int spi_qup_io_config(struct spi_
 	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
 	config |= xfer->bits_per_word - 1;
 	config |= QUP_CONFIG_SPI_MODE;
+
+	if (controller->use_dma) {
+		if (!xfer->tx_buf)
+			config |= QUP_CONFIG_NO_OUTPUT;
+		if (!xfer->rx_buf)
+			config |= QUP_CONFIG_NO_INPUT;
+	}
+
 	writel_relaxed(config, controller->base + QUP_CONFIG);
 
 	/* only write to OPERATIONAL_MASK when register is present */
@@ -452,25 +711,29 @@ static int spi_qup_transfer_one(struct s
 	controller->tx_bytes = 0;
 	spin_unlock_irqrestore(&controller->lock, flags);
 
-	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
-		dev_warn(controller->dev, "cannot set RUN state\n");
-		goto exit;
-	}
+	if (controller->use_dma) {
+		ret = spi_qup_do_dma(controller, xfer);
+	} else {
+		if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+			dev_warn(controller->dev, "cannot set RUN state\n");
+			goto exit;
+		}
 
-	if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
-		dev_warn(controller->dev, "cannot set PAUSE state\n");
-		goto exit;
-	}
+		if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
+			dev_warn(controller->dev, "cannot set PAUSE state\n");
+			goto exit;
+		}
 
-	spi_qup_fifo_write(controller, xfer);
+		spi_qup_fifo_write(controller, xfer);
 
-	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
-		dev_warn(controller->dev, "cannot set EXECUTE state\n");
-		goto exit;
-	}
+		if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+			dev_warn(controller->dev, "cannot set EXECUTE state\n");
+			goto exit;
+		}
 
-	if (!wait_for_completion_timeout(&controller->done, timeout))
-		ret = -ETIMEDOUT;
+		if (!wait_for_completion_timeout(&controller->done, timeout))
+			ret = -ETIMEDOUT;
+	}
 
 exit:
 	spi_qup_set_state(controller, QUP_STATE_RESET);
 	spin_lock_irqsave(&controller->lock, flags);
@@ -554,6 +817,7 @@ static int spi_qup_probe(struct platform
 	master->transfer_one = spi_qup_transfer_one;
 	master->dev.of_node = pdev->dev.of_node;
 	master->auto_runtime_pm = true;
+	master->dma_alignment = dma_get_cache_alignment();
 
 	platform_set_drvdata(pdev, master);
@@ -619,6 +883,56 @@ static int spi_qup_probe(struct platform
 		       QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
 		       base + QUP_ERROR_FLAGS_EN);
 
+	/* allocate dma resources, if available */
+	controller->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
+	if (controller->rx_chan) {
+		controller->tx_chan =
+			dma_request_slave_channel(&pdev->dev, "tx");
+
+		if (!controller->tx_chan) {
+			dev_err(&pdev->dev, "Failed to allocate dma tx chan\n");
+			dma_release_channel(controller->rx_chan);
+			controller->rx_chan = NULL;
+		}
+	}
+
+	if (controller->rx_chan) {
+		/* set DMA parameters */
+		controller->rx_conf.device_fc = 1;
+		controller->rx_conf.src_addr = res->start + QUP_INPUT_FIFO;
+		controller->rx_conf.src_maxburst = controller->in_blk_sz;
+
+		controller->tx_conf.device_fc = 1;
+		controller->tx_conf.dst_addr = res->start + QUP_OUTPUT_FIFO;
+		controller->tx_conf.dst_maxburst = controller->out_blk_sz;
+
+		if (dmaengine_slave_config(controller->rx_chan,
+					   &controller->rx_conf)) {
+			dev_err(&pdev->dev, "failed to configure RX channel\n");
+
+			dma_release_channel(controller->rx_chan);
+			dma_release_channel(controller->tx_chan);
+			controller->tx_chan = NULL;
+			controller->rx_chan = NULL;
+		} else if (dmaengine_slave_config(controller->tx_chan,
+						  &controller->tx_conf)) {
+			dev_err(&pdev->dev, "failed to configure TX channel\n");
+
+			dma_release_channel(controller->rx_chan);
+			dma_release_channel(controller->tx_chan);
+			controller->tx_chan = NULL;
+			controller->rx_chan = NULL;
+		}
+	}
+
+	if (controller->rx_chan) {
+		controller->dummy = devm_kmalloc(controller->dev, PAGE_SIZE,
+						 GFP_KERNEL);
+		if (!controller->dummy) {
+			dma_release_channel(controller->rx_chan);
+			dma_release_channel(controller->tx_chan);
+			controller->tx_chan = NULL;
+			controller->rx_chan = NULL;
+		}
+	}
+
 	writel_relaxed(0, base + SPI_CONFIG);
 	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
@@ -731,6 +1045,11 @@ static int spi_qup_remove(struct platfor
 	if (ret)
 		return ret;
 
+	if (controller->rx_chan)
+		dma_release_channel(controller->rx_chan);
+	if (controller->tx_chan)
+		dma_release_channel(controller->tx_chan);
+
 	clk_disable_unprepare(controller->cclk);
 	clk_disable_unprepare(controller->iclk);
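
As a worked example of the chunking loop in spi_qup_do_dma() above: with
SPI_MAX_XFER = SZ_64K - 64 = 65472 bytes, a hypothetical 128 KiB transfer is
programmed as three back-to-back QUP transactions. A standalone sketch of the
arithmetic (plain C, the transfer length is illustrative):

/* Illustration of how spi_qup_do_dma() slices one transfer into chunks. */
#include <stdio.h>

#define SPI_MAX_XFER (65536 - 64)	/* SZ_64K - 64, as in the patch */

int main(void)
{
	unsigned int bytes_to_xfer = 131072;	/* hypothetical 128 KiB */
	unsigned int offset = 0;

	while (bytes_to_xfer > 0) {
		unsigned int xfer_len = bytes_to_xfer < SPI_MAX_XFER ?
					bytes_to_xfer : SPI_MAX_XFER;

		/* the driver writes this chunk's word count to QUP_MX_*_CNT */
		printf("chunk at offset %u: %u bytes\n", offset, xfer_len);

		offset += xfer_len;
		bytes_to_xfer -= xfer_len;
	}
	return 0;	/* prints chunks of 65472, 65472, and 128 bytes */
}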