0029-dmaengine-Add-support-for-BCM2708.patch 29 KB

  1. From ce3d899b0ed284a6e901fb6f4a459fdcf003cadb Mon Sep 17 00:00:00 2001
  2. From: Florian Meier <florian.meier@koalo.de>
  3. Date: Fri, 22 Nov 2013 14:22:53 +0100
  4. Subject: [PATCH 029/114] dmaengine: Add support for BCM2708
  5. Add support for DMA controller of BCM2708 as used in the Raspberry Pi.
  6. Currently it only supports cyclic DMA.
  7. Signed-off-by: Florian Meier <florian.meier@koalo.de>
  8. dmaengine: expand functionality by supporting scatter/gather transfers sdhci-bcm2708 and dma.c: fix for LITE channels
  9. DMA: fix cyclic LITE length overflow bug
  10. ---
  11. arch/arm/mach-bcm2708/dma.c | 2 +
  12. arch/arm/mach-bcm2708/include/mach/dma.h | 6 +-
  13. drivers/dma/Kconfig | 6 +
  14. drivers/dma/Makefile | 1 +
  15. drivers/dma/bcm2708-dmaengine.c | 1052 ++++++++++++++++++++++++++++++
  16. 5 files changed, 1066 insertions(+), 1 deletion(-)
  17. create mode 100644 drivers/dma/bcm2708-dmaengine.c
  18. --- a/arch/arm/mach-bcm2708/dma.c
  19. +++ b/arch/arm/mach-bcm2708/dma.c
  20. @@ -156,6 +156,8 @@ static void vc_dmaman_init(struct vc_dma
  21. dmaman->chan_available = chans_available;
  22. dmaman->has_feature[BCM_DMA_FEATURE_FAST_ORD] = 0x0c; /* chans 2 & 3 */
  23. dmaman->has_feature[BCM_DMA_FEATURE_BULK_ORD] = 0x01; /* chan 0 */
  24. + dmaman->has_feature[BCM_DMA_FEATURE_NORMAL_ORD] = 0xfe; /* chans 1 to 7 */
  25. + dmaman->has_feature[BCM_DMA_FEATURE_LITE_ORD] = 0x7f00; /* chans 8 to 14 */
  26. }
  27. static int vc_dmaman_chan_alloc(struct vc_dmaman *dmaman,
  28. --- a/arch/arm/mach-bcm2708/include/mach/dma.h
  29. +++ b/arch/arm/mach-bcm2708/include/mach/dma.h
  30. @@ -77,9 +77,13 @@ extern int /*rc*/ bcm_dma_abort(void __i
  31. those with higher priority smaller ordinal numbers */
  32. #define BCM_DMA_FEATURE_FAST_ORD 0
  33. #define BCM_DMA_FEATURE_BULK_ORD 1
  34. +#define BCM_DMA_FEATURE_NORMAL_ORD 2
  35. +#define BCM_DMA_FEATURE_LITE_ORD 3
  36. #define BCM_DMA_FEATURE_FAST (1<<BCM_DMA_FEATURE_FAST_ORD)
  37. #define BCM_DMA_FEATURE_BULK (1<<BCM_DMA_FEATURE_BULK_ORD)
  38. -#define BCM_DMA_FEATURE_COUNT 2
  39. +#define BCM_DMA_FEATURE_NORMAL (1<<BCM_DMA_FEATURE_NORMAL_ORD)
  40. +#define BCM_DMA_FEATURE_LITE (1<<BCM_DMA_FEATURE_LITE_ORD)
  41. +#define BCM_DMA_FEATURE_COUNT 4
  42. /* return channel no or -ve error */
  43. extern int bcm_dma_chan_alloc(unsigned preferred_feature_set,
  44. --- a/drivers/dma/Kconfig
  45. +++ b/drivers/dma/Kconfig
  46. @@ -330,6 +330,12 @@ config DMA_BCM2835
  47. select DMA_ENGINE
  48. select DMA_VIRTUAL_CHANNELS
  49. +config DMA_BCM2708
  50. + tristate "BCM2708 DMA engine support"
  51. + depends on MACH_BCM2708
  52. + select DMA_ENGINE
  53. + select DMA_VIRTUAL_CHANNELS
  54. +
  55. config TI_CPPI41
  56. tristate "AM33xx CPPI41 DMA support"
  57. depends on ARCH_OMAP
  58. --- a/drivers/dma/Makefile
  59. +++ b/drivers/dma/Makefile
  60. @@ -38,6 +38,7 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
  61. obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
  62. obj-$(CONFIG_DMA_OMAP) += omap-dma.o
  63. obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
  64. +obj-$(CONFIG_DMA_BCM2708) += bcm2708-dmaengine.o
  65. obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
  66. obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
  67. obj-$(CONFIG_TI_CPPI41) += cppi41.o
  68. --- /dev/null
  69. +++ b/drivers/dma/bcm2708-dmaengine.c
  70. @@ -0,0 +1,1052 @@
  71. +/*
  72. + * BCM2835 DMA engine support
  73. + *
  74. + * This driver supports cyclic and scatter/gather DMA transfers.
  75. + *
  76. + * Author: Florian Meier <florian.meier@koalo.de>
  77. + * Gellert Weisz <gellert@raspberrypi.org>
  78. + * Copyright 2013-2014
  79. + *
  80. + * Based on
  81. + * OMAP DMAengine support by Russell King
  82. + *
  83. + * BCM2708 DMA Driver
  84. + * Copyright (C) 2010 Broadcom
  85. + *
  86. + * Raspberry Pi PCM I2S ALSA Driver
  87. + * Copyright (c) by Phil Poole 2013
  88. + *
  89. + * MARVELL MMP Peripheral DMA Driver
  90. + * Copyright 2012 Marvell International Ltd.
  91. + *
  92. + * This program is free software; you can redistribute it and/or modify
  93. + * it under the terms of the GNU General Public License as published by
  94. + * the Free Software Foundation; either version 2 of the License, or
  95. + * (at your option) any later version.
  96. + *
  97. + * This program is distributed in the hope that it will be useful,
  98. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  99. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  100. + * GNU General Public License for more details.
  101. + */
  102. +
  103. +#include <linux/dmaengine.h>
  104. +#include <linux/dma-mapping.h>
  105. +#include <linux/err.h>
  106. +#include <linux/init.h>
  107. +#include <linux/interrupt.h>
  108. +#include <linux/list.h>
  109. +#include <linux/module.h>
  110. +#include <linux/platform_device.h>
  111. +#include <linux/slab.h>
  112. +#include <linux/io.h>
  113. +#include <linux/spinlock.h>
  114. +
  115. +#ifndef CONFIG_OF
  116. +
  117. +/* dma manager */
  118. +#include <mach/dma.h>
  119. +
  120. +//#define DMA_COMPLETE DMA_SUCCESS
  121. +
  122. +#endif
  123. +
  124. +#include <linux/of.h>
  125. +#include <linux/of_dma.h>
  126. +
  127. +#include "virt-dma.h"
  128. +
  129. +
  130. +struct bcm2835_dmadev {
  131. + struct dma_device ddev;
  132. + spinlock_t lock;
  133. + void __iomem *base;
  134. + struct device_dma_parameters dma_parms;
  135. +};
  136. +
  137. +struct bcm2835_dma_cb {
  138. + uint32_t info;
  139. + uint32_t src;
  140. + uint32_t dst;
  141. + uint32_t length;
  142. + uint32_t stride;
  143. + uint32_t next;
  144. + uint32_t pad[2];
  145. +};
  146. +
  147. +struct bcm2835_chan {
  148. + struct virt_dma_chan vc;
  149. + struct list_head node;
  150. +
  151. + struct dma_slave_config cfg;
  152. + bool cyclic;
  153. +
  154. + int ch;
  155. + struct bcm2835_desc *desc;
  156. +
  157. + void __iomem *chan_base;
  158. + int irq_number;
  159. +
  160. + unsigned int dreq;
  161. +};
  162. +
  163. +struct bcm2835_desc {
  164. + struct virt_dma_desc vd;
  165. + enum dma_transfer_direction dir;
  166. +
  167. + unsigned int control_block_size;
  168. + struct bcm2835_dma_cb *control_block_base;
  169. + dma_addr_t control_block_base_phys;
  170. +
  171. + unsigned int frames;
  172. + size_t size;
  173. +};
  174. +
  175. +#define BCM2835_DMA_CS 0x00
  176. +#define BCM2835_DMA_ADDR 0x04
  177. +#define BCM2835_DMA_SOURCE_AD 0x0c
  178. +#define BCM2835_DMA_DEST_AD 0x10
  179. +#define BCM2835_DMA_NEXTCB 0x1C
  180. +
  181. +/* DMA CS Control and Status bits */
  182. +#define BCM2835_DMA_ACTIVE BIT(0)
  183. +#define BCM2835_DMA_INT BIT(2)
  184. +#define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */
  185. +#define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */
  186. +#define BCM2835_DMA_ERR BIT(8)
  187. +#define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */
  188. +#define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */
  189. +
  190. +#define BCM2835_DMA_INT_EN BIT(0)
  191. +#define BCM2835_DMA_WAIT_RESP BIT(3)
  192. +#define BCM2835_DMA_D_INC BIT(4)
  193. +#define BCM2835_DMA_D_WIDTH BIT(5)
  194. +#define BCM2835_DMA_D_DREQ BIT(6)
  195. +#define BCM2835_DMA_S_INC BIT(8)
  196. +#define BCM2835_DMA_S_WIDTH BIT(9)
  197. +#define BCM2835_DMA_S_DREQ BIT(10)
  198. +
  199. +#define BCM2835_DMA_PER_MAP(x) ((x) << 16)
  200. +#define BCM2835_DMA_WAITS(x) (((x)&0x1f) << 21)
  201. +
  202. +#define SDHCI_BCM_DMA_WAITS 0 /* delays slowing DMA transfers: 0-31 */
  203. +
  204. +#define BCM2835_DMA_DATA_TYPE_S8 1
  205. +#define BCM2835_DMA_DATA_TYPE_S16 2
  206. +#define BCM2835_DMA_DATA_TYPE_S32 4
  207. +#define BCM2835_DMA_DATA_TYPE_S128 16
  208. +
  209. +#define BCM2835_DMA_BULK_MASK BIT(0)
  210. +#define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3))
  211. +
  212. +
  213. +/* Valid only for channels 0 - 14, 15 has its own base address */
  214. +#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */
  215. +#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
  216. +
  217. +#define MAX_LITE_TRANSFER 32768
  218. +#define MAX_NORMAL_TRANSFER 1073741824
  219. +
  220. +static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
  221. +{
  222. + return container_of(d, struct bcm2835_dmadev, ddev);
  223. +}
  224. +
  225. +static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
  226. +{
  227. + return container_of(c, struct bcm2835_chan, vc.chan);
  228. +}
  229. +
  230. +static inline struct bcm2835_desc *to_bcm2835_dma_desc(
  231. + struct dma_async_tx_descriptor *t)
  232. +{
  233. + return container_of(t, struct bcm2835_desc, vd.tx);
  234. +}
  235. +
  236. +static void dma_dumpregs(struct bcm2835_chan *c)
  237. +{
  238. + pr_debug("-------------DMA DUMPREGS-------------\n");
  239. + pr_debug("CS= %u\n",
  240. + readl(c->chan_base + BCM2835_DMA_CS));
  241. + pr_debug("ADDR= %u\n",
  242. + readl(c->chan_base + BCM2835_DMA_ADDR));
  243. + pr_debug("SOURCE_ADDR= %u\n",
  244. + readl(c->chan_base + BCM2835_DMA_SOURCE_AD));
  245. + pr_debug("DEST_AD= %u\n",
  246. + readl(c->chan_base + BCM2835_DMA_DEST_AD));
  247. + pr_debug("NEXTCB= %u\n",
  248. + readl(c->chan_base + BCM2835_DMA_NEXTCB));
  249. + pr_debug("--------------------------------------\n");
  250. +}
  251. +
  252. +static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
  253. +{
  254. + struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
  255. + dma_free_coherent(desc->vd.tx.chan->device->dev,
  256. + desc->control_block_size,
  257. + desc->control_block_base,
  258. + desc->control_block_base_phys);
  259. + kfree(desc);
  260. +}
  261. +
  262. +static int bcm2835_dma_abort(void __iomem *chan_base)
  263. +{
  264. + unsigned long cs;
  265. + long int timeout = 10000;
  266. +
  267. + cs = readl(chan_base + BCM2835_DMA_CS);
  268. + if (!(cs & BCM2835_DMA_ACTIVE))
  269. + return 0;
  270. +
  271. + /* Write 0 to the active bit - Pause the DMA */
  272. + writel(0, chan_base + BCM2835_DMA_CS);
  273. +
  274. + /* Wait for any current AXI transfer to complete */
  275. + while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
  276. + cpu_relax();
  277. + cs = readl(chan_base + BCM2835_DMA_CS);
  278. + }
  279. +
  280. + /* We'll un-pause when we set off our next DMA */
  281. + if (!timeout)
  282. + return -ETIMEDOUT;
  283. +
  284. + if (!(cs & BCM2835_DMA_ACTIVE))
  285. + return 0;
  286. +
  287. + /* Terminate the control block chain */
  288. + writel(0, chan_base + BCM2835_DMA_NEXTCB);
  289. +
  290. + /* Abort the whole DMA */
  291. + writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
  292. + chan_base + BCM2835_DMA_CS);
  293. +
  294. + return 0;
  295. +}
  296. +
  297. +
  298. +static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
  299. +{
  300. + struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
  301. + struct bcm2835_desc *d;
  302. +
  303. + if (!vd) {
  304. + c->desc = NULL;
  305. + return;
  306. + }
  307. +
  308. + list_del(&vd->node);
  309. +
  310. + c->desc = d = to_bcm2835_dma_desc(&vd->tx);
  311. +
  312. + writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
  313. + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
  314. +
  315. +}
  316. +
  317. +static irqreturn_t bcm2835_dma_callback(int irq, void *data)
  318. +{
  319. + struct bcm2835_chan *c = data;
  320. + struct bcm2835_desc *d;
  321. + unsigned long flags;
  322. +
  323. + spin_lock_irqsave(&c->vc.lock, flags);
  324. +
  325. + /* Acknowledge interrupt */
  326. + writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
  327. +
  328. + d = c->desc;
  329. +
  330. + if (d) {
  331. + if (c->cyclic) {
  332. + vchan_cyclic_callback(&d->vd);
  333. +
  334. + /* Keep the DMA engine running */
  335. + writel(BCM2835_DMA_ACTIVE,
  336. + c->chan_base + BCM2835_DMA_CS);
  337. +
  338. + } else {
  339. + vchan_cookie_complete(&c->desc->vd);
  340. + bcm2835_dma_start_desc(c);
  341. + }
  342. + }
  343. +
  344. + spin_unlock_irqrestore(&c->vc.lock, flags);
  345. +
  346. + return IRQ_HANDLED;
  347. +}
  348. +
  349. +static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
  350. +{
  351. + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
  352. + int ret;
  353. +
  354. + dev_dbg(c->vc.chan.device->dev,
  355. + "Allocating DMA channel %d\n", c->ch);
  356. +
  357. + ret = request_irq(c->irq_number,
  358. + bcm2835_dma_callback, 0, "DMA IRQ", c);
  359. +
  360. + return ret;
  361. +}
  362. +
  363. +static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
  364. +{
  365. + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
  366. +
  367. + vchan_free_chan_resources(&c->vc);
  368. + free_irq(c->irq_number, c);
  369. +
  370. + dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
  371. +}
  372. +
  373. +static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
  374. +{
  375. + return d->size;
  376. +}
  377. +
  378. +static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
  379. +{
  380. + unsigned int i;
  381. + size_t size;
  382. +
  383. + for (size = i = 0; i < d->frames; i++) {
  384. + struct bcm2835_dma_cb *control_block =
  385. + &d->control_block_base[i];
  386. + size_t this_size = control_block->length;
  387. + dma_addr_t dma;
  388. +
  389. + if (d->dir == DMA_DEV_TO_MEM)
  390. + dma = control_block->dst;
  391. + else
  392. + dma = control_block->src;
  393. +
  394. + if (size)
  395. + size += this_size;
  396. + else if (addr >= dma && addr < dma + this_size)
  397. + size += dma + this_size - addr;
  398. + }
  399. +
  400. + return size;
  401. +}
  402. +
  403. +static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
  404. + dma_cookie_t cookie, struct dma_tx_state *txstate)
  405. +{
  406. + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
  407. + struct bcm2835_desc *d;
  408. + struct virt_dma_desc *vd;
  409. + enum dma_status ret;
  410. + unsigned long flags;
  411. + dma_addr_t pos;
  412. +
  413. + ret = dma_cookie_status(chan, cookie, txstate);
  414. + if (ret == DMA_COMPLETE || !txstate)
  415. + return ret;
  416. +
  417. + spin_lock_irqsave(&c->vc.lock, flags);
  418. + vd = vchan_find_desc(&c->vc, cookie);
  419. + if (vd) {
  420. + txstate->residue =
  421. + bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
  422. + } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
  423. + d = c->desc;
  424. +
  425. + if (d->dir == DMA_MEM_TO_DEV)
  426. + pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
  427. + else if (d->dir == DMA_DEV_TO_MEM)
  428. + pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
  429. + else
  430. + pos = 0;
  431. +
  432. + txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
  433. + } else {
  434. + txstate->residue = 0;
  435. + }
  436. +
  437. + spin_unlock_irqrestore(&c->vc.lock, flags);
  438. +
  439. + return ret;
  440. +}
  441. +
  442. +static void bcm2835_dma_issue_pending(struct dma_chan *chan)
  443. +{
  444. + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
  445. + unsigned long flags;
  446. +
  447. + spin_lock_irqsave(&c->vc.lock, flags);
  448. + if (vchan_issue_pending(&c->vc) && !c->desc)
  449. + bcm2835_dma_start_desc(c);
  450. +
  451. + spin_unlock_irqrestore(&c->vc.lock, flags);
  452. +}
  453. +
  454. +static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
  455. + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
  456. + size_t period_len, enum dma_transfer_direction direction,
  457. + unsigned long flags)
  458. +{
  459. + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
  460. + enum dma_slave_buswidth dev_width;
  461. + struct bcm2835_desc *d;
  462. + dma_addr_t dev_addr;
  463. + unsigned int es, sync_type;
  464. + unsigned int frame, max_size;
  465. +
  466. + /* Grab configuration */
  467. + if (!is_slave_direction(direction)) {
  468. + dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
  469. + return NULL;
  470. + }
  471. +
  472. + if (direction == DMA_DEV_TO_MEM) {
  473. + dev_addr = c->cfg.src_addr;
  474. + dev_width = c->cfg.src_addr_width;
  475. + sync_type = BCM2835_DMA_S_DREQ;
  476. + } else {
  477. + dev_addr = c->cfg.dst_addr;
  478. + dev_width = c->cfg.dst_addr_width;
  479. + sync_type = BCM2835_DMA_D_DREQ;
  480. + }
  481. +
  482. + /* Bus width translates to the element size (ES) */
  483. + switch (dev_width) {
  484. + case DMA_SLAVE_BUSWIDTH_4_BYTES:
  485. + es = BCM2835_DMA_DATA_TYPE_S32;
  486. + break;
  487. + default:
  488. + return NULL;
  489. + }
  490. +
  491. + /* Now allocate and setup the descriptor. */
  492. + d = kzalloc(sizeof(*d), GFP_NOWAIT);
  493. + if (!d)
  494. + return NULL;
  495. +
  496. + d->dir = direction;
  497. +
  498. + if (c->ch >= 8) /* we have a LITE channel */
  499. + max_size = MAX_LITE_TRANSFER;
  500. + else
  501. + max_size = MAX_NORMAL_TRANSFER;
  502. + period_len = min(period_len, max_size);
  503. +
  504. + d->frames = (buf_len-1) / period_len + 1;
  505. +
  506. + /* Allocate memory for control blocks */
  507. + d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
  508. + d->control_block_base = dma_zalloc_coherent(chan->device->dev,
  509. + d->control_block_size, &d->control_block_base_phys,
  510. + GFP_NOWAIT);
  511. +
  512. + if (!d->control_block_base) {
  513. + kfree(d);
  514. + return NULL;
  515. + }
  516. +
  517. + /*
  518. + * Iterate over all frames, create a control block
  519. + * for each frame and link them together.
  520. + */
  521. + for (frame = 0; frame < d->frames; frame++) {
  522. + struct bcm2835_dma_cb *control_block =
  523. + &d->control_block_base[frame];
  524. +
  525. + /* Set up addresses */
  526. + if (d->dir == DMA_DEV_TO_MEM) {
  527. + control_block->info = BCM2835_DMA_D_INC;
  528. + control_block->src = dev_addr;
  529. + control_block->dst = buf_addr + frame * period_len;
  530. + } else {
  531. + control_block->info = BCM2835_DMA_S_INC;
  532. + control_block->src = buf_addr + frame * period_len;
  533. + control_block->dst = dev_addr;
  534. + }
  535. +
  536. + /* Enable interrupt */
  537. + control_block->info |= BCM2835_DMA_INT_EN;
  538. +
  539. + /* Setup synchronization */
  540. + if (sync_type != 0)
  541. + control_block->info |= sync_type;
  542. +
  543. + /* Setup DREQ channel */
  544. + if (c->cfg.slave_id != 0)
  545. + control_block->info |=
  546. + BCM2835_DMA_PER_MAP(c->cfg.slave_id);
  547. +
  548. + /* Length of a frame */
  549. + if (frame != d->frames-1)
  550. + control_block->length = period_len;
  551. + else
  552. + control_block->length = buf_len - (d->frames - 1) * period_len;
  553. +
  554. + d->size += control_block->length;
  555. +
  556. + /*
  557. + * Next block is the next frame.
  558. + * This function is called on cyclic DMA transfers.
  559. + * Therefore, wrap around at number of frames.
  560. + */
  561. + control_block->next = d->control_block_base_phys +
  562. + sizeof(struct bcm2835_dma_cb)
  563. + * ((frame + 1) % d->frames);
  564. + }
  565. +
  566. + c->cyclic = true;
  567. +
  568. + return vchan_tx_prep(&c->vc, &d->vd, flags);
  569. +}
  570. +
  571. +
  572. +static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
  573. + struct dma_chan *chan, struct scatterlist *sgl,
  574. + unsigned int sg_len, enum dma_transfer_direction direction,
  575. + unsigned long flags, void *context)
  576. +{
  577. + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
  578. + enum dma_slave_buswidth dev_width;
  579. + struct bcm2835_desc *d;
  580. + dma_addr_t dev_addr;
  581. + struct scatterlist *sgent;
  582. + unsigned int es, sync_type;
  583. + unsigned int i, j, splitct, max_size;
  584. +
  585. + if (!is_slave_direction(direction)) {
  586. + dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
  587. + return NULL;
  588. + }
  589. +
  590. + if (direction == DMA_DEV_TO_MEM) {
  591. + dev_addr = c->cfg.src_addr;
  592. + dev_width = c->cfg.src_addr_width;
  593. + sync_type = BCM2835_DMA_S_DREQ;
  594. + } else {
  595. + dev_addr = c->cfg.dst_addr;
  596. + dev_width = c->cfg.dst_addr_width;
  597. + sync_type = BCM2835_DMA_D_DREQ;
  598. + }
  599. +
  600. + /* Bus width translates to the element size (ES) */
  601. + switch (dev_width) {
  602. + case DMA_SLAVE_BUSWIDTH_4_BYTES:
  603. + es = BCM2835_DMA_DATA_TYPE_S32;
  604. + break;
  605. + default:
  606. + return NULL;
  607. + }
  608. +
  609. + /* Now allocate and setup the descriptor. */
  610. + d = kzalloc(sizeof(*d), GFP_NOWAIT);
  611. + if (!d)
  612. + return NULL;
  613. +
  614. + d->dir = direction;
  615. +
  616. + if (c->ch >= 8) /* we have a LITE channel */
  617. + max_size = MAX_LITE_TRANSFER;
  618. + else
  619. + max_size = MAX_NORMAL_TRANSFER;
  620. +
  621. + /* We store the length of the SG list in d->frames
  622. + taking care to account for splitting up transfers
  623. + too large for a LITE channel */
  624. +
  625. + d->frames = 0;
  626. + for_each_sg(sgl, sgent, sg_len, i) {
  627. + uint32_t len = sg_dma_len(sgent);
  628. + d->frames += 1 + len / max_size;
  629. + }
  630. +
  631. + /* Allocate memory for control blocks */
  632. + d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
  633. + d->control_block_base = dma_zalloc_coherent(chan->device->dev,
  634. + d->control_block_size, &d->control_block_base_phys,
  635. + GFP_NOWAIT);
  636. +
  637. + if (!d->control_block_base) {
  638. + kfree(d);
  639. + return NULL;
  640. + }
  641. +
  642. + /*
  643. + * Iterate over all SG entries, create a control block
  644. + * for each frame and link them together.
  645. + */
  646. +
  647. + /* we count the number of times an SG entry had to be split
  648. + as a result of using a LITE channel */
  649. + splitct = 0;
  650. +
  651. + for_each_sg(sgl, sgent, sg_len, i) {
  652. + dma_addr_t addr = sg_dma_address(sgent);
  653. + uint32_t len = sg_dma_len(sgent);
  654. +
  655. + for (j = 0; j < len; j += max_size) {
  656. + struct bcm2835_dma_cb *control_block =
  657. + &d->control_block_base[i+splitct];
  658. +
  659. + /* Set up addresses */
  660. + if (d->dir == DMA_DEV_TO_MEM) {
  661. + control_block->info = BCM2835_DMA_D_INC |
  662. + BCM2835_DMA_D_WIDTH | BCM2835_DMA_S_DREQ;
  663. + control_block->src = dev_addr;
  664. + control_block->dst = addr + (dma_addr_t)j;
  665. + } else {
  666. + control_block->info = BCM2835_DMA_S_INC |
  667. + BCM2835_DMA_S_WIDTH | BCM2835_DMA_D_DREQ;
  668. + control_block->src = addr + (dma_addr_t)j;
  669. + control_block->dst = dev_addr;
  670. + }
  671. +
  672. + /* Common part */
  673. + control_block->info |= BCM2835_DMA_WAITS(SDHCI_BCM_DMA_WAITS);
  674. + control_block->info |= BCM2835_DMA_WAIT_RESP;
  675. +
  676. + /* Enable */
  677. + if (i == sg_len-1 && len-j <= max_size)
  678. + control_block->info |= BCM2835_DMA_INT_EN;
  679. +
  680. + /* Setup synchronization */
  681. + if (sync_type != 0)
  682. + control_block->info |= sync_type;
  683. +
  684. + /* Setup DREQ channel */
  685. + c->dreq = c->cfg.slave_id; /* DREQ loaded from config */
  686. +
  687. + if (c->dreq != 0)
  688. + control_block->info |=
  689. + BCM2835_DMA_PER_MAP(c->dreq);
  690. +
  691. + /* Length of a frame */
  692. + control_block->length = min(len-j, max_size);
  693. + d->size += control_block->length;
  694. +
  695. + /*
  696. + * Next block is the next frame.
  697. + */
  698. + if (i < sg_len-1 || len-j > max_size) {
  699. + /* next block is the next frame. */
  700. + control_block->next = d->control_block_base_phys +
  701. + sizeof(struct bcm2835_dma_cb) * (i + splitct + 1);
  702. + } else {
  703. + /* next block is empty. */
  704. + control_block->next = 0;
  705. + }
  706. +
  707. + if (len-j > max_size)
  708. + splitct++;
  709. + }
  710. + }
  711. +
  712. + c->cyclic = false;
  713. +
  714. + return vchan_tx_prep(&c->vc, &d->vd, flags);
  715. +}
  716. +
  717. +static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
  718. + struct dma_slave_config *cfg)
  719. +{
  720. + if ((cfg->direction == DMA_DEV_TO_MEM &&
  721. + cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
  722. + (cfg->direction == DMA_MEM_TO_DEV &&
  723. + cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
  724. + !is_slave_direction(cfg->direction)) {
  725. + return -EINVAL;
  726. + }
  727. +
  728. + c->cfg = *cfg;
  729. +
  730. + return 0;
  731. +}
  732. +
  733. +static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
  734. +{
  735. + struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
  736. + unsigned long flags;
  737. + int timeout = 10000;
  738. + LIST_HEAD(head);
  739. +
  740. + spin_lock_irqsave(&c->vc.lock, flags);
  741. +
  742. + /* Prevent this channel being scheduled */
  743. + spin_lock(&d->lock);
  744. + list_del_init(&c->node);
  745. + spin_unlock(&d->lock);
  746. +
  747. + /*
  748. + * Stop DMA activity: we assume the callback will not be called
  749. + * after bcm_dma_abort() returns (even if it does, it will see
  750. + * c->desc is NULL and exit.)
  751. + */
  752. + if (c->desc) {
  753. + c->desc = NULL;
  754. + bcm2835_dma_abort(c->chan_base);
  755. +
  756. + /* Wait for stopping */
  757. + while (--timeout) {
  758. + if (!(readl(c->chan_base + BCM2835_DMA_CS) &
  759. + BCM2835_DMA_ACTIVE))
  760. + break;
  761. +
  762. + cpu_relax();
  763. + }
  764. +
  765. + if (!timeout)
  766. + dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
  767. + }
  768. +
  769. + vchan_get_all_descriptors(&c->vc, &head);
  770. + spin_unlock_irqrestore(&c->vc.lock, flags);
  771. + vchan_dma_desc_free_list(&c->vc, &head);
  772. +
  773. + return 0;
  774. +}
  775. +
  776. +static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  777. + unsigned long arg)
  778. +{
  779. + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
  780. +
  781. + switch (cmd) {
  782. + case DMA_SLAVE_CONFIG:
  783. + return bcm2835_dma_slave_config(c,
  784. + (struct dma_slave_config *)arg);
  785. +
  786. + case DMA_TERMINATE_ALL:
  787. + return bcm2835_dma_terminate_all(c);
  788. +
  789. + default:
  790. + return -ENXIO;
  791. + }
  792. +}
  793. +
  794. +#ifdef CONFIG_OF
  795. +static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
  796. +{
  797. + struct bcm2835_chan *c;
  798. +
  799. + c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
  800. + if (!c)
  801. + return -ENOMEM;
  802. +
  803. + c->vc.desc_free = bcm2835_dma_desc_free;
  804. + vchan_init(&c->vc, &d->ddev);
  805. + INIT_LIST_HEAD(&c->node);
  806. +
  807. + d->ddev.chancnt++;
  808. +
  809. + c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
  810. + c->ch = chan_id;
  811. + c->irq_number = irq;
  812. +
  813. + return 0;
  814. +}
  815. +#endif
  816. +
  817. +static int bcm2708_dma_chan_init(struct bcm2835_dmadev *d,
  818. + void __iomem *chan_base, int chan_id, int irq)
  819. +{
  820. + struct bcm2835_chan *c;
  821. +
  822. + c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
  823. + if (!c)
  824. + return -ENOMEM;
  825. +
  826. + c->vc.desc_free = bcm2835_dma_desc_free;
  827. + vchan_init(&c->vc, &d->ddev);
  828. + INIT_LIST_HEAD(&c->node);
  829. +
  830. + d->ddev.chancnt++;
  831. +
  832. + c->chan_base = chan_base;
  833. + c->ch = chan_id;
  834. + c->irq_number = irq;
  835. +
  836. + return 0;
  837. +}
  838. +
  839. +
  840. +static void bcm2835_dma_free(struct bcm2835_dmadev *od)
  841. +{
  842. + struct bcm2835_chan *c, *next;
  843. +
  844. + list_for_each_entry_safe(c, next, &od->ddev.channels,
  845. + vc.chan.device_node) {
  846. + list_del(&c->vc.chan.device_node);
  847. + tasklet_kill(&c->vc.task);
  848. + }
  849. +}
  850. +
  851. +static const struct of_device_id bcm2835_dma_of_match[] = {
  852. + { .compatible = "brcm,bcm2835-dma", },
  853. + {},
  854. +};
  855. +MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
  856. +
  857. +#ifdef CONFIG_OF
  858. +static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
  859. + struct of_dma *ofdma)
  860. +{
  861. + struct bcm2835_dmadev *d = ofdma->of_dma_data;
  862. + struct dma_chan *chan;
  863. +
  864. + chan = dma_get_any_slave_channel(&d->ddev);
  865. + if (!chan)
  866. + return NULL;
  867. +
  868. + /* Set DREQ from param */
  869. + to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
  870. +
  871. + return chan;
  872. +}
  873. +#endif
  874. +
  875. +static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
  876. + struct dma_slave_caps *caps)
  877. +{
  878. + caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  879. + caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  880. + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
  881. + caps->cmd_pause = false;
  882. + caps->cmd_terminate = true;
  883. +
  884. + return 0;
  885. +}
  886. +
  887. +static int bcm2835_dma_probe(struct platform_device *pdev)
  888. +{
  889. + struct bcm2835_dmadev *od;
  890. +#ifdef CONFIG_OF
  891. + struct resource *res;
  892. + void __iomem *base;
  893. + uint32_t chans_available;
  894. +#endif
  895. + int rc;
  896. + int i;
  897. + int irq;
  898. +
  899. +
  900. + if (!pdev->dev.dma_mask)
  901. + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
  902. +
  903. + /* If CONFIG_OF is selected, device tree is used */
  904. + /* hence the difference between probing */
  905. +
  906. +#ifndef CONFIG_OF
  907. +
  908. + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
  909. + if (rc)
  910. + return rc;
  911. + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
  912. +
  913. +
  914. + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
  915. + if (!od)
  916. + return -ENOMEM;
  917. +
  918. + pdev->dev.dma_parms = &od->dma_parms;
  919. + dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
  920. +
  921. +
  922. + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
  923. + dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
  924. + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
  925. + od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
  926. + od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
  927. + od->ddev.device_tx_status = bcm2835_dma_tx_status;
  928. + od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
  929. + od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
  930. + od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
  931. + od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
  932. + od->ddev.device_control = bcm2835_dma_control;
  933. + od->ddev.dev = &pdev->dev;
  934. + INIT_LIST_HEAD(&od->ddev.channels);
  935. + spin_lock_init(&od->lock);
  936. +
  937. + platform_set_drvdata(pdev, od);
  938. +
  939. + for (i = 0; i < 5; i++) {
  940. + void __iomem *chan_base;
  941. + int chan_id;
  942. +
  943. + chan_id = bcm_dma_chan_alloc(BCM_DMA_FEATURE_LITE,
  944. + &chan_base,
  945. + &irq);
  946. +
  947. + if (chan_id < 0)
  948. + break;
  949. +
  950. + rc = bcm2708_dma_chan_init(od, chan_base, chan_id, irq);
  951. + if (rc)
  952. + goto err_no_dma;
  953. + }
  954. +#else
  955. + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  956. + if (rc)
  957. + return rc;
  958. +
  959. +
  960. + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
  961. + if (!od)
  962. + return -ENOMEM;
  963. +
  964. + pdev->dev.dma_parms = &od->dma_parms;
  965. + dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
  966. +
  967. +
  968. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  969. + base = devm_ioremap_resource(&pdev->dev, res);
  970. + if (IS_ERR(base))
  971. + return PTR_ERR(base);
  972. +
  973. + od->base = base;
  974. +
  975. +
  976. + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
  977. + dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
  978. + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
  979. + od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
  980. + od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
  981. + od->ddev.device_tx_status = bcm2835_dma_tx_status;
  982. + od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
  983. + od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
  984. + od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
  985. + od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
  986. + od->ddev.device_control = bcm2835_dma_control;
  987. + od->ddev.dev = &pdev->dev;
  988. + INIT_LIST_HEAD(&od->ddev.channels);
  989. + spin_lock_init(&od->lock);
  990. +
  991. + platform_set_drvdata(pdev, od);
  992. +
  993. +
  994. + /* Request DMA channel mask from device tree */
  995. + if (of_property_read_u32(pdev->dev.of_node,
  996. + "brcm,dma-channel-mask",
  997. + &chans_available)) {
  998. + dev_err(&pdev->dev, "Failed to get channel mask\n");
  999. + rc = -EINVAL;
  1000. + goto err_no_dma;
  1001. + }
  1002. +
  1003. +
  1004. + /*
  1005. + * Do not use the FIQ and BULK channels,
  1006. + * because they are used by the GPU.
  1007. + */
  1008. + chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
  1009. +
  1010. +
  1011. + for (i = 0; i < pdev->num_resources; i++) {
  1012. + irq = platform_get_irq(pdev, i);
  1013. + if (irq < 0)
  1014. + break;
  1015. +
  1016. + if (chans_available & (1 << i)) {
  1017. + rc = bcm2835_dma_chan_init(od, i, irq);
  1018. + if (rc)
  1019. + goto err_no_dma;
  1020. + }
  1021. + }
  1022. +
  1023. + dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
  1024. +
  1025. + /* Device-tree DMA controller registration */
  1026. + rc = of_dma_controller_register(pdev->dev.of_node,
  1027. + bcm2835_dma_xlate, od);
  1028. + if (rc) {
  1029. + dev_err(&pdev->dev, "Failed to register DMA controller\n");
  1030. + goto err_no_dma;
  1031. + }
  1032. +#endif
  1033. +
  1034. + rc = dma_async_device_register(&od->ddev);
  1035. + if (rc) {
  1036. + dev_err(&pdev->dev,
  1037. + "Failed to register slave DMA engine device: %d\n", rc);
  1038. + goto err_no_dma;
  1039. + }
  1040. +
  1041. + dev_info(&pdev->dev, "Load BCM2835 DMA engine driver\n");
  1042. +
  1043. + return 0;
  1044. +
  1045. +err_no_dma:
  1046. + bcm2835_dma_free(od);
  1047. + return rc;
  1048. +}
  1049. +
  1050. +static int bcm2835_dma_remove(struct platform_device *pdev)
  1051. +{
  1052. + struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
  1053. +
  1054. + dma_async_device_unregister(&od->ddev);
  1055. + bcm2835_dma_free(od);
  1056. +
  1057. + return 0;
  1058. +}
  1059. +
  1060. +#ifndef CONFIG_OF
  1061. +
  1062. +
  1063. +static struct platform_driver bcm2835_dma_driver = {
  1064. + .probe = bcm2835_dma_probe,
  1065. + .remove = bcm2835_dma_remove,
  1066. + .driver = {
  1067. + .name = "bcm2708-dmaengine",
  1068. + .owner = THIS_MODULE,
  1069. + },
  1070. +};
  1071. +
  1072. +static struct platform_device *pdev;
  1073. +
  1074. +static const struct platform_device_info bcm2835_dma_dev_info = {
  1075. + .name = "bcm2708-dmaengine",
  1076. + .id = -1,
  1077. +};
  1078. +
  1079. +static int bcm2835_dma_init(void)
  1080. +{
  1081. + int rc = platform_driver_register(&bcm2835_dma_driver);
  1082. +
  1083. + if (rc == 0) {
  1084. + pdev = platform_device_register_full(&bcm2835_dma_dev_info);
  1085. + if (IS_ERR(pdev)) {
  1086. + platform_driver_unregister(&bcm2835_dma_driver);
  1087. + rc = PTR_ERR(pdev);
  1088. + }
  1089. + }
  1090. +
  1091. + return rc;
  1092. +}
  1093. +module_init(bcm2835_dma_init); /* preferable to subsys_initcall */
  1094. +
/*
 * Legacy module exit: remove the manually created device first, then
 * unregister the driver (reverse order of bcm2835_dma_init()).
 */
static void __exit bcm2835_dma_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&bcm2835_dma_driver);
}
module_exit(bcm2835_dma_exit);
  1101. +
  1102. +#else
  1103. +
/*
 * Device-tree probed driver: the platform core instantiates the device
 * from the matching "brcm,bcm2835-dma" DT node, so plain
 * module_platform_driver() registration is sufficient here.
 */
static struct platform_driver bcm2835_dma_driver = {
	.probe = bcm2835_dma_probe,
	.remove = bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
	},
};

module_platform_driver(bcm2835_dma_driver);
  1115. +
  1116. +#endif
  1117. +
  1118. +MODULE_ALIAS("platform:bcm2835-dma");
  1119. +MODULE_DESCRIPTION("BCM2835 DMA engine driver");
  1120. +MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
  1121. +MODULE_AUTHOR("Gellert Weisz <gellert@raspberrypi.org>");
  1122. +MODULE_LICENSE("GPL v2");