011-dmaengine-core-Introduce-new-universal-API-to-reques.patch
  1. From a8135d0d79e9d0ad3a4ff494fceeaae838becf38 Mon Sep 17 00:00:00 2001
  2. From: Peter Ujfalusi <peter.ujfalusi@ti.com>
  3. Date: Mon, 14 Dec 2015 22:47:40 +0200
  4. Subject: [PATCH 2/3] dmaengine: core: Introduce new, universal API to request
  5. a channel
  6. The two API functions can cover most, if not all current APIs used to
  7. request a channel. With minimal effort dmaengine drivers, platforms and
  8. dmaengine user drivers can be converted to use the two functions.
  9. struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
  10. To request any channel matching with the requested capabilities, can be
  11. used to request channel for memcpy, memset, xor, etc where no hardware
  12. synchronization is needed.
  13. struct dma_chan *dma_request_chan(struct device *dev, const char *name);
  14. To request a slave channel. The dma_request_chan() will try to find the
  15. channel via DT, ACPI or, in case the kernel booted in non DT/ACPI mode,
  16. it will use a filter lookup table and retrieve the needed information from
  17. the dma_slave_map provided by the DMA drivers.
  18. This legacy mode needs changes in platform code, in dmaengine drivers and
  19. finally the dmaengine user drivers can be converted:
  20. For each dmaengine driver an array of DMA device, slave and the parameter
  21. for the filter function needs to be added:
  22. static const struct dma_slave_map da830_edma_map[] = {
  23. { "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
  24. { "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
  25. { "davinci-mcasp.1", "rx", EDMA_FILTER_PARAM(0, 2) },
  26. { "davinci-mcasp.1", "tx", EDMA_FILTER_PARAM(0, 3) },
  27. { "davinci-mcasp.2", "rx", EDMA_FILTER_PARAM(0, 4) },
  28. { "davinci-mcasp.2", "tx", EDMA_FILTER_PARAM(0, 5) },
  29. { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 14) },
  30. { "spi_davinci.0", "tx", EDMA_FILTER_PARAM(0, 15) },
  31. { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 16) },
  32. { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 17) },
  33. { "spi_davinci.1", "rx", EDMA_FILTER_PARAM(0, 18) },
  34. { "spi_davinci.1", "tx", EDMA_FILTER_PARAM(0, 19) },
  35. };
  36. This information is going to be needed by the dmaengine driver, so
  37. modification to the platform_data is needed, and the driver map should be
  38. added to the pdata of the DMA driver:
  39. da8xx_edma0_pdata.slave_map = da830_edma_map;
  40. da8xx_edma0_pdata.slavecnt = ARRAY_SIZE(da830_edma_map);
  41. The DMA driver then needs to configure the needed device -> filter_fn
  42. mapping before it registers with dma_async_device_register() :
  43. ecc->dma_slave.filter_map.map = info->slave_map;
  44. ecc->dma_slave.filter_map.mapcnt = info->slavecnt;
  45. ecc->dma_slave.filter_map.fn = edma_filter_fn;
  46. When neither DT nor ACPI lookup is available the dma_request_chan() will
  47. try to match the requester's device name with the filter_map's list of
  48. device names; when a match is found it will use the information from the
  49. dma_slave_map to get the channel with the dma_get_channel() internal
  50. function.
  51. Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
  52. Reviewed-by: Arnd Bergmann <arnd@arndb.de>
  53. Signed-off-by: Vinod Koul <vinod.koul@intel.com>
  54. ---
  55. Documentation/dmaengine/client.txt | 23 +++-------
  56. drivers/dma/dmaengine.c | 89 +++++++++++++++++++++++++++++++++-----
  57. include/linux/dmaengine.h | 51 +++++++++++++++++++---
  58. 3 files changed, 127 insertions(+), 36 deletions(-)
  59. --- a/Documentation/dmaengine/client.txt
  60. +++ b/Documentation/dmaengine/client.txt
  61. @@ -22,25 +22,14 @@ The slave DMA usage consists of followin
  62. Channel allocation is slightly different in the slave DMA context,
  63. client drivers typically need a channel from a particular DMA
  64. controller only and even in some cases a specific channel is desired.
  65. - To request a channel dma_request_channel() API is used.
  66. + To request a channel dma_request_chan() API is used.
  67. Interface:
  68. - struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
  69. - dma_filter_fn filter_fn,
  70. - void *filter_param);
  71. - where dma_filter_fn is defined as:
  72. - typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
  73. + struct dma_chan *dma_request_chan(struct device *dev, const char *name);
  74. - The 'filter_fn' parameter is optional, but highly recommended for
  75. - slave and cyclic channels as they typically need to obtain a specific
  76. - DMA channel.
  77. -
  78. - When the optional 'filter_fn' parameter is NULL, dma_request_channel()
  79. - simply returns the first channel that satisfies the capability mask.
  80. -
  81. - Otherwise, the 'filter_fn' routine will be called once for each free
  82. - channel which has a capability in 'mask'. 'filter_fn' is expected to
  83. - return 'true' when the desired DMA channel is found.
  84. + Which will find and return the 'name' DMA channel associated with the 'dev'
  85. + device. The association is done via DT, ACPI or board file based
  86. + dma_slave_map matching table.
  87. A channel allocated via this interface is exclusive to the caller,
  88. until dma_release_channel() is called.
  89. --- a/drivers/dma/dmaengine.c
  90. +++ b/drivers/dma/dmaengine.c
  91. @@ -43,6 +43,7 @@
  92. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  93. +#include <linux/platform_device.h>
  94. #include <linux/dma-mapping.h>
  95. #include <linux/init.h>
  96. #include <linux/module.h>
  97. @@ -665,27 +666,73 @@ struct dma_chan *__dma_request_channel(c
  98. }
  99. EXPORT_SYMBOL_GPL(__dma_request_channel);
  100. +static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
  101. + const char *name,
  102. + struct device *dev)
  103. +{
  104. + int i;
  105. +
  106. + if (!device->filter.mapcnt)
  107. + return NULL;
  108. +
  109. + for (i = 0; i < device->filter.mapcnt; i++) {
  110. + const struct dma_slave_map *map = &device->filter.map[i];
  111. +
  112. + if (!strcmp(map->devname, dev_name(dev)) &&
  113. + !strcmp(map->slave, name))
  114. + return map;
  115. + }
  116. +
  117. + return NULL;
  118. +}
  119. +
  120. /**
  121. - * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
  122. + * dma_request_chan - try to allocate an exclusive slave channel
  123. * @dev: pointer to client device structure
  124. * @name: slave channel name
  125. *
  126. * Returns pointer to appropriate DMA channel on success or an error pointer.
  127. */
  128. -struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
  129. - const char *name)
  130. +struct dma_chan *dma_request_chan(struct device *dev, const char *name)
  131. {
  132. + struct dma_device *d, *_d;
  133. + struct dma_chan *chan = NULL;
  134. +
  135. /* If device-tree is present get slave info from here */
  136. if (dev->of_node)
  137. - return of_dma_request_slave_channel(dev->of_node, name);
  138. + chan = of_dma_request_slave_channel(dev->of_node, name);
  139. /* If device was enumerated by ACPI get slave info from here */
  140. - if (ACPI_HANDLE(dev))
  141. - return acpi_dma_request_slave_chan_by_name(dev, name);
  142. + if (has_acpi_companion(dev) && !chan)
  143. + chan = acpi_dma_request_slave_chan_by_name(dev, name);
  144. +
  145. + if (chan) {
  146. + /* Valid channel found or requester need to be deferred */
  147. + if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
  148. + return chan;
  149. + }
  150. +
  151. + /* Try to find the channel via the DMA filter map(s) */
  152. + mutex_lock(&dma_list_mutex);
  153. + list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
  154. + dma_cap_mask_t mask;
  155. + const struct dma_slave_map *map = dma_filter_match(d, name, dev);
  156. +
  157. + if (!map)
  158. + continue;
  159. +
  160. + dma_cap_zero(mask);
  161. + dma_cap_set(DMA_SLAVE, mask);
  162. - return ERR_PTR(-ENODEV);
  163. + chan = find_candidate(d, &mask, d->filter.fn, map->param);
  164. + if (!IS_ERR(chan))
  165. + break;
  166. + }
  167. + mutex_unlock(&dma_list_mutex);
  168. +
  169. + return chan ? chan : ERR_PTR(-EPROBE_DEFER);
  170. }
  171. -EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
  172. +EXPORT_SYMBOL_GPL(dma_request_chan);
  173. /**
  174. * dma_request_slave_channel - try to allocate an exclusive slave channel
  175. @@ -697,17 +744,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_chan
  176. struct dma_chan *dma_request_slave_channel(struct device *dev,
  177. const char *name)
  178. {
  179. - struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
  180. + struct dma_chan *ch = dma_request_chan(dev, name);
  181. if (IS_ERR(ch))
  182. return NULL;
  183. - dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
  184. - ch->device->privatecnt++;
  185. -
  186. return ch;
  187. }
  188. EXPORT_SYMBOL_GPL(dma_request_slave_channel);
  189. +/**
  190. + * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
  191. + * @mask: capabilities that the channel must satisfy
  192. + *
  193. + * Returns pointer to appropriate DMA channel on success or an error pointer.
  194. + */
  195. +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
  196. +{
  197. + struct dma_chan *chan;
  198. +
  199. + if (!mask)
  200. + return ERR_PTR(-ENODEV);
  201. +
  202. + chan = __dma_request_channel(mask, NULL, NULL);
  203. + if (!chan)
  204. + chan = ERR_PTR(-ENODEV);
  205. +
  206. + return chan;
  207. +}
  208. +EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
  209. +
  210. void dma_release_channel(struct dma_chan *chan)
  211. {
  212. mutex_lock(&dma_list_mutex);
  213. --- a/include/linux/dmaengine.h
  214. +++ b/include/linux/dmaengine.h
  215. @@ -607,11 +607,38 @@ enum dmaengine_alignment {
  216. };
  217. /**
  218. + * struct dma_slave_map - associates slave device and it's slave channel with
  219. + * parameter to be used by a filter function
  220. + * @devname: name of the device
  221. + * @slave: slave channel name
  222. + * @param: opaque parameter to pass to struct dma_filter.fn
  223. + */
  224. +struct dma_slave_map {
  225. + const char *devname;
  226. + const char *slave;
  227. + void *param;
  228. +};
  229. +
  230. +/**
  231. + * struct dma_filter - information for slave device/channel to filter_fn/param
  232. + * mapping
  233. + * @fn: filter function callback
  234. + * @mapcnt: number of slave device/channel in the map
  235. + * @map: array of channel to filter mapping data
  236. + */
  237. +struct dma_filter {
  238. + dma_filter_fn fn;
  239. + int mapcnt;
  240. + const struct dma_slave_map *map;
  241. +};
  242. +
  243. +/**
  244. * struct dma_device - info on the entity supplying DMA services
  245. * @chancnt: how many DMA channels are supported
  246. * @privatecnt: how many DMA channels are requested by dma_request_channel
  247. * @channels: the list of struct dma_chan
  248. * @global_node: list_head for global dma_device_list
  249. + * @filter: information for device/slave to filter function/param mapping
  250. * @cap_mask: one or more dma_capability flags
  251. * @max_xor: maximum number of xor sources, 0 if no capability
  252. * @max_pq: maximum number of PQ sources and PQ-continue capability
  253. @@ -666,6 +693,7 @@ struct dma_device {
  254. unsigned int privatecnt;
  255. struct list_head channels;
  256. struct list_head global_node;
  257. + struct dma_filter filter;
  258. dma_cap_mask_t cap_mask;
  259. unsigned short max_xor;
  260. unsigned short max_pq;
  261. @@ -1158,9 +1186,11 @@ enum dma_status dma_wait_for_async_tx(st
  262. void dma_issue_pending_all(void);
  263. struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
  264. dma_filter_fn fn, void *fn_param);
  265. -struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
  266. - const char *name);
  267. struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
  268. +
  269. +struct dma_chan *dma_request_chan(struct device *dev, const char *name);
  270. +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
  271. +
  272. void dma_release_channel(struct dma_chan *chan);
  273. int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
  274. #else
  275. @@ -1184,16 +1214,21 @@ static inline struct dma_chan *__dma_req
  276. {
  277. return NULL;
  278. }
  279. -static inline struct dma_chan *dma_request_slave_channel_reason(
  280. - struct device *dev, const char *name)
  281. -{
  282. - return ERR_PTR(-ENODEV);
  283. -}
  284. static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
  285. const char *name)
  286. {
  287. return NULL;
  288. }
  289. +static inline struct dma_chan *dma_request_chan(struct device *dev,
  290. + const char *name)
  291. +{
  292. + return ERR_PTR(-ENODEV);
  293. +}
  294. +static inline struct dma_chan *dma_request_chan_by_mask(
  295. + const dma_cap_mask_t *mask)
  296. +{
  297. + return ERR_PTR(-ENODEV);
  298. +}
  299. static inline void dma_release_channel(struct dma_chan *chan)
  300. {
  301. }
  302. @@ -1204,6 +1239,8 @@ static inline int dma_get_slave_caps(str
  303. }
  304. #endif
  305. +#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
  306. +
  307. static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
  308. {
  309. struct dma_slave_caps caps;