020-sata-dwc.patch

From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Sat, 21 May 2016 22:46:32 +0200
Subject: [PATCH v2 00/23] ata: sata_dwc_460ex: make it working again

The last approach to switching to the generic DMA engine API, commit
8b3444852a2b ("sata_dwc_460ex: move to generic DMA driver"), was not tested on
bare metal. Besides that, we expect new board support to arrive that uses the
same SATA IP but a different DMA controller.
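
For context, the sketch below shows the generic dmaengine slave flow the driver
is being moved to. It is illustrative only (the helper name and its parameters
are invented for this example, not taken from the driver), but it highlights
the points the DMA fixes in this series revolve around: maxburst is given in
items rather than bytes, the FIFO is described by its physical address, and
device_fc is false because the DMA controller acts as the flow controller.

/* Illustrative sketch of a dmaengine slave transfer; not driver code. */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int example_issue_dma(struct dma_chan *chan, struct scatterlist *sg,
			     unsigned int n_elem, phys_addr_t fifo,
			     enum dma_transfer_direction dir)
{
	struct dma_slave_config sconf = { 0 };
	struct dma_async_tx_descriptor *desc;

	sconf.direction = dir;
	/* Physical FIFO address, not a kernel virtual pointer */
	sconf.src_addr = fifo;
	sconf.dst_addr = fifo;
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	/* maxburst is counted in items (bus words), not bytes */
	sconf.src_maxburst = 16;
	sconf.dst_maxburst = 16;
	/* the DMA controller, not the device, is the flow controller */
	sconf.device_fc = false;

	if (dmaengine_slave_config(chan, &sconf))
		return -EIO;

	desc = dmaengine_prep_slave_sg(chan, sg, n_elem, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
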
This series targets the following:
- a few bug fixes to the original driver
- fixes to the DMA engine usage, in particular in the dw_dmac driver
- moving the driver to the generic PHY framework and the "dmas" DT property,
  which requires a DTS update (a sketch of the resulting lookup follows this
  list)
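
The sketch below illustrates that last item. The "sata-phy" and "sata-dma"
lookup names match the ones used in the patch further down; the probe helper
itself and its error handling are simplified assumptions, not the driver's
actual code.

/* Illustrative sketch of the generic PHY and "dmas" lookup; not driver code. */
#include <linux/phy/phy.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int example_get_resources(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct phy *phy;
	struct dma_chan *chan;
	int err;

	/* Optional PHY: returns NULL when the DT has no "sata-phy" entry */
	phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	err = phy_init(phy);
	if (err)
		return err;

	/* Channel described by the "dmas"/"dma-names" properties in the DTS */
	chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(chan)) {
		phy_exit(phy);
		return PTR_ERR(chan);
	}

	/* ... store phy and chan in driver state, power on the PHY later ... */
	return 0;
}
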
I have tested the driver myself on the Sam460ex and WD MyBookLive (apollo3g)
boards. In any case I ask Christian, Måns, and Julian to test independently and
to provide a Tested-by tag or an error report.

This series depends on a previously published, but not yet fully applied,
series [1]. The patches are also available via a public branch [2].

[1] http://www.spinics.net/lists/dmaengine/msg09250.html
[2] https://bitbucket.org/andy-shev/linux/branch/topic%2Fdw%2Fsata

Since v1:
- simplify patch 8 (David Laight)
- add Tested-by and Acked-by tags
Andy Shevchenko (11):
  ata: sata_dwc_460ex: set dma_boundary to 0x1fff
  ata: sata_dwc_460ex: burst size must be in items not bytes
  ata: sata_dwc_460ex: DMA is always a flow controller
  ata: sata_dwc_460ex: select only core part of DMA driver
  ata: sata_dwc_460ex: don't call ata_sff_qc_issue() on DMA commands
  ata: sata_dwc_460ex: correct HOSTDEV{P}_FROM_*() macros
  ata: sata_dwc_460ex: supply physical address of FIFO to DMA
  ata: sata_dwc_460ex: switch to new dmaengine_terminate_* API
  ata: sata_dwc_460ex: use devm_ioremap
  ata: sata_dwc_460ex: make debug messages neat
  powerpc/4xx: Device tree update for the 460ex DWC SATA

Christian Lamparter (1):
  ata: sata_dwc_460ex: fix crash on offline links without an attached drive

Mans Rullgard (11):
  ata: sata_dwc_460ex: remove incorrect locking
  ata: sata_dwc_460ex: skip dma setup for non-dma commands
  ata: sata_dwc_460ex: use "dmas" DT property to find dma channel
  ata: sata_dwc_460ex: add phy support
  ata: sata_dwc_460ex: get rid of global data
  ata: sata_dwc_460ex: remove empty libata callback
  ata: sata_dwc_460ex: get rid of some pointless casts
  ata: sata_dwc_460ex: get rid of incorrect cast
  ata: sata_dwc_460ex: add __iomem to register base pointer
  ata: sata_dwc_460ex: use readl/writel_relaxed()
  ata: sata_dwc_460ex: tidy up sata_dwc_clear_dmacr()
 arch/powerpc/boot/dts/canyonlands.dts |  15 +-
 drivers/ata/Kconfig                   |  12 +-
 drivers/ata/sata_dwc_460ex.c          | 552 +++++++++++++++++-----------------
 3 files changed, 305 insertions(+), 274 deletions(-)
---
 drivers/ata/sata_dwc_460ex.c | 552 ++++++++++++++++++++++---------------------
 1 file changed, 283 insertions(+), 269 deletions(-)
  56. --- a/drivers/ata/sata_dwc_460ex.c
  57. +++ b/drivers/ata/sata_dwc_460ex.c
  58. @@ -30,10 +30,12 @@
  59. #include <linux/kernel.h>
  60. #include <linux/module.h>
  61. #include <linux/device.h>
  62. +#include <linux/dmaengine.h>
  63. #include <linux/of_address.h>
  64. #include <linux/of_irq.h>
  65. #include <linux/of_platform.h>
  66. #include <linux/platform_device.h>
  67. +#include <linux/phy/phy.h>
  68. #include <linux/libata.h>
  69. #include <linux/slab.h>
  70. @@ -42,10 +44,6 @@
  71. #include <scsi/scsi_host.h>
  72. #include <scsi/scsi_cmnd.h>
  73. -/* Supported DMA engine drivers */
  74. -#include <linux/platform_data/dma-dw.h>
  75. -#include <linux/dma/dw.h>
  76. -
  77. /* These two are defined in "libata.h" */
  78. #undef DRV_NAME
  79. #undef DRV_VERSION
  80. @@ -53,19 +51,14 @@
  81. #define DRV_NAME "sata-dwc"
  82. #define DRV_VERSION "1.3"
  83. -#ifndef out_le32
  84. -#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
  85. -#endif
  86. -
  87. -#ifndef in_le32
  88. -#define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a)))
  89. -#endif
  90. +#define sata_dwc_writel(a, v) writel_relaxed(v, a)
  91. +#define sata_dwc_readl(a) readl_relaxed(a)
  92. #ifndef NO_IRQ
  93. #define NO_IRQ 0
  94. #endif
  95. -#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
  96. +#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
  97. enum {
  98. SATA_DWC_MAX_PORTS = 1,
  99. @@ -102,7 +95,7 @@ struct sata_dwc_regs {
  100. u32 versionr; /* Version Register */
  101. u32 idr; /* ID Register */
  102. u32 unimpl[192]; /* Unimplemented */
  103. - u32 dmadr[256]; /* FIFO Locations in DMA Mode */
  104. + u32 dmadr[256]; /* FIFO Locations in DMA Mode */
  105. };
  106. enum {
  107. @@ -146,9 +139,14 @@ struct sata_dwc_device {
  108. struct device *dev; /* generic device struct */
  109. struct ata_probe_ent *pe; /* ptr to probe-ent */
  110. struct ata_host *host;
  111. - u8 __iomem *reg_base;
  112. - struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
  113. + struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */
  114. + u32 sactive_issued;
  115. + u32 sactive_queued;
  116. + struct phy *phy;
  117. + phys_addr_t dmadr;
  118. +#ifdef CONFIG_SATA_DWC_OLD_DMA
  119. struct dw_dma_chip *dma;
  120. +#endif
  121. };
  122. #define SATA_DWC_QCMD_MAX 32
  123. @@ -159,25 +157,19 @@ struct sata_dwc_device_port {
  124. int dma_pending[SATA_DWC_QCMD_MAX];
  125. /* DMA info */
  126. - struct dw_dma_slave *dws;
  127. struct dma_chan *chan;
  128. struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
  129. u32 dma_interrupt_count;
  130. };
  131. /*
  132. - * Commonly used DWC SATA driver Macros
  133. + * Commonly used DWC SATA driver macros
  134. */
  135. -#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
  136. - (host)->private_data)
  137. -#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
  138. - (ap)->host->private_data)
  139. -#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
  140. - (ap)->private_data)
  141. -#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
  142. - (qc)->ap->host->private_data)
  143. -#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
  144. - (hsdevp)->hsdev)
  145. +#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data)
  146. +#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data)
  147. +#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data)
  148. +#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data)
  149. +#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev)
  150. enum {
  151. SATA_DWC_CMD_ISSUED_NOT = 0,
  152. @@ -190,21 +182,6 @@ enum {
  153. SATA_DWC_DMA_PENDING_RX = 2,
  154. };
  155. -struct sata_dwc_host_priv {
  156. - void __iomem *scr_addr_sstatus;
  157. - u32 sata_dwc_sactive_issued ;
  158. - u32 sata_dwc_sactive_queued ;
  159. -};
  160. -
  161. -static struct sata_dwc_host_priv host_pvt;
  162. -
  163. -static struct dw_dma_slave sata_dwc_dma_dws = {
  164. - .src_id = 0,
  165. - .dst_id = 0,
  166. - .src_master = 0,
  167. - .dst_master = 1,
  168. -};
  169. -
  170. /*
  171. * Prototypes
  172. */
  173. @@ -215,6 +192,93 @@ static void sata_dwc_dma_xfer_complete(s
  174. static void sata_dwc_port_stop(struct ata_port *ap);
  175. static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
  176. +#ifdef CONFIG_SATA_DWC_OLD_DMA
  177. +
  178. +#include <linux/platform_data/dma-dw.h>
  179. +#include <linux/dma/dw.h>
  180. +
  181. +static struct dw_dma_slave sata_dwc_dma_dws = {
  182. + .src_id = 0,
  183. + .dst_id = 0,
  184. + .m_master = 1,
  185. + .p_master = 0,
  186. +};
  187. +
  188. +static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
  189. +{
  190. + struct dw_dma_slave *dws = &sata_dwc_dma_dws;
  191. +
  192. + if (dws->dma_dev != chan->device->dev)
  193. + return false;
  194. +
  195. + chan->private = dws;
  196. + return true;
  197. +}
  198. +
  199. +static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
  200. +{
  201. + struct sata_dwc_device *hsdev = hsdevp->hsdev;
  202. + struct dw_dma_slave *dws = &sata_dwc_dma_dws;
  203. + dma_cap_mask_t mask;
  204. +
  205. + dws->dma_dev = hsdev->dev;
  206. +
  207. + dma_cap_zero(mask);
  208. + dma_cap_set(DMA_SLAVE, mask);
  209. +
  210. + /* Acquire DMA channel */
  211. + hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
  212. + if (!hsdevp->chan) {
  213. + dev_err(hsdev->dev, "%s: dma channel unavailable\n",
  214. + __func__);
  215. + return -EAGAIN;
  216. + }
  217. +
  218. + return 0;
  219. +}
  220. +
  221. +static int sata_dwc_dma_init_old(struct platform_device *pdev,
  222. + struct sata_dwc_device *hsdev)
  223. +{
  224. + struct device_node *np = pdev->dev.of_node;
  225. + struct resource *res;
  226. +
  227. + hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
  228. + if (!hsdev->dma)
  229. + return -ENOMEM;
  230. +
  231. + hsdev->dma->dev = &pdev->dev;
  232. +
  233. + /* Get SATA DMA interrupt number */
  234. + hsdev->dma->irq = irq_of_parse_and_map(np, 1);
  235. + if (hsdev->dma->irq == NO_IRQ) {
  236. + dev_err(&pdev->dev, "no SATA DMA irq\n");
  237. + return -ENODEV;
  238. + }
  239. +
  240. + /* Get physical SATA DMA register base address */
  241. + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  242. + hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
  243. + if (IS_ERR(hsdev->dma->regs)) {
  244. + dev_err(&pdev->dev,
  245. + "ioremap failed for AHBDMA register address\n");
  246. + return PTR_ERR(hsdev->dma->regs);
  247. + }
  248. +
  249. + /* Initialize AHB DMAC */
  250. + return dw_dma_probe(hsdev->dma);
  251. +}
  252. +
  253. +static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
  254. +{
  255. + if (!hsdev->dma)
  256. + return;
  257. +
  258. + dw_dma_remove(hsdev->dma);
  259. +}
  260. +
  261. +#endif
  262. +
  263. static const char *get_prot_descript(u8 protocol)
  264. {
  265. switch ((enum ata_tf_protocols)protocol) {
  266. @@ -305,21 +369,20 @@ static struct dma_async_tx_descriptor *d
  267. struct ata_port *ap = qc->ap;
  268. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  269. struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
  270. - dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr;
  271. struct dma_slave_config sconf;
  272. struct dma_async_tx_descriptor *desc;
  273. if (qc->dma_dir == DMA_DEV_TO_MEM) {
  274. - sconf.src_addr = addr;
  275. - sconf.device_fc = true;
  276. + sconf.src_addr = hsdev->dmadr;
  277. + sconf.device_fc = false;
  278. } else { /* DMA_MEM_TO_DEV */
  279. - sconf.dst_addr = addr;
  280. + sconf.dst_addr = hsdev->dmadr;
  281. sconf.device_fc = false;
  282. }
  283. sconf.direction = qc->dma_dir;
  284. - sconf.src_maxburst = AHB_DMA_BRST_DFLT;
  285. - sconf.dst_maxburst = AHB_DMA_BRST_DFLT;
  286. + sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
  287. + sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
  288. sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  289. sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  290. @@ -336,8 +399,8 @@ static struct dma_async_tx_descriptor *d
  291. desc->callback = dma_dwc_xfer_done;
  292. desc->callback_param = hsdev;
  293. - dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n",
  294. - __func__, qc->sg, qc->n_elem, &addr);
  295. + dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
  296. + qc->sg, qc->n_elem, &hsdev->dmadr);
  297. return desc;
  298. }
  299. @@ -350,48 +413,38 @@ static int sata_dwc_scr_read(struct ata_
  300. return -EINVAL;
  301. }
  302. - *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
  303. - dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
  304. - __func__, link->ap->print_id, scr, *val);
  305. + *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
  306. + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
  307. + link->ap->print_id, scr, *val);
  308. return 0;
  309. }
  310. static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
  311. {
  312. - dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
  313. - __func__, link->ap->print_id, scr, val);
  314. + dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
  315. + link->ap->print_id, scr, val);
  316. if (scr > SCR_NOTIFICATION) {
  317. dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
  318. __func__, scr);
  319. return -EINVAL;
  320. }
  321. - out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);
  322. + sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
  323. return 0;
  324. }
  325. -static u32 core_scr_read(unsigned int scr)
  326. -{
  327. - return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
  328. -}
  329. -
  330. -static void core_scr_write(unsigned int scr, u32 val)
  331. -{
  332. - out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
  333. -}
  334. -
  335. -static void clear_serror(void)
  336. +static void clear_serror(struct ata_port *ap)
  337. {
  338. u32 val;
  339. - val = core_scr_read(SCR_ERROR);
  340. - core_scr_write(SCR_ERROR, val);
  341. + sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
  342. + sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
  343. }
  344. static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
  345. {
  346. - out_le32(&hsdev->sata_dwc_regs->intpr,
  347. - in_le32(&hsdev->sata_dwc_regs->intpr));
  348. + sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
  349. + sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
  350. }
  351. static u32 qcmd_tag_to_mask(u8 tag)
  352. @@ -412,7 +465,7 @@ static void sata_dwc_error_intr(struct a
  353. ata_ehi_clear_desc(ehi);
  354. - serror = core_scr_read(SCR_ERROR);
  355. + sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
  356. status = ap->ops->sff_check_status(ap);
  357. tag = ap->link.active_tag;
  358. @@ -423,7 +476,7 @@ static void sata_dwc_error_intr(struct a
  359. hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);
  360. /* Clear error register and interrupt bit */
  361. - clear_serror();
  362. + clear_serror(ap);
  363. clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
  364. /* This is the only error happening now. TODO check for exact error */
  365. @@ -462,12 +515,12 @@ static irqreturn_t sata_dwc_isr(int irq,
  366. int handled, num_processed, port = 0;
  367. uint intpr, sactive, sactive2, tag_mask;
  368. struct sata_dwc_device_port *hsdevp;
  369. - host_pvt.sata_dwc_sactive_issued = 0;
  370. + hsdev->sactive_issued = 0;
  371. spin_lock_irqsave(&host->lock, flags);
  372. /* Read the interrupt register */
  373. - intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
  374. + intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);
  375. ap = host->ports[port];
  376. hsdevp = HSDEVP_FROM_AP(ap);
  377. @@ -486,12 +539,12 @@ static irqreturn_t sata_dwc_isr(int irq,
  378. if (intpr & SATA_DWC_INTPR_NEWFP) {
  379. clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
  380. - tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
  381. + tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
  382. dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
  383. if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
  384. dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
  385. - host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
  386. + hsdev->sactive_issued |= qcmd_tag_to_mask(tag);
  387. qc = ata_qc_from_tag(ap, tag);
  388. /*
  389. @@ -505,11 +558,11 @@ static irqreturn_t sata_dwc_isr(int irq,
  390. handled = 1;
  391. goto DONE;
  392. }
  393. - sactive = core_scr_read(SCR_ACTIVE);
  394. - tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
  395. + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
  396. + tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
  397. /* If no sactive issued and tag_mask is zero then this is not NCQ */
  398. - if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
  399. + if (hsdev->sactive_issued == 0 && tag_mask == 0) {
  400. if (ap->link.active_tag == ATA_TAG_POISON)
  401. tag = 0;
  402. else
  403. @@ -579,22 +632,19 @@ DRVSTILLBUSY:
  404. */
  405. /* process completed commands */
  406. - sactive = core_scr_read(SCR_ACTIVE);
  407. - tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
  408. + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
  409. + tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
  410. - if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
  411. - tag_mask > 1) {
  412. + if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
  413. dev_dbg(ap->dev,
  414. "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
  415. - __func__, sactive, host_pvt.sata_dwc_sactive_issued,
  416. - tag_mask);
  417. + __func__, sactive, hsdev->sactive_issued, tag_mask);
  418. }
  419. - if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
  420. - (host_pvt.sata_dwc_sactive_issued)) {
  421. + if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
  422. dev_warn(ap->dev,
  423. - "Bad tag mask? sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask=0x%08x\n",
  424. - sactive, host_pvt.sata_dwc_sactive_issued, tag_mask);
  425. + "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
  426. + sactive, hsdev->sactive_issued, tag_mask);
  427. }
  428. /* read just to clear ... not bad if currently still busy */
  429. @@ -656,7 +706,7 @@ STILLBUSY:
  430. * we were processing --we read status as part of processing a completed
  431. * command).
  432. */
  433. - sactive2 = core_scr_read(SCR_ACTIVE);
  434. + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
  435. if (sactive2 != sactive) {
  436. dev_dbg(ap->dev,
  437. "More completed - sactive=0x%x sactive2=0x%x\n",
  438. @@ -672,15 +722,14 @@ DONE:
  439. static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
  440. {
  441. struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
  442. + u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
  443. if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
  444. - out_le32(&(hsdev->sata_dwc_regs->dmacr),
  445. - SATA_DWC_DMACR_RX_CLEAR(
  446. - in_le32(&(hsdev->sata_dwc_regs->dmacr))));
  447. + dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
  448. + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
  449. } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
  450. - out_le32(&(hsdev->sata_dwc_regs->dmacr),
  451. - SATA_DWC_DMACR_TX_CLEAR(
  452. - in_le32(&(hsdev->sata_dwc_regs->dmacr))));
  453. + dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
  454. + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
  455. } else {
  456. /*
  457. * This should not happen, it indicates the driver is out of
  458. @@ -688,10 +737,9 @@ static void sata_dwc_clear_dmacr(struct
  459. */
  460. dev_err(hsdev->dev,
  461. "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
  462. - __func__, tag, hsdevp->dma_pending[tag],
  463. - in_le32(&hsdev->sata_dwc_regs->dmacr));
  464. - out_le32(&(hsdev->sata_dwc_regs->dmacr),
  465. - SATA_DWC_DMACR_TXRXCH_CLEAR);
  466. + __func__, tag, hsdevp->dma_pending[tag], dmacr);
  467. + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  468. + SATA_DWC_DMACR_TXRXCH_CLEAR);
  469. }
  470. }
  471. @@ -716,7 +764,7 @@ static void sata_dwc_dma_xfer_complete(s
  472. __func__, qc->tag, qc->tf.command,
  473. get_dma_dir_descript(qc->dma_dir),
  474. get_prot_descript(qc->tf.protocol),
  475. - in_le32(&(hsdev->sata_dwc_regs->dmacr)));
  476. + sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
  477. }
  478. #endif
  479. @@ -725,7 +773,7 @@ static void sata_dwc_dma_xfer_complete(s
  480. dev_err(ap->dev,
  481. "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
  482. __func__,
  483. - in_le32(&(hsdev->sata_dwc_regs->dmacr)));
  484. + sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
  485. }
  486. hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
  487. @@ -742,8 +790,9 @@ static int sata_dwc_qc_complete(struct a
  488. u8 status = 0;
  489. u32 mask = 0x0;
  490. u8 tag = qc->tag;
  491. + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
  492. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  493. - host_pvt.sata_dwc_sactive_queued = 0;
  494. + hsdev->sactive_queued = 0;
  495. dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
  496. if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
  497. @@ -756,10 +805,8 @@ static int sata_dwc_qc_complete(struct a
  498. /* clear active bit */
  499. mask = (~(qcmd_tag_to_mask(tag)));
  500. - host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
  501. - & mask;
  502. - host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
  503. - & mask;
  504. + hsdev->sactive_queued = hsdev->sactive_queued & mask;
  505. + hsdev->sactive_issued = hsdev->sactive_issued & mask;
  506. ata_qc_complete(qc);
  507. return 0;
  508. }
  509. @@ -767,54 +814,62 @@ static int sata_dwc_qc_complete(struct a
  510. static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
  511. {
  512. /* Enable selective interrupts by setting the interrupt maskregister*/
  513. - out_le32(&hsdev->sata_dwc_regs->intmr,
  514. - SATA_DWC_INTMR_ERRM |
  515. - SATA_DWC_INTMR_NEWFPM |
  516. - SATA_DWC_INTMR_PMABRTM |
  517. - SATA_DWC_INTMR_DMATM);
  518. + sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
  519. + SATA_DWC_INTMR_ERRM |
  520. + SATA_DWC_INTMR_NEWFPM |
  521. + SATA_DWC_INTMR_PMABRTM |
  522. + SATA_DWC_INTMR_DMATM);
  523. /*
  524. * Unmask the error bits that should trigger an error interrupt by
  525. * setting the error mask register.
  526. */
  527. - out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
  528. + sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
  529. dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
  530. - __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
  531. - in_le32(&hsdev->sata_dwc_regs->errmr));
  532. + __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
  533. + sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
  534. }
  535. -static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
  536. +static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
  537. {
  538. - struct sata_dwc_device_port *hsdevp = param;
  539. - struct dw_dma_slave *dws = hsdevp->dws;
  540. + port->cmd_addr = base + 0x00;
  541. + port->data_addr = base + 0x00;
  542. - if (dws->dma_dev != chan->device->dev)
  543. - return false;
  544. + port->error_addr = base + 0x04;
  545. + port->feature_addr = base + 0x04;
  546. - chan->private = dws;
  547. - return true;
  548. -}
  549. + port->nsect_addr = base + 0x08;
  550. -static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
  551. -{
  552. - port->cmd_addr = (void __iomem *)base + 0x00;
  553. - port->data_addr = (void __iomem *)base + 0x00;
  554. + port->lbal_addr = base + 0x0c;
  555. + port->lbam_addr = base + 0x10;
  556. + port->lbah_addr = base + 0x14;
  557. +
  558. + port->device_addr = base + 0x18;
  559. + port->command_addr = base + 0x1c;
  560. + port->status_addr = base + 0x1c;
  561. - port->error_addr = (void __iomem *)base + 0x04;
  562. - port->feature_addr = (void __iomem *)base + 0x04;
  563. + port->altstatus_addr = base + 0x20;
  564. + port->ctl_addr = base + 0x20;
  565. +}
  566. - port->nsect_addr = (void __iomem *)base + 0x08;
  567. +static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
  568. +{
  569. + struct sata_dwc_device *hsdev = hsdevp->hsdev;
  570. + struct device *dev = hsdev->dev;
  571. - port->lbal_addr = (void __iomem *)base + 0x0c;
  572. - port->lbam_addr = (void __iomem *)base + 0x10;
  573. - port->lbah_addr = (void __iomem *)base + 0x14;
  574. +#ifdef CONFIG_SATA_DWC_OLD_DMA
  575. + if (!of_find_property(dev->of_node, "dmas", NULL))
  576. + return sata_dwc_dma_get_channel_old(hsdevp);
  577. +#endif
  578. - port->device_addr = (void __iomem *)base + 0x18;
  579. - port->command_addr = (void __iomem *)base + 0x1c;
  580. - port->status_addr = (void __iomem *)base + 0x1c;
  581. + hsdevp->chan = dma_request_chan(dev, "sata-dma");
  582. + if (IS_ERR(hsdevp->chan)) {
  583. + dev_err(dev, "failed to allocate dma channel: %ld\n",
  584. + PTR_ERR(hsdevp->chan));
  585. + return PTR_ERR(hsdevp->chan);
  586. + }
  587. - port->altstatus_addr = (void __iomem *)base + 0x20;
  588. - port->ctl_addr = (void __iomem *)base + 0x20;
  589. + return 0;
  590. }
  591. /*
  592. @@ -829,7 +884,6 @@ static int sata_dwc_port_start(struct at
  593. struct sata_dwc_device *hsdev;
  594. struct sata_dwc_device_port *hsdevp = NULL;
  595. struct device *pdev;
  596. - dma_cap_mask_t mask;
  597. int i;
  598. hsdev = HSDEV_FROM_AP(ap);
  599. @@ -853,20 +907,13 @@ static int sata_dwc_port_start(struct at
  600. }
  601. hsdevp->hsdev = hsdev;
  602. - hsdevp->dws = &sata_dwc_dma_dws;
  603. - hsdevp->dws->dma_dev = hsdev->dev;
  604. -
  605. - dma_cap_zero(mask);
  606. - dma_cap_set(DMA_SLAVE, mask);
  607. + err = sata_dwc_dma_get_channel(hsdevp);
  608. + if (err)
  609. + goto CLEANUP_ALLOC;
  610. - /* Acquire DMA channel */
  611. - hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
  612. - if (!hsdevp->chan) {
  613. - dev_err(hsdev->dev, "%s: dma channel unavailable\n",
  614. - __func__);
  615. - err = -EAGAIN;
  616. + err = phy_power_on(hsdev->phy);
  617. + if (err)
  618. goto CLEANUP_ALLOC;
  619. - }
  620. for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
  621. hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
  622. @@ -877,18 +924,18 @@ static int sata_dwc_port_start(struct at
  623. if (ap->port_no == 0) {
  624. dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
  625. __func__);
  626. - out_le32(&hsdev->sata_dwc_regs->dmacr,
  627. - SATA_DWC_DMACR_TXRXCH_CLEAR);
  628. + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  629. + SATA_DWC_DMACR_TXRXCH_CLEAR);
  630. dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
  631. __func__);
  632. - out_le32(&hsdev->sata_dwc_regs->dbtsr,
  633. - (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
  634. - SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
  635. + sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
  636. + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
  637. + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
  638. }
  639. /* Clear any error bits before libata starts issuing commands */
  640. - clear_serror();
  641. + clear_serror(ap);
  642. ap->private_data = hsdevp;
  643. dev_dbg(ap->dev, "%s: done\n", __func__);
  644. return 0;
  645. @@ -903,11 +950,13 @@ CLEANUP:
  646. static void sata_dwc_port_stop(struct ata_port *ap)
  647. {
  648. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  649. + struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
  650. dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
  651. - dmaengine_terminate_all(hsdevp->chan);
  652. + dmaengine_terminate_sync(hsdevp->chan);
  653. dma_release_channel(hsdevp->chan);
  654. + phy_power_off(hsdev->phy);
  655. kfree(hsdevp);
  656. ap->private_data = NULL;
  657. @@ -924,22 +973,20 @@ static void sata_dwc_exec_command_by_tag
  658. struct ata_taskfile *tf,
  659. u8 tag, u32 cmd_issued)
  660. {
  661. - unsigned long flags;
  662. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  663. dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
  664. ata_get_cmd_descript(tf->command), tag);
  665. - spin_lock_irqsave(&ap->host->lock, flags);
  666. hsdevp->cmd_issued[tag] = cmd_issued;
  667. - spin_unlock_irqrestore(&ap->host->lock, flags);
  668. +
  669. /*
  670. * Clear SError before executing a new command.
  671. * sata_dwc_scr_write and read can not be used here. Clearing the PM
  672. * managed SError register for the disk needs to be done before the
  673. * task file is loaded.
  674. */
  675. - clear_serror();
  676. + clear_serror(ap);
  677. ata_sff_exec_command(ap, tf);
  678. }
  679. @@ -992,18 +1039,18 @@ static void sata_dwc_bmdma_start_by_tag(
  680. sata_dwc_tf_dump(ap, &qc->tf);
  681. if (start_dma) {
  682. - reg = core_scr_read(SCR_ERROR);
  683. + sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
  684. if (reg & SATA_DWC_SERROR_ERR_BITS) {
  685. dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
  686. __func__, reg);
  687. }
  688. if (dir == DMA_TO_DEVICE)
  689. - out_le32(&hsdev->sata_dwc_regs->dmacr,
  690. - SATA_DWC_DMACR_TXCHEN);
  691. + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  692. + SATA_DWC_DMACR_TXCHEN);
  693. else
  694. - out_le32(&hsdev->sata_dwc_regs->dmacr,
  695. - SATA_DWC_DMACR_RXCHEN);
  696. + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  697. + SATA_DWC_DMACR_RXCHEN);
  698. /* Enable AHB DMA transfer on the specified channel */
  699. dmaengine_submit(desc);
  700. @@ -1025,36 +1072,12 @@ static void sata_dwc_bmdma_start(struct
  701. sata_dwc_bmdma_start_by_tag(qc, tag);
  702. }
  703. -/*
  704. - * Function : sata_dwc_qc_prep_by_tag
  705. - * arguments : ata_queued_cmd *qc, u8 tag
  706. - * Return value : None
  707. - * qc_prep for a particular queued command based on tag
  708. - */
  709. -static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
  710. -{
  711. - struct dma_async_tx_descriptor *desc;
  712. - struct ata_port *ap = qc->ap;
  713. - struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  714. -
  715. - dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
  716. - __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
  717. - qc->n_elem);
  718. -
  719. - desc = dma_dwc_xfer_setup(qc);
  720. - if (!desc) {
  721. - dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n",
  722. - __func__);
  723. - return;
  724. - }
  725. - hsdevp->desc[tag] = desc;
  726. -}
  727. -
  728. static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
  729. {
  730. u32 sactive;
  731. u8 tag = qc->tag;
  732. struct ata_port *ap = qc->ap;
  733. + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  734. #ifdef DEBUG_NCQ
  735. if (qc->tag > 0 || ap->link.sactive > 1)
  736. @@ -1068,47 +1091,33 @@ static unsigned int sata_dwc_qc_issue(st
  737. if (!ata_is_ncq(qc->tf.protocol))
  738. tag = 0;
  739. - sata_dwc_qc_prep_by_tag(qc, tag);
  740. +
  741. + if (ata_is_dma(qc->tf.protocol)) {
  742. + hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
  743. + if (!hsdevp->desc[tag])
  744. + return AC_ERR_SYSTEM;
  745. + } else {
  746. + hsdevp->desc[tag] = NULL;
  747. + }
  748. if (ata_is_ncq(qc->tf.protocol)) {
  749. - sactive = core_scr_read(SCR_ACTIVE);
  750. + sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
  751. sactive |= (0x00000001 << tag);
  752. - core_scr_write(SCR_ACTIVE, sactive);
  753. + sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
  754. dev_dbg(qc->ap->dev,
  755. "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
  756. __func__, tag, qc->ap->link.sactive, sactive);
  757. ap->ops->sff_tf_load(ap, &qc->tf);
  758. - sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
  759. + sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
  760. SATA_DWC_CMD_ISSUED_PEND);
  761. } else {
  762. - ata_sff_qc_issue(qc);
  763. + return ata_bmdma_qc_issue(qc);
  764. }
  765. return 0;
  766. }
  767. -/*
  768. - * Function : sata_dwc_qc_prep
  769. - * arguments : ata_queued_cmd *qc
  770. - * Return value : None
  771. - * qc_prep for a particular queued command
  772. - */
  773. -
  774. -static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
  775. -{
  776. - if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
  777. - return;
  778. -
  779. -#ifdef DEBUG_NCQ
  780. - if (qc->tag > 0)
  781. - dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
  782. - __func__, qc->tag, qc->ap->link.active_tag);
  783. -
  784. - return ;
  785. -#endif
  786. -}
  787. -
  788. static void sata_dwc_error_handler(struct ata_port *ap)
  789. {
  790. ata_sff_error_handler(ap);
  791. @@ -1125,17 +1134,22 @@ static int sata_dwc_hardreset(struct ata
  792. sata_dwc_enable_interrupts(hsdev);
  793. /* Reconfigure the DMA control register */
  794. - out_le32(&hsdev->sata_dwc_regs->dmacr,
  795. - SATA_DWC_DMACR_TXRXCH_CLEAR);
  796. + sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  797. + SATA_DWC_DMACR_TXRXCH_CLEAR);
  798. /* Reconfigure the DMA Burst Transaction Size register */
  799. - out_le32(&hsdev->sata_dwc_regs->dbtsr,
  800. - SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
  801. - SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
  802. + sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
  803. + SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
  804. + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
  805. return ret;
  806. }
  807. +static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
  808. +{
  809. + /* SATA DWC is master only */
  810. +}
  811. +
  812. /*
  813. * scsi mid-layer and libata interface structures
  814. */
  815. @@ -1148,7 +1162,13 @@ static struct scsi_host_template sata_dw
  816. */
  817. .sg_tablesize = LIBATA_MAX_PRD,
  818. /* .can_queue = ATA_MAX_QUEUE, */
  819. - .dma_boundary = ATA_DMA_BOUNDARY,
  820. + /*
  821. + * Make sure a LLI block is not created that will span 8K max FIS
  822. + * boundary. If the block spans such a FIS boundary, there is a chance
  823. + * that a DMA burst will cross that boundary -- this results in an
  824. + * error in the host controller.
  825. + */
  826. + .dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */,
  827. };
  828. static struct ata_port_operations sata_dwc_ops = {
  829. @@ -1157,7 +1177,6 @@ static struct ata_port_operations sata_d
  830. .error_handler = sata_dwc_error_handler,
  831. .hardreset = sata_dwc_hardreset,
  832. - .qc_prep = sata_dwc_qc_prep,
  833. .qc_issue = sata_dwc_qc_issue,
  834. .scr_read = sata_dwc_scr_read,
  835. @@ -1166,6 +1185,8 @@ static struct ata_port_operations sata_d
  836. .port_start = sata_dwc_port_start,
  837. .port_stop = sata_dwc_port_stop,
  838. + .sff_dev_select = sata_dwc_dev_select,
  839. +
  840. .bmdma_setup = sata_dwc_bmdma_setup,
  841. .bmdma_start = sata_dwc_bmdma_start,
  842. };
  843. @@ -1184,13 +1205,14 @@ static int sata_dwc_probe(struct platfor
  844. struct sata_dwc_device *hsdev;
  845. u32 idr, versionr;
  846. char *ver = (char *)&versionr;
  847. - u8 __iomem *base;
  848. + void __iomem *base;
  849. int err = 0;
  850. int irq;
  851. struct ata_host *host;
  852. struct ata_port_info pi = sata_dwc_port_info[0];
  853. const struct ata_port_info *ppi[] = { &pi, NULL };
  854. struct device_node *np = ofdev->dev.of_node;
  855. + struct resource *res;
  856. /* Allocate DWC SATA device */
  857. host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
  858. @@ -1201,57 +1223,33 @@ static int sata_dwc_probe(struct platfor
  859. host->private_data = hsdev;
  860. /* Ioremap SATA registers */
  861. - base = of_iomap(np, 0);
  862. - if (!base) {
  863. + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
  864. + base = devm_ioremap_resource(&ofdev->dev, res);
  865. + if (IS_ERR(base)) {
  866. dev_err(&ofdev->dev,
  867. "ioremap failed for SATA register address\n");
  868. - return -ENODEV;
  869. + return PTR_ERR(base);
  870. }
  871. - hsdev->reg_base = base;
  872. dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
  873. /* Synopsys DWC SATA specific Registers */
  874. - hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
  875. + hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
  876. + hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
  877. /* Setup port */
  878. host->ports[0]->ioaddr.cmd_addr = base;
  879. host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
  880. - host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
  881. - sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
  882. + sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
  883. /* Read the ID and Version Registers */
  884. - idr = in_le32(&hsdev->sata_dwc_regs->idr);
  885. - versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
  886. + idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
  887. + versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
  888. dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
  889. idr, ver[0], ver[1], ver[2]);
  890. - /* Get SATA DMA interrupt number */
  891. - hsdev->dma->irq = irq_of_parse_and_map(np, 1);
  892. - if (hsdev->dma->irq == NO_IRQ) {
  893. - dev_err(&ofdev->dev, "no SATA DMA irq\n");
  894. - err = -ENODEV;
  895. - goto error_iomap;
  896. - }
  897. -
  898. - /* Get physical SATA DMA register base address */
  899. - hsdev->dma->regs = of_iomap(np, 1);
  900. - if (!hsdev->dma->regs) {
  901. - dev_err(&ofdev->dev,
  902. - "ioremap failed for AHBDMA register address\n");
  903. - err = -ENODEV;
  904. - goto error_iomap;
  905. - }
  906. -
  907. /* Save dev for later use in dev_xxx() routines */
  908. hsdev->dev = &ofdev->dev;
  909. - hsdev->dma->dev = &ofdev->dev;
  910. -
  911. - /* Initialize AHB DMAC */
  912. - err = dw_dma_probe(hsdev->dma, NULL);
  913. - if (err)
  914. - goto error_dma_iomap;
  915. -
  916. /* Enable SATA Interrupts */
  917. sata_dwc_enable_interrupts(hsdev);
  918. @@ -1263,6 +1261,25 @@ static int sata_dwc_probe(struct platfor
  919. goto error_out;
  920. }
  921. +#ifdef CONFIG_SATA_DWC_OLD_DMA
  922. + if (!of_find_property(np, "dmas", NULL)) {
  923. + err = sata_dwc_dma_init_old(ofdev, hsdev);
  924. + if (err)
  925. + goto error_out;
  926. + }
  927. +#endif
  928. +
  929. + hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
  930. + if (IS_ERR(hsdev->phy)) {
  931. + err = PTR_ERR(hsdev->phy);
  932. + hsdev->phy = NULL;
  933. + goto error_out;
  934. + }
  935. +
  936. + err = phy_init(hsdev->phy);
  937. + if (err)
  938. + goto error_out;
  939. +
  940. /*
  941. * Now, register with libATA core, this will also initiate the
  942. * device discovery process, invoking our port_start() handler &
  943. @@ -1276,12 +1293,7 @@ static int sata_dwc_probe(struct platfor
  944. return 0;
  945. error_out:
  946. - /* Free SATA DMA resources */
  947. - dw_dma_remove(hsdev->dma);
  948. -error_dma_iomap:
  949. - iounmap(hsdev->dma->regs);
  950. -error_iomap:
  951. - iounmap(base);
  952. + phy_exit(hsdev->phy);
  953. return err;
  954. }
  955. @@ -1293,11 +1305,13 @@ static int sata_dwc_remove(struct platfo
  956. ata_host_detach(host);
  957. + phy_exit(hsdev->phy);
  958. +
  959. +#ifdef CONFIG_SATA_DWC_OLD_DMA
  960. /* Free SATA DMA resources */
  961. - dw_dma_remove(hsdev->dma);
  962. + sata_dwc_dma_exit_old(hsdev);
  963. +#endif
  964. - iounmap(hsdev->dma->regs);
  965. - iounmap(hsdev->reg_base);
  966. dev_dbg(&ofdev->dev, "done\n");
  967. return 0;
  968. }