0031-mtd-spi-nor-Add-driver-for-Cadence-Quad-SPI-Flash-Co.patch 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431
  1. From 30e33517815b3c518fc2483a23bfe1445c0ae92d Mon Sep 17 00:00:00 2001
  2. From: Graham Moore <grmoore@opensource.altera.com>
  3. Date: Tue, 28 Jul 2015 12:38:03 -0500
  4. Subject: [PATCH 31/33] mtd: spi-nor: Add driver for Cadence Quad SPI Flash
  5. Controller.
  6. Add support for the Cadence QSPI controller. This controller is
  7. present in the Altera SoCFPGA SoCs and this driver has been tested
  8. on the Cyclone V SoC.
  9. Signed-off-by: Graham Moore <grmoore@opensource.altera.com>
  10. Signed-off-by: Marek Vasut <marex@denx.de>
  11. Cc: Alan Tull <atull@opensource.altera.com>
  12. Cc: Brian Norris <computersforpeace@gmail.com>
  13. Cc: David Woodhouse <dwmw2@infradead.org>
  14. Cc: Dinh Nguyen <dinguyen@opensource.altera.com>
  15. Cc: Graham Moore <grmoore@opensource.altera.com>
  16. Cc: Vignesh R <vigneshr@ti.com>
  17. Cc: Yves Vandervennet <yvanderv@opensource.altera.com>
  18. Cc: devicetree@vger.kernel.org
  19. V2: use NULL instead of modalias in spi_nor_scan call
  20. V3: Use existing property is-decoded-cs instead of creating a duplicate.
  21. V4: Support Micron quad mode by snooping command stream for EVCR command
  22. and subsequently configuring Cadence controller for quad mode.
  23. V5: Clean up sparse and smatch complaints. Remove snooping of Micron
  24. quad mode. Add comment on XIP mode bit and dummy clock cycles. Set
  25. up SRAM partition at 1:1 during init.
  26. V6: Remove dts patch that was included by mistake. Incorporate Vikas's
  27. comments regarding fifo width, SRAM partition setting, and trigger
  28. address. Trigger address was added as an unsigned int, as it is not
  29. an IO resource per se, and does not need to be mapped. Also add
  30. Marek Vasut's workaround for picking up OF properties on subnodes.
  31. V7: - Perform coding-style cleanup and type fixes. Remove ugly QSPI_*()
  32. macros and replace them with functions. Get rid of unused variables.
  33. - Implement support for nor->set_protocol() to handle Quad-command,
  34. this patch now depends on the following patch:
  35. mtd: spi-nor: notify (Q)SPI controller about protocol change
  36. - Replace that cqspi_fifo_read() disaster with plain old readsl()
  37. and cqspi_fifo_write() tentacle horror with pretty writesl().
  38. - Remove CQSPI_SUPPORT_XIP_CHIPS, which is broken.
  39. - Get rid of cqspi_find_chipselect() mess, instead just place the
  40. struct cqspi_st and chipselect number into struct cqspi_flash_pdata
  41. and set nor->priv to the struct cqspi_flash_pdata of that particular
  42. chip.
  43. - Replace the odd math in calculate_ticks_for_ns() with DIV_ROUND_UP().
  44. - Make variables const where applicable.
  45. V8: - Implement a function to wait for bit being set/unset for a given
  46. period of time and use it to replace the ad-hoc bits of code.
  47. - Configure the write underflow watermark to be 1/8 of the FIFO size.
  48. - Extract out the SPI NOR flash probing code into separate function
  49. to clearly mark what will soon be considered a boilerplate code.
  50. - Repair the handling of mode bits, which caused instability in V7.
  51. - Clean up the interrupt handling
  52. - Fix Kconfig help text and make the patch depend on OF and COMPILE_TEST.
  53. V9: - Rename CQSPI_REG_IRQ_IND_RD_OVERFLOW to CQSPI_REG_IRQ_IND_SRAM_FULL
  54. - Merge cqspi_controller_disable() into cqspi_controller_enable() and
  55. make the mode selectable via parameter.
  56. V10: - Update against Cyrille's new patchset and changes to linux-mtd.
  57. - Repair problem with multiple QSPI NOR devices having the same mtd->name,
  58. they are now named devname.cs, where cs is the chipselect ID.
  59. V11: - Replace dependency on ARCH_SOCFPGA with dependency on ARM
  60. ---
  61. drivers/mtd/spi-nor/Kconfig | 11 +
  62. drivers/mtd/spi-nor/Makefile | 1 +
  63. drivers/mtd/spi-nor/cadence-quadspi.c | 1324 +++++++++++++++++++++++++++++++++
  64. 3 files changed, 1336 insertions(+)
  65. create mode 100644 drivers/mtd/spi-nor/cadence-quadspi.c
  66. diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
  67. index 2fe2a7e..02082ae 100644
  68. --- a/drivers/mtd/spi-nor/Kconfig
  69. +++ b/drivers/mtd/spi-nor/Kconfig
  70. @@ -41,4 +41,15 @@ config SPI_NXP_SPIFI
  71. Flash. Enable this option if you have a device with a SPIFI
  72. controller and want to access the Flash as a mtd device.
  73. +config SPI_CADENCE_QUADSPI
  74. + tristate "Cadence Quad SPI controller"
  75. + depends on OF && (ARM || COMPILE_TEST)
  76. + help
  77. + Enable support for the Cadence Quad SPI Flash controller.
  78. +
  79. + Cadence QSPI is a specialized controller for connecting an SPI
  80. + Flash over 1/2/4-bit wide bus. Enable this option if you have a
  81. + device with a Cadence QSPI controller and want to access the
  82. + Flash as an MTD device.
  83. +
  84. endif # MTD_SPI_NOR
  85. diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
  86. index e53333e..446c6b9 100644
  87. --- a/drivers/mtd/spi-nor/Makefile
  88. +++ b/drivers/mtd/spi-nor/Makefile
  89. @@ -1,3 +1,4 @@
  90. obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
  91. +obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
  92. obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
  93. obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
  94. diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
  95. new file mode 100644
  96. index 0000000..7e61fba
  97. --- /dev/null
  98. +++ b/drivers/mtd/spi-nor/cadence-quadspi.c
  99. @@ -0,0 +1,1324 @@
  100. +/*
  101. + * Driver for Cadence QSPI Controller
  102. + *
  103. + * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
  104. + *
  105. + * This program is free software; you can redistribute it and/or modify
  106. + * it under the terms and conditions of the GNU General Public License,
  107. + * version 2, as published by the Free Software Foundation.
  108. + *
  109. + * This program is distributed in the hope it will be useful, but WITHOUT
  110. + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  111. + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  112. + * more details.
  113. + *
  114. + * You should have received a copy of the GNU General Public License along with
  115. + * this program. If not, see <http://www.gnu.org/licenses/>.
  116. + */
  117. +#include <linux/clk.h>
  118. +#include <linux/completion.h>
  119. +#include <linux/delay.h>
  120. +#include <linux/err.h>
  121. +#include <linux/errno.h>
  122. +#include <linux/interrupt.h>
  123. +#include <linux/io.h>
  124. +#include <linux/jiffies.h>
  125. +#include <linux/kernel.h>
  126. +#include <linux/module.h>
  127. +#include <linux/mtd/mtd.h>
  128. +#include <linux/mtd/partitions.h>
  129. +#include <linux/mtd/spi-nor.h>
  130. +#include <linux/of_device.h>
  131. +#include <linux/of.h>
  132. +#include <linux/platform_device.h>
  133. +#include <linux/sched.h>
  134. +#include <linux/spi/spi.h>
  135. +#include <linux/timer.h>
  136. +
  137. +#define CQSPI_NAME "cadence-qspi"
  138. +#define CQSPI_MAX_CHIPSELECT 16
  139. +
  140. +struct cqspi_st;
  141. +
  142. +struct cqspi_flash_pdata {
  143. + struct spi_nor nor;
  144. + struct cqspi_st *cqspi;
  145. + u32 clk_rate;
  146. + u32 read_delay;
  147. + u32 tshsl_ns;
  148. + u32 tsd2d_ns;
  149. + u32 tchsh_ns;
  150. + u32 tslch_ns;
  151. + u8 inst_width;
  152. + u8 addr_width;
  153. + u8 cs;
  154. +};
  155. +
  156. +struct cqspi_st {
  157. + struct platform_device *pdev;
  158. +
  159. + struct clk *clk;
  160. + unsigned int sclk;
  161. +
  162. + void __iomem *iobase;
  163. + void __iomem *ahb_base;
  164. + struct completion transfer_complete;
  165. + struct mutex bus_mutex;
  166. +
  167. + int current_cs;
  168. + unsigned long master_ref_clk_hz;
  169. + bool is_decoded_cs;
  170. + u32 fifo_depth;
  171. + u32 fifo_width;
  172. + u32 trigger_address;
  173. + struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
  174. +};
  175. +
  176. +/* Operation timeout value */
  177. +#define CQSPI_TIMEOUT_MS 500
  178. +#define CQSPI_READ_TIMEOUT_MS 10
  179. +
  180. +/* Instruction type */
  181. +#define CQSPI_INST_TYPE_SINGLE 0
  182. +#define CQSPI_INST_TYPE_DUAL 1
  183. +#define CQSPI_INST_TYPE_QUAD 2
  184. +
  185. +#define CQSPI_DUMMY_CLKS_PER_BYTE 8
  186. +#define CQSPI_DUMMY_BYTES_MAX 4
  187. +#define CQSPI_DUMMY_CLKS_MAX 31
  188. +
  189. +#define CQSPI_STIG_DATA_LEN_MAX 8
  190. +
  191. +/* Register map */
  192. +#define CQSPI_REG_CONFIG 0x00
  193. +#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
  194. +#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
  195. +#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
  196. +#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
  197. +#define CQSPI_REG_CONFIG_BAUD_LSB 19
  198. +#define CQSPI_REG_CONFIG_IDLE_LSB 31
  199. +#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
  200. +#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
  201. +
  202. +#define CQSPI_REG_RD_INSTR 0x04
  203. +#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
  204. +#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
  205. +#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
  206. +#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
  207. +#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
  208. +#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
  209. +#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
  210. +#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
  211. +#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
  212. +#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
  213. +
  214. +#define CQSPI_REG_WR_INSTR 0x08
  215. +#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
  216. +#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
  217. +#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
  218. +
  219. +#define CQSPI_REG_DELAY 0x0C
  220. +#define CQSPI_REG_DELAY_TSLCH_LSB 0
  221. +#define CQSPI_REG_DELAY_TCHSH_LSB 8
  222. +#define CQSPI_REG_DELAY_TSD2D_LSB 16
  223. +#define CQSPI_REG_DELAY_TSHSL_LSB 24
  224. +#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
  225. +#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
  226. +#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
  227. +#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
  228. +
  229. +#define CQSPI_REG_READCAPTURE 0x10
  230. +#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
  231. +#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
  232. +#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
  233. +
  234. +#define CQSPI_REG_SIZE 0x14
  235. +#define CQSPI_REG_SIZE_ADDRESS_LSB 0
  236. +#define CQSPI_REG_SIZE_PAGE_LSB 4
  237. +#define CQSPI_REG_SIZE_BLOCK_LSB 16
  238. +#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
  239. +#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
  240. +#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
  241. +
  242. +#define CQSPI_REG_SRAMPARTITION 0x18
  243. +#define CQSPI_REG_INDIRECTTRIGGER 0x1C
  244. +
  245. +#define CQSPI_REG_DMA 0x20
  246. +#define CQSPI_REG_DMA_SINGLE_LSB 0
  247. +#define CQSPI_REG_DMA_BURST_LSB 8
  248. +#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
  249. +#define CQSPI_REG_DMA_BURST_MASK 0xFF
  250. +
  251. +#define CQSPI_REG_REMAP 0x24
  252. +#define CQSPI_REG_MODE_BIT 0x28
  253. +
  254. +#define CQSPI_REG_SDRAMLEVEL 0x2C
  255. +#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
  256. +#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
  257. +#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
  258. +#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
  259. +
  260. +#define CQSPI_REG_IRQSTATUS 0x40
  261. +#define CQSPI_REG_IRQMASK 0x44
  262. +
  263. +#define CQSPI_REG_INDIRECTRD 0x60
  264. +#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
  265. +#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
  266. +#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
  267. +
  268. +#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
  269. +#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
  270. +#define CQSPI_REG_INDIRECTRDBYTES 0x6C
  271. +
  272. +#define CQSPI_REG_CMDCTRL 0x90
  273. +#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
  274. +#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
  275. +#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
  276. +#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
  277. +#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
  278. +#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
  279. +#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
  280. +#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
  281. +#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
  282. +#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
  283. +#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
  284. +#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
  285. +
  286. +#define CQSPI_REG_INDIRECTWR 0x70
  287. +#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
  288. +#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
  289. +#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
  290. +
  291. +#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
  292. +#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
  293. +#define CQSPI_REG_INDIRECTWRBYTES 0x7C
  294. +
  295. +#define CQSPI_REG_CMDADDRESS 0x94
  296. +#define CQSPI_REG_CMDREADDATALOWER 0xA0
  297. +#define CQSPI_REG_CMDREADDATAUPPER 0xA4
  298. +#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
  299. +#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
  300. +
  301. +/* Interrupt status bits */
  302. +#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
  303. +#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
  304. +#define CQSPI_REG_IRQ_IND_COMP BIT(2)
  305. +#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
  306. +#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
  307. +#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
  308. +#define CQSPI_REG_IRQ_WATERMARK BIT(6)
  309. +#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)
  310. +
  311. +#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
  312. + CQSPI_REG_IRQ_IND_SRAM_FULL | \
  313. + CQSPI_REG_IRQ_IND_COMP)
  314. +
  315. +#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
  316. + CQSPI_REG_IRQ_WATERMARK | \
  317. + CQSPI_REG_IRQ_UNDERFLOW)
  318. +
  319. +#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
  320. +
  321. +static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
  322. +{
  323. + unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
  324. + u32 val;
  325. +
  326. + while (1) {
  327. + val = readl(reg);
  328. + if (clear)
  329. + val = ~val;
  330. + val &= mask;
  331. +
  332. + if (val == mask)
  333. + return 0;
  334. +
  335. + if (time_after(jiffies, end))
  336. + return -ETIMEDOUT;
  337. + }
  338. +}
  339. +
  340. +static bool cqspi_is_idle(struct cqspi_st *cqspi)
  341. +{
  342. + u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
  343. +
  344. + return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
  345. +}
  346. +
  347. +static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
  348. +{
  349. + u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
  350. +
  351. + reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
  352. + return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
  353. +}
  354. +
  355. +static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
  356. +{
  357. + struct cqspi_st *cqspi = dev;
  358. + unsigned int irq_status;
  359. +
  360. + /* Read interrupt status */
  361. + irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
  362. +
  363. + /* Clear interrupt */
  364. + writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
  365. +
  366. + irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
  367. +
  368. + if (irq_status)
  369. + complete(&cqspi->transfer_complete);
  370. +
  371. + return IRQ_HANDLED;
  372. +}
  373. +
  374. +static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
  375. +{
  376. + unsigned int rdreg = 0;
  377. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  378. +
  379. + rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
  380. + rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
  381. +
  382. + if (nor->flash_read == SPI_NOR_QUAD)
  383. + rdreg |= CQSPI_INST_TYPE_QUAD
  384. + << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
  385. + return rdreg;
  386. +}
  387. +
  388. +static int cqspi_wait_idle(struct cqspi_st *cqspi)
  389. +{
  390. + const unsigned int poll_idle_retry = 3;
  391. + unsigned int count = 0;
  392. + unsigned long timeout;
  393. +
  394. + timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
  395. + while (1) {
  396. + /*
  397. + * Read few times in succession to ensure the controller
  398. + * is indeed idle, that is, the bit does not transition
  399. + * low again.
  400. + */
  401. + if (cqspi_is_idle(cqspi))
  402. + count++;
  403. + else
  404. + count = 0;
  405. +
  406. + if (count >= poll_idle_retry)
  407. + return 0;
  408. +
  409. + if (time_after(jiffies, timeout)) {
  410. + /* Timeout, in busy mode. */
  411. + dev_err(&cqspi->pdev->dev,
  412. + "QSPI is still busy after %dms timeout.\n",
  413. + CQSPI_TIMEOUT_MS);
  414. + return -ETIMEDOUT;
  415. + }
  416. +
  417. + cpu_relax();
  418. + }
  419. +}
  420. +
  421. +static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
  422. +{
  423. + void __iomem *reg_base = cqspi->iobase;
  424. + int ret;
  425. +
  426. + /* Write the CMDCTRL without start execution. */
  427. + writel(reg, reg_base + CQSPI_REG_CMDCTRL);
  428. + /* Start execute */
  429. + reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
  430. + writel(reg, reg_base + CQSPI_REG_CMDCTRL);
  431. +
  432. + /* Polling for completion. */
  433. + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
  434. + CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
  435. + if (ret) {
  436. + dev_err(&cqspi->pdev->dev,
  437. + "Flash command execution timed out.\n");
  438. + return ret;
  439. + }
  440. +
  441. + /* Polling QSPI idle status. */
  442. + return cqspi_wait_idle(cqspi);
  443. +}
  444. +
  445. +static int cqspi_command_read(struct spi_nor *nor,
  446. + const u8 *txbuf, const unsigned n_tx,
  447. + u8 *rxbuf, const unsigned n_rx)
  448. +{
  449. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  450. + struct cqspi_st *cqspi = f_pdata->cqspi;
  451. + void __iomem *reg_base = cqspi->iobase;
  452. + unsigned int rdreg;
  453. + unsigned int reg;
  454. + unsigned int read_len;
  455. + int status;
  456. +
  457. + if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
  458. + dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
  459. + n_rx, rxbuf);
  460. + return -EINVAL;
  461. + }
  462. +
  463. + reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
  464. +
  465. + rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
  466. + writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
  467. +
  468. + reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
  469. +
  470. + /* 0 means 1 byte. */
  471. + reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
  472. + << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
  473. + status = cqspi_exec_flash_cmd(cqspi, reg);
  474. + if (status)
  475. + return status;
  476. +
  477. + reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
  478. +
  479. + /* Put the read value into rx_buf */
  480. + read_len = (n_rx > 4) ? 4 : n_rx;
  481. + memcpy(rxbuf, &reg, read_len);
  482. + rxbuf += read_len;
  483. +
  484. + if (n_rx > 4) {
  485. + reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
  486. +
  487. + read_len = n_rx - read_len;
  488. + memcpy(rxbuf, &reg, read_len);
  489. + }
  490. +
  491. + return 0;
  492. +}
  493. +
  494. +static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
  495. + const u8 *txbuf, const unsigned n_tx)
  496. +{
  497. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  498. + struct cqspi_st *cqspi = f_pdata->cqspi;
  499. + void __iomem *reg_base = cqspi->iobase;
  500. + unsigned int reg;
  501. + unsigned int data;
  502. + int ret;
  503. +
  504. + if (n_tx > 4 || (n_tx && !txbuf)) {
  505. + dev_err(nor->dev,
  506. + "Invalid input argument, cmdlen %d txbuf 0x%p\n",
  507. + n_tx, txbuf);
  508. + return -EINVAL;
  509. + }
  510. +
  511. + reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
  512. + if (n_tx) {
  513. + reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
  514. + reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
  515. + << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
  516. + data = 0;
  517. + memcpy(&data, txbuf, n_tx);
  518. + writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
  519. + }
  520. +
  521. + ret = cqspi_exec_flash_cmd(cqspi, reg);
  522. + return ret;
  523. +}
  524. +
  525. +static int cqspi_command_write_addr(struct spi_nor *nor,
  526. + const u8 opcode, const unsigned int addr)
  527. +{
  528. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  529. + struct cqspi_st *cqspi = f_pdata->cqspi;
  530. + void __iomem *reg_base = cqspi->iobase;
  531. + unsigned int reg;
  532. +
  533. + reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
  534. + reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
  535. + reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
  536. + << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
  537. +
  538. + writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
  539. +
  540. + return cqspi_exec_flash_cmd(cqspi, reg);
  541. +}
  542. +
  543. +static int cqspi_indirect_read_setup(struct spi_nor *nor,
  544. + const unsigned int from_addr)
  545. +{
  546. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  547. + struct cqspi_st *cqspi = f_pdata->cqspi;
  548. + void __iomem *reg_base = cqspi->iobase;
  549. + unsigned int dummy_clk = 0;
  550. + unsigned int reg;
  551. +
  552. + writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
  553. +
  554. + reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
  555. + reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
  556. +
  557. + /* Setup dummy clock cycles */
  558. + dummy_clk = nor->read_dummy;
  559. + if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
  560. + dummy_clk = CQSPI_DUMMY_CLKS_MAX;
  561. +
  562. + if (dummy_clk / 8) {
  563. + reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
  564. + /* Set mode bits high to ensure chip doesn't enter XIP */
  565. + writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
  566. +
  567. + /* Need to subtract the mode byte (8 clocks). */
  568. + if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
  569. + dummy_clk -= 8;
  570. +
  571. + if (dummy_clk)
  572. + reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
  573. + << CQSPI_REG_RD_INSTR_DUMMY_LSB;
  574. + }
  575. +
  576. + writel(reg, reg_base + CQSPI_REG_RD_INSTR);
  577. +
  578. + /* Set address width */
  579. + reg = readl(reg_base + CQSPI_REG_SIZE);
  580. + reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
  581. + reg |= (nor->addr_width - 1);
  582. + writel(reg, reg_base + CQSPI_REG_SIZE);
  583. + return 0;
  584. +}
  585. +
  586. +static int cqspi_indirect_read_execute(struct spi_nor *nor,
  587. + u8 *rxbuf, const unsigned n_rx)
  588. +{
  589. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  590. + struct cqspi_st *cqspi = f_pdata->cqspi;
  591. + void __iomem *reg_base = cqspi->iobase;
  592. + void __iomem *ahb_base = cqspi->ahb_base;
  593. + unsigned int remaining = n_rx;
  594. + unsigned int bytes_to_read = 0;
  595. + int ret = 0;
  596. +
  597. + writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
  598. +
  599. + /* Clear all interrupts. */
  600. + writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
  601. +
  602. + writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
  603. +
  604. + reinit_completion(&cqspi->transfer_complete);
  605. + writel(CQSPI_REG_INDIRECTRD_START_MASK,
  606. + reg_base + CQSPI_REG_INDIRECTRD);
  607. +
  608. + while (remaining > 0) {
  609. + ret = wait_for_completion_timeout(&cqspi->transfer_complete,
  610. + msecs_to_jiffies
  611. + (CQSPI_READ_TIMEOUT_MS));
  612. +
  613. + bytes_to_read = cqspi_get_rd_sram_level(cqspi);
  614. +
  615. + if (!ret && bytes_to_read == 0) {
  616. + dev_err(nor->dev, "Indirect read timeout, no bytes\n");
  617. + ret = -ETIMEDOUT;
  618. + goto failrd;
  619. + }
  620. +
  621. + while (bytes_to_read != 0) {
  622. + bytes_to_read *= cqspi->fifo_width;
  623. + bytes_to_read = bytes_to_read > remaining ?
  624. + remaining : bytes_to_read;
  625. + readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
  626. + rxbuf += bytes_to_read;
  627. + remaining -= bytes_to_read;
  628. + bytes_to_read = cqspi_get_rd_sram_level(cqspi);
  629. + }
  630. +
  631. + if (remaining > 0)
  632. + reinit_completion(&cqspi->transfer_complete);
  633. + }
  634. +
  635. + /* Check indirect done status */
  636. + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
  637. + CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
  638. + if (ret) {
  639. + dev_err(nor->dev,
  640. + "Indirect read completion error (%i)\n", ret);
  641. + goto failrd;
  642. + }
  643. +
  644. + /* Disable interrupt */
  645. + writel(0, reg_base + CQSPI_REG_IRQMASK);
  646. +
  647. + /* Clear indirect completion status */
  648. + writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
  649. +
  650. + return 0;
  651. +
  652. +failrd:
  653. + /* Disable interrupt */
  654. + writel(0, reg_base + CQSPI_REG_IRQMASK);
  655. +
  656. + /* Cancel the indirect read */
  657. + writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
  658. + reg_base + CQSPI_REG_INDIRECTRD);
  659. + return ret;
  660. +}
  661. +
  662. +static int cqspi_indirect_write_setup(struct spi_nor *nor,
  663. + const unsigned int to_addr)
  664. +{
  665. + unsigned int reg;
  666. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  667. + struct cqspi_st *cqspi = f_pdata->cqspi;
  668. + void __iomem *reg_base = cqspi->iobase;
  669. +
  670. + /* Set opcode. */
  671. + reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
  672. + writel(reg, reg_base + CQSPI_REG_WR_INSTR);
  673. + reg = cqspi_calc_rdreg(nor, nor->program_opcode);
  674. + writel(reg, reg_base + CQSPI_REG_RD_INSTR);
  675. +
  676. + writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
  677. +
  678. + reg = readl(reg_base + CQSPI_REG_SIZE);
  679. + reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
  680. + reg |= (nor->addr_width - 1);
  681. + writel(reg, reg_base + CQSPI_REG_SIZE);
  682. + return 0;
  683. +}
  684. +
  685. +static int cqspi_indirect_write_execute(struct spi_nor *nor,
  686. + const u8 *txbuf, const unsigned n_tx)
  687. +{
  688. + const unsigned int page_size = nor->page_size;
  689. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  690. + struct cqspi_st *cqspi = f_pdata->cqspi;
  691. + void __iomem *reg_base = cqspi->iobase;
  692. + unsigned int remaining = n_tx;
  693. + unsigned int write_bytes;
  694. + int ret;
  695. +
  696. + writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
  697. +
  698. + /* Clear all interrupts. */
  699. + writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
  700. +
  701. + writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
  702. +
  703. + reinit_completion(&cqspi->transfer_complete);
  704. + writel(CQSPI_REG_INDIRECTWR_START_MASK,
  705. + reg_base + CQSPI_REG_INDIRECTWR);
  706. +
  707. + while (remaining > 0) {
  708. + write_bytes = remaining > page_size ? page_size : remaining;
  709. + writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));
  710. +
  711. + ret = wait_for_completion_timeout(&cqspi->transfer_complete,
  712. + msecs_to_jiffies
  713. + (CQSPI_TIMEOUT_MS));
  714. + if (!ret) {
  715. + dev_err(nor->dev, "Indirect write timeout\n");
  716. + ret = -ETIMEDOUT;
  717. + goto failwr;
  718. + }
  719. +
  720. + txbuf += write_bytes;
  721. + remaining -= write_bytes;
  722. +
  723. + if (remaining > 0)
  724. + reinit_completion(&cqspi->transfer_complete);
  725. + }
  726. +
  727. + /* Check indirect done status */
  728. + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
  729. + CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
  730. + if (ret) {
  731. + dev_err(nor->dev,
  732. + "Indirect write completion error (%i)\n", ret);
  733. + goto failwr;
  734. + }
  735. +
  736. + /* Disable interrupt. */
  737. + writel(0, reg_base + CQSPI_REG_IRQMASK);
  738. +
  739. + /* Clear indirect completion status */
  740. + writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
  741. +
  742. + cqspi_wait_idle(cqspi);
  743. +
  744. + return 0;
  745. +
  746. +failwr:
  747. + /* Disable interrupt. */
  748. + writel(0, reg_base + CQSPI_REG_IRQMASK);
  749. +
  750. + /* Cancel the indirect write */
  751. + writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
  752. + reg_base + CQSPI_REG_INDIRECTWR);
  753. + return ret;
  754. +}
  755. +
  756. +static int cqspi_set_protocol(struct spi_nor *nor, enum spi_nor_protocol proto)
  757. +{
  758. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  759. +
  760. + switch (proto) {
  761. + case SNOR_PROTO_1_1_1:
  762. + case SNOR_PROTO_1_1_2:
  763. + case SNOR_PROTO_1_1_4:
  764. + case SNOR_PROTO_1_2_2:
  765. + case SNOR_PROTO_1_4_4:
  766. + f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
  767. + break;
  768. + case SNOR_PROTO_2_2_2:
  769. + f_pdata->inst_width = CQSPI_INST_TYPE_DUAL;
  770. + break;
  771. + case SNOR_PROTO_4_4_4:
  772. + f_pdata->inst_width = CQSPI_INST_TYPE_QUAD;
  773. + break;
  774. + default:
  775. + return -EINVAL;
  776. + }
  777. +
  778. + switch (proto) {
  779. + case SNOR_PROTO_1_1_1:
  780. + case SNOR_PROTO_1_1_2:
  781. + case SNOR_PROTO_1_1_4:
  782. + f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
  783. + break;
  784. + case SNOR_PROTO_1_2_2:
  785. + case SNOR_PROTO_2_2_2:
  786. + f_pdata->addr_width = CQSPI_INST_TYPE_DUAL;
  787. + break;
  788. + case SNOR_PROTO_1_4_4:
  789. + case SNOR_PROTO_4_4_4:
  790. + f_pdata->addr_width = CQSPI_INST_TYPE_QUAD;
  791. + break;
  792. + default:
  793. + return -EINVAL;
  794. + }
  795. +
  796. + return 0;
  797. +}
  798. +
/*
 * spi-nor ->write hook: write @len bytes from @buf to flash offset @to
 * via the indirect write engine.
 *
 * This hook returns void in this kernel version, so failures can only
 * be signalled indirectly: *retlen is incremented only when every stage
 * succeeded, and is left untouched on any error.
 */
static void cqspi_write(struct spi_nor *nor, loff_t to,
			size_t len, size_t *retlen, const u_char *buf)
{
	int ret;

	/* Program instruction/address phase widths for the write protocol. */
	ret = cqspi_set_protocol(nor, nor->write_proto);
	if (ret)
		return;

	/* Latch opcode, start address and address width into the HW. */
	ret = cqspi_indirect_write_setup(nor, to);
	if (ret)
		return;

	ret = cqspi_indirect_write_execute(nor, buf, len);
	if (ret)
		return;

	*retlen += len;
}
  818. +
/*
 * spi-nor ->read hook: read @len bytes from flash offset @from into
 * @buf via the indirect read engine.
 *
 * *retlen is incremented only on full success.  Returns 0 or a negative
 * error code from protocol selection, setup, or the transfer itself.
 */
static int cqspi_read(struct spi_nor *nor, loff_t from,
		      size_t len, size_t *retlen, u_char *buf)
{
	int ret;

	/* Program instruction/address phase widths for the read protocol. */
	ret = cqspi_set_protocol(nor, nor->read_proto);
	if (ret)
		return ret;

	ret = cqspi_indirect_read_setup(nor, from);
	if (ret)
		return ret;

	ret = cqspi_indirect_read_execute(nor, buf, len);
	if (ret)
		return ret;

	*retlen += len;
	return ret;
}
  839. +
/*
 * spi-nor ->erase hook: erase one sector at flash offset @offs.
 * Sequence is write-enable (WREN) followed by the erase opcode with the
 * sector address placed in the command address register.
 */
static int cqspi_erase(struct spi_nor *nor, loff_t offs)
{
	int ret;

	ret = cqspi_set_protocol(nor, nor->erase_proto);
	if (ret)
		return ret;

	/* Send write enable, then erase commands. */
	ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
	if (ret)
		return ret;

	/* Set up command buffer. */
	ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
	if (ret)
		return ret;

	return 0;
}
  860. +
  861. +static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
  862. + const unsigned int ns_val)
  863. +{
  864. + unsigned int ticks;
  865. +
  866. + ticks = ref_clk_hz / 1000; /* kHz */
  867. + ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
  868. +
  869. + return ticks;
  870. +}
  871. +
/*
 * Program the per-device delay register from the flash node's timing
 * properties (nanosecond values from the device tree) converted to
 * reference-clock ticks.  Field names follow the Cadence QSPI data
 * sheet (tshsl/tchsh/tslch/tsd2d) -- NOTE(review): exact hardware
 * semantics assumed from the names; confirm against the controller
 * documentation.
 */
static void cqspi_delay(struct spi_nor *nor, const unsigned int sclk_hz)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = (ref_clk_hz + sclk_hz - 1) / sclk_hz;

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	/* Pack the four masked delay fields into their register slots. */
	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
		<< CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}
  904. +
  905. +static void cqspi_config_baudrate_div(struct cqspi_st *cqspi,
  906. + const unsigned int sclk_hz)
  907. +{
  908. + const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
  909. + void __iomem *reg_base = cqspi->iobase;
  910. + unsigned int reg;
  911. + unsigned int div;
  912. +
  913. + reg = readl(reg_base + CQSPI_REG_CONFIG);
  914. + reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
  915. +
  916. + div = ref_clk_hz / sclk_hz;
  917. +
  918. + /* Recalculate the baudrate divisor based on QSPI specification. */
  919. + if (div > 32)
  920. + div = 32;
  921. +
  922. + /* Check if even number. */
  923. + if (div & 1)
  924. + div = (div / 2);
  925. + else
  926. + div = (div / 2) - 1;
  927. +
  928. + div = (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
  929. + reg |= div;
  930. + writel(reg, reg_base + CQSPI_REG_CONFIG);
  931. +}
  932. +
  933. +static void cqspi_readdata_capture(struct cqspi_st *cqspi,
  934. + const unsigned int bypass,
  935. + const unsigned int delay)
  936. +{
  937. + void __iomem *reg_base = cqspi->iobase;
  938. + unsigned int reg;
  939. +
  940. + reg = readl(reg_base + CQSPI_REG_READCAPTURE);
  941. +
  942. + if (bypass)
  943. + reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
  944. + else
  945. + reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
  946. +
  947. + reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
  948. + << CQSPI_REG_READCAPTURE_DELAY_LSB);
  949. +
  950. + reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
  951. + << CQSPI_REG_READCAPTURE_DELAY_LSB;
  952. +
  953. + writel(reg, reg_base + CQSPI_REG_READCAPTURE);
  954. +}
  955. +
/*
 * Drive this flash's chip select: either enable the external decoder
 * and program the raw CS number, or (without a decoder) program the
 * active-low one-hot CS pattern directly.
 */
static void cqspi_chipselect(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	/* Replace the CS field with the (decoded or one-hot) value. */
	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}
  985. +
  986. +static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
  987. +{
  988. + void __iomem *reg_base = cqspi->iobase;
  989. + unsigned int reg;
  990. +
  991. + reg = readl(reg_base + CQSPI_REG_CONFIG);
  992. +
  993. + if (enable)
  994. + reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
  995. + else
  996. + reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
  997. +
  998. + writel(reg, reg_base + CQSPI_REG_CONFIG);
  999. +}
  1000. +
/*
 * Re-program per-flash geometry (page size, erase-block size, address
 * width) and the chip-select lines when switching to a different flash.
 */
static void cqspi_switch_cs(struct spi_nor *nor)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	unsigned int reg;

	/* configure page size and block size. */
	reg = readl(iobase + CQSPI_REG_SIZE);
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	/* NOTE(review): page_size is OR'd in without masking -- assumes it
	 * always fits in CQSPI_REG_SIZE_PAGE_MASK; verify for large-page
	 * parts.
	 */
	reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	/* Block-size field holds log2 of the erase size. */
	reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
	/* Address bytes are encoded as (n - 1). */
	reg |= (nor->addr_width - 1);
	writel(reg, iobase + CQSPI_REG_SIZE);

	/* configure the chip select */
	cqspi_chipselect(nor);
}
  1021. +
/*
 * Apply this flash's chip select and clock configuration if either
 * differs from what the controller currently has programmed.  The
 * controller is disabled around any reconfiguration.  Always returns 0.
 */
static int cqspi_prep_unlocked(struct spi_nor *nor, enum spi_nor_ops ops)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	const unsigned int sclk = f_pdata->clk_rate;
	const int switch_cs = (cqspi->current_cs != f_pdata->cs);
	const int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_switch_cs(nor);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi, sclk);
		cqspi_delay(nor, sclk);
		/* NOTE(review): bypass is hard-coded to 1 here -- confirm
		 * the intended loopback-bypass setting with the data sheet.
		 */
		cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);

	return 0;
}
  1052. +
  1053. +static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
  1054. +{
  1055. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  1056. + struct cqspi_st *cqspi = f_pdata->cqspi;
  1057. +
  1058. + mutex_lock(&cqspi->bus_mutex);
  1059. +
  1060. + return cqspi_prep_unlocked(nor, ops);
  1061. +}
  1062. +
  1063. +static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
  1064. +{
  1065. + struct cqspi_flash_pdata *f_pdata = nor->priv;
  1066. + struct cqspi_st *cqspi = f_pdata->cqspi;
  1067. +
  1068. + mutex_unlock(&cqspi->bus_mutex);
  1069. +}
  1070. +
  1071. +static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
  1072. +{
  1073. + int ret;
  1074. +
  1075. + ret = cqspi_set_protocol(nor, nor->reg_proto);
  1076. + if (ret)
  1077. + goto exit;
  1078. +
  1079. + cqspi_prep_unlocked(nor, SPI_NOR_OPS_READ);
  1080. +
  1081. + ret = cqspi_command_read(nor, &opcode, 1, buf, len);
  1082. +exit:
  1083. + return ret;
  1084. +}
  1085. +
  1086. +static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
  1087. +{
  1088. + int ret;
  1089. +
  1090. + ret = cqspi_set_protocol(nor, nor->reg_proto);
  1091. + if (ret)
  1092. + goto exit;
  1093. +
  1094. + cqspi_prep_unlocked(nor, SPI_NOR_OPS_WRITE);
  1095. +
  1096. + ret = cqspi_command_write(nor, opcode, buf, len);
  1097. +exit:
  1098. + return ret;
  1099. +}
  1100. +
  1101. +static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
  1102. + struct cqspi_flash_pdata *f_pdata,
  1103. + struct device_node *np)
  1104. +{
  1105. + if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
  1106. + dev_err(&pdev->dev, "couldn't determine read-delay\n");
  1107. + return -ENXIO;
  1108. + }
  1109. +
  1110. + if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
  1111. + dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
  1112. + return -ENXIO;
  1113. + }
  1114. +
  1115. + if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
  1116. + dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
  1117. + return -ENXIO;
  1118. + }
  1119. +
  1120. + if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
  1121. + dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
  1122. + return -ENXIO;
  1123. + }
  1124. +
  1125. + if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
  1126. + dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
  1127. + return -ENXIO;
  1128. + }
  1129. +
  1130. + if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
  1131. + dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
  1132. + return -ENXIO;
  1133. + }
  1134. +
  1135. + return 0;
  1136. +}
  1137. +
/*
 * Parse the controller node's properties: the optional decoded-CS flag
 * plus the mandatory FIFO geometry and indirect trigger address.
 */
static int cqspi_of_get_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	/* Optional: absent means no external CS decoder. */
	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(&pdev->dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(&pdev->dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	return 0;
}
  1163. +
/*
 * One-time controller setup, performed with the controller disabled and
 * re-enabled at the end: no address remap, interrupts masked, SRAM
 * split evenly between read and write, trigger address and FIFO
 * watermarks programmed.
 */
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	cqspi_controller_enable(cqspi, 0);

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1 . */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	cqspi_controller_enable(cqspi, 1);
}
  1190. +
  1191. +static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
  1192. +{
  1193. + struct platform_device *pdev = cqspi->pdev;
  1194. + struct device *dev = &pdev->dev;
  1195. + struct cqspi_flash_pdata *f_pdata;
  1196. + struct spi_nor *nor;
  1197. + struct mtd_info *mtd;
  1198. + unsigned int cs;
  1199. + int i, ret;
  1200. +
  1201. + /* Get flash device data */
  1202. + for_each_available_child_of_node(dev->of_node, np) {
  1203. + if (of_property_read_u32(np, "reg", &cs)) {
  1204. + dev_err(dev, "Couldn't determine chip select.\n");
  1205. + goto err;
  1206. + }
  1207. +
  1208. + if (cs > CQSPI_MAX_CHIPSELECT) {
  1209. + dev_err(dev, "Chip select %d out of range.\n", cs);
  1210. + goto err;
  1211. + }
  1212. +
  1213. + f_pdata = &cqspi->f_pdata[cs];
  1214. + f_pdata->cqspi = cqspi;
  1215. + f_pdata->cs = cs;
  1216. +
  1217. + ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
  1218. + if (ret)
  1219. + goto err;
  1220. +
  1221. + nor = &f_pdata->nor;
  1222. + mtd = &nor->mtd;
  1223. +
  1224. + mtd->priv = nor;
  1225. +
  1226. + nor->dev = dev;
  1227. + spi_nor_set_flash_node(nor, np);
  1228. + nor->priv = f_pdata;
  1229. +
  1230. + nor->read_reg = cqspi_read_reg;
  1231. + nor->write_reg = cqspi_write_reg;
  1232. + nor->read = cqspi_read;
  1233. + nor->write = cqspi_write;
  1234. + nor->erase = cqspi_erase;
  1235. + nor->prepare = cqspi_prep;
  1236. + nor->unprepare = cqspi_unprep;
  1237. +
  1238. + mtd->name = kasprintf(GFP_KERNEL, "%s.%d", dev_name(dev), cs);
  1239. + if (!mtd->name) {
  1240. + ret = -ENOMEM;
  1241. + goto err;
  1242. + }
  1243. +
  1244. + ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
  1245. + if (ret)
  1246. + goto err;
  1247. +
  1248. + ret = mtd_device_register(mtd, NULL, 0);
  1249. + if (ret)
  1250. + goto err;
  1251. + }
  1252. +
  1253. + return 0;
  1254. +
  1255. +err:
  1256. + for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
  1257. + if (cqspi->f_pdata[i].nor.mtd.name) {
  1258. + mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
  1259. + kfree(cqspi->f_pdata[i].nor.mtd.name);
  1260. + }
  1261. + return ret;
  1262. +}
  1263. +
  1264. +static int cqspi_probe(struct platform_device *pdev)
  1265. +{
  1266. + struct device_node *np = pdev->dev.of_node;
  1267. + struct device *dev = &pdev->dev;
  1268. + struct cqspi_st *cqspi;
  1269. + struct resource *res;
  1270. + struct resource *res_ahb;
  1271. + int ret;
  1272. + int irq;
  1273. +
  1274. + cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
  1275. + if (!cqspi)
  1276. + return -ENOMEM;
  1277. +
  1278. + mutex_init(&cqspi->bus_mutex);
  1279. + cqspi->pdev = pdev;
  1280. + platform_set_drvdata(pdev, cqspi);
  1281. +
  1282. + /* Obtain configuration from OF. */
  1283. + ret = cqspi_of_get_pdata(pdev);
  1284. + if (ret) {
  1285. + dev_err(dev, "Cannot get mandatory OF data.\n");
  1286. + return -ENODEV;
  1287. + }
  1288. +
  1289. + /* Obtain QSPI clock. */
  1290. + cqspi->clk = devm_clk_get(dev, NULL);
  1291. + if (IS_ERR(cqspi->clk)) {
  1292. + dev_err(dev, "Cannot claim QSPI clock.\n");
  1293. + return PTR_ERR(cqspi->clk);
  1294. + }
  1295. +
  1296. + /* Obtain and remap controller address. */
  1297. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1298. + cqspi->iobase = devm_ioremap_resource(dev, res);
  1299. + if (IS_ERR(cqspi->iobase)) {
  1300. + dev_err(dev, "Cannot remap controller address.\n");
  1301. + return PTR_ERR(cqspi->iobase);
  1302. + }
  1303. +
  1304. + /* Obtain and remap AHB address. */
  1305. + res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  1306. + cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
  1307. + if (IS_ERR(cqspi->ahb_base)) {
  1308. + dev_err(dev, "Cannot remap AHB address.\n");
  1309. + return PTR_ERR(cqspi->ahb_base);
  1310. + }
  1311. +
  1312. + init_completion(&cqspi->transfer_complete);
  1313. +
  1314. + /* Obtain IRQ line. */
  1315. + irq = platform_get_irq(pdev, 0);
  1316. + if (irq < 0) {
  1317. + dev_err(dev, "Cannot obtain IRQ.\n");
  1318. + return -ENXIO;
  1319. + }
  1320. +
  1321. + ret = clk_prepare_enable(cqspi->clk);
  1322. + if (ret) {
  1323. + dev_err(dev, "Cannot enable QSPI clock.\n");
  1324. + return ret;
  1325. + }
  1326. +
  1327. + cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
  1328. +
  1329. + ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
  1330. + pdev->name, cqspi);
  1331. + if (ret) {
  1332. + dev_err(dev, "Cannot request IRQ.\n");
  1333. + goto probe_irq_failed;
  1334. + }
  1335. +
  1336. + cqspi_wait_idle(cqspi);
  1337. + cqspi_controller_init(cqspi);
  1338. + cqspi->current_cs = -1;
  1339. + cqspi->sclk = 0;
  1340. +
  1341. + ret = cqspi_setup_flash(cqspi, np);
  1342. + if (ret) {
  1343. + dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
  1344. + goto probe_setup_failed;
  1345. + }
  1346. +
  1347. + return ret;
  1348. +probe_irq_failed:
  1349. + cqspi_controller_enable(cqspi, 0);
  1350. +probe_setup_failed:
  1351. + clk_disable_unprepare(cqspi->clk);
  1352. + return ret;
  1353. +}
  1354. +
/*
 * Platform remove: disable the controller, unregister every flash that
 * was set up (mtd.name doubles as the "was registered" marker, mirroring
 * the setup error path), and release the clock.
 */
static int cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);
	int i;

	cqspi_controller_enable(cqspi, 0);

	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
		if (cqspi->f_pdata[i].nor.mtd.name) {
			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
			kfree(cqspi->f_pdata[i].nor.mtd.name);
		}

	clk_disable_unprepare(cqspi->clk);

	return 0;
}
  1372. +
  1373. +#ifdef CONFIG_PM_SLEEP
  1374. +static int cqspi_suspend(struct device *dev)
  1375. +{
  1376. + struct cqspi_st *cqspi = dev_get_drvdata(dev);
  1377. +
  1378. + cqspi_controller_enable(cqspi, 0);
  1379. + return 0;
  1380. +}
  1381. +
  1382. +static int cqspi_resume(struct device *dev)
  1383. +{
  1384. + struct cqspi_st *cqspi = dev_get_drvdata(dev);
  1385. +
  1386. + cqspi_controller_enable(cqspi, 1);
  1387. + return 0;
  1388. +}
  1389. +
  1390. +static const struct dev_pm_ops cqspi__dev_pm_ops = {
  1391. + .suspend = cqspi_suspend,
  1392. + .resume = cqspi_resume,
  1393. +};
  1394. +
  1395. +#define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops)
  1396. +#else
  1397. +#define CQSPI_DEV_PM_OPS NULL
  1398. +#endif
  1399. +
  1400. +static struct of_device_id const cqspi_dt_ids[] = {
  1401. + {.compatible = "cdns,qspi-nor",},
  1402. + { /* end of table */ }
  1403. +};
  1404. +
  1405. +MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
  1406. +
/* Platform driver glue: probe/remove plus PM ops and OF matching. */
static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = CQSPI_DEV_PM_OPS,
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
  1424. --
  1425. 2.8.1