0075-mtd-mediatek-driver-for-MTK-Smart-Device-Gen1-NAND.patch

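Before the patch itself, a minimal usage sketch of the ECC engine API it exports in mtk_ecc.h (of_mtk_ecc_get(), mtk_ecc_enable(), mtk_ecc_wait_irq_done(), mtk_ecc_get_stats(), mtk_ecc_disable(), mtk_ecc_release()), strung together the way mtk_nand.c drives a hardware-assisted read. This is not part of the patch; the sector geometry (512-byte sectors, one FDM byte under ECC) and the 4-bit strength are illustrative assumptions.

/*
 * Usage sketch only -- not from the patch.  Shows a decode pass through the
 * exported mtk_ecc API in NFI-pipelined mode.
 */
#include <linux/err.h>
#include <linux/of.h>
#include "mtk_ecc.h"

static int example_nfi_decode(struct device_node *np, int sectors)
{
	struct mtk_ecc_config cfg = { 0 };
	struct mtk_ecc_stats stats;
	struct mtk_ecc *ecc;
	u32 enc_len = 512 + 1;		/* sector data + FDM byte covered by ECC (assumed) */
	int ret;

	ecc = of_mtk_ecc_get(np);	/* follows the "ecc-engine" phandle */
	if (IS_ERR_OR_NULL(ecc))
		return ecc ? PTR_ERR(ecc) : -ENODEV;

	cfg.ecc_mode = ECC_NFI_MODE;	/* pipelined with the NFI, no DMA address needed */
	cfg.codec    = ECC_DEC;
	cfg.strength = 4;		/* must be one of the strengths the engine supports */
	cfg.sec_mask = sectors;
	cfg.enc_len  = enc_len;
	cfg.dec_len  = (enc_len << 3) + cfg.strength * ECC_PARITY_BITS;

	ret = mtk_ecc_enable(ecc, &cfg);
	if (!ret) {
		/* ... the caller would start the NFI AHB/DMA read of 'sectors' sectors here ... */
		ret = mtk_ecc_wait_irq_done(ecc, ECC_DEC);
		if (!ret)
			mtk_ecc_get_stats(ecc, &stats, sectors);
		mtk_ecc_disable(ecc, &cfg);
	}

	mtk_ecc_release(ecc);
	return ret;
}

The dec_len formula mirrors what mtk_nfc_hw_runtime_config() computes in the patch: the per-sector data-plus-FDM length in bits plus the parity bits for the chosen strength.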
  1. From de18239fc971cfc17c53320c66ae64dd5ade032d Mon Sep 17 00:00:00 2001
  2. From: Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
  3. Date: Fri, 29 Apr 2016 12:17:22 -0400
  4. Subject: [PATCH 075/102] mtd: mediatek: driver for MTK Smart Device Gen1 NAND
  5. This patch adds support for MediaTek's SDG1 NFC NAND controller
  6. embedded in the MT2701 SoC
  7. Signed-off-by: Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
  8. ---
  9. drivers/mtd/nand/Kconfig | 7 +
  10. drivers/mtd/nand/Makefile | 1 +
  11. drivers/mtd/nand/mtk_ecc.c | 527 ++++++++++++++++
  12. drivers/mtd/nand/mtk_ecc.h | 53 ++
  13. drivers/mtd/nand/mtk_nand.c | 1432 +++++++++++++++++++++++++++++++++++++++++++
  14. 5 files changed, 2020 insertions(+)
  15. create mode 100644 drivers/mtd/nand/mtk_ecc.c
  16. create mode 100644 drivers/mtd/nand/mtk_ecc.h
  17. create mode 100644 drivers/mtd/nand/mtk_nand.c
  18. --- a/drivers/mtd/nand/Kconfig
  19. +++ b/drivers/mtd/nand/Kconfig
  20. @@ -563,4 +563,11 @@ config MTD_NAND_QCOM
  21. Enables support for NAND flash chips on SoCs containing the EBI2 NAND
  22. controller. This controller is found on IPQ806x SoC.
  23. +config MTD_NAND_MTK
  24. + tristate "Support for NAND controller on MTK SoCs"
  25. + depends on HAS_DMA
  26. + help
  27. + Enables support for NAND controller on MTK SoCs.
  28. + This controller is found on mt27xx, mt81xx, mt65xx SoCs.
  29. +
  30. endif # MTD_NAND
  31. --- a/drivers/mtd/nand/Makefile
  32. +++ b/drivers/mtd/nand/Makefile
  33. @@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_n
  34. obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
  35. obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
  36. obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
  37. +obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
  38. nand-objs := nand_base.o nand_bbt.o nand_timings.o
  39. --- /dev/null
  40. +++ b/drivers/mtd/nand/mtk_ecc.c
  41. @@ -0,0 +1,527 @@
  42. +/*
  43. + * MTK ECC controller driver.
  44. + * Copyright (C) 2016 MediaTek Inc.
  45. + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
  46. + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
  47. + *
  48. + * This program is free software; you can redistribute it and/or modify
  49. + * it under the terms of the GNU General Public License version 2 as
  50. + * published by the Free Software Foundation.
  51. + *
  52. + * This program is distributed in the hope that it will be useful,
  53. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  54. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  55. + * GNU General Public License for more details.
  56. + */
  57. +
  58. +#include <linux/platform_device.h>
  59. +#include <linux/dma-mapping.h>
  60. +#include <linux/interrupt.h>
  61. +#include <linux/clk.h>
  62. +#include <linux/module.h>
  63. +#include <linux/iopoll.h>
  64. +#include <linux/of.h>
  65. +#include <linux/of_platform.h>
  66. +#include <linux/semaphore.h>
  67. +
  68. +#include "mtk_ecc.h"
  69. +
  70. +#define ECC_ENCCON (0x00)
  71. +#define ENC_EN (1)
  72. +#define ENC_DE (0)
  73. +#define ECC_ENCCNFG (0x04)
  74. +#define ECC_CNFG_4BIT (0)
  75. +#define ECC_CNFG_6BIT (1)
  76. +#define ECC_CNFG_8BIT (2)
  77. +#define ECC_CNFG_10BIT (3)
  78. +#define ECC_CNFG_12BIT (4)
  79. +#define ECC_CNFG_14BIT (5)
  80. +#define ECC_CNFG_16BIT (6)
  81. +#define ECC_CNFG_18BIT (7)
  82. +#define ECC_CNFG_20BIT (8)
  83. +#define ECC_CNFG_22BIT (9)
  84. +#define ECC_CNFG_24BIT (0xa)
  85. +#define ECC_CNFG_28BIT (0xb)
  86. +#define ECC_CNFG_32BIT (0xc)
  87. +#define ECC_CNFG_36BIT (0xd)
  88. +#define ECC_CNFG_40BIT (0xe)
  89. +#define ECC_CNFG_44BIT (0xf)
  90. +#define ECC_CNFG_48BIT (0x10)
  91. +#define ECC_CNFG_52BIT (0x11)
  92. +#define ECC_CNFG_56BIT (0x12)
  93. +#define ECC_CNFG_60BIT (0x13)
  94. +#define ECC_MODE_SHIFT (5)
  95. +#define ECC_MS_SHIFT (16)
  96. +#define ECC_ENCDIADDR (0x08)
  97. +#define ECC_ENCIDLE (0x0C)
  98. +#define ENC_IDLE BIT(0)
  99. +#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32))
  100. +#define ECC_ENCIRQ_EN (0x80)
  101. +#define ENC_IRQEN BIT(0)
  102. +#define ECC_ENCIRQ_STA (0x84)
  103. +#define ECC_DECCON (0x100)
  104. +#define DEC_EN (1)
  105. +#define DEC_DE (0)
  106. +#define ECC_DECCNFG (0x104)
  107. +#define DEC_EMPTY_EN BIT(31)
  108. +#define DEC_CNFG_CORRECT (0x3 << 12)
  109. +#define ECC_DECIDLE (0x10C)
  110. +#define DEC_IDLE BIT(0)
  111. +#define ECC_DECENUM0 (0x114)
  112. +#define ERR_MASK (0x3f)
  113. +#define ECC_DECDONE (0x124)
  114. +#define ECC_DECIRQ_EN (0x200)
  115. +#define DEC_IRQEN BIT(0)
  116. +#define ECC_DECIRQ_STA (0x204)
  117. +
  118. +#define ECC_TIMEOUT (500000)
  119. +
  120. +#define ECC_IDLE_REG(x) ((x) == ECC_ENC ? ECC_ENCIDLE : ECC_DECIDLE)
  121. +#define ECC_IDLE_MASK(x) ((x) == ECC_ENC ? ENC_IDLE : DEC_IDLE)
  122. +#define ECC_IRQ_REG(x) ((x) == ECC_ENC ? ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
  123. +#define ECC_IRQ_EN(x) ((x) == ECC_ENC ? ENC_IRQEN : DEC_IRQEN)
  124. +#define ECC_CTL_REG(x) ((x) == ECC_ENC ? ECC_ENCCON : ECC_DECCON)
  125. +#define ECC_CODEC_ENABLE(x) ((x) == ECC_ENC ? ENC_EN : DEC_EN)
  126. +#define ECC_CODEC_DISABLE(x) ((x) == ECC_ENC ? ENC_DE : DEC_DE)
  127. +
  128. +struct mtk_ecc {
  129. + struct device *dev;
  130. + void __iomem *regs;
  131. + struct clk *clk;
  132. +
  133. + struct completion done;
  134. + struct semaphore sem;
  135. + u32 sec_mask;
  136. +};
  137. +
  138. +static inline void mtk_ecc_codec_wait_idle(struct mtk_ecc *ecc,
  139. + enum mtk_ecc_codec codec)
  140. +{
  141. + struct device *dev = ecc->dev;
  142. + u32 val;
  143. + int ret;
  144. +
  145. + ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(codec), val,
  146. + val & ECC_IDLE_MASK(codec),
  147. + 10, ECC_TIMEOUT);
  148. + if (ret)
  149. + dev_warn(dev, "%s NOT idle\n",
  150. + codec == ECC_ENC ? "encoder" : "decoder");
  151. +}
  152. +
  153. +static irqreturn_t mtk_ecc_irq(int irq, void *id)
  154. +{
  155. + struct mtk_ecc *ecc = id;
  156. + enum mtk_ecc_codec codec;
  157. + u32 dec, enc;
  158. +
  159. + dec = readw(ecc->regs + ECC_DECIRQ_STA) & DEC_IRQEN;
  160. + if (dec) {
  161. + codec = ECC_DEC;
  162. + dec = readw(ecc->regs + ECC_DECDONE);
  163. + if (dec & ecc->sec_mask) {
  164. + ecc->sec_mask = 0;
  165. + complete(&ecc->done);
  166. + } else
  167. + return IRQ_HANDLED;
  168. + } else {
  169. + enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ENC_IRQEN;
  170. + if (enc) {
  171. + codec = ECC_ENC;
  172. + complete(&ecc->done);
  173. + } else
  174. + return IRQ_NONE;
  175. + }
  176. +
  177. + writel(0, ecc->regs + ECC_IRQ_REG(codec));
  178. +
  179. + return IRQ_HANDLED;
  180. +}
  181. +
  182. +static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
  183. +{
  184. + u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
  185. + u32 reg;
  186. +
  187. + switch (config->strength) {
  188. + case 4:
  189. + ecc_bit = ECC_CNFG_4BIT;
  190. + break;
  191. + case 6:
  192. + ecc_bit = ECC_CNFG_6BIT;
  193. + break;
  194. + case 8:
  195. + ecc_bit = ECC_CNFG_8BIT;
  196. + break;
  197. + case 10:
  198. + ecc_bit = ECC_CNFG_10BIT;
  199. + break;
  200. + case 12:
  201. + ecc_bit = ECC_CNFG_12BIT;
  202. + break;
  203. + case 14:
  204. + ecc_bit = ECC_CNFG_14BIT;
  205. + break;
  206. + case 16:
  207. + ecc_bit = ECC_CNFG_16BIT;
  208. + break;
  209. + case 18:
  210. + ecc_bit = ECC_CNFG_18BIT;
  211. + break;
  212. + case 20:
  213. + ecc_bit = ECC_CNFG_20BIT;
  214. + break;
  215. + case 22:
  216. + ecc_bit = ECC_CNFG_22BIT;
  217. + break;
  218. + case 24:
  219. + ecc_bit = ECC_CNFG_24BIT;
  220. + break;
  221. + case 28:
  222. + ecc_bit = ECC_CNFG_28BIT;
  223. + break;
  224. + case 32:
  225. + ecc_bit = ECC_CNFG_32BIT;
  226. + break;
  227. + case 36:
  228. + ecc_bit = ECC_CNFG_36BIT;
  229. + break;
  230. + case 40:
  231. + ecc_bit = ECC_CNFG_40BIT;
  232. + break;
  233. + case 44:
  234. + ecc_bit = ECC_CNFG_44BIT;
  235. + break;
  236. + case 48:
  237. + ecc_bit = ECC_CNFG_48BIT;
  238. + break;
  239. + case 52:
  240. + ecc_bit = ECC_CNFG_52BIT;
  241. + break;
  242. + case 56:
  243. + ecc_bit = ECC_CNFG_56BIT;
  244. + break;
  245. + case 60:
  246. + ecc_bit = ECC_CNFG_60BIT;
  247. + break;
  248. + default:
  249. + dev_err(ecc->dev, "invalid strength %d\n", config->strength);
  250. + }
  251. +
  252. + if (config->codec == ECC_ENC) {
  253. + /* configure ECC encoder (in bits) */
  254. + enc_sz = config->enc_len << 3;
  255. +
  256. + reg = ecc_bit | (config->ecc_mode << ECC_MODE_SHIFT);
  257. + reg |= (enc_sz << ECC_MS_SHIFT);
  258. + writel(reg, ecc->regs + ECC_ENCCNFG);
  259. +
  260. + if (config->ecc_mode != ECC_NFI_MODE)
  261. + writel(lower_32_bits(config->addr),
  262. + ecc->regs + ECC_ENCDIADDR);
  263. +
  264. + } else {
  265. + /* configure ECC decoder (in bits) */
  266. + dec_sz = config->dec_len;
  267. +
  268. + reg = ecc_bit | (config->ecc_mode << ECC_MODE_SHIFT);
  269. + reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
  270. + reg |= DEC_EMPTY_EN;
  271. + writel(reg, ecc->regs + ECC_DECCNFG);
  272. +
  273. + if (config->sec_mask)
  274. + ecc->sec_mask = 1 << (config->sec_mask - 1);
  275. + }
  276. +}
  277. +
  278. +void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
  279. + int sectors)
  280. +{
  281. + u32 offset, i, err;
  282. + u32 bitflips = 0;
  283. +
  284. + stats->corrected = 0;
  285. + stats->failed = 0;
  286. +
  287. + for (i = 0; i < sectors; i++) {
  288. + offset = (i >> 2) << 2;
  289. + err = readl(ecc->regs + ECC_DECENUM0 + offset);
  290. + err = err >> ((i % 4) * 8);
  291. + err &= ERR_MASK;
  292. + if (err == ERR_MASK) {
  293. + /* uncorrectable errors */
  294. + stats->failed++;
  295. + continue;
  296. + }
  297. +
  298. + stats->corrected += err;
  299. + bitflips = max_t(u32, bitflips, err);
  300. + }
  301. +
  302. + stats->bitflips = bitflips;
  303. +}
  304. +EXPORT_SYMBOL(mtk_ecc_get_stats);
  305. +
  306. +void mtk_ecc_release(struct mtk_ecc *ecc)
  307. +{
  308. + clk_disable_unprepare(ecc->clk);
  309. + put_device(ecc->dev);
  310. +}
  311. +EXPORT_SYMBOL(mtk_ecc_release);
  312. +
  313. +static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
  314. +{
  315. + struct platform_device *pdev;
  316. + struct mtk_ecc *ecc;
  317. +
  318. + pdev = of_find_device_by_node(np);
  319. + if (!pdev || !platform_get_drvdata(pdev))
  320. + return ERR_PTR(-EPROBE_DEFER);
  321. +
  322. + get_device(&pdev->dev);
  323. + ecc = platform_get_drvdata(pdev);
  324. + clk_prepare_enable(ecc->clk);
  325. + mtk_ecc_hw_init(ecc);
  326. +
  327. + return ecc;
  328. +}
  329. +
  330. +struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
  331. +{
  332. + struct mtk_ecc *ecc = NULL;
  333. + struct device_node *np;
  334. +
  335. + np = of_parse_phandle(of_node, "ecc-engine", 0);
  336. + if (np) {
  337. + ecc = mtk_ecc_get(np);
  338. + of_node_put(np);
  339. + }
  340. +
  341. + return ecc;
  342. +}
  343. +EXPORT_SYMBOL(of_mtk_ecc_get);
  344. +
  345. +int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
  346. +{
  347. + enum mtk_ecc_codec codec = config->codec;
  348. + int ret;
  349. +
  350. + ret = down_interruptible(&ecc->sem);
  351. + if (ret) {
  352. + dev_err(ecc->dev, "interrupted when attempting to lock\n");
  353. + return ret;
  354. + }
  355. +
  356. + mtk_ecc_codec_wait_idle(ecc, codec);
  357. + mtk_ecc_config(ecc, config);
  358. + writew(ECC_CODEC_ENABLE(codec), ecc->regs + ECC_CTL_REG(codec));
  359. +
  360. + init_completion(&ecc->done);
  361. + writew(ECC_IRQ_EN(codec), ecc->regs + ECC_IRQ_REG(codec));
  362. +
  363. + return 0;
  364. +}
  365. +EXPORT_SYMBOL(mtk_ecc_enable);
  366. +
  367. +void mtk_ecc_disable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
  368. +{
  369. + enum mtk_ecc_codec codec = config->codec;
  370. +
  371. + mtk_ecc_codec_wait_idle(ecc, codec);
  372. + writew(0, ecc->regs + ECC_IRQ_REG(codec));
  373. + writew(ECC_CODEC_DISABLE(codec), ecc->regs + ECC_CTL_REG(codec));
  374. + up(&ecc->sem);
  375. +}
  376. +EXPORT_SYMBOL(mtk_ecc_disable);
  377. +
  378. +int mtk_ecc_wait_irq_done(struct mtk_ecc *ecc, enum mtk_ecc_codec codec)
  379. +{
  380. + int ret;
  381. +
  382. + ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
  383. + if (!ret) {
  384. + dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
  385. + (codec == ECC_ENC) ? "encoder" : "decoder");
  386. + return -ETIMEDOUT;
  387. + }
  388. +
  389. + return 0;
  390. +}
  391. +EXPORT_SYMBOL(mtk_ecc_wait_irq_done);
  392. +
  393. +int mtk_ecc_encode_non_nfi_mode(struct mtk_ecc *ecc,
  394. + struct mtk_ecc_config *config, u8 *data, u32 bytes)
  395. +{
  396. + dma_addr_t addr;
  397. + u32 *p, len, i;
  398. + int ret = 0;
  399. +
  400. + addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
  401. + ret = dma_mapping_error(ecc->dev, addr);
  402. + if (ret) {
  403. + dev_err(ecc->dev, "dma mapping error\n");
  404. + return -EINVAL;
  405. + }
  406. +
  407. + config->codec = ECC_ENC;
  408. + config->addr = addr;
  409. + ret = mtk_ecc_enable(ecc, config);
  410. + if (ret) {
  411. + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
  412. + return ret;
  413. + }
  414. +
  415. + ret = mtk_ecc_wait_irq_done(ecc, ECC_ENC);
  416. + if (ret)
  417. + goto timeout;
  418. +
  419. + mtk_ecc_codec_wait_idle(ecc, ECC_ENC);
  420. +
  421. + /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
  422. + len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
  423. + p = (u32 *) (data + bytes);
  424. +
  425. + /* write the parity bytes generated by the ECC back to the OOB region */
  426. + for (i = 0; i < len; i++)
  427. + p[i] = readl(ecc->regs + ECC_ENCPAR(i));
  428. +timeout:
  429. +
  430. + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
  431. + mtk_ecc_disable(ecc, config);
  432. +
  433. + return ret;
  434. +}
  435. +EXPORT_SYMBOL(mtk_ecc_encode_non_nfi_mode);
  436. +
  437. +void mtk_ecc_hw_init(struct mtk_ecc *ecc)
  438. +{
  439. + mtk_ecc_codec_wait_idle(ecc, ECC_ENC);
  440. + writew(ENC_DE, ecc->regs + ECC_ENCCON);
  441. +
  442. + mtk_ecc_codec_wait_idle(ecc, ECC_DEC);
  443. + writel(DEC_DE, ecc->regs + ECC_DECCON);
  444. +}
  445. +
  446. +void mtk_ecc_update_strength(u32 *p)
  447. +{
  448. + u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
  449. + 40, 44, 48, 52, 56, 60};
  450. + int i;
  451. +
  452. + for (i = 0; i < ARRAY_SIZE(ecc); i++) {
  453. + if (*p <= ecc[i]) {
  454. + if (!i)
  455. + *p = ecc[i];
  456. + else if (*p != ecc[i])
  457. + *p = ecc[i - 1];
  458. + return;
  459. + }
  460. + }
  461. +
  462. + *p = ecc[ARRAY_SIZE(ecc) - 1];
  463. +}
  464. +EXPORT_SYMBOL(mtk_ecc_update_strength);
  465. +
  466. +static int mtk_ecc_probe(struct platform_device *pdev)
  467. +{
  468. + struct device *dev = &pdev->dev;
  469. + struct mtk_ecc *ecc;
  470. + struct resource *res;
  471. + int irq, ret;
  472. +
  473. + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
  474. + if (!ecc)
  475. + return -ENOMEM;
  476. +
  477. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  478. + ecc->regs = devm_ioremap_resource(dev, res);
  479. + if (IS_ERR(ecc->regs)) {
  480. + dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
  481. + return PTR_ERR(ecc->regs);
  482. + }
  483. +
  484. + ecc->clk = devm_clk_get(dev, NULL);
  485. + if (IS_ERR(ecc->clk)) {
  486. + dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
  487. + return PTR_ERR(ecc->clk);
  488. + }
  489. +
  490. + irq = platform_get_irq(pdev, 0);
  491. + if (irq < 0) {
  492. + dev_err(dev, "failed to get irq\n");
  493. + return -EINVAL;
  494. + }
  495. +
  496. + ret = dma_set_mask(dev, DMA_BIT_MASK(32));
  497. + if (ret) {
  498. + dev_err(dev, "failed to set DMA mask\n");
  499. + return ret;
  500. + }
  501. +
  502. + ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
  503. + if (ret) {
  504. + dev_err(dev, "failed to request irq\n");
  505. + return -EINVAL;
  506. + }
  507. +
  508. + ecc->dev = dev;
  509. + sema_init(&ecc->sem, 1);
  510. + platform_set_drvdata(pdev, ecc);
  511. + dev_info(dev, "probed\n");
  512. +
  513. + return 0;
  514. +}
  515. +
  516. +#ifdef CONFIG_PM_SLEEP
  517. +static int mtk_ecc_suspend(struct device *dev)
  518. +{
  519. + struct mtk_ecc *ecc = dev_get_drvdata(dev);
  520. +
  521. + clk_disable_unprepare(ecc->clk);
  522. +
  523. + return 0;
  524. +}
  525. +
  526. +static int mtk_ecc_resume(struct device *dev)
  527. +{
  528. + struct mtk_ecc *ecc = dev_get_drvdata(dev);
  529. + int ret;
  530. +
  531. + ret = clk_prepare_enable(ecc->clk);
  532. + if (ret) {
  533. + dev_err(dev, "failed to enable clk\n");
  534. + return ret;
  535. + }
  536. +
  537. + mtk_ecc_hw_init(ecc);
  538. +
  539. + return 0;
  540. +}
  541. +
  542. +static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
  543. +#endif
  544. +
  545. +static const struct of_device_id mtk_ecc_dt_match[] = {
  546. + { .compatible = "mediatek,mt2701-ecc" },
  547. + {},
  548. +};
  549. +
  550. +MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
  551. +
  552. +static struct platform_driver mtk_ecc_driver = {
  553. + .probe = mtk_ecc_probe,
  554. + .driver = {
  555. + .name = "mtk-ecc",
  556. + .of_match_table = of_match_ptr(mtk_ecc_dt_match),
  557. +#ifdef CONFIG_PM_SLEEP
  558. + .pm = &mtk_ecc_pm_ops,
  559. +#endif
  560. + },
  561. +};
  562. +
  563. +module_platform_driver(mtk_ecc_driver);
  564. +
  565. +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
  566. +MODULE_AUTHOR("Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>");
  567. +MODULE_DESCRIPTION("MTK Nand ECC Driver");
  568. +MODULE_LICENSE("GPL");
  569. --- /dev/null
  570. +++ b/drivers/mtd/nand/mtk_ecc.h
  571. @@ -0,0 +1,53 @@
  572. +/*
  573. + * MTK SDG1 ECC controller
  574. + *
  575. + * Copyright (c) 2016 Mediatek
  576. + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
  577. + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
  578. + * This program is free software; you can redistribute it and/or modify it
  579. + * under the terms of the GNU General Public License version 2 as published
  580. + * by the Free Software Foundation.
  581. + */
  582. +
  583. +#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
  584. +#define __DRIVERS_MTD_NAND_MTK_ECC_H__
  585. +
  586. +#include <linux/types.h>
  587. +
  588. +#define ECC_PARITY_BITS (14)
  589. +
  590. +enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
  591. +enum mtk_ecc_codec {ECC_ENC, ECC_DEC};
  592. +
  593. +struct device_node;
  594. +struct mtk_ecc;
  595. +
  596. +struct mtk_ecc_stats {
  597. + u32 corrected;
  598. + u32 bitflips;
  599. + u32 failed;
  600. +};
  601. +
  602. +struct mtk_ecc_config {
  603. + enum mtk_ecc_mode ecc_mode;
  604. + enum mtk_ecc_codec codec;
  605. + dma_addr_t addr;
  606. + u32 sec_mask;
  607. + u32 strength;
  608. + u32 enc_len;
  609. + u32 dec_len;
  610. +};
  611. +
  612. +int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
  613. +void mtk_ecc_disable(struct mtk_ecc *, struct mtk_ecc_config *);
  614. +int mtk_ecc_encode_non_nfi_mode(struct mtk_ecc *, struct mtk_ecc_config *,
  615. + u8 *, u32);
  616. +void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
  617. +int mtk_ecc_wait_irq_done(struct mtk_ecc *, enum mtk_ecc_codec);
  618. +void mtk_ecc_hw_init(struct mtk_ecc *);
  619. +void mtk_ecc_update_strength(u32 *);
  620. +
  621. +struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
  622. +void mtk_ecc_release(struct mtk_ecc *);
  623. +
  624. +#endif
  625. --- /dev/null
  626. +++ b/drivers/mtd/nand/mtk_nand.c
  627. @@ -0,0 +1,1432 @@
  628. +/*
  629. + * MTK NAND Flash controller driver.
  630. + * Copyright (C) 2016 MediaTek Inc.
  631. + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
  632. + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
  633. + *
  634. + * This program is free software; you can redistribute it and/or modify
  635. + * it under the terms of the GNU General Public License version 2 as
  636. + * published by the Free Software Foundation.
  637. + *
  638. + * This program is distributed in the hope that it will be useful,
  639. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  640. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  641. + * GNU General Public License for more details.
  642. + */
  643. +
  644. +#include <linux/platform_device.h>
  645. +#include <linux/dma-mapping.h>
  646. +#include <linux/interrupt.h>
  647. +#include <linux/delay.h>
  648. +#include <linux/clk.h>
  649. +#include <linux/mtd/nand.h>
  650. +#include <linux/mtd/mtd.h>
  651. +#include <linux/module.h>
  652. +#include <linux/iopoll.h>
  653. +#include <linux/of.h>
  654. +#include "mtk_ecc.h"
  655. +
  656. +/* NAND controller register definition */
  657. +#define NFI_CNFG (0x00)
  658. +#define CNFG_AHB BIT(0)
  659. +#define CNFG_READ_EN BIT(1)
  660. +#define CNFG_DMA_BURST_EN BIT(2)
  661. +#define CNFG_BYTE_RW BIT(6)
  662. +#define CNFG_HW_ECC_EN BIT(8)
  663. +#define CNFG_AUTO_FMT_EN BIT(9)
  664. +#define CNFG_OP_CUST (6 << 12)
  665. +#define NFI_PAGEFMT (0x04)
  666. +#define PAGEFMT_FDM_ECC_SHIFT (12)
  667. +#define PAGEFMT_FDM_SHIFT (8)
  668. +#define PAGEFMT_SPARE_16 (0)
  669. +#define PAGEFMT_SPARE_26 (1)
  670. +#define PAGEFMT_SPARE_27 (2)
  671. +#define PAGEFMT_SPARE_28 (3)
  672. +#define PAGEFMT_SPARE_32 (4)
  673. +#define PAGEFMT_SPARE_36 (5)
  674. +#define PAGEFMT_SPARE_40 (6)
  675. +#define PAGEFMT_SPARE_44 (7)
  676. +#define PAGEFMT_SPARE_48 (8)
  677. +#define PAGEFMT_SPARE_49 (9)
  678. +#define PAGEFMT_SPARE_50 (0xa)
  679. +#define PAGEFMT_SPARE_51 (0xb)
  680. +#define PAGEFMT_SPARE_52 (0xc)
  681. +#define PAGEFMT_SPARE_62 (0xd)
  682. +#define PAGEFMT_SPARE_63 (0xe)
  683. +#define PAGEFMT_SPARE_64 (0xf)
  684. +#define PAGEFMT_SPARE_SHIFT (4)
  685. +#define PAGEFMT_SEC_SEL_512 BIT(2)
  686. +#define PAGEFMT_512_2K (0)
  687. +#define PAGEFMT_2K_4K (1)
  688. +#define PAGEFMT_4K_8K (2)
  689. +#define PAGEFMT_8K_16K (3)
  690. +/* NFI control */
  691. +#define NFI_CON (0x08)
  692. +#define CON_FIFO_FLUSH BIT(0)
  693. +#define CON_NFI_RST BIT(1)
  694. +#define CON_BRD BIT(8) /* burst read */
  695. +#define CON_BWR BIT(9) /* burst write */
  696. +#define CON_SEC_SHIFT (12)
  697. +/* Timing control register */
  698. +#define NFI_ACCCON (0x0C)
  699. +#define NFI_INTR_EN (0x10)
  700. +#define INTR_AHB_DONE_EN BIT(6)
  701. +#define NFI_INTR_STA (0x14)
  702. +#define NFI_CMD (0x20)
  703. +#define NFI_ADDRNOB (0x30)
  704. +#define NFI_COLADDR (0x34)
  705. +#define NFI_ROWADDR (0x38)
  706. +#define NFI_STRDATA (0x40)
  707. +#define STAR_EN (1)
  708. +#define STAR_DE (0)
  709. +#define NFI_CNRNB (0x44)
  710. +#define NFI_DATAW (0x50)
  711. +#define NFI_DATAR (0x54)
  712. +#define NFI_PIO_DIRDY (0x58)
  713. +#define PIO_DI_RDY (0x01)
  714. +#define NFI_STA (0x60)
  715. +#define STA_CMD BIT(0)
  716. +#define STA_ADDR BIT(1)
  717. +#define STA_BUSY BIT(8)
  718. +#define STA_EMP_PAGE BIT(12)
  719. +#define NFI_FSM_CUSTDATA (0xe << 16)
  720. +#define NFI_FSM_MASK (0xf << 16)
  721. +#define NFI_ADDRCNTR (0x70)
  722. +#define CNTR_MASK GENMASK(16, 12)
  723. +#define NFI_STRADDR (0x80)
  724. +#define NFI_BYTELEN (0x84)
  725. +#define NFI_CSEL (0x90)
  726. +#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
  727. +#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
  728. +#define NFI_FDM_MAX_SIZE (8)
  729. +#define NFI_MASTER_STA (0x224)
  730. +#define MASTER_STA_MASK (0x0FFF)
  731. +#define NFI_EMPTY_THRESH (0x23C)
  732. +
  733. +#define MTK_NAME "mtk-nand"
  734. +#define KB(x) ((x) * 1024UL)
  735. +#define MB(x) (KB(x) * 1024UL)
  736. +
  737. +#define MTK_TIMEOUT (500000)
  738. +#define MTK_RESET_TIMEOUT (1000000)
  739. +#define MTK_MAX_SECTOR (16)
  740. +#define MTK_NAND_MAX_NSELS (2)
  741. +
  742. +typedef void (*bad_mark_swap)(struct mtd_info *, uint8_t *buf, int raw);
  743. +struct mtk_nfc_bad_mark_ctl {
  744. + bad_mark_swap bm_swap;
  745. + u32 sec;
  746. + u32 pos;
  747. +};
  748. +
  749. +/*
  750. + * FDM: region used to store free OOB data
  751. + */
  752. +struct mtk_nfc_fdm {
  753. + u32 reg_size;
  754. + u32 ecc_size;
  755. +};
  756. +
  757. +struct mtk_nfc_nand_chip {
  758. + struct list_head node;
  759. + struct nand_chip nand;
  760. +
  761. + struct mtk_nfc_bad_mark_ctl bad_mark;
  762. + struct mtk_nfc_fdm fdm;
  763. + u32 spare_per_sector;
  764. +
  765. + int nsels;
  766. + u8 sels[0];
  767. + /* nothing after this field */
  768. +};
  769. +
  770. +struct mtk_nfc_clk {
  771. + struct clk *nfi_clk;
  772. + struct clk *pad_clk;
  773. +};
  774. +
  775. +struct mtk_nfc {
  776. + struct nand_hw_control controller;
  777. + struct mtk_ecc_config ecc_cfg;
  778. + struct mtk_nfc_clk clk;
  779. + struct mtk_ecc *ecc;
  780. +
  781. + struct device *dev;
  782. + void __iomem *regs;
  783. +
  784. + struct completion done;
  785. + struct list_head chips;
  786. +
  787. + u8 *buffer;
  788. +};
  789. +
  790. +static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
  791. +{
  792. + return container_of(nand, struct mtk_nfc_nand_chip, nand);
  793. +}
  794. +
  795. +static inline uint8_t *data_ptr(struct nand_chip *chip, const uint8_t *p, int i)
  796. +{
  797. + return (uint8_t *) p + i * chip->ecc.size;
  798. +}
  799. +
  800. +static inline uint8_t *oob_ptr(struct nand_chip *chip, int i)
  801. +{
  802. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  803. + uint8_t *poi;
  804. +
  805. + if (i < mtk_nand->bad_mark.sec)
  806. + poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
  807. + else if (i == mtk_nand->bad_mark.sec)
  808. + poi = chip->oob_poi;
  809. + else
  810. + poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
  811. +
  812. + return poi;
  813. +}
  814. +
  815. +static inline int mtk_data_len(struct nand_chip *chip)
  816. +{
  817. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  818. +
  819. + return chip->ecc.size + mtk_nand->spare_per_sector;
  820. +}
  821. +
  822. +static inline uint8_t *mtk_data_ptr(struct nand_chip *chip, int i)
  823. +{
  824. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  825. +
  826. + return nfc->buffer + i * mtk_data_len(chip);
  827. +}
  828. +
  829. +static inline uint8_t *mtk_oob_ptr(struct nand_chip *chip, int i)
  830. +{
  831. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  832. +
  833. + return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
  834. +}
  835. +
  836. +static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
  837. +{
  838. + writel(val, nfc->regs + reg);
  839. +}
  840. +
  841. +static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
  842. +{
  843. + writew(val, nfc->regs + reg);
  844. +}
  845. +
  846. +static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
  847. +{
  848. + writeb(val, nfc->regs + reg);
  849. +}
  850. +
  851. +static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
  852. +{
  853. + return readl_relaxed(nfc->regs + reg);
  854. +}
  855. +
  856. +static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
  857. +{
  858. + return readw_relaxed(nfc->regs + reg);
  859. +}
  860. +
  861. +static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
  862. +{
  863. + return readb_relaxed(nfc->regs + reg);
  864. +}
  865. +
  866. +static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
  867. +{
  868. + struct device *dev = nfc->dev;
  869. + u32 val;
  870. + int ret;
  871. +
  872. + /* reset all registers and force the NFI master to terminate */
  873. + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
  874. +
  875. + /* wait for the master to finish the last transaction */
  876. + ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
  877. + !(val & MASTER_STA_MASK), 50, MTK_RESET_TIMEOUT);
  878. + if (ret)
  879. + dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
  880. + NFI_MASTER_STA, val);
  881. +
  882. + /* ensure any status register affected by the NFI master is reset */
  883. + nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
  884. + nfi_writew(nfc, STAR_DE, NFI_STRDATA);
  885. +}
  886. +
  887. +static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
  888. +{
  889. + struct device *dev = nfc->dev;
  890. + u32 val;
  891. + int ret;
  892. +
  893. + nfi_writel(nfc, command, NFI_CMD);
  894. +
  895. + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
  896. + !(val & STA_CMD), 10, MTK_TIMEOUT);
  897. + if (ret) {
  898. + dev_warn(dev, "nfi core timed out entering command mode\n");
  899. + return -EIO;
  900. + }
  901. +
  902. + return 0;
  903. +}
  904. +
  905. +static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
  906. +{
  907. + struct device *dev = nfc->dev;
  908. + u32 val;
  909. + int ret;
  910. +
  911. + nfi_writel(nfc, addr, NFI_COLADDR);
  912. + nfi_writel(nfc, 0, NFI_ROWADDR);
  913. + nfi_writew(nfc, 1, NFI_ADDRNOB);
  914. +
  915. + ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
  916. + !(val & STA_ADDR), 10, MTK_TIMEOUT);
  917. + if (ret) {
  918. + dev_warn(dev, "nfi core timed out entering address mode\n");
  919. + return -EIO;
  920. + }
  921. +
  922. + return 0;
  923. +}
  924. +
  925. +static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
  926. +{
  927. + struct nand_chip *chip = mtd_to_nand(mtd);
  928. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  929. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  930. + u32 fmt, spare;
  931. +
  932. + if (!mtd->writesize)
  933. + return 0;
  934. +
  935. + spare = mtk_nand->spare_per_sector;
  936. +
  937. + switch (mtd->writesize) {
  938. + case 512:
  939. + fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
  940. + break;
  941. + case KB(2):
  942. + if (chip->ecc.size == 512)
  943. + fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
  944. + else
  945. + fmt = PAGEFMT_512_2K;
  946. + break;
  947. + case KB(4):
  948. + if (chip->ecc.size == 512)
  949. + fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
  950. + else
  951. + fmt = PAGEFMT_2K_4K;
  952. + break;
  953. + case KB(8):
  954. + if (chip->ecc.size == 512)
  955. + fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
  956. + else
  957. + fmt = PAGEFMT_4K_8K;
  958. + break;
  959. + case KB(16):
  960. + fmt = PAGEFMT_8K_16K;
  961. + break;
  962. + default:
  963. + dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
  964. + return -EINVAL;
  965. + }
  966. +
  967. + /* the hardware doubles the value for this eccsize so let's halve it */
  968. + if (chip->ecc.size == 1024)
  969. + spare >>= 1;
  970. +
  971. + switch (spare) {
  972. + case 16:
  973. + fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
  974. + break;
  975. + case 26:
  976. + fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
  977. + break;
  978. + case 27:
  979. + fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
  980. + break;
  981. + case 28:
  982. + fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
  983. + break;
  984. + case 32:
  985. + fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
  986. + break;
  987. + case 36:
  988. + fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
  989. + break;
  990. + case 40:
  991. + fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
  992. + break;
  993. + case 44:
  994. + fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
  995. + break;
  996. + case 48:
  997. + fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
  998. + break;
  999. + case 49:
  1000. + fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
  1001. + break;
  1002. + case 50:
  1003. + fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
  1004. + break;
  1005. + case 51:
  1006. + fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
  1007. + break;
  1008. + case 52:
  1009. + fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
  1010. + break;
  1011. + case 62:
  1012. + fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
  1013. + break;
  1014. + case 63:
  1015. + fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
  1016. + break;
  1017. + case 64:
  1018. + fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
  1019. + break;
  1020. + default:
  1021. + dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
  1022. + return -EINVAL;
  1023. + }
  1024. +
  1025. + fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
  1026. + fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
  1027. + nfi_writew(nfc, fmt, NFI_PAGEFMT);
  1028. +
  1029. + nfc->ecc_cfg.strength = chip->ecc.strength;
  1030. + nfc->ecc_cfg.enc_len = chip->ecc.size + mtk_nand->fdm.ecc_size;
  1031. + nfc->ecc_cfg.dec_len = (nfc->ecc_cfg.enc_len << 3)
  1032. + + chip->ecc.strength * ECC_PARITY_BITS;
  1033. +
  1034. + return 0;
  1035. +}
  1036. +
  1037. +static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
  1038. +{
  1039. + struct nand_chip *nand = mtd_to_nand(mtd);
  1040. + struct mtk_nfc *nfc = nand_get_controller_data(nand);
  1041. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
  1042. +
  1043. + if (chip < 0)
  1044. + return;
  1045. +
  1046. + mtk_nfc_hw_runtime_config(mtd);
  1047. +
  1048. + nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
  1049. +}
  1050. +
  1051. +static int mtk_nfc_dev_ready(struct mtd_info *mtd)
  1052. +{
  1053. + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
  1054. +
  1055. + if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
  1056. + return 0;
  1057. +
  1058. + return 1;
  1059. +}
  1060. +
  1061. +static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
  1062. +{
  1063. + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
  1064. +
  1065. + if (ctrl & NAND_ALE)
  1066. + mtk_nfc_send_address(nfc, dat);
  1067. + else if (ctrl & NAND_CLE) {
  1068. + mtk_nfc_hw_reset(nfc);
  1069. +
  1070. + nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
  1071. + mtk_nfc_send_command(nfc, dat);
  1072. + }
  1073. +}
  1074. +
  1075. +static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
  1076. +{
  1077. + int rc;
  1078. + u8 val;
  1079. +
  1080. + rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
  1081. + val & PIO_DI_RDY, 10, MTK_TIMEOUT);
  1082. + if (rc < 0)
  1083. + dev_err(nfc->dev, "data not ready\n");
  1084. +}
  1085. +
  1086. +static inline uint8_t mtk_nfc_read_byte(struct mtd_info *mtd)
  1087. +{
  1088. + struct nand_chip *chip = mtd_to_nand(mtd);
  1089. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1090. + u32 reg;
  1091. +
  1092. + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
  1093. + if (reg != NFI_FSM_CUSTDATA) {
  1094. + reg = nfi_readw(nfc, NFI_CNFG);
  1095. + reg |= CNFG_BYTE_RW | CNFG_READ_EN;
  1096. + nfi_writew(nfc, reg, NFI_CNFG);
  1097. +
  1098. + reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
  1099. + nfi_writel(nfc, reg, NFI_CON);
  1100. +
  1101. + /* trigger to fetch data */
  1102. + nfi_writew(nfc, STAR_EN, NFI_STRDATA);
  1103. + }
  1104. +
  1105. + mtk_nfc_wait_ioready(nfc);
  1106. +
  1107. + return nfi_readb(nfc, NFI_DATAR);
  1108. +}
  1109. +
  1110. +static void mtk_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  1111. +{
  1112. + int i;
  1113. +
  1114. + for (i = 0; i < len; i++)
  1115. + buf[i] = mtk_nfc_read_byte(mtd);
  1116. +}
  1117. +
  1118. +static void mtk_nfc_write_byte(struct mtd_info *mtd, uint8_t byte)
  1119. +{
  1120. + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
  1121. + u32 reg;
  1122. +
  1123. + reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
  1124. +
  1125. + if (reg != NFI_FSM_CUSTDATA) {
  1126. + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
  1127. + nfi_writew(nfc, reg, NFI_CNFG);
  1128. +
  1129. + reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
  1130. + nfi_writel(nfc, reg, NFI_CON);
  1131. +
  1132. + nfi_writew(nfc, STAR_EN, NFI_STRDATA);
  1133. + }
  1134. +
  1135. + mtk_nfc_wait_ioready(nfc);
  1136. + nfi_writeb(nfc, byte, NFI_DATAW);
  1137. +}
  1138. +
  1139. +static void mtk_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
  1140. +{
  1141. + int i;
  1142. +
  1143. + for (i = 0; i < len; i++)
  1144. + mtk_nfc_write_byte(mtd, buf[i]);
  1145. +}
  1146. +
  1147. +static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
  1148. +{
  1149. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1150. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  1151. + int size = chip->ecc.size + mtk_nand->fdm.reg_size;
  1152. +
  1153. + nfc->ecc_cfg.ecc_mode = ECC_DMA_MODE;
  1154. + nfc->ecc_cfg.codec = ECC_ENC;
  1155. + return mtk_ecc_encode_non_nfi_mode(nfc->ecc, &nfc->ecc_cfg, data, size);
  1156. +}
  1157. +
  1158. +static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, uint8_t *b, int c)
  1159. +{
  1160. + /* nope */
  1161. +}
  1162. +
  1163. +static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, uint8_t *buf, int raw)
  1164. +{
  1165. + struct nand_chip *chip = mtd_to_nand(mtd);
  1166. + struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
  1167. + u32 bad_pos = nand->bad_mark.pos;
  1168. +
  1169. + if (raw)
  1170. + bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
  1171. + else
  1172. + bad_pos += nand->bad_mark.sec * chip->ecc.size;
  1173. +
  1174. + swap(chip->oob_poi[0], buf[bad_pos]);
  1175. +}
  1176. +
  1177. +static int mtk_nfc_format_subpage(struct mtd_info *mtd, uint32_t offset,
  1178. + uint32_t len, const uint8_t *buf)
  1179. +{
  1180. + struct nand_chip *chip = mtd_to_nand(mtd);
  1181. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  1182. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1183. + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
  1184. + u32 start, end;
  1185. + int i, ret;
  1186. +
  1187. + start = offset / chip->ecc.size;
  1188. + end = DIV_ROUND_UP(offset + len, chip->ecc.size);
  1189. +
  1190. + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
  1191. + for (i = 0; i < chip->ecc.steps; i++) {
  1192. +
  1193. + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
  1194. + chip->ecc.size);
  1195. +
  1196. + if (start > i || i >= end)
  1197. + continue;
  1198. +
  1199. + if (i == mtk_nand->bad_mark.sec)
  1200. + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
  1201. +
  1202. + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
  1203. +
  1204. + /* program the ECC parity back to the OOB */
  1205. + ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
  1206. + if (ret < 0)
  1207. + return ret;
  1208. + }
  1209. +
  1210. + return 0;
  1211. +}
  1212. +
  1213. +static void mtk_nfc_format_page(struct mtd_info *mtd, const uint8_t *buf)
  1214. +{
  1215. + struct nand_chip *chip = mtd_to_nand(mtd);
  1216. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  1217. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1218. + struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
  1219. + u32 i;
  1220. +
  1221. + memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
  1222. + for (i = 0; i < chip->ecc.steps; i++) {
  1223. + if (buf)
  1224. + memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
  1225. + chip->ecc.size);
  1226. +
  1227. + if (i == mtk_nand->bad_mark.sec)
  1228. + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
  1229. +
  1230. + memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
  1231. + }
  1232. +}
  1233. +
  1234. +static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
  1235. + u32 sectors)
  1236. +{
  1237. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1238. + u32 *p;
  1239. + int i;
  1240. +
  1241. + for (i = 0; i < sectors; i++) {
  1242. + p = (u32 *) oob_ptr(chip, start + i);
  1243. + p[0] = nfi_readl(nfc, NFI_FDML(i));
  1244. + p[1] = nfi_readl(nfc, NFI_FDMM(i));
  1245. + }
  1246. +}
  1247. +
  1248. +static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
  1249. +{
  1250. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1251. + u32 *p;
  1252. + int i;
  1253. +
  1254. + for (i = 0; i < chip->ecc.steps ; i++) {
  1255. + p = (u32 *) oob_ptr(chip, i);
  1256. + nfi_writel(nfc, p[0], NFI_FDML(i));
  1257. + nfi_writel(nfc, p[1], NFI_FDMM(i));
  1258. + }
  1259. +}
  1260. +
  1261. +static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  1262. + const uint8_t *buf, int page, int len)
  1263. +{
  1264. +
  1265. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1266. + struct device *dev = nfc->dev;
  1267. + dma_addr_t addr;
  1268. + u32 reg;
  1269. + int ret;
  1270. +
  1271. + addr = dma_map_single(dev, (void *) buf, len, DMA_TO_DEVICE);
  1272. + ret = dma_mapping_error(nfc->dev, addr);
  1273. + if (ret) {
  1274. + dev_err(nfc->dev, "dma mapping error\n");
  1275. + return -EINVAL;
  1276. + }
  1277. +
  1278. + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
  1279. + nfi_writew(nfc, reg, NFI_CNFG);
  1280. +
  1281. + nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
  1282. + nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
  1283. + nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
  1284. +
  1285. + init_completion(&nfc->done);
  1286. +
  1287. + reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
  1288. + nfi_writel(nfc, reg, NFI_CON);
  1289. + nfi_writew(nfc, STAR_EN, NFI_STRDATA);
  1290. +
  1291. + ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
  1292. + if (!ret) {
  1293. + dev_err(dev, "program ahb done timeout\n");
  1294. + nfi_writew(nfc, 0, NFI_INTR_EN);
  1295. + ret = -ETIMEDOUT;
  1296. + goto timeout;
  1297. + }
  1298. +
  1299. + ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
  1300. + (reg & CNTR_MASK) >= chip->ecc.steps, 10, MTK_TIMEOUT);
  1301. + if (ret)
  1302. + dev_err(dev, "hwecc write timeout\n");
  1303. +
  1304. +timeout:
  1305. +
  1306. + dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
  1307. + nfi_writel(nfc, 0, NFI_CON);
  1308. +
  1309. + return ret;
  1310. +}
  1311. +
  1312. +static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  1313. + const uint8_t *buf, int page, int raw)
  1314. +{
  1315. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1316. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  1317. + size_t len;
  1318. + const u8 *bufpoi;
  1319. + u32 reg;
  1320. + int ret;
  1321. +
  1322. + if (!raw) {
  1323. + /* OOB => FDM: from register, ECC: from HW */
  1324. + reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
  1325. + nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
  1326. +
  1327. + nfc->ecc_cfg.codec = ECC_ENC;
  1328. + nfc->ecc_cfg.ecc_mode = ECC_NFI_MODE;
  1329. + ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
  1330. + if (ret) {
  1331. + /* clear NFI config */
  1332. + reg = nfi_readw(nfc, NFI_CNFG);
  1333. + reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
  1334. + nfi_writew(nfc, reg, NFI_CNFG);
  1335. +
  1336. + return ret;
  1337. + }
  1338. +
  1339. + memcpy(nfc->buffer, buf, mtd->writesize);
  1340. + mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
  1341. + bufpoi = nfc->buffer;
  1342. +
  1343. + /* write OOB into the FDM registers (OOB area in MTK NAND) */
  1344. + mtk_nfc_write_fdm(chip);
  1345. + } else
  1346. + bufpoi = buf;
  1347. +
  1348. + len = mtd->writesize + (raw ? mtd->oobsize : 0);
  1349. + ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
  1350. +
  1351. + if (!raw)
  1352. + mtk_ecc_disable(nfc->ecc, &nfc->ecc_cfg);
  1353. +
  1354. + return ret;
  1355. +}
  1356. +
  1357. +static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
  1358. + struct nand_chip *chip, const uint8_t *buf, int oob_on, int page)
  1359. +{
  1360. + return mtk_nfc_write_page(mtd, chip, buf, page, 0);
  1361. +}
  1362. +
  1363. +static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  1364. + const uint8_t *buf, int oob_on, int pg)
  1365. +{
  1366. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1367. +
  1368. + mtk_nfc_format_page(mtd, buf);
  1369. + return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
  1370. +}
  1371. +
  1372. +static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
  1373. + struct nand_chip *chip, uint32_t offset, uint32_t data_len,
  1374. + const uint8_t *buf, int oob_on, int page)
  1375. +{
  1376. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1377. + int ret;
  1378. +
  1379. + ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
  1380. + if (ret < 0)
  1381. + return ret;
  1382. +
  1383. + /* use the data in the private buffer (now with FDM and ECC parity) */
  1384. + return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
  1385. +}
  1386. +
  1387. +static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
  1388. + int page)
  1389. +{
  1390. + int ret;
  1391. +
  1392. + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
  1393. +
  1394. + ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
  1395. + if (ret < 0)
  1396. + return -EIO;
  1397. +
  1398. + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
  1399. + ret = chip->waitfunc(mtd, chip);
  1400. +
  1401. + return ret & NAND_STATUS_FAIL ? -EIO : 0;
  1402. +}
  1403. +
  1404. +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
  1405. +{
  1406. + struct nand_chip *chip = mtd_to_nand(mtd);
  1407. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1408. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  1409. + struct mtk_ecc_stats stats;
  1410. + int rc, i;
  1411. +
  1412. + rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
  1413. + if (rc) {
  1414. + memset(buf, 0xff, sectors * chip->ecc.size);
  1415. + for (i = 0; i < sectors; i++)
  1416. + memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
  1417. + return 0;
  1418. + }
  1419. +
  1420. + mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
  1421. + mtd->ecc_stats.corrected += stats.corrected;
  1422. + mtd->ecc_stats.failed += stats.failed;
  1423. +
  1424. + return stats.bitflips;
  1425. +}
  1426. +
  1427. +static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
  1428. + uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
  1429. + int page, int raw)
  1430. +{
  1431. + struct mtk_nfc *nfc = nand_get_controller_data(chip);
  1432. + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
  1433. + u32 spare = mtk_nand->spare_per_sector;
  1434. + u32 column, sectors, start, end, reg;
  1435. + dma_addr_t addr;
  1436. + int bitflips;
  1437. + size_t len;
  1438. + u8 *buf;
  1439. + int rc;
  1440. +
  1441. + start = data_offs / chip->ecc.size;
  1442. + end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
  1443. +
  1444. + sectors = end - start;
  1445. + column = start * (chip->ecc.size + spare);
  1446. +
  1447. + len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
  1448. + buf = bufpoi + start * chip->ecc.size;
  1449. +
  1450. + if (column != 0)
  1451. + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
  1452. +
  1453. + addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
  1454. + rc = dma_mapping_error(nfc->dev, addr);
  1455. + if (rc) {
  1456. + dev_err(nfc->dev, "dma mapping error\n");
  1457. +
  1458. + return -EINVAL;
  1459. + }
  1460. +
+	reg = nfi_readw(nfc, NFI_CNFG);
+	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
+	if (!raw) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		nfi_writew(nfc, reg, NFI_CNFG);
+
+		nfc->ecc_cfg.ecc_mode = ECC_NFI_MODE;
+		nfc->ecc_cfg.sec_mask = sectors;
+		nfc->ecc_cfg.codec = ECC_DEC;
+		rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+		if (rc) {
+			dev_err(nfc->dev, "ecc enable\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+				CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			nfi_writew(nfc, reg, NFI_CNFG);
+			dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+			return rc;
+		}
+	} else
+		nfi_writew(nfc, reg, NFI_CNFG);
+
+	nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+
+	init_completion(&nfc->done);
+	reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+	nfi_writel(nfc, reg, NFI_CON);
+	nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+	rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+	if (!rc)
+		dev_warn(nfc->dev, "read ahb/dma done timeout\n");
+
+	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
+				       (reg & CNTR_MASK) >= sectors, 10, MTK_TIMEOUT);
+	if (rc < 0) {
+		dev_err(nfc->dev, "subpage done timeout\n");
+		bitflips = -EIO;
+	} else {
+		bitflips = 0;
+		if (!raw) {
+			rc = mtk_ecc_wait_irq_done(nfc->ecc, ECC_DEC);
+			bitflips = rc < 0 ? -ETIMEDOUT :
+				   mtk_nfc_update_ecc_stats(mtd, buf, sectors);
+			mtk_nfc_read_fdm(chip, start, sectors);
+		}
+	}
+
+	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+	if (raw)
+		goto done;
+
+	mtk_ecc_disable(nfc->ecc, &nfc->ecc_cfg);
+
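+	/* apply the bad block marker swap when its sector falls within this read */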
+	if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+		mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
+done:
+	nfi_writel(nfc, 0, NFI_CON);
+
+	return bitflips;
+}
+
+static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
+		struct nand_chip *chip, uint32_t off, uint32_t len, uint8_t *p, int pg)
+{
+	return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
+		struct nand_chip *chip, uint8_t *p, int oob_on, int pg)
+{
+	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+		uint8_t *buf, int oob_on, int page)
+{
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	int i, ret;
+
+	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+	ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
+				   page, 1);
+	if (ret < 0)
+		return ret;
+
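+	/* split the sector-interleaved raw buffer back into data and OOB/FDM */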
+	for (i = 0; i < chip->ecc.steps; i++) {
+		memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+		if (i == mtk_nand->bad_mark.sec)
+			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+		if (buf)
+			memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+			       chip->ecc.size);
+	}
+
+	return ret;
+}
+
+static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+				int page)
+{
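+	/* OOB is read via a raw page read; buf == NULL keeps only the OOB/FDM bytes */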
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
+{
+	nfi_writel(nfc, 0x10804211, NFI_ACCCON);
+	nfi_writew(nfc, 0xf1, NFI_CNRNB);
+	nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+	mtk_nfc_hw_reset(nfc);
+
+	nfi_readl(nfc, NFI_INTR_STA);
+	nfi_writel(nfc, 0, NFI_INTR_EN);
+}
+
+static irqreturn_t mtk_nfc_irq(int irq, void *id)
+{
+	struct mtk_nfc *nfc = id;
+	u16 sta, ien;
+
+	sta = nfi_readw(nfc, NFI_INTR_STA);
+	ien = nfi_readw(nfc, NFI_INTR_EN);
+
+	if (!(sta & ien))
+		return IRQ_NONE;
+
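+	/* mask the interrupt source(s) that fired and wake up the waiter */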
+	nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+	complete(&nfc->done);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
+{
+	int ret;
+
+	ret = clk_prepare_enable(clk->nfi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable nfi clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->pad_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable pad clk\n");
+		clk_disable_unprepare(clk->nfi_clk);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
+{
+	clk_disable_unprepare(clk->nfi_clk);
+	clk_disable_unprepare(clk->pad_clk);
+}
+
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oob_region)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	u32 eccsteps;
+
+	eccsteps = mtd->writesize / chip->ecc.size;
+
+	if (section >= eccsteps)
+		return -ERANGE;
+
+	oob_region->length = fdm->reg_size - fdm->ecc_size;
+	oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
+
+	return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oob_region)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	u32 eccsteps;
+
+	if (section)
+		return -ERANGE;
+
+	eccsteps = mtd->writesize / chip->ecc.size;
+	oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
+	oob_region->length = mtd->oobsize - oob_region->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+	.free = mtk_nfc_ooblayout_free,
+	.ecc = mtk_nfc_ooblayout_ecc,
+};
+
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+	u32 ecc_bytes;
+
+	ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+
+	fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+	if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+		fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+	/* bad block mark storage */
+	fdm->ecc_size = 1;
+}
+
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+				     struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+
+	if (mtd->writesize == 512)
+		bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+	else {
+		bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
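+		/*
+		 * locate the ECC sector and the offset inside it where the
+		 * factory bad block marker lands once data and spare are
+		 * interleaved
+		 */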
+		bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
+		bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
+	}
+}
+
+static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
+		       48, 49, 50, 51, 52, 62, 63, 64};
+	u32 eccsteps, i;
+
+	eccsteps = mtd->writesize / nand->ecc.size;
+	*sps = mtd->oobsize / eccsteps;
+
+	if (nand->ecc.size == 1024)
+		*sps >>= 1;
+
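+	/* round down to the closest spare size supported by the controller */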
+	for (i = 0; i < ARRAY_SIZE(spare); i++) {
+		if (*sps <= spare[i]) {
+			if (!i)
+				*sps = spare[i];
+			else if (*sps != spare[i])
+				*sps = spare[i - 1];
+			break;
+		}
+	}
+
+	if (i >= ARRAY_SIZE(spare))
+		*sps = spare[ARRAY_SIZE(spare) - 1];
+
+	if (nand->ecc.size == 1024)
+		*sps <<= 1;
+}
+
+static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	u32 spare;
+
+	/* support only ecc hw mode */
+	if (nand->ecc.mode != NAND_ECC_HW) {
+		dev_err(dev, "ecc.mode not supported\n");
+		return -EINVAL;
+	}
+
+	/* if optional DT settings are not present */
+	if (!nand->ecc.size || !nand->ecc.strength) {
+
+		/* controller only supports sizes 512 and 1024 */
+		nand->ecc.size = (mtd->writesize > 512) ? 1024 : 512;
+
+		/* get controller valid values */
+		mtk_nfc_set_spare_per_sector(&spare, mtd);
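+		/* FDM bytes are carved out of the spare; the rest is left for ECC parity */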
+		spare = spare - NFI_FDM_MAX_SIZE;
+		nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+	}
+
+	mtk_ecc_update_strength(&nand->ecc.strength);
+
+	dev_info(dev, "eccsize %d eccstrength %d\n",
+		 nand->ecc.size, nand->ecc.strength);
+
+	return 0;
+}
+
+static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+				  struct device_node *np)
+{
+	struct mtk_nfc_nand_chip *chip;
+	struct nand_chip *nand;
+	struct mtd_info *mtd;
+	int nsels, len;
+	u32 tmp;
+	int ret;
+	int i;
+
+	if (!of_get_property(np, "reg", &nsels))
+		return -ENODEV;
+
+	nsels /= sizeof(u32);
+	if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
+		dev_err(dev, "invalid reg property size %d\n", nsels);
+		return -EINVAL;
+	}
+
+	chip = devm_kzalloc(dev,
+			    sizeof(*chip) + nsels * sizeof(u8), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->nsels = nsels;
+	for (i = 0; i < nsels; i++) {
+		ret = of_property_read_u32_index(np, "reg", i, &tmp);
+		if (ret) {
+			dev_err(dev, "reg property failure : %d\n", ret);
+			return ret;
+		}
+		chip->sels[i] = tmp;
+	}
+
+	nand = &chip->nand;
+	nand->controller = &nfc->controller;
+
+	nand_set_flash_node(nand, np);
+	nand_set_controller_data(nand, nfc);
+
+	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
+	nand->dev_ready = mtk_nfc_dev_ready;
+	nand->select_chip = mtk_nfc_select_chip;
+	nand->write_byte = mtk_nfc_write_byte;
+	nand->write_buf = mtk_nfc_write_buf;
+	nand->read_byte = mtk_nfc_read_byte;
+	nand->read_buf = mtk_nfc_read_buf;
+	nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+
+	/* set default mode in case dt entry is missing */
+	nand->ecc.mode = NAND_ECC_HW;
+
+	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
+	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
+	nand->ecc.write_page = mtk_nfc_write_page_hwecc;
+	nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
+	nand->ecc.write_oob = mtk_nfc_write_oob_std;
+
+	nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
+	nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
+	nand->ecc.read_page = mtk_nfc_read_page_hwecc;
+	nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
+	nand->ecc.read_oob = mtk_nfc_read_oob_std;
+
+	mtd = nand_to_mtd(nand);
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = dev;
+	mtd->name = MTK_NAME;
+	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+	mtk_nfc_hw_init(nfc);
+
+	ret = nand_scan_ident(mtd, nsels, NULL);
+	if (ret)
+		return -ENODEV;
+
+	/* store bbt magic in page, cause OOB is not protected */
+	if (nand->bbt_options & NAND_BBT_USE_FLASH)
+		nand->bbt_options |= NAND_BBT_NO_OOB;
+
+	ret = mtk_nfc_ecc_init(dev, mtd);
+	if (ret)
+		return -EINVAL;
+
+	mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
+	mtk_nfc_set_fdm(&chip->fdm, mtd);
+	mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
+
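+	/* bounce buffer large enough for one raw page: main data plus OOB */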
+	len = mtd->writesize + mtd->oobsize;
+	nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+	if (!nfc->buffer)
+		return -ENOMEM;
+
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		return -ENODEV;
+
+	ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+	if (ret) {
+		dev_err(dev, "mtd parse partition error\n");
+		nand_release(mtd);
+		return ret;
+	}
+
+	list_add_tail(&chip->node, &nfc->chips);
+
+	return 0;
+}
+
+static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+{
+	struct device_node *np = dev->of_node;
+	struct device_node *nand_np;
+	int ret;
+
+	for_each_child_of_node(np, nand_np) {
+		ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+		if (ret) {
+			of_node_put(nand_np);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int mtk_nfc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct mtk_nfc *nfc;
+	struct resource *res;
+	int ret, irq;
+
+	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	spin_lock_init(&nfc->controller.lock);
+	init_waitqueue_head(&nfc->controller.wq);
+	INIT_LIST_HEAD(&nfc->chips);
+
+	/* probe defer if not ready */
+	nfc->ecc = of_mtk_ecc_get(np);
+	if (IS_ERR(nfc->ecc))
+		return PTR_ERR(nfc->ecc);
+	else if (!nfc->ecc)
+		return -ENODEV;
+
+	nfc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nfc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(nfc->regs)) {
+		ret = PTR_ERR(nfc->regs);
+		dev_err(dev, "no nfi base\n");
+		goto release_ecc;
+	}
+
+	nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+	if (IS_ERR(nfc->clk.nfi_clk)) {
+		dev_err(dev, "no clk\n");
+		ret = PTR_ERR(nfc->clk.nfi_clk);
+		goto release_ecc;
+	}
+
+	nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+	if (IS_ERR(nfc->clk.pad_clk)) {
+		dev_err(dev, "no pad clk\n");
+		ret = PTR_ERR(nfc->clk.pad_clk);
+		goto release_ecc;
+	}
+
+	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+	if (ret)
+		goto release_ecc;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no nfi irq resource\n");
+		ret = -EINVAL;
+		goto clk_disable;
+	}
+
+	ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
+	if (ret) {
+		dev_err(dev, "failed to request nfi irq\n");
+		goto clk_disable;
+	}
+
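+	/* the NFI start address register takes a 32-bit DMA address */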
+	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "failed to set dma mask\n");
+		goto clk_disable;
+	}
+
+	platform_set_drvdata(pdev, nfc);
+
+	ret = mtk_nfc_nand_chips_init(dev, nfc);
+	if (ret) {
+		dev_err(dev, "failed to init nand chips\n");
+		goto clk_disable;
+	}
+
+	return 0;
+
+clk_disable:
+	mtk_nfc_disable_clk(&nfc->clk);
+
+release_ecc:
+	mtk_ecc_release(nfc->ecc);
+
+	return ret;
+}
+
+static int mtk_nfc_remove(struct platform_device *pdev)
+{
+	struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+	struct mtk_nfc_nand_chip *chip;
+
+	while (!list_empty(&nfc->chips)) {
+		chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
+					node);
+		nand_release(nand_to_mtd(&chip->nand));
+		list_del(&chip->node);
+	}
+
+	mtk_ecc_release(nfc->ecc);
+	mtk_nfc_disable_clk(&nfc->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nfc_suspend(struct device *dev)
+{
+	struct mtk_nfc *nfc = dev_get_drvdata(dev);
+
+	mtk_nfc_disable_clk(&nfc->clk);
+
+	return 0;
+}
+
+static int mtk_nfc_resume(struct device *dev)
+{
+	struct mtk_nfc *nfc = dev_get_drvdata(dev);
+	struct mtk_nfc_nand_chip *chip;
+	struct nand_chip *nand;
+	struct mtd_info *mtd;
+	int ret;
+	u32 i;
+
+	udelay(200);
+
+	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+	if (ret)
+		return ret;
+
+	mtk_nfc_hw_init(nfc);
+
+	list_for_each_entry(chip, &nfc->chips, node) {
+		nand = &chip->nand;
+		mtd = nand_to_mtd(nand);
+		for (i = 0; i < chip->nsels; i++) {
+			nand->select_chip(mtd, i);
+			nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+		}
+	}
+
+	return 0;
+}
+static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
+#endif
+
+static const struct of_device_id mtk_nfc_id_table[] = {
+	{ .compatible = "mediatek,mt2701-nfc" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
+static struct platform_driver mtk_nfc_driver = {
+	.probe = mtk_nfc_probe,
+	.remove = mtk_nfc_remove,
+	.driver = {
+		.name = MTK_NAME,
+		.of_match_table = mtk_nfc_id_table,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &mtk_nfc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(mtk_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_AUTHOR("Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");