092-Add-Broadcom-STB-NAND.patch 77 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765
  1. This contains the following commits:
  2. commit bcb83a19d3ac95fe3c0e79e942fb628120738853
  3. Author: Hauke Mehrtens <hauke@hauke-m.de>
  4. Date: Sun May 17 17:41:01 2015 +0200
  5. mtd: brcmnand: do not make local variable static
  6. Remove static in front of ctrl. This variable should not be shared
  7. between different instances of brcmnand_probe(), it should be local to
  8. this function and stored on the stack.
  9. Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
  10. Signed-off-by: Brian Norris <computersforpeace@gmail.com>
  11. commit 802041247a0abbeaf1dddb8a8d56f491762ae357
  12. Author: Hauke Mehrtens <hauke@hauke-m.de>
  13. Date: Sun May 17 17:41:00 2015 +0200
  14. mtd: brcmnand: remove double new line from print
  15. The caller already adds a new line and in the other cases there is no
  16. new line added.
  17. Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
  18. Signed-off-by: Brian Norris <computersforpeace@gmail.com>
  19. commit f628ece6636c2f0354a52566cafdea6d2f963b3d
  20. Author: Brian Norris <computersforpeace@gmail.com>
  21. Date: Tue May 12 12:13:14 2015 -0700
  22. mtd: brcmnand: add BCM63138 support
  23. Signed-off-by: Brian Norris <computersforpeace@gmail.com>
  24. Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
  25. Tested-by: Florian Fainelli <f.fainelli@gmail.com>
  26. commit ca22f040dd145fc4d8069ce174f6eb0bc3ebd19f
  27. Author: Brian Norris <computersforpeace@gmail.com>
  28. Date: Tue May 12 12:12:02 2015 -0700
  29. mtd: brcmnand: add support for Broadcom's IPROC family
  30. Signed-off-by: Brian Norris <computersforpeace@gmail.com>
  31. commit c26211d37f11d5913d9803fdede6d053f918ba7b
  32. Author: Brian Norris <computersforpeace@gmail.com>
  33. Date: Tue May 12 12:09:28 2015 -0700
  34. mtd: brcmnand: add extra SoC support to library
  35. There are a few small hooks required for chips like BCM63138 and the
  36. iProc family. Let's introduce those now.
  37. Signed-off-by: Brian Norris <computersforpeace@gmail.com>
  38. Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
  39. Tested-by: Florian Fainelli <f.fainelli@gmail.com>
  40. commit 303b4420ff1896b444017b5b0eb8252ce197797d
  41. Author: Brian Norris <computersforpeace@gmail.com>
  42. Date: Tue May 12 17:00:57 2015 -0700
  43. mtd: brcmnand: add support for STB chips
  44. BCM7xxx chips are supported entirely by the library code, since they use
  45. generic irqchip interfaces and don't need any extra SoC-specific
  46. configuration.
  47. Signed-off-by: Brian Norris <computersforpeace@gmail.com>
  48. commit 27c5b17cd1b10564fa36f8f51e4b4b41436ecc32
  49. Author: Brian Norris <computersforpeace@gmail.com>
  50. Date: Fri Mar 6 11:38:08 2015 -0800
  51. mtd: nand: add NAND driver "library" for Broadcom STB NAND controller
  52. This core originated in Set-Top Box chips (BCM7xxx) but is used in a
  53. variety of other Broadcom chips, including some BCM63xxx, BCM33xx, and
  54. iProc/Cygnus. It's been used only on ARM and MIPS SoCs, so restrict it
  55. to those architectures.
  56. There are multiple revisions of this core throughout the years, and
  57. almost every version broke register compatibility in some small way, but
  58. with some effort, this driver is able to support v4.0, v5.0, v6.x, v7.0,
  59. and v7.1. It's been tested on v5.0, v6.0, v6.1, v7.0, and v7.1 recently,
  60. so there hopefully are no more lurking inconsistencies.
  61. This patch adds just some library support, on which platform drivers can
  62. be built.
  63. Signed-off-by: Brian Norris <computersforpeace@gmail.com>
  64. Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
  65. Tested-by: Florian Fainelli <f.fainelli@gmail.com>
  66. --- a/drivers/mtd/nand/Kconfig
  67. +++ b/drivers/mtd/nand/Kconfig
  68. @@ -394,6 +394,14 @@ config MTD_NAND_GPMI_NAND
  69. block, such as SD card. So pay attention to it when you enable
  70. the GPMI.
  71. +config MTD_NAND_BRCMNAND
  72. + tristate "Broadcom STB NAND controller"
  73. + depends on ARM || MIPS
  74. + help
  75. + Enables the Broadcom NAND controller driver. The controller was
  76. + originally designed for Set-Top Box but is used on various BCM7xxx,
  77. + BCM3xxx, BCM63xxx, iProc/Cygnus and more.
  78. +
  79. config MTD_NAND_BCM47XXNFLASH
  80. tristate "Support for NAND flash on BCM4706 BCMA bus"
  81. depends on BCMA_NFLASH
  82. --- a/drivers/mtd/nand/Makefile
  83. +++ b/drivers/mtd/nand/Makefile
  84. @@ -52,5 +52,6 @@ obj-$(CONFIG_MTD_NAND_XWAY) += xway_nan
  85. obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
  86. obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
  87. obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
  88. +obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
  89. nand-objs := nand_base.o nand_bbt.o nand_timings.o
  90. --- /dev/null
  91. +++ b/drivers/mtd/nand/brcmnand/Makefile
  92. @@ -0,0 +1,6 @@
  93. +# link order matters; don't link the more generic brcmstb_nand.o before the
  94. +# more specific iproc_nand.o, for instance
  95. +obj-$(CONFIG_MTD_NAND_BRCMNAND) += iproc_nand.o
  96. +obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm63138_nand.o
  97. +obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmstb_nand.o
  98. +obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
  99. --- /dev/null
  100. +++ b/drivers/mtd/nand/brcmnand/bcm63138_nand.c
  101. @@ -0,0 +1,109 @@
  102. +/*
  103. + * Copyright © 2015 Broadcom Corporation
  104. + *
  105. + * This program is free software; you can redistribute it and/or modify
  106. + * it under the terms of the GNU General Public License version 2 as
  107. + * published by the Free Software Foundation.
  108. + *
  109. + * This program is distributed in the hope that it will be useful,
  110. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  111. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  112. + * GNU General Public License for more details.
  113. + */
  114. +
  115. +#include <linux/device.h>
  116. +#include <linux/io.h>
  117. +#include <linux/ioport.h>
  118. +#include <linux/module.h>
  119. +#include <linux/of.h>
  120. +#include <linux/of_address.h>
  121. +#include <linux/platform_device.h>
  122. +#include <linux/slab.h>
  123. +
  124. +#include "brcmnand.h"
  125. +
  126. +struct bcm63138_nand_soc {
  127. + struct brcmnand_soc soc;
  128. + void __iomem *base;
  129. +};
  130. +
  131. +#define BCM63138_NAND_INT_STATUS 0x00
  132. +#define BCM63138_NAND_INT_EN 0x04
  133. +
  134. +enum {
  135. + BCM63138_CTLRDY = BIT(4),
  136. +};
  137. +
  138. +static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
  139. +{
  140. + struct bcm63138_nand_soc *priv =
  141. + container_of(soc, struct bcm63138_nand_soc, soc);
  142. + void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
  143. + u32 val = brcmnand_readl(mmio);
  144. +
  145. + if (val & BCM63138_CTLRDY) {
  146. + brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
  147. + return true;
  148. + }
  149. +
  150. + return false;
  151. +}
  152. +
  153. +static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
  154. +{
  155. + struct bcm63138_nand_soc *priv =
  156. + container_of(soc, struct bcm63138_nand_soc, soc);
  157. + void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
  158. + u32 val = brcmnand_readl(mmio);
  159. +
  160. + if (en)
  161. + val |= BCM63138_CTLRDY;
  162. + else
  163. + val &= ~BCM63138_CTLRDY;
  164. +
  165. + brcmnand_writel(val, mmio);
  166. +}
  167. +
  168. +static int bcm63138_nand_probe(struct platform_device *pdev)
  169. +{
  170. + struct device *dev = &pdev->dev;
  171. + struct bcm63138_nand_soc *priv;
  172. + struct brcmnand_soc *soc;
  173. + struct resource *res;
  174. +
  175. + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  176. + if (!priv)
  177. + return -ENOMEM;
  178. + soc = &priv->soc;
  179. +
  180. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
  181. + priv->base = devm_ioremap_resource(dev, res);
  182. + if (IS_ERR(priv->base))
  183. + return PTR_ERR(priv->base);
  184. +
  185. + soc->ctlrdy_ack = bcm63138_nand_intc_ack;
  186. + soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
  187. +
  188. + return brcmnand_probe(pdev, soc);
  189. +}
  190. +
  191. +static const struct of_device_id bcm63138_nand_of_match[] = {
  192. + { .compatible = "brcm,nand-bcm63138" },
  193. + {},
  194. +};
  195. +MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
  196. +
  197. +static struct platform_driver bcm63138_nand_driver = {
  198. + .probe = bcm63138_nand_probe,
  199. + .remove = brcmnand_remove,
  200. + .driver = {
  201. + .name = "bcm63138_nand",
  202. + .pm = &brcmnand_pm_ops,
  203. + .of_match_table = bcm63138_nand_of_match,
  204. + }
  205. +};
  206. +module_platform_driver(bcm63138_nand_driver);
  207. +
  208. +MODULE_LICENSE("GPL v2");
  209. +MODULE_AUTHOR("Brian Norris");
  210. +MODULE_DESCRIPTION("NAND driver for BCM63138");
  211. --- /dev/null
  212. +++ b/drivers/mtd/nand/brcmnand/brcmnand.c
  213. @@ -0,0 +1,2246 @@
  214. +/*
  215. + * Copyright © 2010-2015 Broadcom Corporation
  216. + *
  217. + * This program is free software; you can redistribute it and/or modify
  218. + * it under the terms of the GNU General Public License version 2 as
  219. + * published by the Free Software Foundation.
  220. + *
  221. + * This program is distributed in the hope that it will be useful,
  222. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  223. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  224. + * GNU General Public License for more details.
  225. + */
  226. +
  227. +#include <linux/version.h>
  228. +#include <linux/module.h>
  229. +#include <linux/init.h>
  230. +#include <linux/delay.h>
  231. +#include <linux/device.h>
  232. +#include <linux/platform_device.h>
  233. +#include <linux/err.h>
  234. +#include <linux/completion.h>
  235. +#include <linux/interrupt.h>
  236. +#include <linux/spinlock.h>
  237. +#include <linux/dma-mapping.h>
  238. +#include <linux/ioport.h>
  239. +#include <linux/bug.h>
  240. +#include <linux/kernel.h>
  241. +#include <linux/bitops.h>
  242. +#include <linux/mm.h>
  243. +#include <linux/mtd/mtd.h>
  244. +#include <linux/mtd/nand.h>
  245. +#include <linux/mtd/partitions.h>
  246. +#include <linux/of.h>
  247. +#include <linux/of_mtd.h>
  248. +#include <linux/of_platform.h>
  249. +#include <linux/slab.h>
  250. +#include <linux/list.h>
  251. +#include <linux/log2.h>
  252. +
  253. +#include "brcmnand.h"
  254. +
  255. +/*
  256. + * This flag controls if WP stays on between erase/write commands to mitigate
  257. + * flash corruption due to power glitches. Values:
  258. + * 0: NAND_WP is not used or not available
  259. + * 1: NAND_WP is set by default, cleared for erase/write operations
  260. + * 2: NAND_WP is always cleared
  261. + */
  262. +static int wp_on = 1;
  263. +module_param(wp_on, int, 0444);
  264. +
  265. +/***********************************************************************
  266. + * Definitions
  267. + ***********************************************************************/
  268. +
  269. +#define DRV_NAME "brcmnand"
  270. +
  271. +#define CMD_NULL 0x00
  272. +#define CMD_PAGE_READ 0x01
  273. +#define CMD_SPARE_AREA_READ 0x02
  274. +#define CMD_STATUS_READ 0x03
  275. +#define CMD_PROGRAM_PAGE 0x04
  276. +#define CMD_PROGRAM_SPARE_AREA 0x05
  277. +#define CMD_COPY_BACK 0x06
  278. +#define CMD_DEVICE_ID_READ 0x07
  279. +#define CMD_BLOCK_ERASE 0x08
  280. +#define CMD_FLASH_RESET 0x09
  281. +#define CMD_BLOCKS_LOCK 0x0a
  282. +#define CMD_BLOCKS_LOCK_DOWN 0x0b
  283. +#define CMD_BLOCKS_UNLOCK 0x0c
  284. +#define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
  285. +#define CMD_PARAMETER_READ 0x0e
  286. +#define CMD_PARAMETER_CHANGE_COL 0x0f
  287. +#define CMD_LOW_LEVEL_OP 0x10
  288. +
  289. +struct brcm_nand_dma_desc {
  290. + u32 next_desc;
  291. + u32 next_desc_ext;
  292. + u32 cmd_irq;
  293. + u32 dram_addr;
  294. + u32 dram_addr_ext;
  295. + u32 tfr_len;
  296. + u32 total_len;
  297. + u32 flash_addr;
  298. + u32 flash_addr_ext;
  299. + u32 cs;
  300. + u32 pad2[5];
  301. + u32 status_valid;
  302. +} __packed;
  303. +
  304. +/* Bitfields for brcm_nand_dma_desc::status_valid */
  305. +#define FLASH_DMA_ECC_ERROR (1 << 8)
  306. +#define FLASH_DMA_CORR_ERROR (1 << 9)
  307. +
  308. +/* 512B flash cache in the NAND controller HW */
  309. +#define FC_SHIFT 9U
  310. +#define FC_BYTES 512U
  311. +#define FC_WORDS (FC_BYTES >> 2)
  312. +
  313. +#define BRCMNAND_MIN_PAGESIZE 512
  314. +#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
  315. +#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
  316. +
  317. +/* Controller feature flags */
  318. +enum {
  319. + BRCMNAND_HAS_1K_SECTORS = BIT(0),
  320. + BRCMNAND_HAS_PREFETCH = BIT(1),
  321. + BRCMNAND_HAS_CACHE_MODE = BIT(2),
  322. + BRCMNAND_HAS_WP = BIT(3),
  323. +};
  324. +
  325. +struct brcmnand_controller {
  326. + struct device *dev;
  327. + struct nand_hw_control controller;
  328. + void __iomem *nand_base;
  329. + void __iomem *nand_fc; /* flash cache */
  330. + void __iomem *flash_dma_base;
  331. + unsigned int irq;
  332. + unsigned int dma_irq;
  333. + int nand_version;
  334. +
  335. + /* Some SoCs provide custom interrupt status register(s) */
  336. + struct brcmnand_soc *soc;
  337. +
  338. + int cmd_pending;
  339. + bool dma_pending;
  340. + struct completion done;
  341. + struct completion dma_done;
  342. +
  343. + /* List of NAND hosts (one for each chip-select) */
  344. + struct list_head host_list;
  345. +
  346. + struct brcm_nand_dma_desc *dma_desc;
  347. + dma_addr_t dma_pa;
  348. +
  349. + /* in-memory cache of the FLASH_CACHE, used only for some commands */
  350. + u32 flash_cache[FC_WORDS];
  351. +
  352. + /* Controller revision details */
  353. + const u16 *reg_offsets;
  354. + unsigned int reg_spacing; /* between CS1, CS2, ... regs */
  355. + const u8 *cs_offsets; /* within each chip-select */
  356. + const u8 *cs0_offsets; /* within CS0, if different */
  357. + unsigned int max_block_size;
  358. + const unsigned int *block_sizes;
  359. + unsigned int max_page_size;
  360. + const unsigned int *page_sizes;
  361. + unsigned int max_oob;
  362. + u32 features;
  363. +
  364. + /* for low-power standby/resume only */
  365. + u32 nand_cs_nand_select;
  366. + u32 nand_cs_nand_xor;
  367. + u32 corr_stat_threshold;
  368. + u32 flash_dma_mode;
  369. +};
  370. +
  371. +struct brcmnand_cfg {
  372. + u64 device_size;
  373. + unsigned int block_size;
  374. + unsigned int page_size;
  375. + unsigned int spare_area_size;
  376. + unsigned int device_width;
  377. + unsigned int col_adr_bytes;
  378. + unsigned int blk_adr_bytes;
  379. + unsigned int ful_adr_bytes;
  380. + unsigned int sector_size_1k;
  381. + unsigned int ecc_level;
  382. + /* use for low-power standby/resume only */
  383. + u32 acc_control;
  384. + u32 config;
  385. + u32 config_ext;
  386. + u32 timing_1;
  387. + u32 timing_2;
  388. +};
  389. +
  390. +struct brcmnand_host {
  391. + struct list_head node;
  392. + struct device_node *of_node;
  393. +
  394. + struct nand_chip chip;
  395. + struct mtd_info mtd;
  396. + struct platform_device *pdev;
  397. + int cs;
  398. +
  399. + unsigned int last_cmd;
  400. + unsigned int last_byte;
  401. + u64 last_addr;
  402. + struct brcmnand_cfg hwcfg;
  403. + struct brcmnand_controller *ctrl;
  404. +};
  405. +
  406. +enum brcmnand_reg {
  407. + BRCMNAND_CMD_START = 0,
  408. + BRCMNAND_CMD_EXT_ADDRESS,
  409. + BRCMNAND_CMD_ADDRESS,
  410. + BRCMNAND_INTFC_STATUS,
  411. + BRCMNAND_CS_SELECT,
  412. + BRCMNAND_CS_XOR,
  413. + BRCMNAND_LL_OP,
  414. + BRCMNAND_CS0_BASE,
  415. + BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
  416. + BRCMNAND_CORR_THRESHOLD,
  417. + BRCMNAND_CORR_THRESHOLD_EXT,
  418. + BRCMNAND_UNCORR_COUNT,
  419. + BRCMNAND_CORR_COUNT,
  420. + BRCMNAND_CORR_EXT_ADDR,
  421. + BRCMNAND_CORR_ADDR,
  422. + BRCMNAND_UNCORR_EXT_ADDR,
  423. + BRCMNAND_UNCORR_ADDR,
  424. + BRCMNAND_SEMAPHORE,
  425. + BRCMNAND_ID,
  426. + BRCMNAND_ID_EXT,
  427. + BRCMNAND_LL_RDATA,
  428. + BRCMNAND_OOB_READ_BASE,
  429. + BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
  430. + BRCMNAND_OOB_WRITE_BASE,
  431. + BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
  432. + BRCMNAND_FC_BASE,
  433. +};
  434. +
  435. +/* BRCMNAND v4.0 */
  436. +static const u16 brcmnand_regs_v40[] = {
  437. + [BRCMNAND_CMD_START] = 0x04,
  438. + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
  439. + [BRCMNAND_CMD_ADDRESS] = 0x0c,
  440. + [BRCMNAND_INTFC_STATUS] = 0x6c,
  441. + [BRCMNAND_CS_SELECT] = 0x14,
  442. + [BRCMNAND_CS_XOR] = 0x18,
  443. + [BRCMNAND_LL_OP] = 0x178,
  444. + [BRCMNAND_CS0_BASE] = 0x40,
  445. + [BRCMNAND_CS1_BASE] = 0xd0,
  446. + [BRCMNAND_CORR_THRESHOLD] = 0x84,
  447. + [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
  448. + [BRCMNAND_UNCORR_COUNT] = 0,
  449. + [BRCMNAND_CORR_COUNT] = 0,
  450. + [BRCMNAND_CORR_EXT_ADDR] = 0x70,
  451. + [BRCMNAND_CORR_ADDR] = 0x74,
  452. + [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
  453. + [BRCMNAND_UNCORR_ADDR] = 0x7c,
  454. + [BRCMNAND_SEMAPHORE] = 0x58,
  455. + [BRCMNAND_ID] = 0x60,
  456. + [BRCMNAND_ID_EXT] = 0x64,
  457. + [BRCMNAND_LL_RDATA] = 0x17c,
  458. + [BRCMNAND_OOB_READ_BASE] = 0x20,
  459. + [BRCMNAND_OOB_READ_10_BASE] = 0x130,
  460. + [BRCMNAND_OOB_WRITE_BASE] = 0x30,
  461. + [BRCMNAND_OOB_WRITE_10_BASE] = 0,
  462. + [BRCMNAND_FC_BASE] = 0x200,
  463. +};
  464. +
  465. +/* BRCMNAND v5.0 */
  466. +static const u16 brcmnand_regs_v50[] = {
  467. + [BRCMNAND_CMD_START] = 0x04,
  468. + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
  469. + [BRCMNAND_CMD_ADDRESS] = 0x0c,
  470. + [BRCMNAND_INTFC_STATUS] = 0x6c,
  471. + [BRCMNAND_CS_SELECT] = 0x14,
  472. + [BRCMNAND_CS_XOR] = 0x18,
  473. + [BRCMNAND_LL_OP] = 0x178,
  474. + [BRCMNAND_CS0_BASE] = 0x40,
  475. + [BRCMNAND_CS1_BASE] = 0xd0,
  476. + [BRCMNAND_CORR_THRESHOLD] = 0x84,
  477. + [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
  478. + [BRCMNAND_UNCORR_COUNT] = 0,
  479. + [BRCMNAND_CORR_COUNT] = 0,
  480. + [BRCMNAND_CORR_EXT_ADDR] = 0x70,
  481. + [BRCMNAND_CORR_ADDR] = 0x74,
  482. + [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
  483. + [BRCMNAND_UNCORR_ADDR] = 0x7c,
  484. + [BRCMNAND_SEMAPHORE] = 0x58,
  485. + [BRCMNAND_ID] = 0x60,
  486. + [BRCMNAND_ID_EXT] = 0x64,
  487. + [BRCMNAND_LL_RDATA] = 0x17c,
  488. + [BRCMNAND_OOB_READ_BASE] = 0x20,
  489. + [BRCMNAND_OOB_READ_10_BASE] = 0x130,
  490. + [BRCMNAND_OOB_WRITE_BASE] = 0x30,
  491. + [BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
  492. + [BRCMNAND_FC_BASE] = 0x200,
  493. +};
  494. +
  495. +/* BRCMNAND v6.0 - v7.1 */
  496. +static const u16 brcmnand_regs_v60[] = {
  497. + [BRCMNAND_CMD_START] = 0x04,
  498. + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
  499. + [BRCMNAND_CMD_ADDRESS] = 0x0c,
  500. + [BRCMNAND_INTFC_STATUS] = 0x14,
  501. + [BRCMNAND_CS_SELECT] = 0x18,
  502. + [BRCMNAND_CS_XOR] = 0x1c,
  503. + [BRCMNAND_LL_OP] = 0x20,
  504. + [BRCMNAND_CS0_BASE] = 0x50,
  505. + [BRCMNAND_CS1_BASE] = 0,
  506. + [BRCMNAND_CORR_THRESHOLD] = 0xc0,
  507. + [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
  508. + [BRCMNAND_UNCORR_COUNT] = 0xfc,
  509. + [BRCMNAND_CORR_COUNT] = 0x100,
  510. + [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
  511. + [BRCMNAND_CORR_ADDR] = 0x110,
  512. + [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
  513. + [BRCMNAND_UNCORR_ADDR] = 0x118,
  514. + [BRCMNAND_SEMAPHORE] = 0x150,
  515. + [BRCMNAND_ID] = 0x194,
  516. + [BRCMNAND_ID_EXT] = 0x198,
  517. + [BRCMNAND_LL_RDATA] = 0x19c,
  518. + [BRCMNAND_OOB_READ_BASE] = 0x200,
  519. + [BRCMNAND_OOB_READ_10_BASE] = 0,
  520. + [BRCMNAND_OOB_WRITE_BASE] = 0x280,
  521. + [BRCMNAND_OOB_WRITE_10_BASE] = 0,
  522. + [BRCMNAND_FC_BASE] = 0x400,
  523. +};
  524. +
  525. +enum brcmnand_cs_reg {
  526. + BRCMNAND_CS_CFG_EXT = 0,
  527. + BRCMNAND_CS_CFG,
  528. + BRCMNAND_CS_ACC_CONTROL,
  529. + BRCMNAND_CS_TIMING1,
  530. + BRCMNAND_CS_TIMING2,
  531. +};
  532. +
  533. +/* Per chip-select offsets for v7.1 */
  534. +static const u8 brcmnand_cs_offsets_v71[] = {
  535. + [BRCMNAND_CS_ACC_CONTROL] = 0x00,
  536. + [BRCMNAND_CS_CFG_EXT] = 0x04,
  537. + [BRCMNAND_CS_CFG] = 0x08,
  538. + [BRCMNAND_CS_TIMING1] = 0x0c,
  539. + [BRCMNAND_CS_TIMING2] = 0x10,
  540. +};
  541. +
  542. +/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
  543. +static const u8 brcmnand_cs_offsets[] = {
  544. + [BRCMNAND_CS_ACC_CONTROL] = 0x00,
  545. + [BRCMNAND_CS_CFG_EXT] = 0x04,
  546. + [BRCMNAND_CS_CFG] = 0x04,
  547. + [BRCMNAND_CS_TIMING1] = 0x08,
  548. + [BRCMNAND_CS_TIMING2] = 0x0c,
  549. +};
  550. +
  551. +/* Per chip-select offset for <= v5.0 on CS0 only */
  552. +static const u8 brcmnand_cs_offsets_cs0[] = {
  553. + [BRCMNAND_CS_ACC_CONTROL] = 0x00,
  554. + [BRCMNAND_CS_CFG_EXT] = 0x08,
  555. + [BRCMNAND_CS_CFG] = 0x08,
  556. + [BRCMNAND_CS_TIMING1] = 0x10,
  557. + [BRCMNAND_CS_TIMING2] = 0x14,
  558. +};
  559. +
  560. +/* BRCMNAND_INTFC_STATUS */
  561. +enum {
  562. + INTFC_FLASH_STATUS = GENMASK(7, 0),
  563. +
  564. + INTFC_ERASED = BIT(27),
  565. + INTFC_OOB_VALID = BIT(28),
  566. + INTFC_CACHE_VALID = BIT(29),
  567. + INTFC_FLASH_READY = BIT(30),
  568. + INTFC_CTLR_READY = BIT(31),
  569. +};
  570. +
  571. +static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
  572. +{
  573. + return brcmnand_readl(ctrl->nand_base + offs);
  574. +}
  575. +
  576. +static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
  577. + u32 val)
  578. +{
  579. + brcmnand_writel(val, ctrl->nand_base + offs);
  580. +}
  581. +
  582. +static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
  583. +{
  584. + static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
  585. + static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
  586. + static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
  587. +
  588. + ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
  589. +
  590. + /* Only controller versions v4.0 and newer are supported */
  591. + if (ctrl->nand_version < 0x0400) {
  592. + dev_err(ctrl->dev, "version %#x not supported\n",
  593. + ctrl->nand_version);
  594. + return -ENODEV;
  595. + }
  596. +
  597. + /* Register offsets */
  598. + if (ctrl->nand_version >= 0x0600)
  599. + ctrl->reg_offsets = brcmnand_regs_v60;
  600. + else if (ctrl->nand_version >= 0x0500)
  601. + ctrl->reg_offsets = brcmnand_regs_v50;
  602. + else if (ctrl->nand_version >= 0x0400)
  603. + ctrl->reg_offsets = brcmnand_regs_v40;
  604. +
  605. + /* Chip-select stride */
  606. + if (ctrl->nand_version >= 0x0701)
  607. + ctrl->reg_spacing = 0x14;
  608. + else
  609. + ctrl->reg_spacing = 0x10;
  610. +
  611. + /* Per chip-select registers */
  612. + if (ctrl->nand_version >= 0x0701) {
  613. + ctrl->cs_offsets = brcmnand_cs_offsets_v71;
  614. + } else {
  615. + ctrl->cs_offsets = brcmnand_cs_offsets;
  616. +
  617. + /* v5.0 and earlier has a different CS0 offset layout */
  618. + if (ctrl->nand_version <= 0x0500)
  619. + ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
  620. + }
  621. +
  622. + /* Page / block sizes */
  623. + if (ctrl->nand_version >= 0x0701) {
  624. + /* >= v7.1 use nice power-of-2 values! */
  625. + ctrl->max_page_size = 16 * 1024;
  626. + ctrl->max_block_size = 2 * 1024 * 1024;
  627. + } else {
  628. + ctrl->page_sizes = page_sizes;
  629. + if (ctrl->nand_version >= 0x0600)
  630. + ctrl->block_sizes = block_sizes_v6;
  631. + else
  632. + ctrl->block_sizes = block_sizes_v4;
  633. +
  634. + if (ctrl->nand_version < 0x0400) {
  635. + ctrl->max_page_size = 4096;
  636. + ctrl->max_block_size = 512 * 1024;
  637. + }
  638. + }
  639. +
  640. + /* Maximum spare area sector size (per 512B) */
  641. + if (ctrl->nand_version >= 0x0600)
  642. + ctrl->max_oob = 64;
  643. + else if (ctrl->nand_version >= 0x0500)
  644. + ctrl->max_oob = 32;
  645. + else
  646. + ctrl->max_oob = 16;
  647. +
  648. + /* v6.0 and newer (except v6.1) have prefetch support */
  649. + if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
  650. + ctrl->features |= BRCMNAND_HAS_PREFETCH;
  651. +
  652. + /*
  653. + * v6.x has cache mode, but it's implemented differently. Ignore it for
  654. + * now.
  655. + */
  656. + if (ctrl->nand_version >= 0x0700)
  657. + ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
  658. +
  659. + if (ctrl->nand_version >= 0x0500)
  660. + ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
  661. +
  662. + if (ctrl->nand_version >= 0x0700)
  663. + ctrl->features |= BRCMNAND_HAS_WP;
  664. + else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
  665. + ctrl->features |= BRCMNAND_HAS_WP;
  666. +
  667. + return 0;
  668. +}
  669. +
  670. +static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
  671. + enum brcmnand_reg reg)
  672. +{
  673. + u16 offs = ctrl->reg_offsets[reg];
  674. +
  675. + if (offs)
  676. + return nand_readreg(ctrl, offs);
  677. + else
  678. + return 0;
  679. +}
  680. +
  681. +static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
  682. + enum brcmnand_reg reg, u32 val)
  683. +{
  684. + u16 offs = ctrl->reg_offsets[reg];
  685. +
  686. + if (offs)
  687. + nand_writereg(ctrl, offs, val);
  688. +}
  689. +
  690. +static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
  691. + enum brcmnand_reg reg, u32 mask, unsigned
  692. + int shift, u32 val)
  693. +{
  694. + u32 tmp = brcmnand_read_reg(ctrl, reg);
  695. +
  696. + tmp &= ~mask;
  697. + tmp |= val << shift;
  698. + brcmnand_write_reg(ctrl, reg, tmp);
  699. +}
  700. +
  701. +static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
  702. +{
  703. + return __raw_readl(ctrl->nand_fc + word * 4);
  704. +}
  705. +
  706. +static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
  707. + int word, u32 val)
  708. +{
  709. + __raw_writel(val, ctrl->nand_fc + word * 4);
  710. +}
  711. +
  712. +static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
  713. + enum brcmnand_cs_reg reg)
  714. +{
  715. + u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
  716. + u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
  717. + u8 cs_offs;
  718. +
  719. + if (cs == 0 && ctrl->cs0_offsets)
  720. + cs_offs = ctrl->cs0_offsets[reg];
  721. + else
  722. + cs_offs = ctrl->cs_offsets[reg];
  723. +
  724. + if (cs && offs_cs1)
  725. + return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
  726. +
  727. + return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
  728. +}
  729. +
  730. +static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
  731. +{
  732. + if (ctrl->nand_version < 0x0600)
  733. + return 1;
  734. + return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
  735. +}
  736. +
  737. +static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
  738. +{
  739. + struct brcmnand_controller *ctrl = host->ctrl;
  740. + unsigned int shift = 0, bits;
  741. + enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
  742. + int cs = host->cs;
  743. +
  744. + if (ctrl->nand_version >= 0x0600)
  745. + bits = 6;
  746. + else if (ctrl->nand_version >= 0x0500)
  747. + bits = 5;
  748. + else
  749. + bits = 4;
  750. +
  751. + if (ctrl->nand_version >= 0x0600) {
  752. + if (cs >= 5)
  753. + reg = BRCMNAND_CORR_THRESHOLD_EXT;
  754. + shift = (cs % 5) * bits;
  755. + }
  756. + brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
  757. +}
  758. +
  759. +static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
  760. +{
  761. + if (ctrl->nand_version < 0x0700)
  762. + return 24;
  763. + return 0;
  764. +}
  765. +
  766. +/***********************************************************************
  767. + * NAND ACC CONTROL bitfield
  768. + *
  769. + * Some bits have remained constant throughout hardware revision, while
  770. + * others have shifted around.
  771. + ***********************************************************************/
  772. +
  773. +/* Constant for all versions (where supported) */
  774. +enum {
  775. + /* See BRCMNAND_HAS_CACHE_MODE */
  776. + ACC_CONTROL_CACHE_MODE = BIT(22),
  777. +
  778. + /* See BRCMNAND_HAS_PREFETCH */
  779. + ACC_CONTROL_PREFETCH = BIT(23),
  780. +
  781. + ACC_CONTROL_PAGE_HIT = BIT(24),
  782. + ACC_CONTROL_WR_PREEMPT = BIT(25),
  783. + ACC_CONTROL_PARTIAL_PAGE = BIT(26),
  784. + ACC_CONTROL_RD_ERASED = BIT(27),
  785. + ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
  786. + ACC_CONTROL_WR_ECC = BIT(30),
  787. + ACC_CONTROL_RD_ECC = BIT(31),
  788. +};
  789. +
  790. +static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
  791. +{
  792. + if (ctrl->nand_version >= 0x0600)
  793. + return GENMASK(6, 0);
  794. + else
  795. + return GENMASK(5, 0);
  796. +}
  797. +
  798. +#define NAND_ACC_CONTROL_ECC_SHIFT 16
  799. +
  800. +static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
  801. +{
  802. + u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
  803. +
  804. + return mask << NAND_ACC_CONTROL_ECC_SHIFT;
  805. +}
  806. +
  807. +static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
  808. +{
  809. + struct brcmnand_controller *ctrl = host->ctrl;
  810. + u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
  811. + u32 acc_control = nand_readreg(ctrl, offs);
  812. + u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
  813. +
  814. + if (en) {
  815. + acc_control |= ecc_flags; /* enable RD/WR ECC */
  816. + acc_control |= host->hwcfg.ecc_level
  817. + << NAND_ACC_CONTROL_ECC_SHIFT;
  818. + } else {
  819. + acc_control &= ~ecc_flags; /* disable RD/WR ECC */
  820. + acc_control &= ~brcmnand_ecc_level_mask(ctrl);
  821. + }
  822. +
  823. + nand_writereg(ctrl, offs, acc_control);
  824. +}
  825. +
  826. +static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
  827. +{
  828. + if (ctrl->nand_version >= 0x0600)
  829. + return 7;
  830. + else if (ctrl->nand_version >= 0x0500)
  831. + return 6;
  832. + else
  833. + return -1;
  834. +}
  835. +
  836. +static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
  837. +{
  838. + struct brcmnand_controller *ctrl = host->ctrl;
  839. + int shift = brcmnand_sector_1k_shift(ctrl);
  840. + u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
  841. + BRCMNAND_CS_ACC_CONTROL);
  842. +
  843. + if (shift < 0)
  844. + return 0;
  845. +
  846. + return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
  847. +}
  848. +
  849. +static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
  850. +{
  851. + struct brcmnand_controller *ctrl = host->ctrl;
  852. + int shift = brcmnand_sector_1k_shift(ctrl);
  853. + u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
  854. + BRCMNAND_CS_ACC_CONTROL);
  855. + u32 tmp;
  856. +
  857. + if (shift < 0)
  858. + return;
  859. +
  860. + tmp = nand_readreg(ctrl, acc_control_offs);
  861. + tmp &= ~(1 << shift);
  862. + tmp |= (!!val) << shift;
  863. + nand_writereg(ctrl, acc_control_offs, tmp);
  864. +}
  865. +
  866. +/***********************************************************************
  867. + * CS_NAND_SELECT
  868. + ***********************************************************************/
  869. +
  870. +enum {
  871. + CS_SELECT_NAND_WP = BIT(29),
  872. + CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
  873. +};
  874. +
  875. +static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
  876. +{
  877. + u32 val = en ? CS_SELECT_NAND_WP : 0;
  878. +
  879. + brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
  880. +}
  881. +
  882. +/***********************************************************************
  883. + * Flash DMA
  884. + ***********************************************************************/
  885. +
  886. +enum flash_dma_reg {
  887. + FLASH_DMA_REVISION = 0x00,
  888. + FLASH_DMA_FIRST_DESC = 0x04,
  889. + FLASH_DMA_FIRST_DESC_EXT = 0x08,
  890. + FLASH_DMA_CTRL = 0x0c,
  891. + FLASH_DMA_MODE = 0x10,
  892. + FLASH_DMA_STATUS = 0x14,
  893. + FLASH_DMA_INTERRUPT_DESC = 0x18,
  894. + FLASH_DMA_INTERRUPT_DESC_EXT = 0x1c,
  895. + FLASH_DMA_ERROR_STATUS = 0x20,
  896. + FLASH_DMA_CURRENT_DESC = 0x24,
  897. + FLASH_DMA_CURRENT_DESC_EXT = 0x28,
  898. +};
  899. +
  900. +static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
  901. +{
  902. + return ctrl->flash_dma_base;
  903. +}
  904. +
  905. +static inline bool flash_dma_buf_ok(const void *buf)
  906. +{
  907. + return buf && !is_vmalloc_addr(buf) &&
  908. + likely(IS_ALIGNED((uintptr_t)buf, 4));
  909. +}
  910. +
  911. +static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
  912. + u32 val)
  913. +{
  914. + brcmnand_writel(val, ctrl->flash_dma_base + offs);
  915. +}
  916. +
  917. +static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
  918. +{
  919. + return brcmnand_readl(ctrl->flash_dma_base + offs);
  920. +}
  921. +
  922. +/* Low-level operation types: command, address, write, or read */
  923. +enum brcmnand_llop_type {
  924. + LL_OP_CMD,
  925. + LL_OP_ADDR,
  926. + LL_OP_WR,
  927. + LL_OP_RD,
  928. +};
  929. +
  930. +/***********************************************************************
  931. + * Internal support functions
  932. + ***********************************************************************/
  933. +
  934. +static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg)
  935. +{
  936. + return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
  937. + cfg->ecc_level == 15;
  938. +}
  939. +
  940. +/*
  941. + * Returns a nand_ecclayout structure for the given layout/configuration.
  942. + * Returns NULL on failure.
  943. + */
  944. +static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
  945. + struct brcmnand_host *host)
  946. +{
  947. + struct brcmnand_cfg *cfg = &host->hwcfg;
  948. + int i, j;
  949. + struct nand_ecclayout *layout;
  950. + int req;
  951. + int sectors;
  952. + int sas;
  953. + int idx1, idx2;
  954. +
  955. + layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL);
  956. + if (!layout)
  957. + return NULL;
  958. +
  959. + sectors = cfg->page_size / (512 << cfg->sector_size_1k);
  960. + sas = cfg->spare_area_size << cfg->sector_size_1k;
  961. +
  962. + /* Hamming */
  963. + if (is_hamming_ecc(cfg)) {
  964. + for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
  965. + /* First sector of each page may have BBI */
  966. + if (i == 0) {
  967. + layout->oobfree[idx2].offset = i * sas + 1;
  968. + /* Small-page NAND use byte 6 for BBI */
  969. + if (cfg->page_size == 512)
  970. + layout->oobfree[idx2].offset--;
  971. + layout->oobfree[idx2].length = 5;
  972. + } else {
  973. + layout->oobfree[idx2].offset = i * sas;
  974. + layout->oobfree[idx2].length = 6;
  975. + }
  976. + idx2++;
  977. + layout->eccpos[idx1++] = i * sas + 6;
  978. + layout->eccpos[idx1++] = i * sas + 7;
  979. + layout->eccpos[idx1++] = i * sas + 8;
  980. + layout->oobfree[idx2].offset = i * sas + 9;
  981. + layout->oobfree[idx2].length = 7;
  982. + idx2++;
  983. + /* Leave zero-terminated entry for OOBFREE */
  984. + if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
  985. + idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
  986. + break;
  987. + }
  988. + goto out;
  989. + }
  990. +
  991. + /*
  992. + * CONTROLLER_VERSION:
  993. + * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
  994. + * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
  995. + * But we will just be conservative.
  996. + */
  997. + req = DIV_ROUND_UP(ecc_level * 14, 8);
  998. + if (req >= sas) {
  999. + dev_err(&host->pdev->dev,
  1000. + "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
  1001. + req, sas);
  1002. + return NULL;
  1003. + }
  1004. +
  1005. + layout->eccbytes = req * sectors;
  1006. + for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
  1007. + for (j = sas - req; j < sas && idx1 <
  1008. + MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
  1009. + layout->eccpos[idx1] = i * sas + j;
  1010. +
  1011. + /* First sector of each page may have BBI */
  1012. + if (i == 0) {
  1013. + if (cfg->page_size == 512 && (sas - req >= 6)) {
  1014. + /* Small-page NAND use byte 6 for BBI */
  1015. + layout->oobfree[idx2].offset = 0;
  1016. + layout->oobfree[idx2].length = 5;
  1017. + idx2++;
  1018. + if (sas - req > 6) {
  1019. + layout->oobfree[idx2].offset = 6;
  1020. + layout->oobfree[idx2].length =
  1021. + sas - req - 6;
  1022. + idx2++;
  1023. + }
  1024. + } else if (sas > req + 1) {
  1025. + layout->oobfree[idx2].offset = i * sas + 1;
  1026. + layout->oobfree[idx2].length = sas - req - 1;
  1027. + idx2++;
  1028. + }
  1029. + } else if (sas > req) {
  1030. + layout->oobfree[idx2].offset = i * sas;
  1031. + layout->oobfree[idx2].length = sas - req;
  1032. + idx2++;
  1033. + }
  1034. + /* Leave zero-terminated entry for OOBFREE */
  1035. + if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
  1036. + idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
  1037. + break;
  1038. + }
  1039. +out:
  1040. + /* Sum available OOB */
  1041. + for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE; i++)
  1042. + layout->oobavail += layout->oobfree[i].length;
  1043. + return layout;
  1044. +}
  1045. +
  1046. +static struct nand_ecclayout *brcmstb_choose_ecc_layout(
  1047. + struct brcmnand_host *host)
  1048. +{
  1049. + struct nand_ecclayout *layout;
  1050. + struct brcmnand_cfg *p = &host->hwcfg;
  1051. + unsigned int ecc_level = p->ecc_level;
  1052. +
  1053. + if (p->sector_size_1k)
  1054. + ecc_level <<= 1;
  1055. +
  1056. + layout = brcmnand_create_layout(ecc_level, host);
  1057. + if (!layout) {
  1058. + dev_err(&host->pdev->dev,
  1059. + "no proper ecc_layout for this NAND cfg\n");
  1060. + return NULL;
  1061. + }
  1062. +
  1063. + return layout;
  1064. +}
  1065. +
  1066. +static void brcmnand_wp(struct mtd_info *mtd, int wp)
  1067. +{
  1068. + struct nand_chip *chip = mtd->priv;
  1069. + struct brcmnand_host *host = chip->priv;
  1070. + struct brcmnand_controller *ctrl = host->ctrl;
  1071. +
  1072. + if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
  1073. + static int old_wp = -1;
  1074. +
  1075. + if (old_wp != wp) {
  1076. + dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
  1077. + old_wp = wp;
  1078. + }
  1079. + brcmnand_set_wp(ctrl, wp);
  1080. + }
  1081. +}
  1082. +
  1083. +/* Helper functions for reading and writing OOB registers */
  1084. +static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
  1085. +{
  1086. + u16 offset0, offset10, reg_offs;
  1087. +
  1088. + offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
  1089. + offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
  1090. +
  1091. + if (offs >= ctrl->max_oob)
  1092. + return 0x77;
  1093. +
  1094. + if (offs >= 16 && offset10)
  1095. + reg_offs = offset10 + ((offs - 0x10) & ~0x03);
  1096. + else
  1097. + reg_offs = offset0 + (offs & ~0x03);
  1098. +
  1099. + return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
  1100. +}
  1101. +
  1102. +static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
  1103. + u32 data)
  1104. +{
  1105. + u16 offset0, offset10, reg_offs;
  1106. +
  1107. + offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
  1108. + offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
  1109. +
  1110. + if (offs >= ctrl->max_oob)
  1111. + return;
  1112. +
  1113. + if (offs >= 16 && offset10)
  1114. + reg_offs = offset10 + ((offs - 0x10) & ~0x03);
  1115. + else
  1116. + reg_offs = offset0 + (offs & ~0x03);
  1117. +
  1118. + nand_writereg(ctrl, reg_offs, data);
  1119. +}
  1120. +
  1121. +/*
  1122. + * read_oob_from_regs - read data from OOB registers
  1123. + * @ctrl: NAND controller
  1124. + * @i: sub-page sector index
  1125. + * @oob: buffer to read to
  1126. + * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
  1127. + * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
  1128. + */
  1129. +static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
  1130. + int sas, int sector_1k)
  1131. +{
  1132. + int tbytes = sas << sector_1k;
  1133. + int j;
  1134. +
  1135. + /* Adjust OOB values for 1K sector size */
  1136. + if (sector_1k && (i & 0x01))
  1137. + tbytes = max(0, tbytes - (int)ctrl->max_oob);
  1138. + tbytes = min_t(int, tbytes, ctrl->max_oob);
  1139. +
  1140. + for (j = 0; j < tbytes; j++)
  1141. + oob[j] = oob_reg_read(ctrl, j);
  1142. + return tbytes;
  1143. +}
  1144. +
  1145. +/*
  1146. + * write_oob_to_regs - write data to OOB registers
  1147. + * @i: sub-page sector index
  1148. + * @oob: buffer to write from
  1149. + * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
  1150. + * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
  1151. + */
  1152. +static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
  1153. + const u8 *oob, int sas, int sector_1k)
  1154. +{
  1155. + int tbytes = sas << sector_1k;
  1156. + int j;
  1157. +
  1158. + /* Adjust OOB values for 1K sector size */
  1159. + if (sector_1k && (i & 0x01))
  1160. + tbytes = max(0, tbytes - (int)ctrl->max_oob);
  1161. + tbytes = min_t(int, tbytes, ctrl->max_oob);
  1162. +
  1163. + for (j = 0; j < tbytes; j += 4)
  1164. + oob_reg_write(ctrl, j,
  1165. + (oob[j + 0] << 24) |
  1166. + (oob[j + 1] << 16) |
  1167. + (oob[j + 2] << 8) |
  1168. + (oob[j + 3] << 0));
  1169. + return tbytes;
  1170. +}
  1171. +
  1172. +static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
  1173. +{
  1174. + struct brcmnand_controller *ctrl = data;
  1175. +
  1176. + /* Discard all NAND_CTLRDY interrupts during DMA */
  1177. + if (ctrl->dma_pending)
  1178. + return IRQ_HANDLED;
  1179. +
  1180. + complete(&ctrl->done);
  1181. + return IRQ_HANDLED;
  1182. +}
  1183. +
  1184. +/* Handle SoC-specific interrupt hardware */
  1185. +static irqreturn_t brcmnand_irq(int irq, void *data)
  1186. +{
  1187. + struct brcmnand_controller *ctrl = data;
  1188. +
  1189. + if (ctrl->soc->ctlrdy_ack(ctrl->soc))
  1190. + return brcmnand_ctlrdy_irq(irq, data);
  1191. +
  1192. + return IRQ_NONE;
  1193. +}
  1194. +
  1195. +static irqreturn_t brcmnand_dma_irq(int irq, void *data)
  1196. +{
  1197. + struct brcmnand_controller *ctrl = data;
  1198. +
  1199. + complete(&ctrl->dma_done);
  1200. +
  1201. + return IRQ_HANDLED;
  1202. +}
  1203. +
  1204. +static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
  1205. +{
  1206. + struct brcmnand_controller *ctrl = host->ctrl;
  1207. + u32 intfc;
  1208. +
  1209. + dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
  1210. + brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
  1211. + BUG_ON(ctrl->cmd_pending != 0);
  1212. + ctrl->cmd_pending = cmd;
  1213. +
  1214. + intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
  1215. + BUG_ON(!(intfc & INTFC_CTLR_READY));
  1216. +
  1217. + mb(); /* flush previous writes */
  1218. + brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
  1219. + cmd << brcmnand_cmd_shift(ctrl));
  1220. +}
  1221. +
  1222. +/***********************************************************************
  1223. + * NAND MTD API: read/program/erase
  1224. + ***********************************************************************/
  1225. +
  1226. +static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
  1227. + unsigned int ctrl)
  1228. +{
  1229. + /* intentionally left blank */
  1230. +}
  1231. +
  1232. +static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
  1233. +{
  1234. + struct nand_chip *chip = mtd->priv;
  1235. + struct brcmnand_host *host = chip->priv;
  1236. + struct brcmnand_controller *ctrl = host->ctrl;
  1237. + unsigned long timeo = msecs_to_jiffies(100);
  1238. +
  1239. + dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
  1240. + if (ctrl->cmd_pending &&
  1241. + wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
  1242. + u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
  1243. + >> brcmnand_cmd_shift(ctrl);
  1244. +
  1245. + dev_err_ratelimited(ctrl->dev,
  1246. + "timeout waiting for command %#02x\n", cmd);
  1247. + dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
  1248. + brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
  1249. + }
  1250. + ctrl->cmd_pending = 0;
  1251. + return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
  1252. + INTFC_FLASH_STATUS;
  1253. +}
  1254. +
  1255. +enum {
  1256. + LLOP_RE = BIT(16),
  1257. + LLOP_WE = BIT(17),
  1258. + LLOP_ALE = BIT(18),
  1259. + LLOP_CLE = BIT(19),
  1260. + LLOP_RETURN_IDLE = BIT(31),
  1261. +
  1262. + LLOP_DATA_MASK = GENMASK(15, 0),
  1263. +};
  1264. +
  1265. +static int brcmnand_low_level_op(struct brcmnand_host *host,
  1266. + enum brcmnand_llop_type type, u32 data,
  1267. + bool last_op)
  1268. +{
  1269. + struct mtd_info *mtd = &host->mtd;
  1270. + struct nand_chip *chip = &host->chip;
  1271. + struct brcmnand_controller *ctrl = host->ctrl;
  1272. + u32 tmp;
  1273. +
  1274. + tmp = data & LLOP_DATA_MASK;
  1275. + switch (type) {
  1276. + case LL_OP_CMD:
  1277. + tmp |= LLOP_WE | LLOP_CLE;
  1278. + break;
  1279. + case LL_OP_ADDR:
  1280. + /* WE | ALE */
  1281. + tmp |= LLOP_WE | LLOP_ALE;
  1282. + break;
  1283. + case LL_OP_WR:
  1284. + /* WE */
  1285. + tmp |= LLOP_WE;
  1286. + break;
  1287. + case LL_OP_RD:
  1288. + /* RE */
  1289. + tmp |= LLOP_RE;
  1290. + break;
  1291. + }
  1292. + if (last_op)
  1293. + /* RETURN_IDLE */
  1294. + tmp |= LLOP_RETURN_IDLE;
  1295. +
  1296. + dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
  1297. +
  1298. + brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
  1299. + (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
  1300. +
  1301. + brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
  1302. + return brcmnand_waitfunc(mtd, chip);
  1303. +}
  1304. +
  1305. +static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
  1306. + int column, int page_addr)
  1307. +{
  1308. + struct nand_chip *chip = mtd->priv;
  1309. + struct brcmnand_host *host = chip->priv;
  1310. + struct brcmnand_controller *ctrl = host->ctrl;
  1311. + u64 addr = (u64)page_addr << chip->page_shift;
  1312. + int native_cmd = 0;
  1313. +
  1314. + if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
  1315. + command == NAND_CMD_RNDOUT)
  1316. + addr = (u64)column;
  1317. + /* Avoid propagating a negative, don't-care address */
  1318. + else if (page_addr < 0)
  1319. + addr = 0;
  1320. +
  1321. + dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
  1322. + (unsigned long long)addr);
  1323. +
  1324. + host->last_cmd = command;
  1325. + host->last_byte = 0;
  1326. + host->last_addr = addr;
  1327. +
  1328. + switch (command) {
  1329. + case NAND_CMD_RESET:
  1330. + native_cmd = CMD_FLASH_RESET;
  1331. + break;
  1332. + case NAND_CMD_STATUS:
  1333. + native_cmd = CMD_STATUS_READ;
  1334. + break;
  1335. + case NAND_CMD_READID:
  1336. + native_cmd = CMD_DEVICE_ID_READ;
  1337. + break;
  1338. + case NAND_CMD_READOOB:
  1339. + native_cmd = CMD_SPARE_AREA_READ;
  1340. + break;
  1341. + case NAND_CMD_ERASE1:
  1342. + native_cmd = CMD_BLOCK_ERASE;
  1343. + brcmnand_wp(mtd, 0);
  1344. + break;
  1345. + case NAND_CMD_PARAM:
  1346. + native_cmd = CMD_PARAMETER_READ;
  1347. + break;
  1348. + case NAND_CMD_SET_FEATURES:
  1349. + case NAND_CMD_GET_FEATURES:
  1350. + brcmnand_low_level_op(host, LL_OP_CMD, command, false);
  1351. + brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
  1352. + break;
  1353. + case NAND_CMD_RNDOUT:
  1354. + native_cmd = CMD_PARAMETER_CHANGE_COL;
  1355. + addr &= ~((u64)(FC_BYTES - 1));
  1356. + /*
  1357. + * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
  1358. + * NB: hwcfg.sector_size_1k may not be initialized yet
  1359. + */
  1360. + if (brcmnand_get_sector_size_1k(host)) {
  1361. + host->hwcfg.sector_size_1k =
  1362. + brcmnand_get_sector_size_1k(host);
  1363. + brcmnand_set_sector_size_1k(host, 0);
  1364. + }
  1365. + break;
  1366. + }
  1367. +
  1368. + if (!native_cmd)
  1369. + return;
  1370. +
  1371. + brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
  1372. + (host->cs << 16) | ((addr >> 32) & 0xffff));
  1373. + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
  1374. + brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
  1375. + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
  1376. +
  1377. + brcmnand_send_cmd(host, native_cmd);
  1378. + brcmnand_waitfunc(mtd, chip);
  1379. +
  1380. + if (native_cmd == CMD_PARAMETER_READ ||
  1381. + native_cmd == CMD_PARAMETER_CHANGE_COL) {
  1382. + int i;
  1383. +
  1384. + brcmnand_soc_data_bus_prepare(ctrl->soc);
  1385. +
  1386. + /*
  1387. + * Must cache the FLASH_CACHE now, since changes in
  1388. + * SECTOR_SIZE_1K may invalidate it
  1389. + */
  1390. + for (i = 0; i < FC_WORDS; i++)
  1391. + ctrl->flash_cache[i] = brcmnand_read_fc(ctrl, i);
  1392. +
  1393. + brcmnand_soc_data_bus_unprepare(ctrl->soc);
  1394. +
  1395. + /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
  1396. + if (host->hwcfg.sector_size_1k)
  1397. + brcmnand_set_sector_size_1k(host,
  1398. + host->hwcfg.sector_size_1k);
  1399. + }
  1400. +
  1401. + /* Re-enabling write protection is only necessary after erase */
  1402. + if (command == NAND_CMD_ERASE1)
  1403. + brcmnand_wp(mtd, 1);
  1404. +}
  1405. +
  1406. +static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
  1407. +{
  1408. + struct nand_chip *chip = mtd->priv;
  1409. + struct brcmnand_host *host = chip->priv;
  1410. + struct brcmnand_controller *ctrl = host->ctrl;
  1411. + uint8_t ret = 0;
  1412. + int addr, offs;
  1413. +
  1414. + switch (host->last_cmd) {
  1415. + case NAND_CMD_READID:
  1416. + if (host->last_byte < 4)
  1417. + ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
  1418. + (24 - (host->last_byte << 3));
  1419. + else if (host->last_byte < 8)
  1420. + ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
  1421. + (56 - (host->last_byte << 3));
  1422. + break;
  1423. +
  1424. + case NAND_CMD_READOOB:
  1425. + ret = oob_reg_read(ctrl, host->last_byte);
  1426. + break;
  1427. +
  1428. + case NAND_CMD_STATUS:
  1429. + ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
  1430. + INTFC_FLASH_STATUS;
  1431. + if (wp_on) /* hide WP status */
  1432. + ret |= NAND_STATUS_WP;
  1433. + break;
  1434. +
  1435. + case NAND_CMD_PARAM:
  1436. + case NAND_CMD_RNDOUT:
  1437. + addr = host->last_addr + host->last_byte;
  1438. + offs = addr & (FC_BYTES - 1);
  1439. +
  1440. + /* At FC_BYTES boundary, switch to next column */
  1441. + if (host->last_byte > 0 && offs == 0)
  1442. + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
  1443. +
  1444. + ret = ctrl->flash_cache[offs >> 2] >>
  1445. + (24 - ((offs & 0x03) << 3));
  1446. + break;
  1447. + case NAND_CMD_GET_FEATURES:
  1448. + if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
  1449. + ret = 0;
  1450. + } else {
  1451. + bool last = host->last_byte ==
  1452. + ONFI_SUBFEATURE_PARAM_LEN - 1;
  1453. + brcmnand_low_level_op(host, LL_OP_RD, 0, last);
  1454. + ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
  1455. + }
  1456. + }
  1457. +
  1458. + dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
  1459. + host->last_byte++;
  1460. +
  1461. + return ret;
  1462. +}
  1463. +
  1464. +static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  1465. +{
  1466. + int i;
  1467. +
  1468. + for (i = 0; i < len; i++, buf++)
  1469. + *buf = brcmnand_read_byte(mtd);
  1470. +}
  1471. +
  1472. +static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
  1473. + int len)
  1474. +{
  1475. + int i;
  1476. + struct nand_chip *chip = mtd->priv;
  1477. + struct brcmnand_host *host = chip->priv;
  1478. +
  1479. + switch (host->last_cmd) {
  1480. + case NAND_CMD_SET_FEATURES:
  1481. + for (i = 0; i < len; i++)
  1482. + brcmnand_low_level_op(host, LL_OP_WR, buf[i],
  1483. + (i + 1) == len);
  1484. + break;
  1485. + default:
  1486. + BUG();
  1487. + break;
  1488. + }
  1489. +}
  1490. +
  1491. +/**
  1492. + * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
  1493. + * following ahead of time:
  1494. + * - Is this descriptor the beginning or end of a linked list?
  1495. + * - What is the (DMA) address of the next descriptor in the linked list?
  1496. + */
  1497. +static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
  1498. + struct brcm_nand_dma_desc *desc, u64 addr,
  1499. + dma_addr_t buf, u32 len, u8 dma_cmd,
  1500. + bool begin, bool end,
  1501. + dma_addr_t next_desc)
  1502. +{
  1503. + memset(desc, 0, sizeof(*desc));
  1504. + /* Descriptors are written in native byte order (wordwise) */
  1505. + desc->next_desc = lower_32_bits(next_desc);
  1506. + desc->next_desc_ext = upper_32_bits(next_desc);
  1507. + desc->cmd_irq = (dma_cmd << 24) |
  1508. + (end ? (0x03 << 8) : 0) | /* IRQ | STOP */
  1509. + (!!begin) | ((!!end) << 1); /* head, tail */
  1510. +#ifdef CONFIG_CPU_BIG_ENDIAN
  1511. + desc->cmd_irq |= 0x01 << 12;
  1512. +#endif
  1513. + desc->dram_addr = lower_32_bits(buf);
  1514. + desc->dram_addr_ext = upper_32_bits(buf);
  1515. + desc->tfr_len = len;
  1516. + desc->total_len = len;
  1517. + desc->flash_addr = lower_32_bits(addr);
  1518. + desc->flash_addr_ext = upper_32_bits(addr);
  1519. + desc->cs = host->cs;
  1520. + desc->status_valid = 0x01;
  1521. + return 0;
  1522. +}
  1523. +
  1524. +/**
  1525. + * Kick the FLASH_DMA engine, with a given DMA descriptor
  1526. + */
  1527. +static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
  1528. +{
  1529. + struct brcmnand_controller *ctrl = host->ctrl;
  1530. + unsigned long timeo = msecs_to_jiffies(100);
  1531. +
  1532. + flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
  1533. + (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
  1534. + flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
  1535. + (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
  1536. +
  1537. + /* Start FLASH_DMA engine */
  1538. + ctrl->dma_pending = true;
  1539. + mb(); /* flush previous writes */
  1540. + flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
  1541. +
  1542. + if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
  1543. + dev_err(ctrl->dev,
  1544. + "timeout waiting for DMA; status %#x, error status %#x\n",
  1545. + flash_dma_readl(ctrl, FLASH_DMA_STATUS),
  1546. + flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
  1547. + }
  1548. + ctrl->dma_pending = false;
  1549. + flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
  1550. +}
  1551. +
  1552. +static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
  1553. + u32 len, u8 dma_cmd)
  1554. +{
  1555. + struct brcmnand_controller *ctrl = host->ctrl;
  1556. + dma_addr_t buf_pa;
  1557. + int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  1558. +
  1559. + buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
  1560. + if (dma_mapping_error(ctrl->dev, buf_pa)) {
  1561. + dev_err(ctrl->dev, "unable to map buffer for DMA\n");
  1562. + return -ENOMEM;
  1563. + }
  1564. +
  1565. + brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
  1566. + dma_cmd, true, true, 0);
  1567. +
  1568. + brcmnand_dma_run(host, ctrl->dma_pa);
  1569. +
  1570. + dma_unmap_single(ctrl->dev, buf_pa, len, dir);
  1571. +
  1572. + if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
  1573. + return -EBADMSG;
  1574. + else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
  1575. + return -EUCLEAN;
  1576. +
  1577. + return 0;
  1578. +}
  1579. +
  1580. +/*
  1581. + * Assumes proper CS is already set
  1582. + */
  1583. +static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
  1584. + u64 addr, unsigned int trans, u32 *buf,
  1585. + u8 *oob, u64 *err_addr)
  1586. +{
  1587. + struct brcmnand_host *host = chip->priv;
  1588. + struct brcmnand_controller *ctrl = host->ctrl;
  1589. + int i, j, ret = 0;
  1590. +
  1591. + /* Clear error addresses */
  1592. + brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
  1593. + brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
  1594. +
  1595. + brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
  1596. + (host->cs << 16) | ((addr >> 32) & 0xffff));
  1597. + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
  1598. +
  1599. + for (i = 0; i < trans; i++, addr += FC_BYTES) {
  1600. + brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
  1601. + lower_32_bits(addr));
  1602. + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
  1603. + /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
  1604. + brcmnand_send_cmd(host, CMD_PAGE_READ);
  1605. + brcmnand_waitfunc(mtd, chip);
  1606. +
  1607. + if (likely(buf)) {
  1608. + brcmnand_soc_data_bus_prepare(ctrl->soc);
  1609. +
  1610. + for (j = 0; j < FC_WORDS; j++, buf++)
  1611. + *buf = brcmnand_read_fc(ctrl, j);
  1612. +
  1613. + brcmnand_soc_data_bus_unprepare(ctrl->soc);
  1614. + }
  1615. +
  1616. + if (oob)
  1617. + oob += read_oob_from_regs(ctrl, i, oob,
  1618. + mtd->oobsize / trans,
  1619. + host->hwcfg.sector_size_1k);
  1620. +
  1621. + if (!ret) {
  1622. + *err_addr = brcmnand_read_reg(ctrl,
  1623. + BRCMNAND_UNCORR_ADDR) |
  1624. + ((u64)(brcmnand_read_reg(ctrl,
  1625. + BRCMNAND_UNCORR_EXT_ADDR)
  1626. + & 0xffff) << 32);
  1627. + if (*err_addr)
  1628. + ret = -EBADMSG;
  1629. + }
  1630. +
  1631. + if (!ret) {
  1632. + *err_addr = brcmnand_read_reg(ctrl,
  1633. + BRCMNAND_CORR_ADDR) |
  1634. + ((u64)(brcmnand_read_reg(ctrl,
  1635. + BRCMNAND_CORR_EXT_ADDR)
  1636. + & 0xffff) << 32);
  1637. + if (*err_addr)
  1638. + ret = -EUCLEAN;
  1639. + }
  1640. + }
  1641. +
  1642. + return ret;
  1643. +}
  1644. +
  1645. +static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
  1646. + u64 addr, unsigned int trans, u32 *buf, u8 *oob)
  1647. +{
  1648. + struct brcmnand_host *host = chip->priv;
  1649. + struct brcmnand_controller *ctrl = host->ctrl;
  1650. + u64 err_addr = 0;
  1651. + int err;
  1652. +
  1653. + dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
  1654. +
  1655. + brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
  1656. +
  1657. + if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
  1658. + err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
  1659. + CMD_PAGE_READ);
  1660. + if (err) {
  1661. + if (mtd_is_bitflip_or_eccerr(err))
  1662. + err_addr = addr;
  1663. + else
  1664. + return -EIO;
  1665. + }
  1666. + } else {
  1667. + if (oob)
  1668. + memset(oob, 0x99, mtd->oobsize);
  1669. +
  1670. + err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
  1671. + oob, &err_addr);
  1672. + }
  1673. +
  1674. + if (mtd_is_eccerr(err)) {
  1675. + dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
  1676. + (unsigned long long)err_addr);
  1677. + mtd->ecc_stats.failed++;
  1678. + /* NAND layer expects zero on ECC errors */
  1679. + return 0;
  1680. + }
  1681. +
  1682. + if (mtd_is_bitflip(err)) {
  1683. + unsigned int corrected = brcmnand_count_corrected(ctrl);
  1684. +
  1685. + dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
  1686. + (unsigned long long)err_addr);
  1687. + mtd->ecc_stats.corrected += corrected;
  1688. + /* Always exceed the software-imposed threshold */
  1689. + return max(mtd->bitflip_threshold, corrected);
  1690. + }
  1691. +
  1692. + return 0;
  1693. +}
  1694. +
  1695. +static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
  1696. + uint8_t *buf, int oob_required, int page)
  1697. +{
  1698. + struct brcmnand_host *host = chip->priv;
  1699. + u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
  1700. +
  1701. + return brcmnand_read(mtd, chip, host->last_addr,
  1702. + mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
  1703. +}
  1704. +
  1705. +static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  1706. + uint8_t *buf, int oob_required, int page)
  1707. +{
  1708. + struct brcmnand_host *host = chip->priv;
  1709. + u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
  1710. + int ret;
  1711. +
  1712. + brcmnand_set_ecc_enabled(host, 0);
  1713. + ret = brcmnand_read(mtd, chip, host->last_addr,
  1714. + mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
  1715. + brcmnand_set_ecc_enabled(host, 1);
  1716. + return ret;
  1717. +}
  1718. +
  1719. +static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
  1720. + int page)
  1721. +{
  1722. + return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
  1723. + mtd->writesize >> FC_SHIFT,
  1724. + NULL, (u8 *)chip->oob_poi);
  1725. +}
  1726. +
  1727. +static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
  1728. + int page)
  1729. +{
  1730. + struct brcmnand_host *host = chip->priv;
  1731. +
  1732. + brcmnand_set_ecc_enabled(host, 0);
  1733. + brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
  1734. + mtd->writesize >> FC_SHIFT,
  1735. + NULL, (u8 *)chip->oob_poi);
  1736. + brcmnand_set_ecc_enabled(host, 1);
  1737. + return 0;
  1738. +}
  1739. +
  1740. +static int brcmnand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
  1741. + uint32_t data_offs, uint32_t readlen,
  1742. + uint8_t *bufpoi, int page)
  1743. +{
  1744. + struct brcmnand_host *host = chip->priv;
  1745. +
  1746. + return brcmnand_read(mtd, chip, host->last_addr + data_offs,
  1747. + readlen >> FC_SHIFT, (u32 *)bufpoi, NULL);
  1748. +}
  1749. +
  1750. +static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
  1751. + u64 addr, const u32 *buf, u8 *oob)
  1752. +{
  1753. + struct brcmnand_host *host = chip->priv;
  1754. + struct brcmnand_controller *ctrl = host->ctrl;
  1755. + unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
  1756. + int status, ret = 0;
  1757. +
  1758. + dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
  1759. +
  1760. + if (unlikely((u32)buf & 0x03)) {
  1761. + dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
  1762. + buf = (u32 *)((u32)buf & ~0x03);
  1763. + }
  1764. +
  1765. + brcmnand_wp(mtd, 0);
  1766. +
  1767. + for (i = 0; i < ctrl->max_oob; i += 4)
  1768. + oob_reg_write(ctrl, i, 0xffffffff);
  1769. +
  1770. + if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
  1771. + if (brcmnand_dma_trans(host, addr, (u32 *)buf,
  1772. + mtd->writesize, CMD_PROGRAM_PAGE))
  1773. + ret = -EIO;
  1774. + goto out;
  1775. + }
  1776. +
  1777. + brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
  1778. + (host->cs << 16) | ((addr >> 32) & 0xffff));
  1779. + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
  1780. +
  1781. + for (i = 0; i < trans; i++, addr += FC_BYTES) {
  1782. + /* full address MUST be set before populating FC */
  1783. + brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
  1784. + lower_32_bits(addr));
  1785. + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
  1786. +
  1787. + if (buf) {
  1788. + brcmnand_soc_data_bus_prepare(ctrl->soc);
  1789. +
  1790. + for (j = 0; j < FC_WORDS; j++, buf++)
  1791. + brcmnand_write_fc(ctrl, j, *buf);
  1792. +
  1793. + brcmnand_soc_data_bus_unprepare(ctrl->soc);
  1794. + } else if (oob) {
  1795. + for (j = 0; j < FC_WORDS; j++)
  1796. + brcmnand_write_fc(ctrl, j, 0xffffffff);
  1797. + }
  1798. +
  1799. + if (oob) {
  1800. + oob += write_oob_to_regs(ctrl, i, oob,
  1801. + mtd->oobsize / trans,
  1802. + host->hwcfg.sector_size_1k);
  1803. + }
  1804. +
  1805. + /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
  1806. + brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
  1807. + status = brcmnand_waitfunc(mtd, chip);
  1808. +
  1809. + if (status & NAND_STATUS_FAIL) {
  1810. + dev_info(ctrl->dev, "program failed at %llx\n",
  1811. + (unsigned long long)addr);
  1812. + ret = -EIO;
  1813. + goto out;
  1814. + }
  1815. + }
  1816. +out:
  1817. + brcmnand_wp(mtd, 1);
  1818. + return ret;
  1819. +}
  1820. +
  1821. +static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  1822. + const uint8_t *buf, int oob_required)
  1823. +{
  1824. + struct brcmnand_host *host = chip->priv;
  1825. + void *oob = oob_required ? chip->oob_poi : NULL;
  1826. +
  1827. + brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
  1828. + return 0;
  1829. +}
  1830. +
  1831. +static int brcmnand_write_page_raw(struct mtd_info *mtd,
  1832. + struct nand_chip *chip, const uint8_t *buf,
  1833. + int oob_required)
  1834. +{
  1835. + struct brcmnand_host *host = chip->priv;
  1836. + void *oob = oob_required ? chip->oob_poi : NULL;
  1837. +
  1838. + brcmnand_set_ecc_enabled(host, 0);
  1839. + brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
  1840. + brcmnand_set_ecc_enabled(host, 1);
  1841. + return 0;
  1842. +}
  1843. +
  1844. +static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
  1845. + int page)
  1846. +{
  1847. + return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
  1848. + NULL, chip->oob_poi);
  1849. +}
  1850. +
  1851. +static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
  1852. + int page)
  1853. +{
  1854. + struct brcmnand_host *host = chip->priv;
  1855. + int ret;
  1856. +
  1857. + brcmnand_set_ecc_enabled(host, 0);
  1858. + ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
  1859. + (u8 *)chip->oob_poi);
  1860. + brcmnand_set_ecc_enabled(host, 1);
  1861. +
  1862. + return ret;
  1863. +}
  1864. +
  1865. +/***********************************************************************
  1866. + * Per-CS setup (1 NAND device)
  1867. + ***********************************************************************/
  1868. +
  1869. +static int brcmnand_set_cfg(struct brcmnand_host *host,
  1870. + struct brcmnand_cfg *cfg)
  1871. +{
  1872. + struct brcmnand_controller *ctrl = host->ctrl;
  1873. + struct nand_chip *chip = &host->chip;
  1874. + u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
  1875. + u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
  1876. + BRCMNAND_CS_CFG_EXT);
  1877. + u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
  1878. + BRCMNAND_CS_ACC_CONTROL);
  1879. + u8 block_size = 0, page_size = 0, device_size = 0;
  1880. + u32 tmp;
  1881. +
  1882. + if (ctrl->block_sizes) {
  1883. + int i, found;
  1884. +
  1885. + for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
  1886. + if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
  1887. + block_size = i;
  1888. + found = 1;
  1889. + }
  1890. + if (!found) {
  1891. + dev_warn(ctrl->dev, "invalid block size %u\n",
  1892. + cfg->block_size);
  1893. + return -EINVAL;
  1894. + }
  1895. + } else {
  1896. + block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
  1897. + }
  1898. +
  1899. + if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
  1900. + cfg->block_size > ctrl->max_block_size)) {
  1901. + dev_warn(ctrl->dev, "invalid block size %u\n",
  1902. + cfg->block_size);
  1903. + block_size = 0;
  1904. + }
  1905. +
  1906. + if (ctrl->page_sizes) {
  1907. + int i, found;
  1908. +
  1909. + for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
  1910. + if (ctrl->page_sizes[i] == cfg->page_size) {
  1911. + page_size = i;
  1912. + found = 1;
  1913. + }
  1914. + if (!found) {
  1915. + dev_warn(ctrl->dev, "invalid page size %u\n",
  1916. + cfg->page_size);
  1917. + return -EINVAL;
  1918. + }
  1919. + } else {
  1920. + page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
  1921. + }
  1922. +
  1923. + if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
  1924. + cfg->page_size > ctrl->max_page_size)) {
  1925. + dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
  1926. + return -EINVAL;
  1927. + }
  1928. +
  1929. + if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
  1930. + dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
  1931. + (unsigned long long)cfg->device_size);
  1932. + return -EINVAL;
  1933. + }
  1934. + device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
  1935. +
  1936. + tmp = (cfg->blk_adr_bytes << 8) |
  1937. + (cfg->col_adr_bytes << 12) |
  1938. + (cfg->ful_adr_bytes << 16) |
  1939. + (!!(cfg->device_width == 16) << 23) |
  1940. + (device_size << 24);
  1941. + if (cfg_offs == cfg_ext_offs) {
  1942. + tmp |= (page_size << 20) | (block_size << 28);
  1943. + nand_writereg(ctrl, cfg_offs, tmp);
  1944. + } else {
  1945. + nand_writereg(ctrl, cfg_offs, tmp);
  1946. + tmp = page_size | (block_size << 4);
  1947. + nand_writereg(ctrl, cfg_ext_offs, tmp);
  1948. + }
  1949. +
  1950. + tmp = nand_readreg(ctrl, acc_control_offs);
  1951. + tmp &= ~brcmnand_ecc_level_mask(ctrl);
  1952. + tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
  1953. + tmp &= ~brcmnand_spare_area_mask(ctrl);
  1954. + tmp |= cfg->spare_area_size;
  1955. + nand_writereg(ctrl, acc_control_offs, tmp);
  1956. +
  1957. + brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
  1958. +
  1959. + /* threshold = ceil(BCH-level * 0.75) */
  1960. + brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
  1961. +
  1962. + return 0;
  1963. +}
  1964. +
  1965. +static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
  1966. +{
  1967. + buf += sprintf(buf,
  1968. + "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
  1969. + (unsigned long long)cfg->device_size >> 20,
  1970. + cfg->block_size >> 10,
  1971. + cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
  1972. + cfg->page_size >= 1024 ? "KiB" : "B",
  1973. + cfg->spare_area_size, cfg->device_width);
  1974. +
  1975. + /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
  1976. + if (is_hamming_ecc(cfg))
  1977. + sprintf(buf, ", Hamming ECC");
  1978. + else if (cfg->sector_size_1k)
  1979. + sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
  1980. + else
  1981. + sprintf(buf, ", BCH-%u", cfg->ecc_level);
  1982. +}
  1983. +
  1984. +/*
  1985. + * Minimum number of bytes to address a page. Calculated as:
  1986. + * roundup(log2(size / page-size) / 8)
  1987. + *
  1988. + * NB: the following does not "round up" for non-power-of-2 'size'; but this is
  1989. + * OK because many other things will break if 'size' is irregular...
  1990. + */
  1991. +static inline int get_blk_adr_bytes(u64 size, u32 writesize)
  1992. +{
  1993. + return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
  1994. +}
  1995. +
  1996. +static int brcmnand_setup_dev(struct brcmnand_host *host)
  1997. +{
  1998. + struct mtd_info *mtd = &host->mtd;
  1999. + struct nand_chip *chip = &host->chip;
  2000. + struct brcmnand_controller *ctrl = host->ctrl;
  2001. + struct brcmnand_cfg *cfg = &host->hwcfg;
  2002. + char msg[128];
  2003. + u32 offs, tmp, oob_sector;
  2004. + int ret;
  2005. +
  2006. + memset(cfg, 0, sizeof(*cfg));
  2007. +
  2008. + ret = of_property_read_u32(chip->dn, "brcm,nand-oob-sector-size",
  2009. + &oob_sector);
  2010. + if (ret) {
  2011. + /* Use detected size */
  2012. + cfg->spare_area_size = mtd->oobsize /
  2013. + (mtd->writesize >> FC_SHIFT);
  2014. + } else {
  2015. + cfg->spare_area_size = oob_sector;
  2016. + }
  2017. + if (cfg->spare_area_size > ctrl->max_oob)
  2018. + cfg->spare_area_size = ctrl->max_oob;
  2019. + /*
  2020. + * Set oobsize to be consistent with controller's spare_area_size, as
  2021. + * the rest is inaccessible.
  2022. + */
  2023. + mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
  2024. +
  2025. + cfg->device_size = mtd->size;
  2026. + cfg->block_size = mtd->erasesize;
  2027. + cfg->page_size = mtd->writesize;
  2028. + cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
  2029. + cfg->col_adr_bytes = 2;
  2030. + cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
  2031. +
  2032. + switch (chip->ecc.size) {
  2033. + case 512:
  2034. + if (chip->ecc.strength == 1) /* Hamming */
  2035. + cfg->ecc_level = 15;
  2036. + else
  2037. + cfg->ecc_level = chip->ecc.strength;
  2038. + cfg->sector_size_1k = 0;
  2039. + break;
  2040. + case 1024:
  2041. + if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
  2042. + dev_err(ctrl->dev, "1KB sectors not supported\n");
  2043. + return -EINVAL;
  2044. + }
  2045. + if (chip->ecc.strength & 0x1) {
  2046. + dev_err(ctrl->dev,
  2047. + "odd ECC not supported with 1KB sectors\n");
  2048. + return -EINVAL;
  2049. + }
  2050. +
  2051. + cfg->ecc_level = chip->ecc.strength >> 1;
  2052. + cfg->sector_size_1k = 1;
  2053. + break;
  2054. + default:
  2055. + dev_err(ctrl->dev, "unsupported ECC size: %d\n",
  2056. + chip->ecc.size);
  2057. + return -EINVAL;
  2058. + }
  2059. +
  2060. + cfg->ful_adr_bytes = cfg->blk_adr_bytes;
  2061. + if (mtd->writesize > 512)
  2062. + cfg->ful_adr_bytes += cfg->col_adr_bytes;
  2063. + else
  2064. + cfg->ful_adr_bytes += 1;
  2065. +
  2066. + ret = brcmnand_set_cfg(host, cfg);
  2067. + if (ret)
  2068. + return ret;
  2069. +
  2070. + brcmnand_set_ecc_enabled(host, 1);
  2071. +
  2072. + brcmnand_print_cfg(msg, cfg);
  2073. + dev_info(ctrl->dev, "detected %s\n", msg);
  2074. +
  2075. + /* Configure ACC_CONTROL */
  2076. + offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
  2077. + tmp = nand_readreg(ctrl, offs);
  2078. + tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
  2079. + tmp &= ~ACC_CONTROL_RD_ERASED;
  2080. + tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
  2081. + if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
  2082. + /*
  2083. + * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
  2084. + * errors
  2085. + */
  2086. + if (has_flash_dma(ctrl))
  2087. + tmp &= ~ACC_CONTROL_PREFETCH;
  2088. + else
  2089. + tmp |= ACC_CONTROL_PREFETCH;
  2090. + }
  2091. + nand_writereg(ctrl, offs, tmp);
  2092. +
  2093. + return 0;
  2094. +}
  2095. +
  2096. +static int brcmnand_init_cs(struct brcmnand_host *host)
  2097. +{
  2098. + struct brcmnand_controller *ctrl = host->ctrl;
  2099. + struct device_node *dn = host->of_node;
  2100. + struct platform_device *pdev = host->pdev;
  2101. + struct mtd_info *mtd;
  2102. + struct nand_chip *chip;
  2103. + int ret = 0;
  2104. + struct mtd_part_parser_data ppdata = { .of_node = dn };
  2105. +
  2106. + ret = of_property_read_u32(dn, "reg", &host->cs);
  2107. + if (ret) {
  2108. + dev_err(&pdev->dev, "can't get chip-select\n");
  2109. + return -ENXIO;
  2110. + }
  2111. +
  2112. + mtd = &host->mtd;
  2113. + chip = &host->chip;
  2114. +
  2115. + chip->dn = dn;
  2116. + chip->priv = host;
  2117. + mtd->priv = chip;
  2118. + mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
  2119. + host->cs);
  2120. + mtd->owner = THIS_MODULE;
  2121. + mtd->dev.parent = &pdev->dev;
  2122. +
  2123. + chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
  2124. + chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
  2125. +
  2126. + chip->cmd_ctrl = brcmnand_cmd_ctrl;
  2127. + chip->cmdfunc = brcmnand_cmdfunc;
  2128. + chip->waitfunc = brcmnand_waitfunc;
  2129. + chip->read_byte = brcmnand_read_byte;
  2130. + chip->read_buf = brcmnand_read_buf;
  2131. + chip->write_buf = brcmnand_write_buf;
  2132. +
  2133. + chip->ecc.mode = NAND_ECC_HW;
  2134. + chip->ecc.read_page = brcmnand_read_page;
  2135. + chip->ecc.read_subpage = brcmnand_read_subpage;
  2136. + chip->ecc.write_page = brcmnand_write_page;
  2137. + chip->ecc.read_page_raw = brcmnand_read_page_raw;
  2138. + chip->ecc.write_page_raw = brcmnand_write_page_raw;
  2139. + chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
  2140. + chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
  2141. + chip->ecc.read_oob = brcmnand_read_oob;
  2142. + chip->ecc.write_oob = brcmnand_write_oob;
  2143. +
  2144. + chip->controller = &ctrl->controller;
  2145. +
  2146. + if (nand_scan_ident(mtd, 1, NULL))
  2147. + return -ENXIO;
  2148. +
  2149. + chip->options |= NAND_NO_SUBPAGE_WRITE;
  2150. + /*
  2151. + * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
  2152. + * to/from, and have nand_base pass us a bounce buffer instead, as
  2153. + * needed.
  2154. + */
  2155. + chip->options |= NAND_USE_BOUNCE_BUFFER;
  2156. +
  2157. + if (of_get_nand_on_flash_bbt(dn))
  2158. + chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
  2159. +
  2160. + if (brcmnand_setup_dev(host))
  2161. + return -ENXIO;
  2162. +
  2163. + chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
  2164. + /* only use our internal HW threshold */
  2165. + mtd->bitflip_threshold = 1;
  2166. +
  2167. + chip->ecc.layout = brcmstb_choose_ecc_layout(host);
  2168. + if (!chip->ecc.layout)
  2169. + return -ENXIO;
  2170. +
  2171. + if (nand_scan_tail(mtd))
  2172. + return -ENXIO;
  2173. +
  2174. + return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
  2175. +}
  2176. +
  2177. +static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
  2178. + int restore)
  2179. +{
  2180. + struct brcmnand_controller *ctrl = host->ctrl;
  2181. + u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
  2182. + u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
  2183. + BRCMNAND_CS_CFG_EXT);
  2184. + u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
  2185. + BRCMNAND_CS_ACC_CONTROL);
  2186. + u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
  2187. + u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
  2188. +
  2189. + if (restore) {
  2190. + nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
  2191. + if (cfg_offs != cfg_ext_offs)
  2192. + nand_writereg(ctrl, cfg_ext_offs,
  2193. + host->hwcfg.config_ext);
  2194. + nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
  2195. + nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
  2196. + nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
  2197. + } else {
  2198. + host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
  2199. + if (cfg_offs != cfg_ext_offs)
  2200. + host->hwcfg.config_ext =
  2201. + nand_readreg(ctrl, cfg_ext_offs);
  2202. + host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
  2203. + host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
  2204. + host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
  2205. + }
  2206. +}
  2207. +
  2208. +static int brcmnand_suspend(struct device *dev)
  2209. +{
  2210. + struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
  2211. + struct brcmnand_host *host;
  2212. +
  2213. + list_for_each_entry(host, &ctrl->host_list, node)
  2214. + brcmnand_save_restore_cs_config(host, 0);
  2215. +
  2216. + ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
  2217. + ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
  2218. + ctrl->corr_stat_threshold =
  2219. + brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
  2220. +
  2221. + if (has_flash_dma(ctrl))
  2222. + ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
  2223. +
  2224. + return 0;
  2225. +}
  2226. +
  2227. +static int brcmnand_resume(struct device *dev)
  2228. +{
  2229. + struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
  2230. + struct brcmnand_host *host;
  2231. +
  2232. + if (has_flash_dma(ctrl)) {
  2233. + flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
  2234. + flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
  2235. + }
  2236. +
  2237. + brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
  2238. + brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
  2239. + brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
  2240. + ctrl->corr_stat_threshold);
  2241. + if (ctrl->soc) {
  2242. + /* Clear/re-enable interrupt */
  2243. + ctrl->soc->ctlrdy_ack(ctrl->soc);
  2244. + ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
  2245. + }
  2246. +
  2247. + list_for_each_entry(host, &ctrl->host_list, node) {
  2248. + struct mtd_info *mtd = &host->mtd;
  2249. + struct nand_chip *chip = mtd->priv;
  2250. +
  2251. + brcmnand_save_restore_cs_config(host, 1);
  2252. +
  2253. + /* Reset the chip, required by some chips after power-up */
  2254. + chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
  2255. + }
  2256. +
  2257. + return 0;
  2258. +}
  2259. +
  2260. +const struct dev_pm_ops brcmnand_pm_ops = {
  2261. + .suspend = brcmnand_suspend,
  2262. + .resume = brcmnand_resume,
  2263. +};
  2264. +EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
  2265. +
  2266. +static const struct of_device_id brcmnand_of_match[] = {
  2267. + { .compatible = "brcm,brcmnand-v4.0" },
  2268. + { .compatible = "brcm,brcmnand-v5.0" },
  2269. + { .compatible = "brcm,brcmnand-v6.0" },
  2270. + { .compatible = "brcm,brcmnand-v6.1" },
  2271. + { .compatible = "brcm,brcmnand-v7.0" },
  2272. + { .compatible = "brcm,brcmnand-v7.1" },
  2273. + {},
  2274. +};
  2275. +MODULE_DEVICE_TABLE(of, brcmnand_of_match);
  2276. +
  2277. +/***********************************************************************
  2278. + * Platform driver setup (per controller)
  2279. + ***********************************************************************/
  2280. +
  2281. +int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
  2282. +{
  2283. + struct device *dev = &pdev->dev;
  2284. + struct device_node *dn = dev->of_node, *child;
  2285. + struct brcmnand_controller *ctrl;
  2286. + struct resource *res;
  2287. + int ret;
  2288. +
  2289. + /* We only support device-tree instantiation */
  2290. + if (!dn)
  2291. + return -ENODEV;
  2292. +
  2293. + if (!of_match_node(brcmnand_of_match, dn))
  2294. + return -ENODEV;
  2295. +
  2296. + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
  2297. + if (!ctrl)
  2298. + return -ENOMEM;
  2299. +
  2300. + dev_set_drvdata(dev, ctrl);
  2301. + ctrl->dev = dev;
  2302. +
  2303. + init_completion(&ctrl->done);
  2304. + init_completion(&ctrl->dma_done);
  2305. + spin_lock_init(&ctrl->controller.lock);
  2306. + init_waitqueue_head(&ctrl->controller.wq);
  2307. + INIT_LIST_HEAD(&ctrl->host_list);
  2308. +
  2309. + /* NAND register range */
  2310. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2311. + ctrl->nand_base = devm_ioremap_resource(dev, res);
  2312. + if (IS_ERR(ctrl->nand_base))
  2313. + return PTR_ERR(ctrl->nand_base);
  2314. +
  2315. + /* Initialize NAND revision */
  2316. + ret = brcmnand_revision_init(ctrl);
  2317. + if (ret)
  2318. + return ret;
  2319. +
  2320. + /*
  2321. + * Most chips have this cache at a fixed offset within 'nand' block.
  2322. + * Some must specify this region separately.
  2323. + */
  2324. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
  2325. + if (res) {
  2326. + ctrl->nand_fc = devm_ioremap_resource(dev, res);
  2327. + if (IS_ERR(ctrl->nand_fc))
  2328. + return PTR_ERR(ctrl->nand_fc);
  2329. + } else {
  2330. + ctrl->nand_fc = ctrl->nand_base +
  2331. + ctrl->reg_offsets[BRCMNAND_FC_BASE];
  2332. + }
  2333. +
  2334. + /* FLASH_DMA */
  2335. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
  2336. + if (res) {
  2337. + ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
  2338. + if (IS_ERR(ctrl->flash_dma_base))
  2339. + return PTR_ERR(ctrl->flash_dma_base);
  2340. +
  2341. + flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
  2342. + flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
  2343. +
  2344. + /* Allocate descriptor(s) */
  2345. + ctrl->dma_desc = dmam_alloc_coherent(dev,
  2346. + sizeof(*ctrl->dma_desc),
  2347. + &ctrl->dma_pa, GFP_KERNEL);
  2348. + if (!ctrl->dma_desc)
  2349. + return -ENOMEM;
  2350. +
  2351. + ctrl->dma_irq = platform_get_irq(pdev, 1);
  2352. + if ((int)ctrl->dma_irq < 0) {
  2353. + dev_err(dev, "missing FLASH_DMA IRQ\n");
  2354. + return -ENODEV;
  2355. + }
  2356. +
  2357. + ret = devm_request_irq(dev, ctrl->dma_irq,
  2358. + brcmnand_dma_irq, 0, DRV_NAME,
  2359. + ctrl);
  2360. + if (ret < 0) {
  2361. + dev_err(dev, "can't allocate IRQ %d: error %d\n",
  2362. + ctrl->dma_irq, ret);
  2363. + return ret;
  2364. + }
  2365. +
  2366. + dev_info(dev, "enabling FLASH_DMA\n");
  2367. + }
  2368. +
  2369. + /* Disable automatic device ID config, direct addressing */
  2370. + brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
  2371. + CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
  2372. + /* Disable XOR addressing */
  2373. + brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
  2374. +
  2375. + if (ctrl->features & BRCMNAND_HAS_WP) {
  2376. + /* Permanently disable write protection */
  2377. + if (wp_on == 2)
  2378. + brcmnand_set_wp(ctrl, false);
  2379. + } else {
  2380. + wp_on = 0;
  2381. + }
  2382. +
  2383. + /* IRQ */
  2384. + ctrl->irq = platform_get_irq(pdev, 0);
  2385. + if ((int)ctrl->irq < 0) {
  2386. + dev_err(dev, "no IRQ defined\n");
  2387. + return -ENODEV;
  2388. + }
  2389. +
  2390. + /*
  2391. + * Some SoCs integrate this controller (e.g., its interrupt bits) in
  2392. + * interesting ways
  2393. + */
  2394. + if (soc) {
  2395. + ctrl->soc = soc;
  2396. +
  2397. + ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
  2398. + DRV_NAME, ctrl);
  2399. +
  2400. + /* Enable interrupt */
  2401. + ctrl->soc->ctlrdy_ack(ctrl->soc);
  2402. + ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
  2403. + } else {
  2404. + /* Use standard interrupt infrastructure */
  2405. + ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
  2406. + DRV_NAME, ctrl);
  2407. + }
  2408. + if (ret < 0) {
  2409. + dev_err(dev, "can't allocate IRQ %d: error %d\n",
  2410. + ctrl->irq, ret);
  2411. + return ret;
  2412. + }
  2413. +
  2414. + for_each_available_child_of_node(dn, child) {
  2415. + if (of_device_is_compatible(child, "brcm,nandcs")) {
  2416. + struct brcmnand_host *host;
  2417. +
  2418. + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
  2419. + if (!host)
  2420. + return -ENOMEM;
  2421. + host->pdev = pdev;
  2422. + host->ctrl = ctrl;
  2423. + host->of_node = child;
  2424. +
  2425. + ret = brcmnand_init_cs(host);
  2426. + if (ret)
  2427. + continue; /* Try all chip-selects */
  2428. +
  2429. + list_add_tail(&host->node, &ctrl->host_list);
  2430. + }
  2431. + }
  2432. +
  2433. + /* No chip-selects could initialize properly */
  2434. + if (list_empty(&ctrl->host_list))
  2435. + return -ENODEV;
  2436. +
  2437. + return 0;
  2438. +}
  2439. +EXPORT_SYMBOL_GPL(brcmnand_probe);
  2440. +
  2441. +int brcmnand_remove(struct platform_device *pdev)
  2442. +{
  2443. + struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
  2444. + struct brcmnand_host *host;
  2445. +
  2446. + list_for_each_entry(host, &ctrl->host_list, node)
  2447. + nand_release(&host->mtd);
  2448. +
  2449. + dev_set_drvdata(&pdev->dev, NULL);
  2450. +
  2451. + return 0;
  2452. +}
  2453. +EXPORT_SYMBOL_GPL(brcmnand_remove);
  2454. +
  2455. +MODULE_LICENSE("GPL v2");
  2456. +MODULE_AUTHOR("Kevin Cernekee");
  2457. +MODULE_AUTHOR("Brian Norris");
  2458. +MODULE_DESCRIPTION("NAND driver for Broadcom chips");
  2459. +MODULE_ALIAS("platform:brcmnand");
  2460. --- /dev/null
  2461. +++ b/drivers/mtd/nand/brcmnand/brcmnand.h
  2462. @@ -0,0 +1,71 @@
  2463. +/*
  2464. + * Copyright © 2015 Broadcom Corporation
  2465. + *
  2466. + * This program is free software; you can redistribute it and/or modify
  2467. + * it under the terms of the GNU General Public License version 2 as
  2468. + * published by the Free Software Foundation.
  2469. + *
  2470. + * This program is distributed in the hope that it will be useful,
  2471. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  2472. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  2473. + * GNU General Public License for more details.
  2474. + */
  2475. +
  2476. +#ifndef __BRCMNAND_H__
  2477. +#define __BRCMNAND_H__
  2478. +
  2479. +#include <linux/types.h>
  2480. +#include <linux/io.h>
  2481. +
  2482. +struct platform_device;
  2483. +struct dev_pm_ops;
  2484. +
  2485. +struct brcmnand_soc {
  2486. + bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
  2487. + void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
  2488. + void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare);
  2489. +};
  2490. +
  2491. +static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc)
  2492. +{
  2493. + if (soc && soc->prepare_data_bus)
  2494. + soc->prepare_data_bus(soc, true);
  2495. +}
  2496. +
  2497. +static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc)
  2498. +{
  2499. + if (soc && soc->prepare_data_bus)
  2500. + soc->prepare_data_bus(soc, false);
  2501. +}
  2502. +
  2503. +static inline u32 brcmnand_readl(void __iomem *addr)
  2504. +{
  2505. + /*
  2506. + * MIPS endianness is configured by boot strap, which also reverses all
  2507. + * bus endianness (i.e., big-endian CPU + big endian bus ==> native
  2508. + * endian I/O).
  2509. + *
  2510. + * Other architectures (e.g., ARM) either do not support big endian, or
  2511. + * else leave I/O in little endian mode.
  2512. + */
  2513. + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
  2514. + return __raw_readl(addr);
  2515. + else
  2516. + return readl_relaxed(addr);
  2517. +}
  2518. +
  2519. +static inline void brcmnand_writel(u32 val, void __iomem *addr)
  2520. +{
  2521. + /* See brcmnand_readl() comments */
  2522. + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
  2523. + __raw_writel(val, addr);
  2524. + else
  2525. + writel_relaxed(val, addr);
  2526. +}
  2527. +
  2528. +int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
  2529. +int brcmnand_remove(struct platform_device *pdev);
  2530. +
  2531. +extern const struct dev_pm_ops brcmnand_pm_ops;
  2532. +
  2533. +#endif /* __BRCMNAND_H__ */
  2534. --- /dev/null
  2535. +++ b/drivers/mtd/nand/brcmnand/brcmstb_nand.c
  2536. @@ -0,0 +1,44 @@
  2537. +/*
  2538. + * Copyright © 2015 Broadcom Corporation
  2539. + *
  2540. + * This program is free software; you can redistribute it and/or modify
  2541. + * it under the terms of the GNU General Public License version 2 as
  2542. + * published by the Free Software Foundation.
  2543. + *
  2544. + * This program is distributed in the hope that it will be useful,
  2545. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  2546. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  2547. + * GNU General Public License for more details.
  2548. + */
  2549. +
  2550. +#include <linux/device.h>
  2551. +#include <linux/module.h>
  2552. +#include <linux/platform_device.h>
  2553. +
  2554. +#include "brcmnand.h"
  2555. +
  2556. +static const struct of_device_id brcmstb_nand_of_match[] = {
  2557. + { .compatible = "brcm,brcmnand" },
  2558. + {},
  2559. +};
  2560. +MODULE_DEVICE_TABLE(of, brcmstb_nand_of_match);
  2561. +
  2562. +static int brcmstb_nand_probe(struct platform_device *pdev)
  2563. +{
  2564. + return brcmnand_probe(pdev, NULL);
  2565. +}
  2566. +
  2567. +static struct platform_driver brcmstb_nand_driver = {
  2568. + .probe = brcmstb_nand_probe,
  2569. + .remove = brcmnand_remove,
  2570. + .driver = {
  2571. + .name = "brcmstb_nand",
  2572. + .pm = &brcmnand_pm_ops,
  2573. + .of_match_table = brcmstb_nand_of_match,
  2574. + }
  2575. +};
  2576. +module_platform_driver(brcmstb_nand_driver);
  2577. +
  2578. +MODULE_LICENSE("GPL v2");
  2579. +MODULE_AUTHOR("Brian Norris");
  2580. +MODULE_DESCRIPTION("NAND driver for Broadcom STB chips");
  2581. --- /dev/null
  2582. +++ b/drivers/mtd/nand/brcmnand/iproc_nand.c
  2583. @@ -0,0 +1,150 @@
  2584. +/*
  2585. + * Copyright © 2015 Broadcom Corporation
  2586. + *
  2587. + * This program is free software; you can redistribute it and/or modify
  2588. + * it under the terms of the GNU General Public License version 2 as
  2589. + * published by the Free Software Foundation.
  2590. + *
  2591. + * This program is distributed in the hope that it will be useful,
  2592. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  2593. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  2594. + * GNU General Public License for more details.
  2595. + */
  2596. +
  2597. +#include <linux/device.h>
  2598. +#include <linux/io.h>
  2599. +#include <linux/ioport.h>
  2600. +#include <linux/module.h>
  2601. +#include <linux/of.h>
  2602. +#include <linux/of_address.h>
  2603. +#include <linux/platform_device.h>
  2604. +#include <linux/slab.h>
  2605. +
  2606. +#include "brcmnand.h"
  2607. +
  2608. +struct iproc_nand_soc {
  2609. + struct brcmnand_soc soc;
  2610. +
  2611. + void __iomem *idm_base;
  2612. + void __iomem *ext_base;
  2613. + spinlock_t idm_lock;
  2614. +};
  2615. +
  2616. +#define IPROC_NAND_CTLR_READY_OFFSET 0x10
  2617. +#define IPROC_NAND_CTLR_READY BIT(0)
  2618. +
  2619. +#define IPROC_NAND_IO_CTRL_OFFSET 0x00
  2620. +#define IPROC_NAND_APB_LE_MODE BIT(24)
  2621. +#define IPROC_NAND_INT_CTRL_READ_ENABLE BIT(6)
  2622. +
  2623. +static bool iproc_nand_intc_ack(struct brcmnand_soc *soc)
  2624. +{
  2625. + struct iproc_nand_soc *priv =
  2626. + container_of(soc, struct iproc_nand_soc, soc);
  2627. + void __iomem *mmio = priv->ext_base + IPROC_NAND_CTLR_READY_OFFSET;
  2628. + u32 val = brcmnand_readl(mmio);
  2629. +
  2630. + if (val & IPROC_NAND_CTLR_READY) {
  2631. + brcmnand_writel(IPROC_NAND_CTLR_READY, mmio);
  2632. + return true;
  2633. + }
  2634. +
  2635. + return false;
  2636. +}
  2637. +
  2638. +static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
  2639. +{
  2640. + struct iproc_nand_soc *priv =
  2641. + container_of(soc, struct iproc_nand_soc, soc);
  2642. + void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
  2643. + u32 val;
  2644. + unsigned long flags;
  2645. +
  2646. + spin_lock_irqsave(&priv->idm_lock, flags);
  2647. +
  2648. + val = brcmnand_readl(mmio);
  2649. +
  2650. + if (en)
  2651. + val |= IPROC_NAND_INT_CTRL_READ_ENABLE;
  2652. + else
  2653. + val &= ~IPROC_NAND_INT_CTRL_READ_ENABLE;
  2654. +
  2655. + brcmnand_writel(val, mmio);
  2656. +
  2657. + spin_unlock_irqrestore(&priv->idm_lock, flags);
  2658. +}
  2659. +
  2660. +static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare)
  2661. +{
  2662. + struct iproc_nand_soc *priv =
  2663. + container_of(soc, struct iproc_nand_soc, soc);
  2664. + void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
  2665. + u32 val;
  2666. + unsigned long flags;
  2667. +
  2668. + spin_lock_irqsave(&priv->idm_lock, flags);
  2669. +
  2670. + val = brcmnand_readl(mmio);
  2671. +
  2672. + if (prepare)
  2673. + val |= IPROC_NAND_APB_LE_MODE;
  2674. + else
  2675. + val &= ~IPROC_NAND_APB_LE_MODE;
  2676. +
  2677. + brcmnand_writel(val, mmio);
  2678. +
  2679. + spin_unlock_irqrestore(&priv->idm_lock, flags);
  2680. +}
  2681. +
  2682. +static int iproc_nand_probe(struct platform_device *pdev)
  2683. +{
  2684. + struct device *dev = &pdev->dev;
  2685. + struct iproc_nand_soc *priv;
  2686. + struct brcmnand_soc *soc;
  2687. + struct resource *res;
  2688. +
  2689. + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  2690. + if (!priv)
  2691. + return -ENOMEM;
  2692. + soc = &priv->soc;
  2693. +
  2694. + spin_lock_init(&priv->idm_lock);
  2695. +
  2696. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
  2697. + priv->idm_base = devm_ioremap_resource(dev, res);
  2698. + if (IS_ERR(priv->idm_base))
  2699. + return PTR_ERR(priv->idm_base);
  2700. +
  2701. + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
  2702. + priv->ext_base = devm_ioremap_resource(dev, res);
  2703. + if (IS_ERR(priv->ext_base))
  2704. + return PTR_ERR(priv->ext_base);
  2705. +
  2706. + soc->ctlrdy_ack = iproc_nand_intc_ack;
  2707. + soc->ctlrdy_set_enabled = iproc_nand_intc_set;
  2708. + soc->prepare_data_bus = iproc_nand_apb_access;
  2709. +
  2710. + return brcmnand_probe(pdev, soc);
  2711. +}
  2712. +
  2713. +static const struct of_device_id iproc_nand_of_match[] = {
  2714. + { .compatible = "brcm,nand-iproc" },
  2715. + {},
  2716. +};
  2717. +MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
  2718. +
  2719. +static struct platform_driver iproc_nand_driver = {
  2720. + .probe = iproc_nand_probe,
  2721. + .remove = brcmnand_remove,
  2722. + .driver = {
  2723. + .name = "iproc_nand",
  2724. + .pm = &brcmnand_pm_ops,
  2725. + .of_match_table = iproc_nand_of_match,
  2726. + }
  2727. +};
  2728. +module_platform_driver(iproc_nand_driver);
  2729. +
  2730. +MODULE_LICENSE("GPL v2");
  2731. +MODULE_AUTHOR("Brian Norris");
  2732. +MODULE_AUTHOR("Ray Jui");
  2733. +MODULE_DESCRIPTION("NAND driver for Broadcom IPROC-based SoCs");