420-mtd-bcm5301x_nand.patch 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608
  1. --- a/drivers/mtd/nand/Kconfig
  2. +++ b/drivers/mtd/nand/Kconfig
  3. @@ -516,4 +516,10 @@ config MTD_NAND_XWAY
  4. Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
  5. to the External Bus Unit (EBU).
  6. +config MTD_NAND_BCM
  7. + tristate "Support for NAND on some Broadcom SoC"
  8. + help
  9. + This driver is currently used for the NAND flash controller on the
  10. + Broadcom BCM5301X (NorthStar) SoCs.
  11. +
  12. endif # MTD_NAND
  13. --- a/drivers/mtd/nand/Makefile
  14. +++ b/drivers/mtd/nand/Makefile
  15. @@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740
  16. obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
  17. obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
  18. obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
  19. +obj-$(CONFIG_MTD_NAND_BCM) += bcm_nand.o
  20. nand-objs := nand_base.o nand_bbt.o nand_timings.o
  21. --- /dev/null
  22. +++ b/drivers/mtd/nand/bcm_nand.c
  23. @@ -0,0 +1,1583 @@
  24. +/*
  25. + * Northstar NAND controller driver
  26. + *
  27. + * (c) Broadcom, Inc. 2012 All Rights Reserved.
  28. + * Copyright 2014 Hauke Mehrtens <hauke@hauke-m.de>
  29. + *
  30. + * Licensed under the GNU/GPL. See COPYING for details.
  31. + *
  32. + * This module interfaces the NAND controller and hardware ECC capabilities
  33. + * to the generic NAND chip support in the NAND library.
  34. + *
  35. + * Notes:
  36. + * This driver depends on generic NAND driver, but works at the
  37. + * page level for operations.
  38. + *
  39. + * When a page is written, the ECC calculated also protects the OOB
  40. + * bytes not taken by ECC, and so the OOB must be combined with any
  41. + * OOB data that preceded the page-write operation in order for the
  42. + * ECC to be calculated correctly.
  43. + * Also, when the page is erased, but OOB data is not, HW ECC will
  44. + * indicate an error, because it checks OOB too, which calls for some
  45. + * help from the software in this driver.
  46. + *
  47. + * TBD:
  48. + * Block locking/unlocking support, OTP support
  49. + */
  50. +
  51. +
  52. +#include <linux/kernel.h>
  53. +#include <linux/module.h>
  54. +#include <linux/io.h>
  55. +#include <linux/ioport.h>
  56. +#include <linux/interrupt.h>
  57. +#include <linux/delay.h>
  58. +#include <linux/err.h>
  59. +#include <linux/slab.h>
  60. +#include <linux/bcma/bcma.h>
  61. +#include <linux/of_irq.h>
  62. +
  63. +#include <linux/mtd/mtd.h>
  64. +#include <linux/mtd/nand.h>
  65. +#include <linux/mtd/partitions.h>
  66. +
  67. +#define NANDC_MAX_CHIPS 2 /* Only 2 CSn supported in NorthStar */
  68. +
  69. +/*
  70. + * Driver private control structure
  71. + */
struct bcmnand_ctrl {
	struct mtd_info mtd;		/* MTD device handle */
	struct nand_chip nand;		/* generic NAND chip state */
	struct bcma_device *core;	/* bcma core used for all register access */

	struct completion op_completion; /* completed from the ISR when an op finishes */

	struct nand_ecclayout ecclayout; /* built at run time by bcmnand_hw_ecc_layout() */
	int cmd_ret; /* saved error code */
	unsigned char oob_index;	/* NOTE(review): looks like a cursor into spare-area data - confirm */
	unsigned char id_byte_index;	/* NOTE(review): looks like a cursor into device-ID bytes - confirm */
	unsigned char chip_num;		/* presumably the selected chip select (0..NANDC_MAX_CHIPS-1) - verify */
	unsigned char last_cmd;		/* presumably the last command opcode issued - verify */
	unsigned char ecc_level;	/* HW ECC level (see bcmnand_ecc_sizes[]) */
	unsigned char sector_size_shift; /* log2 of ECC sector size: 9 (512B) or 10 (1KB) */
	unsigned char sec_per_page_shift; /* log2 of ECC sectors per flash page */
};
  89. +
  90. +
  91. +/*
  92. + * IRQ numbers - offset from first irq in nandc_irq resource
  93. + */
  94. +#define NANDC_IRQ_RD_MISS 0
  95. +#define NANDC_IRQ_ERASE_COMPLETE 1
  96. +#define NANDC_IRQ_COPYBACK_COMPLETE 2
  97. +#define NANDC_IRQ_PROGRAM_COMPLETE 3
  98. +#define NANDC_IRQ_CONTROLLER_RDY 4
  99. +#define NANDC_IRQ_RDBSY_RDY 5
  100. +#define NANDC_IRQ_ECC_UNCORRECTABLE 6
  101. +#define NANDC_IRQ_ECC_CORRECTABLE 7
  102. +#define NANDC_IRQ_NUM 8
  103. +
/* Descriptor for one bit-field within a controller register (<reg_base> space) */
struct bcmnand_reg_field {
	unsigned int reg;	/* register offset from the core's register base */
	unsigned int pos;	/* bit position of the field's least-significant bit */
	unsigned int width;	/* field width in bits (1..32) */
};
  109. +
  110. +/*
  111. + * REGISTERS
  112. + *
  113. + * Individual bit-fields of registers are specified here
  114. + * for clarity, and the rest of the code will access each field
  115. + * as if it was its own register.
  116. + *
  117. + * Following registers are off <reg_base>:
  118. + */
  119. +#define REG_BIT_FIELD(r, p, w) ((struct bcmnand_reg_field){(r), (p), (w)})
  120. +
  121. +#define NANDC_8KB_PAGE_SUPPORT REG_BIT_FIELD(0x0, 31, 1)
  122. +#define NANDC_REV_MAJOR REG_BIT_FIELD(0x0, 8, 8)
  123. +#define NANDC_REV_MINOR REG_BIT_FIELD(0x0, 0, 8)
  124. +
  125. +#define NANDC_CMD_START_OPCODE REG_BIT_FIELD(0x4, 24, 5)
  126. +
  127. +#define NANDC_CMD_CS_SEL REG_BIT_FIELD(0x8, 16, 3)
  128. +#define NANDC_CMD_EXT_ADDR REG_BIT_FIELD(0x8, 0, 16)
  129. +
  130. +#define NANDC_CMD_ADDRESS REG_BIT_FIELD(0xc, 0, 32)
  131. +#define NANDC_CMD_END_ADDRESS REG_BIT_FIELD(0x10, 0, 32)
  132. +
  133. +#define NANDC_INT_STATUS REG_BIT_FIELD(0x14, 0, 32)
  134. +#define NANDC_INT_STAT_CTLR_RDY REG_BIT_FIELD(0x14, 31, 1)
  135. +#define NANDC_INT_STAT_FLASH_RDY REG_BIT_FIELD(0x14, 30, 1)
  136. +#define NANDC_INT_STAT_CACHE_VALID REG_BIT_FIELD(0x14, 29, 1)
  137. +#define NANDC_INT_STAT_SPARE_VALID REG_BIT_FIELD(0x14, 28, 1)
  138. +#define NANDC_INT_STAT_ERASED REG_BIT_FIELD(0x14, 27, 1)
  139. +#define NANDC_INT_STAT_PLANE_RDY REG_BIT_FIELD(0x14, 26, 1)
  140. +#define NANDC_INT_STAT_FLASH_STATUS REG_BIT_FIELD(0x14, 0, 8)
  141. +
  142. +#define NANDC_CS_LOCK REG_BIT_FIELD(0x18, 31, 1)
  143. +#define NANDC_CS_AUTO_CONFIG REG_BIT_FIELD(0x18, 30, 1)
  144. +#define NANDC_CS_NAND_WP REG_BIT_FIELD(0x18, 29, 1)
  145. +#define NANDC_CS_BLK0_WP REG_BIT_FIELD(0x18, 28, 1)
  146. +#define NANDC_CS_SW_USING_CS(n) REG_BIT_FIELD(0x18, 8+(n), 1)
  147. +#define NANDC_CS_MAP_SEL_CS(n) REG_BIT_FIELD(0x18, 0+(n), 1)
  148. +
  149. +#define NANDC_XOR_ADDR_BLK0_ONLY REG_BIT_FIELD(0x1c, 31, 1)
  150. +#define NANDC_XOR_ADDR_CS(n) REG_BIT_FIELD(0x1c, 0+(n), 1)
  151. +
  152. +#define NANDC_LL_OP_RET_IDLE REG_BIT_FIELD(0x20, 31, 1)
  153. +#define NANDC_LL_OP_CLE REG_BIT_FIELD(0x20, 19, 1)
  154. +#define NANDC_LL_OP_ALE REG_BIT_FIELD(0x20, 18, 1)
  155. +#define NANDC_LL_OP_WE REG_BIT_FIELD(0x20, 17, 1)
  156. +#define NANDC_LL_OP_RE REG_BIT_FIELD(0x20, 16, 1)
  157. +#define NANDC_LL_OP_DATA REG_BIT_FIELD(0x20, 0, 16)
  158. +
  159. +#define NANDC_MPLANE_ADDR_EXT REG_BIT_FIELD(0x24, 0, 16)
  160. +#define NANDC_MPLANE_ADDR REG_BIT_FIELD(0x28, 0, 32)
  161. +
  162. +#define NANDC_ACC_CTRL_CS(n) REG_BIT_FIELD(0x50+((n)<<4), 0, 32)
  163. +#define NANDC_ACC_CTRL_RD_ECC(n) REG_BIT_FIELD(0x50+((n)<<4), 31, 1)
  164. +#define NANDC_ACC_CTRL_WR_ECC(n) REG_BIT_FIELD(0x50+((n)<<4), 30, 1)
  165. +#define NANDC_ACC_CTRL_CE_CARE(n) REG_BIT_FIELD(0x50+((n)<<4), 29, 1)
  166. +#define NANDC_ACC_CTRL_PGM_RDIN(n) REG_BIT_FIELD(0x50+((n)<<4), 28, 1)
  167. +#define NANDC_ACC_CTRL_ERA_ECC_ERR(n) REG_BIT_FIELD(0x50+((n)<<4), 27, 1)
  168. +#define NANDC_ACC_CTRL_PGM_PARTIAL(n) REG_BIT_FIELD(0x50+((n)<<4), 26, 1)
  169. +#define NANDC_ACC_CTRL_WR_PREEMPT(n) REG_BIT_FIELD(0x50+((n)<<4), 25, 1)
  170. +#define NANDC_ACC_CTRL_PG_HIT(n) REG_BIT_FIELD(0x50+((n)<<4), 24, 1)
  171. +#define NANDC_ACC_CTRL_PREFETCH(n) REG_BIT_FIELD(0x50+((n)<<4), 23, 1)
  172. +#define NANDC_ACC_CTRL_CACHE_MODE(n) REG_BIT_FIELD(0x50+((n)<<4), 22, 1)
  173. +#define NANDC_ACC_CTRL_CACHE_LASTPG(n) REG_BIT_FIELD(0x50+((n)<<4), 21, 1)
  174. +#define NANDC_ACC_CTRL_ECC_LEVEL(n) REG_BIT_FIELD(0x50+((n)<<4), 16, 5)
  175. +#define NANDC_ACC_CTRL_SECTOR_1K(n) REG_BIT_FIELD(0x50+((n)<<4), 7, 1)
  176. +#define NANDC_ACC_CTRL_SPARE_SIZE(n) REG_BIT_FIELD(0x50+((n)<<4), 0, 7)
  177. +
  178. +#define NANDC_CONFIG_CS(n) REG_BIT_FIELD(0x54+((n)<<4), 0, 32)
  179. +#define NANDC_CONFIG_LOCK(n) REG_BIT_FIELD(0x54+((n)<<4), 31, 1)
  180. +#define NANDC_CONFIG_BLK_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 28, 3)
  181. +#define NANDC_CONFIG_CHIP_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 24, 4)
  182. +#define NANDC_CONFIG_CHIP_WIDTH(n) REG_BIT_FIELD(0x54+((n)<<4), 23, 1)
  183. +#define NANDC_CONFIG_PAGE_SIZE(n) REG_BIT_FIELD(0x54+((n)<<4), 20, 2)
  184. +#define NANDC_CONFIG_FUL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 16, 3)
  185. +#define NANDC_CONFIG_COL_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 12, 3)
  186. +#define NANDC_CONFIG_BLK_ADDR_BYTES(n) REG_BIT_FIELD(0x54+((n)<<4), 8, 3)
  187. +
  188. +#define NANDC_TIMING_1_CS(n) REG_BIT_FIELD(0x58+((n)<<4), 0, 32)
  189. +#define NANDC_TIMING_2_CS(n) REG_BIT_FIELD(0x5c+((n)<<4), 0, 32)
  190. + /* Individual bits for Timing registers - TBD */
  191. +
  192. +#define NANDC_CORR_STAT_THRESH_CS(n) REG_BIT_FIELD(0xc0, 6*(n), 6)
  193. +
  194. +#define NANDC_BLK_WP_END_ADDR REG_BIT_FIELD(0xc8, 0, 32)
  195. +
  196. +#define NANDC_MPLANE_ERASE_CYC2_OPCODE REG_BIT_FIELD(0xcc, 24, 8)
  197. +#define NANDC_MPLANE_READ_STAT_OPCODE REG_BIT_FIELD(0xcc, 16, 8)
  198. +#define NANDC_MPLANE_PROG_ODD_OPCODE REG_BIT_FIELD(0xcc, 8, 8)
  199. +#define NANDC_MPLANE_PROG_TRL_OPCODE REG_BIT_FIELD(0xcc, 0, 8)
  200. +
  201. +#define NANDC_MPLANE_PGCACHE_TRL_OPCODE REG_BIT_FIELD(0xd0, 24, 8)
  202. +#define NANDC_MPLANE_READ_STAT2_OPCODE REG_BIT_FIELD(0xd0, 16, 8)
  203. +#define NANDC_MPLANE_READ_EVEN_OPCODE REG_BIT_FIELD(0xd0, 8, 8)
  204. +#define NANDC_MPLANE_READ_ODD__OPCODE REG_BIT_FIELD(0xd0, 0, 8)
  205. +
  206. +#define NANDC_MPLANE_CTRL_ERASE_CYC2_EN REG_BIT_FIELD(0xd4, 31, 1)
  207. +#define NANDC_MPLANE_CTRL_RD_ADDR_SIZE REG_BIT_FIELD(0xd4, 30, 1)
  208. +#define NANDC_MPLANE_CTRL_RD_CYC_ADDR REG_BIT_FIELD(0xd4, 29, 1)
  209. +#define NANDC_MPLANE_CTRL_RD_COL_ADDR REG_BIT_FIELD(0xd4, 28, 1)
  210. +
  211. +#define NANDC_UNCORR_ERR_COUNT REG_BIT_FIELD(0xfc, 0, 32)
  212. +
  213. +#define NANDC_CORR_ERR_COUNT REG_BIT_FIELD(0x100, 0, 32)
  214. +
  215. +#define NANDC_READ_CORR_BIT_COUNT REG_BIT_FIELD(0x104, 0, 32)
  216. +
  217. +#define NANDC_BLOCK_LOCK_STATUS REG_BIT_FIELD(0x108, 0, 8)
  218. +
  219. +#define NANDC_ECC_CORR_ADDR_CS REG_BIT_FIELD(0x10c, 16, 3)
  220. +#define NANDC_ECC_CORR_ADDR_EXT REG_BIT_FIELD(0x10c, 0, 16)
  221. +
  222. +#define NANDC_ECC_CORR_ADDR REG_BIT_FIELD(0x110, 0, 32)
  223. +
  224. +#define NANDC_ECC_UNC_ADDR_CS REG_BIT_FIELD(0x114, 16, 3)
  225. +#define NANDC_ECC_UNC_ADDR_EXT REG_BIT_FIELD(0x114, 0, 16)
  226. +
  227. +#define NANDC_ECC_UNC_ADDR REG_BIT_FIELD(0x118, 0, 32)
  228. +
  229. +#define NANDC_READ_ADDR_CS REG_BIT_FIELD(0x11c, 16, 3)
  230. +#define NANDC_READ_ADDR_EXT REG_BIT_FIELD(0x11c, 0, 16)
  231. +#define NANDC_READ_ADDR REG_BIT_FIELD(0x120, 0, 32)
  232. +
  233. +#define NANDC_PROG_ADDR_CS REG_BIT_FIELD(0x124, 16, 3)
  234. +#define NANDC_PROG_ADDR_EXT REG_BIT_FIELD(0x124, 0, 16)
  235. +#define NANDC_PROG_ADDR REG_BIT_FIELD(0x128, 0, 32)
  236. +
  237. +#define NANDC_CPYBK_ADDR_CS REG_BIT_FIELD(0x12c, 16, 3)
  238. +#define NANDC_CPYBK_ADDR_EXT REG_BIT_FIELD(0x12c, 0, 16)
  239. +#define NANDC_CPYBK_ADDR REG_BIT_FIELD(0x130, 0, 32)
  240. +
  241. +#define NANDC_ERASE_ADDR_CS REG_BIT_FIELD(0x134, 16, 3)
  242. +#define NANDC_ERASE_ADDR_EXT REG_BIT_FIELD(0x134, 0, 16)
  243. +#define NANDC_ERASE_ADDR REG_BIT_FIELD(0x138, 0, 32)
  244. +
  245. +#define NANDC_INV_READ_ADDR_CS REG_BIT_FIELD(0x13c, 16, 3)
  246. +#define NANDC_INV_READ_ADDR_EXT REG_BIT_FIELD(0x13c, 0, 16)
  247. +#define NANDC_INV_READ_ADDR REG_BIT_FIELD(0x140, 0, 32)
  248. +
  249. +#define NANDC_INIT_STAT REG_BIT_FIELD(0x144, 0, 32)
  250. +#define NANDC_INIT_ONFI_DONE REG_BIT_FIELD(0x144, 31, 1)
  251. +#define NANDC_INIT_DEVID_DONE REG_BIT_FIELD(0x144, 30, 1)
  252. +#define NANDC_INIT_SUCCESS REG_BIT_FIELD(0x144, 29, 1)
  253. +#define NANDC_INIT_FAIL REG_BIT_FIELD(0x144, 28, 1)
  254. +#define NANDC_INIT_BLANK REG_BIT_FIELD(0x144, 27, 1)
  255. +#define NANDC_INIT_TIMEOUT REG_BIT_FIELD(0x144, 26, 1)
  256. +#define NANDC_INIT_UNC_ERROR REG_BIT_FIELD(0x144, 25, 1)
  257. +#define NANDC_INIT_CORR_ERROR REG_BIT_FIELD(0x144, 24, 1)
  258. +#define NANDC_INIT_PARAM_RDY REG_BIT_FIELD(0x144, 23, 1)
  259. +#define NANDC_INIT_AUTH_FAIL REG_BIT_FIELD(0x144, 22, 1)
  260. +
  261. +#define NANDC_ONFI_STAT REG_BIT_FIELD(0x148, 0, 32)
  262. +#define NANDC_ONFI_DEBUG REG_BIT_FIELD(0x148, 28, 4)
  263. +#define NANDC_ONFI_PRESENT REG_BIT_FIELD(0x148, 27, 1)
  264. +#define NANDC_ONFI_BADID_PG2 REG_BIT_FIELD(0x148, 5, 1)
  265. +#define NANDC_ONFI_BADID_PG1 REG_BIT_FIELD(0x148, 4, 1)
  266. +#define NANDC_ONFI_BADID_PG0 REG_BIT_FIELD(0x148, 3, 1)
  267. +#define NANDC_ONFI_BADCRC_PG2 REG_BIT_FIELD(0x148, 2, 1)
  268. +#define NANDC_ONFI_BADCRC_PG1 REG_BIT_FIELD(0x148, 1, 1)
  269. +#define NANDC_ONFI_BADCRC_PG0 REG_BIT_FIELD(0x148, 0, 1)
  270. +
  271. +#define NANDC_ONFI_DEBUG_DATA REG_BIT_FIELD(0x14c, 0, 32)
  272. +
  273. +#define NANDC_SEMAPHORE REG_BIT_FIELD(0x150, 0, 8)
  274. +
  275. +#define NANDC_DEVID_BYTE(b) REG_BIT_FIELD(0x194+((b)&0x4), \
  276. + 24-(((b)&3)<<3), 8)
  277. +
  278. +#define NANDC_LL_RDDATA REG_BIT_FIELD(0x19c, 0, 16)
  279. +
  280. +#define NANDC_INT_N_REG(n) REG_BIT_FIELD(0xf00|((n)<<2), 0, 1)
  281. +#define NANDC_INT_DIREC_READ_MISS REG_BIT_FIELD(0xf00, 0, 1)
  282. +#define NANDC_INT_ERASE_DONE REG_BIT_FIELD(0xf04, 0, 1)
  283. +#define NANDC_INT_CPYBK_DONE REG_BIT_FIELD(0xf08, 0, 1)
  284. +#define NANDC_INT_PROGRAM_DONE REG_BIT_FIELD(0xf0c, 0, 1)
  285. +#define NANDC_INT_CONTROLLER_RDY REG_BIT_FIELD(0xf10, 0, 1)
  286. +#define NANDC_INT_RDBSY_RDY REG_BIT_FIELD(0xf14, 0, 1)
  287. +#define NANDC_INT_ECC_UNCORRECTABLE REG_BIT_FIELD(0xf18, 0, 1)
  288. +#define NANDC_INT_ECC_CORRECTABLE REG_BIT_FIELD(0xf1c, 0, 1)
  289. +
  290. +/*
  291. + * Following registers are treated as contiguous IO memory, offset is from
  292. + * <reg_base>, and the data is in big-endian byte order
  293. + */
  294. +#define NANDC_SPARE_AREA_READ_OFF 0x200
  295. +#define NANDC_SPARE_AREA_WRITE_OFF 0x280
  296. +#define NANDC_CACHE_OFF 0x400
  297. +#define NANDC_CACHE_SIZE (128*4)
  298. +
/*
 * Descriptor for one bit-field within an IDM ("agent"/slave-wrapper)
 * register, accessed via bcma_aread32()/bcma_awrite32() rather than the
 * core register space.  Layout mirrors struct bcmnand_reg_field.
 */
struct bcmnand_areg_field {
	unsigned int reg;	/* register offset from the IDM base */
	unsigned int pos;	/* bit position of the field's least-significant bit */
	unsigned int width;	/* field width in bits (1..32) */
};
  304. +
  305. +/*
  306. + * Following are IDM (a.k.a. Slave Wrapper) registers are off <idm_base>:
  307. + */
  308. +#define IDMREG_BIT_FIELD(r, p, w) ((struct bcmnand_areg_field){(r), (p), (w)})
  309. +
  310. +#define NANDC_IDM_AXI_BIG_ENDIAN IDMREG_BIT_FIELD(0x408, 28, 1)
  311. +#define NANDC_IDM_APB_LITTLE_ENDIAN IDMREG_BIT_FIELD(0x408, 24, 1)
  312. +#define NANDC_IDM_TM IDMREG_BIT_FIELD(0x408, 16, 5)
  313. +#define NANDC_IDM_IRQ_CORRECABLE_EN IDMREG_BIT_FIELD(0x408, 9, 1)
  314. +#define NANDC_IDM_IRQ_UNCORRECABLE_EN IDMREG_BIT_FIELD(0x408, 8, 1)
  315. +#define NANDC_IDM_IRQ_RDYBSY_RDY_EN IDMREG_BIT_FIELD(0x408, 7, 1)
  316. +#define NANDC_IDM_IRQ_CONTROLLER_RDY_EN IDMREG_BIT_FIELD(0x408, 6, 1)
  317. +#define NANDC_IDM_IRQ_PRPOGRAM_COMP_EN IDMREG_BIT_FIELD(0x408, 5, 1)
  318. +#define NANDC_IDM_IRQ_COPYBK_COMP_EN IDMREG_BIT_FIELD(0x408, 4, 1)
  319. +#define NANDC_IDM_IRQ_ERASE_COMP_EN IDMREG_BIT_FIELD(0x408, 3, 1)
  320. +#define NANDC_IDM_IRQ_READ_MISS_EN IDMREG_BIT_FIELD(0x408, 2, 1)
  321. +#define NANDC_IDM_IRQ_N_EN(n) IDMREG_BIT_FIELD(0x408, 2+(n), 1)
  322. +
  323. +#define NANDC_IDM_CLOCK_EN IDMREG_BIT_FIELD(0x408, 0, 1)
  324. +
  325. +#define NANDC_IDM_IO_ECC_CORR IDMREG_BIT_FIELD(0x500, 3, 1)
  326. +#define NANDC_IDM_IO_ECC_UNCORR IDMREG_BIT_FIELD(0x500, 2, 1)
  327. +#define NANDC_IDM_IO_RDYBSY IDMREG_BIT_FIELD(0x500, 1, 1)
  328. +#define NANDC_IDM_IO_CTRL_RDY IDMREG_BIT_FIELD(0x500, 0, 1)
  329. +
  330. +#define NANDC_IDM_RESET IDMREG_BIT_FIELD(0x800, 0, 1)
  331. + /* Remaining IDM registers do not seem to be useful, skipped */
  332. +
  333. +/*
  334. + * NAND Controller has its own command opcodes
  335. + * different from opcodes sent to the actual flash chip
  336. + */
  337. +#define NANDC_CMD_OPCODE_NULL 0
  338. +#define NANDC_CMD_OPCODE_PAGE_READ 1
  339. +#define NANDC_CMD_OPCODE_SPARE_READ 2
  340. +#define NANDC_CMD_OPCODE_STATUS_READ 3
  341. +#define NANDC_CMD_OPCODE_PAGE_PROG 4
  342. +#define NANDC_CMD_OPCODE_SPARE_PROG 5
  343. +#define NANDC_CMD_OPCODE_DEVID_READ 7
  344. +#define NANDC_CMD_OPCODE_BLOCK_ERASE 8
  345. +#define NANDC_CMD_OPCODE_FLASH_RESET 9
  346. +
  347. +/*
  348. + * NAND Controller hardware ECC data size
  349. + *
  350. + * The following table contains the number of bytes needed for
  351. + * each of the ECC levels, per "sector", which is either 512 or 1024 bytes.
  352. + * The actual layout is as follows:
  353. + * The entire spare area is equally divided into as many sections as there
  354. + * are sectors per page, and the ECC data is located at the end of each
  355. + * of these sections.
  356. + * For example, given a 2K per page and 64 bytes spare device, configured for
  357. + * sector size 1k and ECC level of 4, the spare area will be divided into 2
  358. + * sections 32 bytes each, and the last 14 bytes of 32 in each section will
  359. + * be filled with ECC data.
  360. + * Note: the name of the algorithm and the number of error bits it can correct
  361. + * is of no consequence to this driver, therefore omitted.
  362. + */
/* One row of the HW-ECC geometry table below */
struct bcmnand_ecc_size_s {
	unsigned char sector_size_shift;	/* log2 of sector size: 9 (512B) or 10 (1KB) */
	unsigned char ecc_level;		/* controller ECC level setting */
	unsigned char ecc_bytes_per_sec;	/* ECC bytes consumed per sector */
	unsigned char reserved;			/* padding; zero-filled by the initializers below */
};

/*
 * ECC bytes needed per sector for every supported
 * (sector size, ECC level) combination.  Each initializer supplies only
 * the first three members; "reserved" is implicitly zero.
 */
static const struct bcmnand_ecc_size_s bcmnand_ecc_sizes[] = {
	{ 9, 0, 0 },
	{ 10, 0, 0 },
	{ 9, 1, 2 },
	{ 10, 1, 4 },
	{ 9, 2, 4 },
	{ 10, 2, 7 },
	{ 9, 3, 6 },
	{ 10, 3, 11 },
	{ 9, 4, 7 },
	{ 10, 4, 14 },
	{ 9, 5, 9 },
	{ 10, 5, 18 },
	{ 9, 6, 11 },
	{ 10, 6, 21 },
	{ 9, 7, 13 },
	{ 10, 7, 25 },
	{ 9, 8, 14 },
	{ 10, 8, 28 },

	{ 9, 9, 16 },
	{ 9, 10, 18 },
	{ 9, 11, 20 },
	{ 9, 12, 21 },

	{ 10, 9, 32 },
	{ 10, 10, 35 },
	{ 10, 11, 39 },
	{ 10, 12, 42 },
};
  400. +
  401. +/*
  402. + * Populate the various fields that depend on how
  403. + * the hardware ECC data is located in the spare area
  404. + *
  405. + * For this controller, it is easier to fill-in these
  406. + * structures at run time.
  407. + *
  408. + * The bad-block marker is assumed to occupy one byte
  409. + * at chip->badblockpos, which must be in the first
  410. + * sector of the spare area, namely it is either
  411. + * at offset 0 or 5.
  412. + * Some chips use both for manufacturer's bad block
  413. + * markers, but we ignore that issue here, and assume only
  414. + * one byte is used as bad-block marker always.
  415. + */
  416. +static int bcmnand_hw_ecc_layout(struct bcmnand_ctrl *ctrl)
  417. +{
  418. + struct nand_ecclayout *layout;
  419. + struct device *dev = &ctrl->core->dev;
  420. + unsigned int i, j, k;
  421. + unsigned int ecc_per_sec, oob_per_sec;
  422. + unsigned int bbm_pos = ctrl->nand.badblockpos;
  423. +
  424. + /* Caclculate spare area per sector size */
  425. + oob_per_sec = ctrl->mtd.oobsize >> ctrl->sec_per_page_shift;
  426. +
  427. + /* Try to calculate the amount of ECC bytes per sector with a formula */
  428. + if (ctrl->sector_size_shift == 9)
  429. + ecc_per_sec = ((ctrl->ecc_level * 14) + 7) >> 3;
  430. + else if (ctrl->sector_size_shift == 10)
  431. + ecc_per_sec = ((ctrl->ecc_level * 14) + 3) >> 2;
  432. + else
  433. + ecc_per_sec = oob_per_sec + 1; /* cause an error if not in table */
  434. +
  435. + /* Now find out the answer according to the table */
  436. + for (i = 0; i < ARRAY_SIZE(bcmnand_ecc_sizes); i++) {
  437. + if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
  438. + bcmnand_ecc_sizes[i].sector_size_shift ==
  439. + ctrl->sector_size_shift) {
  440. + break;
  441. + }
  442. + }
  443. +
  444. + /* Table match overrides formula */
  445. + if (bcmnand_ecc_sizes[i].ecc_level == ctrl->ecc_level &&
  446. + bcmnand_ecc_sizes[i].sector_size_shift == ctrl->sector_size_shift)
  447. + ecc_per_sec = bcmnand_ecc_sizes[i].ecc_bytes_per_sec;
  448. +
  449. + /* Return an error if calculated ECC leaves no room for OOB */
  450. + if ((ctrl->sec_per_page_shift != 0 && ecc_per_sec >= oob_per_sec) ||
  451. + (ctrl->sec_per_page_shift == 0 && ecc_per_sec >= (oob_per_sec - 1))) {
  452. + dev_err(dev, "ECC level %d too high, leaves no room for OOB data\n",
  453. + ctrl->ecc_level);
  454. + return -EINVAL;
  455. + }
  456. +
  457. + /* Fill in the needed fields */
  458. + ctrl->nand.ecc.size = ctrl->mtd.writesize >> ctrl->sec_per_page_shift;
  459. + ctrl->nand.ecc.bytes = ecc_per_sec;
  460. + ctrl->nand.ecc.steps = 1 << ctrl->sec_per_page_shift;
  461. + ctrl->nand.ecc.total = ecc_per_sec << ctrl->sec_per_page_shift;
  462. + ctrl->nand.ecc.strength = ctrl->ecc_level;
  463. +
  464. + /* Build an ecc layout data structure */
  465. + layout = &ctrl->ecclayout;
  466. + memset(layout, 0, sizeof(*layout));
  467. +
  468. + /* Total number of bytes used by HW ECC */
  469. + layout->eccbytes = ecc_per_sec << ctrl->sec_per_page_shift;
  470. +
  471. + /* Location for each of the HW ECC bytes */
  472. + for (i = j = 0, k = 1;
  473. + i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes;
  474. + i++, j++) {
  475. + /* switch sector # */
  476. + if (j == ecc_per_sec) {
  477. + j = 0;
  478. + k++;
  479. + }
  480. + /* save position of each HW-generated ECC byte */
  481. + layout->eccpos[i] = (oob_per_sec * k) - ecc_per_sec + j;
  482. +
  483. + /* Check that HW ECC does not overlap bad-block marker */
  484. + if (bbm_pos == layout->eccpos[i]) {
  485. + dev_err(dev, "ECC level %d too high, HW ECC collides with bad-block marker position\n",
  486. + ctrl->ecc_level);
  487. + return -EINVAL;
  488. + }
  489. + }
  490. +
  491. + /* Location of all user-available OOB byte-ranges */
  492. + for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++) {
  493. + struct nand_oobfree *oobfree = &layout->oobfree[i];
  494. +
  495. + if (i >= (1 << ctrl->sec_per_page_shift))
  496. + break;
  497. + oobfree->offset = oob_per_sec * i;
  498. + oobfree->length = oob_per_sec - ecc_per_sec;
  499. +
  500. + /* Bad-block marker must be in the first sector spare area */
  501. + if (WARN_ON(bbm_pos >= (oobfree->offset + oobfree->length)))
  502. + return -EINVAL;
  503. +
  504. + if (i != 0)
  505. + continue;
  506. +
  507. + /* Remove bad-block marker from available byte range */
  508. + if (bbm_pos == oobfree->offset) {
  509. + oobfree->offset += 1;
  510. + oobfree->length -= 1;
  511. + } else if (bbm_pos == (oobfree->offset + oobfree->length - 1)) {
  512. + oobfree->length -= 1;
  513. + } else {
  514. + layout->oobfree[i + 1].offset = bbm_pos + 1;
  515. + layout->oobfree[i + 1].length =
  516. + oobfree->length - bbm_pos - 1;
  517. + oobfree->length = bbm_pos;
  518. + i++;
  519. + }
  520. + }
  521. +
  522. + layout->oobavail = ((oob_per_sec - ecc_per_sec)
  523. + << ctrl->sec_per_page_shift) - 1;
  524. +
  525. + ctrl->mtd.oobavail = layout->oobavail;
  526. + ctrl->nand.ecc.layout = layout;
  527. +
  528. + /* Output layout for debugging */
  529. + dev_dbg(dev, "Spare area=%d eccbytes %d, ecc bytes located at:\n",
  530. + ctrl->mtd.oobsize, layout->eccbytes);
  531. + for (i = j = 0;
  532. + i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes; i++)
  533. + pr_debug(" %d", layout->eccpos[i]);
  534. + pr_debug("\n");
  535. +
  536. + dev_dbg(dev, "Available %d bytes at (off,len):\n", layout->oobavail);
  537. + for (i = 0; i < ARRAY_SIZE(layout->oobfree); i++)
  538. + pr_debug("(%d,%d) ", layout->oobfree[i].offset,
  539. + layout->oobfree[i].length);
  540. + pr_debug("\n");
  541. +
  542. + return 0;
  543. +}
  544. +
  545. +/*
  546. + * Register bit-field manipulation routines
  547. + */
  548. +
  549. +static inline unsigned int bcmnand_reg_read(struct bcmnand_ctrl *ctrl,
  550. + struct bcmnand_reg_field rbf)
  551. +{
  552. + u32 val;
  553. +
  554. + val = bcma_read32(ctrl->core, rbf.reg);
  555. + val >>= rbf.pos;
  556. + val &= (1 << rbf.width) - 1;
  557. +
  558. + return val;
  559. +}
  560. +
  561. +static inline void bcmnand_reg_write(struct bcmnand_ctrl *ctrl,
  562. + struct bcmnand_reg_field rbf,
  563. + unsigned newval)
  564. +{
  565. + u32 val, msk;
  566. +
  567. + msk = (1 << rbf.width) - 1;
  568. + msk <<= rbf.pos;
  569. + newval <<= rbf.pos;
  570. + newval &= msk;
  571. +
  572. + val = bcma_read32(ctrl->core, rbf.reg);
  573. + val &= ~msk;
  574. + val |= newval;
  575. + bcma_write32(ctrl->core, rbf.reg, val);
  576. +}
  577. +
  578. +static inline unsigned int bcmnand_reg_aread(struct bcmnand_ctrl *ctrl,
  579. + struct bcmnand_areg_field rbf)
  580. +{
  581. + u32 val;
  582. +
  583. + val = bcma_aread32(ctrl->core, rbf.reg);
  584. + val >>= rbf.pos;
  585. + val &= (1 << rbf.width) - 1;
  586. +
  587. + return val;
  588. +}
  589. +
  590. +static inline void bcmnand_reg_awrite(struct bcmnand_ctrl *ctrl,
  591. + struct bcmnand_areg_field rbf,
  592. + unsigned int newval)
  593. +{
  594. + u32 val, msk;
  595. +
  596. + msk = (1 << rbf.width) - 1;
  597. + msk <<= rbf.pos;
  598. + newval <<= rbf.pos;
  599. + newval &= msk;
  600. +
  601. + val = bcma_aread32(ctrl->core, rbf.reg);
  602. + val &= ~msk;
  603. + val |= newval;
  604. + bcma_awrite32(ctrl->core, rbf.reg, val);
  605. +}
  606. +
  607. +/*
  608. + * NAND Interface - dev_ready
  609. + *
  610. + * Return 1 iff device is ready, 0 otherwise
  611. + */
  612. +static int bcmnand_dev_ready(struct mtd_info *mtd)
  613. +{
  614. + struct nand_chip *chip = mtd->priv;
  615. + struct bcmnand_ctrl *ctrl = chip->priv;
  616. +
  617. + return bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY);
  618. +}
  619. +
  620. +/*
  621. + * Interrupt service routines
  622. + */
  623. +static irqreturn_t bcmnand_isr(int irq, void *dev_id)
  624. +{
  625. + struct bcmnand_ctrl *ctrl = dev_id;
  626. + int irq_off;
  627. +
  628. + irq_off = irq - ctrl->core->irq;
  629. + WARN_ON(irq_off < 0 || irq_off >= NANDC_IRQ_NUM);
  630. +
  631. + if (!bcmnand_reg_read(ctrl, NANDC_INT_N_REG(irq_off)))
  632. + return IRQ_NONE;
  633. +
  634. + /* Acknowledge interrupt */
  635. + bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
  636. +
  637. + /* Wake up task */
  638. + complete(&ctrl->op_completion);
  639. +
  640. + return IRQ_HANDLED;
  641. +}
  642. +
  643. +static int bcmnand_wait_interrupt(struct bcmnand_ctrl *ctrl,
  644. + unsigned int irq_off,
  645. + unsigned int timeout_usec)
  646. +{
  647. + long timeout_jiffies;
  648. + int ret = 0;
  649. +
  650. + reinit_completion(&ctrl->op_completion);
  651. +
  652. + /* Acknowledge interrupt */
  653. + bcmnand_reg_write(ctrl, NANDC_INT_N_REG(irq_off), 1);
  654. +
  655. + /* Enable IRQ to wait on */
  656. + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 1);
  657. +
  658. + timeout_jiffies = 1 + usecs_to_jiffies(timeout_usec);
  659. +
  660. + if (irq_off != NANDC_IRQ_CONTROLLER_RDY ||
  661. + 0 == bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY)) {
  662. +
  663. + timeout_jiffies = wait_for_completion_timeout(
  664. + &ctrl->op_completion, timeout_jiffies);
  665. +
  666. + if (timeout_jiffies < 0)
  667. + ret = timeout_jiffies;
  668. + if (timeout_jiffies == 0)
  669. + ret = -ETIME;
  670. + }
  671. +
  672. + /* Disable IRQ, we're done waiting */
  673. + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
  674. +
  675. + if (bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY))
  676. + ret = 0;
  677. +
  678. + return ret;
  679. +}
  680. +
  681. +/*
  682. + * wait for command completion
  683. + */
  684. +static int bcmnand_wait_cmd(struct bcmnand_ctrl *ctrl, unsigned int timeout_usec)
  685. +{
  686. + unsigned int retries;
  687. +
  688. + if (bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY))
  689. + return 0;
  690. +
  691. + /* If the timeout is long, wait for interrupt */
  692. + if (timeout_usec >= jiffies_to_usecs(1) >> 4)
  693. + return bcmnand_wait_interrupt(
  694. + ctrl, NANDC_IRQ_CONTROLLER_RDY, timeout_usec);
  695. +
  696. + /* Wait for completion of the prior command */
  697. + retries = (timeout_usec >> 3) + 1;
  698. +
  699. + while (retries-- &&
  700. + 0 == bcmnand_reg_read(ctrl, NANDC_INT_STAT_CTLR_RDY)) {
  701. + cpu_relax();
  702. + udelay(6);
  703. + }
  704. +
  705. + if (retries == 0)
  706. + return -ETIME;
  707. +
  708. + return 0;
  709. +}
  710. +
  711. +
712. +/*
713. + * NAND Interface - waitfunc
714. + */
715. +static int bcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
716. +{
717. + struct bcmnand_ctrl *ctrl = chip->priv;
718. + unsigned int to; /* timeout, usec */
719. + int ret;
720. +
721. + /* figure out timeout based on what command is on */
722. + switch (ctrl->last_cmd) {
723. + default: /* unknown commands get the longest (erase) timeout */
724. + case NAND_CMD_ERASE1:
725. + case NAND_CMD_ERASE2:
726. + to = 1 << 16;
727. + break;
728. + case NAND_CMD_STATUS:
729. + case NAND_CMD_RESET:
730. + to = 256;
731. + break;
732. + case NAND_CMD_READID:
733. + to = 1024;
734. + break;
735. + case NAND_CMD_READ1:
736. + case NAND_CMD_READ0:
737. + to = 2048;
738. + break;
739. + case NAND_CMD_PAGEPROG:
740. + to = 4096;
741. + break;
742. + case NAND_CMD_READOOB:
743. + to = 512;
744. + break;
745. + }
746. +
747. + /* deliver deferred error code if any */
748. + ret = ctrl->cmd_ret;
749. + if (ret < 0)
750. + ctrl->cmd_ret = 0; /* deferred error consumed; clear it */
751. + else
752. + ret = bcmnand_wait_cmd(ctrl, to);
753. +
754. + /* Timeout */
755. + if (ret < 0)
756. + return NAND_STATUS_FAIL;
757. +
758. + ret = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
759. +
760. + return ret; /* flash status byte as latched by the controller */
761. +}
  762. +
763. +/*
764. + * NAND Interface - read_oob
765. + */
766. +static int bcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
767. + int page)
768. +{
769. + struct bcmnand_ctrl *ctrl = chip->priv;
770. + unsigned int n = ctrl->chip_num;
771. + void __iomem *ctrl_spare;
772. + unsigned int spare_per_sec, sector;
773. + u64 nand_addr;
774. +
775. + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
776. +
777. + /* Set the page address for the following commands */
778. + nand_addr = ((u64)page << chip->page_shift);
779. + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
780. +
781. + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
782. +
783. + /* Disable ECC validation for spare area reads */
784. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), 0); /* page reads program RD_ECC again themselves */
785. +
786. + /* Loop all sectors in page */
787. + for (sector = 0; sector < (1<<ctrl->sec_per_page_shift); sector++) {
788. + unsigned int col;
789. +
790. + col = (sector << ctrl->sector_size_shift);
791. +
792. + /* Issue command to read partial page */
793. + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
794. +
795. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
796. + NANDC_CMD_OPCODE_SPARE_READ);
797. +
798. + /* Wait for the command to complete (usec; first sector waits longest) */
799. + if (bcmnand_wait_cmd(ctrl, (sector == 0) ? 10000 : 100))
800. + return -EIO;
801. +
802. + if (!bcmnand_reg_read(ctrl, NANDC_INT_STAT_SPARE_VALID))
803. + return -EIO;
804. +
805. + /* Set controller to Little Endian mode for copying */
806. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
807. +
808. + memcpy(chip->oob_poi + sector * spare_per_sec,
809. + ctrl_spare, spare_per_sec);
810. +
811. + /* Return to Big Endian mode for commands etc */
812. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
813. + }
814. +
815. + return 0;
816. +}
  817. +
818. +/*
819. + * NAND Interface - write_oob
820. + */
821. +static int bcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
822. + int page)
823. +{
824. + struct bcmnand_ctrl *ctrl = chip->priv;
825. + unsigned int n = ctrl->chip_num;
826. + void __iomem *ctrl_spare;
827. + unsigned int spare_per_sec, sector, num_sec;
828. + u64 nand_addr;
829. + int to, status = 0;
830. +
831. + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
832. +
833. + /* Disable ECC generation for spare area writes */
834. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), 0);
835. +
836. + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
837. +
838. + /* Set the page address for the following commands */
839. + nand_addr = ((u64)page << chip->page_shift);
840. + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
841. +
842. + /* Must allow partial programming to change spare area only */
843. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 1); /* restored after the loop */
844. +
845. + num_sec = 1 << ctrl->sec_per_page_shift;
846. + /* Loop all sectors in page */
847. + for (sector = 0; sector < num_sec; sector++) {
848. + unsigned int col;
849. +
850. + /* Spare area accessed by the data sector offset */
851. + col = (sector << ctrl->sector_size_shift);
852. +
853. + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr + col);
854. +
855. + /* Set controller to Little Endian mode for copying */
856. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
857. +
858. + memcpy(ctrl_spare, chip->oob_poi + sector * spare_per_sec,
859. + spare_per_sec);
860. +
861. + /* Return to Big Endian mode for commands etc */
862. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
863. +
864. + /* Push spare bytes into internal buffer, last goes to flash */
865. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
866. + NANDC_CMD_OPCODE_SPARE_PROG);
867. +
868. + if (sector == (num_sec - 1))
869. + to = 1 << 16; /* last sector commits to flash: wait longer */
870. + else
871. + to = 1 << 10;
872. +
873. + if (bcmnand_wait_cmd(ctrl, to))
874. + return -EIO;
875. + }
876. +
877. + /* Restore partial programming inhibition */
878. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 0);
879. +
880. + status = bcmnand_waitfunc(mtd, chip);
881. + return status & NAND_STATUS_FAIL ? -EIO : 0;
882. +}
  883. +
884. +/*
885. + * verify that a buffer is all erased (any trailing len % 4 bytes are ignored)
886. + */
887. +static bool bcmnand_buf_erased(const void *buf, unsigned int len)
888. +{
889. + unsigned int i;
890. + const u32 *p = buf;
891. +
892. + for (i = 0; i < (len >> 2); i++) {
893. + if (p[i] != 0xffffffff)
894. + return false;
895. + }
896. + return true;
897. +}
  898. +
  899. +/*
  900. + * read a page, with or without ECC checking
  901. + */
  902. +static int bcmnand_read_page_do(struct mtd_info *mtd, struct nand_chip *chip,
  903. + uint8_t *buf, int page, bool ecc)
  904. +{
  905. + struct bcmnand_ctrl *ctrl = chip->priv;
  906. + unsigned int n = ctrl->chip_num;
  907. + void __iomem *ctrl_cache;
  908. + void __iomem *ctrl_spare;
  909. + unsigned int data_bytes;
  910. + unsigned int spare_per_sec;
  911. + unsigned int sector, to = 1 << 16;
  912. + u32 err_soft_reg, err_hard_reg;
  913. + unsigned int hard_err_count = 0;
  914. + int ret;
  915. + u64 nand_addr;
  916. +
  917. + ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
  918. + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_READ_OFF;
  919. +
  920. + /* Reset ECC error stats */
  921. + err_hard_reg = bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT);
  922. + err_soft_reg = bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
  923. +
  924. + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
  925. +
  926. + /* Set the page address for the following commands */
  927. + nand_addr = ((u64)page << chip->page_shift);
  928. + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
  929. +
  930. + /* Enable ECC validation for ecc page reads */
  931. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_RD_ECC(n), ecc);
  932. +
  933. + /* Loop all sectors in page */
  934. + for (sector = 0; sector < (1 << ctrl->sec_per_page_shift); sector++) {
  935. + data_bytes = 0;
  936. +
  937. + /* Copy partial sectors sized by cache reg */
  938. + while (data_bytes < (1<<ctrl->sector_size_shift)) {
  939. + unsigned int col;
  940. +
  941. + col = data_bytes + (sector << ctrl->sector_size_shift);
  942. +
  943. + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
  944. + nand_addr + col);
  945. +
  946. + /* Issue command to read partial page */
  947. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
  948. + NANDC_CMD_OPCODE_PAGE_READ);
  949. +
  950. + /* Wait for the command to complete */
  951. + ret = bcmnand_wait_cmd(ctrl, to);
  952. + if (ret < 0)
  953. + return ret;
  954. +
  955. + /* Set controller to Little Endian mode for copying */
  956. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);
  957. +
  958. + if (data_bytes == 0) {
  959. + memcpy(chip->oob_poi + sector * spare_per_sec,
  960. + ctrl_spare, spare_per_sec);
  961. + }
  962. +
  963. + memcpy(buf + col, ctrl_cache, NANDC_CACHE_SIZE);
  964. + data_bytes += NANDC_CACHE_SIZE;
  965. +
  966. + /* Return to Big Endian mode for commands etc */
  967. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
  968. +
  969. + /* Next iterations should go fast */
  970. + to = 1 << 10;
  971. +
  972. + /* capture hard errors for each partial */
  973. + if (err_hard_reg != bcmnand_reg_read(ctrl, NANDC_UNCORR_ERR_COUNT)) {
  974. + int era = bcmnand_reg_read(ctrl, NANDC_INT_STAT_ERASED);
  975. +
  976. + if (!era &&
  977. + !bcmnand_buf_erased(buf + col, NANDC_CACHE_SIZE))
  978. + hard_err_count++;
  979. +
  980. + err_hard_reg = bcmnand_reg_read(ctrl,
  981. + NANDC_UNCORR_ERR_COUNT);
  982. + }
  983. + }
  984. + }
  985. +
  986. + if (!ecc)
  987. + return 0;
  988. +
  989. + /* Report hard ECC errors */
  990. + if (hard_err_count)
  991. + mtd->ecc_stats.failed++;
  992. +
  993. + /* Get ECC soft error stats */
  994. + mtd->ecc_stats.corrected += err_soft_reg -
  995. + bcmnand_reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT);
  996. +
  997. + return 0;
  998. +}
  999. +
1000. +/*
1001. + * NAND Interface - read_page_ecc
1002. + */
1003. +static int bcmnand_read_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1004. + uint8_t *buf, int oob_required, int page)
1005. +{
1006. + return bcmnand_read_page_do(mtd, chip, buf, page, true); /* oob_required ignored: OOB is always copied */
1007. +}
  1008. +
  1009. +/*
  1010. + * NAND Interface - read_page_raw
  1011. + */
  1012. +static int bcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  1013. + uint8_t *buf, int oob_required, int page)
  1014. +{
  1015. + return bcmnand_read_page_do(mtd, chip, buf, page, true);
  1016. +}
  1017. +
1018. +/*
1019. + * do page write, with or without ECC generation enabled
1020. + */
1021. +static int bcmnand_write_page_do(struct mtd_info *mtd, struct nand_chip *chip,
1022. + const uint8_t *buf, bool ecc)
1023. +{
1024. + struct bcmnand_ctrl *ctrl = chip->priv;
1025. + unsigned int n = ctrl->chip_num;
1026. + void __iomem *ctrl_cache;
1027. + void __iomem *ctrl_spare;
1028. + unsigned int spare_per_sec, sector, num_sec;
1029. + unsigned int data_bytes, spare_bytes;
1030. + int i, to;
1031. + uint8_t *tmp_poi;
1032. + u32 nand_addr; /* NOTE(review): 32-bit; EXT_ADDR not updated here - confirm for large devices */
1033. +
1034. + ctrl_cache = ctrl->core->io_addr + NANDC_CACHE_OFF;
1035. + ctrl_spare = ctrl->core->io_addr + NANDC_SPARE_AREA_WRITE_OFF;
1036. +
1037. + /* Get start-of-page address */
1038. + nand_addr = bcmnand_reg_read(ctrl, NANDC_CMD_ADDRESS);
1039. +
1040. + tmp_poi = kmalloc(mtd->oobsize, GFP_KERNEL);
1041. + if (!tmp_poi)
1042. + return -ENOMEM;
1043. +
1044. + /* Retrieve pre-existing OOB values */
1045. + memcpy(tmp_poi, chip->oob_poi, mtd->oobsize);
1046. + ctrl->cmd_ret = bcmnand_read_oob(mtd, chip,
1047. + nand_addr >> chip->page_shift);
1048. + if (ctrl->cmd_ret < 0) {
1049. + kfree(tmp_poi);
1050. + return ctrl->cmd_ret;
1051. + }
1052. +
1053. + /* Apply new OOB data bytes just like they would end up on the chip */
1054. + for (i = 0; i < mtd->oobsize; i++)
1055. + chip->oob_poi[i] &= tmp_poi[i]; /* NAND programming can only clear bits */
1056. + kfree(tmp_poi);
1057. +
1058. + spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;
1059. +
1060. + /* Enable ECC generation for ecc page write, if requested */
1061. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_WR_ECC(n), ecc);
1062. +
1063. + spare_bytes = 0;
1064. + num_sec = 1 << ctrl->sec_per_page_shift;
1065. +
1066. + /* Loop all sectors in page */
1067. + for (sector = 0; sector < num_sec; sector++) {
1068. + data_bytes = 0;
1069. +
1070. + /* Copy partial sectors sized by cache reg */
1071. + while (data_bytes < (1<<ctrl->sector_size_shift)) {
1072. + unsigned int col;
1073. +
1074. + col = data_bytes +
1075. + (sector << ctrl->sector_size_shift);
1076. +
1077. + /* Set address of 512-byte sub-page */
1078. + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS,
1079. + nand_addr + col);
1080. +
1081. + /* Set controller to Little Endian mode for copying */
1082. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN,
1083. + 1);
1084. +
1085. + /* Set spare area is written at each sector start */
1086. + if (data_bytes == 0) {
1087. + memcpy(ctrl_spare,
1088. + chip->oob_poi + spare_bytes,
1089. + spare_per_sec);
1090. + spare_bytes += spare_per_sec;
1091. + }
1092. +
1093. + /* Copy sub-page data */
1094. + memcpy(ctrl_cache, buf + col, NANDC_CACHE_SIZE);
1095. + data_bytes += NANDC_CACHE_SIZE;
1096. +
1097. + /* Return to Big Endian mode for commands etc */
1098. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
1099. +
1100. + /* Push data into internal cache */
1101. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
1102. + NANDC_CMD_OPCODE_PAGE_PROG);
1103. +
1104. + /* Wait for the command to complete */
1105. + if (sector == (num_sec - 1)) /* longest wait for the final program */
1106. + to = 1 << 16;
1107. + else
1108. + to = 1 << 10;
1109. + ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
1110. + if (ctrl->cmd_ret < 0)
1111. + return ctrl->cmd_ret;
1112. + }
1113. + }
1114. + return 0;
1115. +}
  1116. +
1117. +/*
1118. + * NAND Interface = write_page_ecc
1119. + */
1120. +static int bcmnand_write_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
1121. + const uint8_t *buf, int oob_required)
1122. +{
1123. + return bcmnand_write_page_do(mtd, chip, buf, true); /* with ECC generation */
1124. +}
  1125. +
1126. +/*
1127. + * NAND Interface = write_page_raw
1128. + */
1129. +static int bcmnand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1130. + const uint8_t *buf, int oob_required)
1131. +{
1132. + return bcmnand_write_page_do(mtd, chip, buf, false); /* ECC generation disabled */
1133. +}
  1134. +
  1135. +/*
  1136. + * MTD Interface - read_byte
  1137. + *
  1138. + * This function emulates simple controllers behavior
  1139. + * for just a few relevant commands
  1140. + */
  1141. +static uint8_t bcmnand_read_byte(struct mtd_info *mtd)
  1142. +{
  1143. + struct nand_chip *nand = mtd->priv;
  1144. + struct bcmnand_ctrl *ctrl = nand->priv;
  1145. + struct device *dev = &ctrl->core->dev;
  1146. + uint8_t b = ~0;
  1147. +
  1148. + switch (ctrl->last_cmd) {
  1149. + case NAND_CMD_READID:
  1150. + if (ctrl->id_byte_index < 8) {
  1151. + b = bcmnand_reg_read(ctrl, NANDC_DEVID_BYTE(
  1152. + ctrl->id_byte_index));
  1153. + ctrl->id_byte_index++;
  1154. + }
  1155. + break;
  1156. + case NAND_CMD_READOOB:
  1157. + if (ctrl->oob_index < mtd->oobsize)
  1158. + b = nand->oob_poi[ctrl->oob_index++];
  1159. + break;
  1160. + case NAND_CMD_STATUS:
  1161. + b = bcmnand_reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
  1162. + break;
  1163. + default:
  1164. + dev_err(dev, "got unkown command: 0x%x in read_byte\n",
  1165. + ctrl->last_cmd);
  1166. + }
  1167. + return b;
  1168. +}
  1169. +
1170. +/*
1171. + * MTD Interface - read_word
1172. + *
1173. + * Can not be tested without x16 chip, but the SoC does not support x16 i/f.
1174. + */
1175. +static u16 bcmnand_read_word(struct mtd_info *mtd)
1176. +{
1177. + u16 w = ~0;
1178. +
1179. + w = bcmnand_read_byte(mtd); /* low byte first (little-endian order) */
1180. + barrier();
1181. + w |= bcmnand_read_byte(mtd) << 8;
1182. +
1183. + return w;
1184. +}
  1185. +
1186. +/*
1187. + * MTD Interface - select a chip from an array
1188. + */
1189. +static void bcmnand_select_chip(struct mtd_info *mtd, int chip)
1190. +{
1191. + struct nand_chip *nand = mtd->priv;
1192. + struct bcmnand_ctrl *ctrl = nand->priv;
1193. +
1194. + ctrl->chip_num = chip;
1195. + bcmnand_reg_write(ctrl, NANDC_CMD_CS_SEL, chip); /* NOTE(review): chip may be -1 (deselect); written as-is - confirm */
1196. +}
  1197. +
  1198. +/*
  1199. + * NAND Interface - emulate low-level NAND commands
  1200. + *
  1201. + * Only a few low-level commands are really needed by generic NAND,
  1202. + * and they do not call for CMD_LL operations the controller can support.
  1203. + */
  1204. +static void bcmnand_cmdfunc(struct mtd_info *mtd, unsigned int command,
  1205. + int column, int page_addr)
  1206. +{
  1207. + struct nand_chip *nand = mtd->priv;
  1208. + struct bcmnand_ctrl *ctrl = nand->priv;
  1209. + struct device *dev = &ctrl->core->dev;
  1210. + u64 nand_addr;
  1211. + unsigned int to = 1;
  1212. +
  1213. + ctrl->last_cmd = command;
  1214. +
  1215. + /* Set address for some commands */
  1216. + switch (command) {
  1217. + case NAND_CMD_ERASE1:
  1218. + column = 0;
  1219. + /*FALLTHROUGH*/
  1220. + case NAND_CMD_SEQIN:
  1221. + case NAND_CMD_READ0:
  1222. + case NAND_CMD_READ1:
  1223. + WARN_ON(column >= mtd->writesize);
  1224. + nand_addr = (u64) column |
  1225. + ((u64)page_addr << nand->page_shift);
  1226. + bcmnand_reg_write(ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32);
  1227. + bcmnand_reg_write(ctrl, NANDC_CMD_ADDRESS, nand_addr);
  1228. + break;
  1229. + case NAND_CMD_ERASE2:
  1230. + case NAND_CMD_RESET:
  1231. + case NAND_CMD_READID:
  1232. + case NAND_CMD_READOOB:
  1233. + case NAND_CMD_PAGEPROG:
  1234. + default:
  1235. + /* Do nothing, address not used */
  1236. + break;
  1237. + }
  1238. +
  1239. + /* Issue appropriate command to controller */
  1240. + switch (command) {
  1241. + case NAND_CMD_SEQIN:
  1242. + /* Only need to load command address, done */
  1243. + return;
  1244. +
  1245. + case NAND_CMD_RESET:
  1246. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
  1247. + NANDC_CMD_OPCODE_FLASH_RESET);
  1248. + to = 1 << 8;
  1249. + break;
  1250. +
  1251. + case NAND_CMD_READID:
  1252. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
  1253. + NANDC_CMD_OPCODE_DEVID_READ);
  1254. + ctrl->id_byte_index = 0;
  1255. + to = 1 << 8;
  1256. + break;
  1257. +
  1258. + case NAND_CMD_READ0:
  1259. + case NAND_CMD_READ1:
  1260. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
  1261. + NANDC_CMD_OPCODE_PAGE_READ);
  1262. + to = 1 << 15;
  1263. + break;
  1264. + case NAND_CMD_STATUS:
  1265. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
  1266. + NANDC_CMD_OPCODE_STATUS_READ);
  1267. + to = 1 << 8;
  1268. + break;
  1269. + case NAND_CMD_ERASE1:
  1270. + return;
  1271. +
  1272. + case NAND_CMD_ERASE2:
  1273. + bcmnand_reg_write(ctrl, NANDC_CMD_START_OPCODE,
  1274. + NANDC_CMD_OPCODE_BLOCK_ERASE);
  1275. + to = 1 << 18;
  1276. + break;
  1277. +
  1278. + case NAND_CMD_PAGEPROG:
  1279. + /* Cmd already set from write_page */
  1280. + return;
  1281. +
  1282. + case NAND_CMD_READOOB:
  1283. + /* Emulate simple interface */
  1284. + bcmnand_read_oob(mtd, nand, page_addr);
  1285. + ctrl->oob_index = 0;
  1286. + return;
  1287. +
  1288. + default:
  1289. + dev_err(dev, "got unkown command: 0x%x in cmdfunc\n",
  1290. + ctrl->last_cmd);
  1291. + }
  1292. +
  1293. + /* Wait for command to complete */
  1294. + ctrl->cmd_ret = bcmnand_wait_cmd(ctrl, to);
  1295. +
  1296. +}
  1297. +
1298. +static int bcmnand_scan(struct mtd_info *mtd)
1299. +{
1300. + struct nand_chip *nand = mtd->priv;
1301. + struct bcmnand_ctrl *ctrl = nand->priv;
1302. + struct device *dev = &ctrl->core->dev;
1303. + bool sector_1k = false;
1304. + unsigned int chip_num = 0;
1305. + int ecc_level = 0;
1306. + int ret;
1307. +
1308. + ret = nand_scan_ident(mtd, NANDC_MAX_CHIPS, NULL);
1309. + if (ret)
1310. + return ret;
1311. +
1312. + /* Get configuration from first chip */
1313. + sector_1k = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_SECTOR_1K(0));
1314. + ecc_level = bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(0));
1315. + mtd->writesize_shift = nand->page_shift; /* needed for the geometry math below */
1316. +
1317. + ctrl->ecc_level = ecc_level;
1318. + ctrl->sector_size_shift = sector_1k ? 10 : 9;
1319. +
1320. + /* Configure spare area, tweak as needed */
1321. + do {
1322. + ctrl->sec_per_page_shift =
1323. + mtd->writesize_shift - ctrl->sector_size_shift;
1324. +
1325. + /* will return -EINVAL if OOB space exhausted */
1326. + ret = bcmnand_hw_ecc_layout(ctrl);
1327. +
1328. + /* First try to bump sector size to 1k, then decrease level */
1329. + if (ret && nand->page_shift > 9 && ctrl->sector_size_shift < 10)
1330. + ctrl->sector_size_shift = 10;
1331. + else if (ret)
1332. + ctrl->ecc_level--;
1333. +
1334. + } while (ret && ctrl->ecc_level > 0);
1335. +
1336. + if (WARN_ON(ctrl->ecc_level == 0))
1337. + return -ENOENT; /* no ECC layout fits the available OOB */
1338. +
1339. + if ((ctrl->sector_size_shift > 9) != (sector_1k == 1)) {
1340. + dev_info(dev, "sector size adjusted to 1k\n");
1341. + sector_1k = 1;
1342. + }
1343. +
1344. + if (ecc_level != ctrl->ecc_level) {
1345. + dev_info(dev, "ECC level adjusted from %u to %u\n",
1346. + ecc_level, ctrl->ecc_level);
1347. + ecc_level = ctrl->ecc_level;
1348. + }
1349. +
1350. + /* handle the hardware chip config registers */
1351. + for (chip_num = 0; chip_num < nand->numchips; chip_num++) {
1352. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_SECTOR_1K(chip_num),
1353. + sector_1k);
1354. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip_num),
1355. + ecc_level);
1356. +
1357. + /* Large pages: no partial page programming */
1358. + if (mtd->writesize > 512) {
1359. + bcmnand_reg_write(ctrl,
1360. + NANDC_ACC_CTRL_PGM_RDIN(chip_num), 0);
1361. + bcmnand_reg_write(ctrl,
1362. + NANDC_ACC_CTRL_PGM_PARTIAL(chip_num), 0);
1363. + }
1364. +
1365. + /* Do not raise ECC error when reading erased pages */
1366. + /* This bit has only partial effect, driver needs to help */
1367. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_ERA_ECC_ERR(chip_num),
1368. + 0);
1369. +
1370. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PG_HIT(chip_num), 0);
1371. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_PREFETCH(chip_num), 0);
1372. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_MODE(chip_num), 0);
1373. + bcmnand_reg_write(ctrl, NANDC_ACC_CTRL_CACHE_LASTPG(chip_num),
1374. + 0);
1375. +
1376. + /* TBD: consolidate or at least verify the s/w and h/w geometries agree */
1377. + }
1378. +
1379. + /* Allow writing on device */
1380. + if (!(nand->options & NAND_ROM))
1381. + bcmnand_reg_write(ctrl, NANDC_CS_NAND_WP, 0);
1382. +
1383. + dev_dbg(dev, "layout.oobavail=%d\n", nand->ecc.layout->oobavail);
1384. +
1385. + ret = nand_scan_tail(mtd); /* NOTE(review): error paths below can mask this ret - confirm intended */
1386. +
1387. + if (nand->badblockbits == 0)
1388. + nand->badblockbits = 8;
1389. + if (WARN_ON((1 << nand->page_shift) != mtd->writesize))
1390. + return -EIO;
1391. +
1392. + /* Spit out some key chip parameters as detected by nand_base */
1393. + dev_dbg(dev, "erasesize=%d writesize=%d oobsize=%d page_shift=%d badblockpos=%d badblockbits=%d\n",
1394. + mtd->erasesize, mtd->writesize, mtd->oobsize,
1395. + nand->page_shift, nand->badblockpos, nand->badblockbits);
1396. +
1397. + return ret;
1398. +}
  1399. +
1400. +/*
1401. + * main initialization function
1402. + */
1403. +static int bcmnand_ctrl_init(struct bcmnand_ctrl *ctrl)
1404. +{
1405. + unsigned int chip;
1406. + struct nand_chip *nand;
1407. + struct mtd_info *mtd;
1408. + struct device *dev = &ctrl->core->dev;
1409. + int ret;
1410. +
1411. + /* Software variables init */
1412. + nand = &ctrl->nand;
1413. + mtd = &ctrl->mtd;
1414. +
1415. + init_completion(&ctrl->op_completion);
1416. +
1417. + mtd->priv = nand;
1418. + mtd->owner = THIS_MODULE;
1419. + mtd->name = KBUILD_MODNAME;
1420. +
1421. + nand->priv = ctrl;
1422. +
1423. + nand->chip_delay = 5; /* not used */
1424. + nand->IO_ADDR_R = nand->IO_ADDR_W = (void *)~0L; /* NOTE(review): poison value, presumably never dereferenced - confirm */
1425. +
1426. + if (bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_WIDTH(0)))
1427. + nand->options |= NAND_BUSWIDTH_16;
1428. + nand->options |= NAND_SKIP_BBTSCAN; /* Dont need BBTs */
1429. +
1430. + nand->options |= NAND_NO_SUBPAGE_WRITE; /* Subpages unsupported */
1431. +
1432. + nand->dev_ready = bcmnand_dev_ready;
1433. + nand->read_byte = bcmnand_read_byte;
1434. + nand->read_word = bcmnand_read_word;
1435. + nand->select_chip = bcmnand_select_chip;
1436. + nand->cmdfunc = bcmnand_cmdfunc;
1437. + nand->waitfunc = bcmnand_waitfunc;
1438. +
1439. + nand->ecc.mode = NAND_ECC_HW;
1440. + nand->ecc.read_page_raw = bcmnand_read_page_raw;
1441. + nand->ecc.write_page_raw = bcmnand_write_page_raw;
1442. + nand->ecc.read_page = bcmnand_read_page_ecc;
1443. + nand->ecc.write_page = bcmnand_write_page_ecc;
1444. + nand->ecc.read_oob = bcmnand_read_oob;
1445. + nand->ecc.write_oob = bcmnand_write_oob;
1446. +
1447. + /* Set AUTO_CNFIG bit - try to auto-detect chips */
1448. + bcmnand_reg_write(ctrl, NANDC_CS_AUTO_CONFIG, 1);
1449. +
1450. + usleep_range(1000, 1500); /* give auto-detection time to settle */
1451. +
1452. + /* Print out current chip config */
1453. + for (chip = 0; chip < NANDC_MAX_CHIPS; chip++) {
1454. + dev_dbg(dev, "chip[%d]: size=%#x block=%#x page=%#x ecc_level=%#x\n",
1455. + chip,
1456. + bcmnand_reg_read(ctrl, NANDC_CONFIG_CHIP_SIZE(chip)),
1457. + bcmnand_reg_read(ctrl, NANDC_CONFIG_BLK_SIZE(chip)),
1458. + bcmnand_reg_read(ctrl, NANDC_CONFIG_PAGE_SIZE(chip)),
1459. + bcmnand_reg_read(ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip)));
1460. + }
1461. +
1462. + dev_dbg(dev, "Nand controller is reads=%d\n",
1463. + bcmnand_reg_aread(ctrl, NANDC_IDM_IO_CTRL_RDY));
1464. +
1465. + ret = bcmnand_scan(mtd);
1466. + if (ret) {
1467. + dev_err(dev, "scanning the nand flash chip failed with %i\n",
1468. + ret);
1469. + return ret;
1470. + }
1471. +
1472. + return 0;
1473. +}
  1474. +
  1475. +static int bcmnand_idm_init(struct bcmnand_ctrl *ctrl)
  1476. +{
  1477. + int irq_off;
  1478. + unsigned int retries = 0x1000;
  1479. + struct device *dev = &ctrl->core->dev;
  1480. +
  1481. + if (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET))
  1482. + dev_info(dev, "stuck in reset\n");
  1483. +
  1484. + bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 1);
  1485. + if (!bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
  1486. + dev_err(dev, "reset of failed\n");
  1487. + return -EIO;
  1488. + }
  1489. +
  1490. + while (bcmnand_reg_aread(ctrl, NANDC_IDM_RESET)) {
  1491. + bcmnand_reg_awrite(ctrl, NANDC_IDM_RESET, 0);
  1492. + cpu_relax();
  1493. + usleep_range(100, 150);
  1494. + if (!(retries--)) {
  1495. + dev_err(dev, "did not came back from reset\n");
  1496. + return -ETIMEDOUT;
  1497. + }
  1498. + }
  1499. +
  1500. + bcmnand_reg_awrite(ctrl, NANDC_IDM_CLOCK_EN, 1);
  1501. + bcmnand_reg_awrite(ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
  1502. + udelay(10);
  1503. +
  1504. + dev_info(dev, "NAND Controller rev %d.%02d\n",
  1505. + bcmnand_reg_read(ctrl, NANDC_REV_MAJOR),
  1506. + bcmnand_reg_read(ctrl, NANDC_REV_MINOR));
  1507. +
  1508. + usleep_range(250, 350);
  1509. +
  1510. + /* Disable all IRQs */
  1511. + for (irq_off = 0; irq_off < NANDC_IRQ_NUM; irq_off++)
  1512. + bcmnand_reg_awrite(ctrl, NANDC_IDM_IRQ_N_EN(irq_off), 0);
  1513. +
  1514. + return 0;
  1515. +}
  1516. +
1517. +static const char * const part_probes[] = { "ofpart", "bcm47xxpart", NULL }; /* partition parsers, tried in order */
  1518. +
1519. +/*
1520. + * Top-level init function
1521. + */
1522. +static int bcmnand_probe(struct bcma_device *core)
1523. +{
1524. + struct mtd_part_parser_data parser_data;
1525. + struct device *dev = &core->dev;
1526. + struct bcmnand_ctrl *ctrl;
1527. + int res, i, irq;
1528. +
1529. + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
1530. + if (!ctrl)
1531. + return -ENOMEM;
1532. +
1533. + bcma_set_drvdata(core, ctrl);
1534. +
1535. + ctrl->mtd.dev.parent = &core->dev;
1536. + ctrl->core = core;
1537. +
1538. + /* Acquire all interrupt lines */
1539. + for (i = 0; i < NANDC_IRQ_NUM; i++) {
1540. + irq = bcma_core_irq(core, i);
1541. + if (!irq) { /* 0 means no IRQ mapped for this index */
1542. + dev_err(dev, "IRQ idx %i not available\n", i);
1543. + return -ENOENT;
1544. + }
1545. + res = devm_request_irq(dev, irq, bcmnand_isr, 0,
1546. + KBUILD_MODNAME, ctrl);
1547. + if (res < 0) {
1548. + dev_err(dev, "problem requesting irq: %i (idx: %i)\n",
1549. + irq, i);
1550. + return res;
1551. + }
1552. + }
1553. +
1554. + res = bcmnand_idm_init(ctrl); /* bring the IDM wrapper out of reset first */
1555. + if (res)
1556. + return res;
1557. +
1558. + res = bcmnand_ctrl_init(ctrl);
1559. + if (res)
1560. + return res;
1561. +
1562. + parser_data.of_node = dev->of_node;
1563. + res = mtd_device_parse_register(&ctrl->mtd, part_probes, &parser_data, NULL, 0);
1564. + if (res) {
1565. + dev_err(dev, "Failed to register MTD device: %d\n", res);
1566. + return res;
1567. + }
1568. + return 0;
1569. +}
  1570. +
1571. +static void bcmnand_remove(struct bcma_device *core)
1572. +{
1573. + struct bcmnand_ctrl *ctrl = bcma_get_drvdata(core);
1574. +
1575. + mtd_device_unregister(&ctrl->mtd); /* devm releases the IRQs and ctrl memory */
1576. +}
  1577. +
  1578. +static const struct bcma_device_id bcmnand_bcma_tbl[] = {
  1579. + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_NAND, BCMA_ANY_REV, BCMA_ANY_CLASS),
  1580. + BCMA_CORETABLE_END
  1581. +};
  1582. +MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
  1583. +
1584. +static struct bcma_driver bcmnand_bcma_driver = { /* bcma glue for the NS NAND core */
1585. + .name = KBUILD_MODNAME,
1586. + .id_table = bcmnand_bcma_tbl,
1587. + .probe = bcmnand_probe,
1588. + .remove = bcmnand_remove,
1589. +};
  1590. +
1591. +static int __init bcmnand_init(void)
1592. +{
1593. + return bcma_driver_register(&bcmnand_bcma_driver); /* module entry point */
1594. +}
  1595. +
1596. +static void __exit bcmnand_exit(void)
1597. +{
1598. + bcma_driver_unregister(&bcmnand_bcma_driver); /* module exit point */
1599. +}
  1600. +
  1601. +module_init(bcmnand_init)
  1602. +module_exit(bcmnand_exit)
  1603. +
  1604. +MODULE_LICENSE("GPL");
  1605. +MODULE_AUTHOR("Hauke Mehrtens");
  1606. +MODULE_DESCRIPTION("Northstar on-chip NAND Flash Controller driver");