/* sunxi_nand.c — Allwinner sunxi NAND Flash Controller (NFC) driver */
  1. /*
  2. * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
  3. *
  4. * Derived from:
  5. * https://github.com/yuq/sunxi-nfc-mtd
  6. * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
  7. *
  8. * https://github.com/hno/Allwinner-Info
  9. * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
  10. *
  11. * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
  12. * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2 of the License, or
  17. * (at your option) any later version.
  18. *
  19. * This program is distributed in the hope that it will be useful,
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  22. * GNU General Public License for more details.
  23. */
  24. #include <linux/dma-mapping.h>
  25. #include <linux/slab.h>
  26. #include <linux/module.h>
  27. #include <linux/moduleparam.h>
  28. #include <linux/platform_device.h>
  29. #include <linux/of.h>
  30. #include <linux/of_device.h>
  31. #include <linux/of_gpio.h>
  32. #include <linux/mtd/mtd.h>
  33. #include <linux/mtd/nand.h>
  34. #include <linux/mtd/partitions.h>
  35. #include <linux/clk.h>
  36. #include <linux/delay.h>
  37. #include <linux/dmaengine.h>
  38. #include <linux/gpio.h>
  39. #include <linux/interrupt.h>
  40. #include <linux/iopoll.h>
  41. #include <linux/reset.h>
/* NFC register offsets */
#define NFC_REG_CTL		0x0000
#define NFC_REG_ST		0x0004
#define NFC_REG_INT		0x0008
#define NFC_REG_TIMING_CTL	0x000C
#define NFC_REG_TIMING_CFG	0x0010
#define NFC_REG_ADDR_LOW	0x0014
#define NFC_REG_ADDR_HIGH	0x0018
#define NFC_REG_SECTOR_NUM	0x001C
#define NFC_REG_CNT		0x0020
#define NFC_REG_CMD		0x0024
#define NFC_REG_RCMD_SET	0x0028
#define NFC_REG_WCMD_SET	0x002C
#define NFC_REG_IO_DATA		0x0030
#define NFC_REG_ECC_CTL		0x0034
#define NFC_REG_ECC_ST		0x0038
#define NFC_REG_DEBUG		0x003C
/* per-sector error counters are packed four per 32-bit register */
#define NFC_REG_ECC_ERR_CNT(x)	((0x0040 + (x)) & ~0x3)
#define NFC_REG_USER_DATA(x)	(0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA	0x00A0
#define NFC_REG_PAT_ID		0x00A4
/* internal SRAM buffers, accessible through the register window */
#define NFC_RAM0_BASE		0x0400
#define NFC_RAM1_BASE		0x0800

/* NFC_CTL register bits */
#define NFC_EN			BIT(0)
#define NFC_RESET		BIT(1)
#define NFC_BUS_WIDTH_MSK	BIT(2)
#define NFC_BUS_WIDTH_8		(0 << 2)
#define NFC_BUS_WIDTH_16	(1 << 2)
#define NFC_RB_SEL_MSK		BIT(3)
#define NFC_RB_SEL(x)		((x) << 3)
#define NFC_CE_SEL_MSK		GENMASK(26, 24)
#define NFC_CE_SEL(x)		((x) << 24)
#define NFC_CE_CTL		BIT(6)
#define NFC_PAGE_SHIFT_MSK	GENMASK(11, 8)
/* page sizes below 1KB (shift < 10) are encoded as 0 */
#define NFC_PAGE_SHIFT(x)	(((x) < 10 ? 0 : (x) - 10) << 8)
#define NFC_SAM			BIT(12)
#define NFC_RAM_METHOD		BIT(14)
#define NFC_DEBUG_CTL		BIT(31)

/* NFC_ST register bits */
#define NFC_RB_B2R		BIT(0)
#define NFC_CMD_INT_FLAG	BIT(1)
#define NFC_DMA_INT_FLAG	BIT(2)
#define NFC_CMD_FIFO_STATUS	BIT(3)
#define NFC_STA			BIT(4)
#define NFC_NATCH_INT_FLAG	BIT(5)
#define NFC_RB_STATE(x)		BIT(x + 8)

/* NFC_INT register bits (interrupt enables) */
#define NFC_B2R_INT_ENABLE	BIT(0)
#define NFC_CMD_INT_ENABLE	BIT(1)
#define NFC_DMA_INT_ENABLE	BIT(2)
#define NFC_INT_MASK		(NFC_B2R_INT_ENABLE | \
				 NFC_CMD_INT_ENABLE | \
				 NFC_DMA_INT_ENABLE)

/* NFC_TIMING_CTL register bits */
#define NFC_TIMING_CTL_EDO	BIT(8)

/* NFC_TIMING_CFG register layout */
#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD)		\
	(((tWB) & 0x3) | (((tADL) & 0x3) << 2) |		\
	 (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) |	\
	 (((tCAD) & 0x7) << 8))

/* NFC_CMD register bits */
#define NFC_CMD_LOW_BYTE_MSK	GENMASK(7, 0)
#define NFC_CMD_HIGH_BYTE_MSK	GENMASK(15, 8)
#define NFC_CMD(x)		(x)
#define NFC_ADR_NUM_MSK		GENMASK(18, 16)
#define NFC_ADR_NUM(x)		(((x) - 1) << 16)
#define NFC_SEND_ADR		BIT(19)
#define NFC_ACCESS_DIR		BIT(20)
#define NFC_DATA_TRANS		BIT(21)
#define NFC_SEND_CMD1		BIT(22)
#define NFC_WAIT_FLAG		BIT(23)
#define NFC_SEND_CMD2		BIT(24)
#define NFC_SEQ			BIT(25)
#define NFC_DATA_SWAP_METHOD	BIT(26)
#define NFC_ROW_AUTO_INC	BIT(27)
#define NFC_SEND_CMD3		BIT(28)
#define NFC_SEND_CMD4		BIT(29)
#define NFC_CMD_TYPE_MSK	GENMASK(31, 30)
#define NFC_NORMAL_OP		(0 << 30)
#define NFC_ECC_OP		(1 << 30)
#define NFC_PAGE_OP		(2 << 30)

/* NFC_RCMD_SET register bits */
#define NFC_READ_CMD_MSK	GENMASK(7, 0)
#define NFC_RND_READ_CMD0_MSK	GENMASK(15, 8)
#define NFC_RND_READ_CMD1_MSK	GENMASK(23, 16)

/* NFC_WCMD_SET register bits */
#define NFC_PROGRAM_CMD_MSK	GENMASK(7, 0)
#define NFC_RND_WRITE_CMD_MSK	GENMASK(15, 8)
#define NFC_READ_CMD0_MSK	GENMASK(23, 16)
#define NFC_READ_CMD1_MSK	GENMASK(31, 24)

/* NFC_ECC_CTL register bits */
#define NFC_ECC_EN		BIT(0)
#define NFC_ECC_PIPELINE	BIT(3)
#define NFC_ECC_EXCEPTION	BIT(4)
#define NFC_ECC_BLOCK_SIZE_MSK	BIT(5)
#define NFC_RANDOM_EN		BIT(9)
#define NFC_RANDOM_DIRECTION	BIT(10)
#define NFC_ECC_MODE_MSK	GENMASK(15, 12)
#define NFC_ECC_MODE(x)		((x) << 12)
#define NFC_RANDOM_SEED_MSK	GENMASK(30, 16)
#define NFC_RANDOM_SEED(x)	((x) << 16)

/* NFC_ECC_ST register bits */
#define NFC_ECC_ERR(x)		BIT(x)
#define NFC_ECC_ERR_MSK		GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x)	BIT(x + 16)
/* extract the per-sector error count byte for sector b */
#define NFC_ECC_ERR_CNT(b, x)	(((x) >> (((b) % 4) * 8)) & 0xff)

#define NFC_DEFAULT_TIMEOUT_MS	1000

#define NFC_SRAM_SIZE		1024

#define NFC_MAX_CS		7
/*
 * Ready/Busy detection type: describes the Ready/Busy detection modes
 *
 * @RB_NONE:	no external detection available, rely on STATUS command
 *		and software timeouts
 * @RB_NATIVE:	use sunxi NAND controller Ready/Busy support. The Ready/Busy
 *		pin of the NAND flash chip must be connected to one of the
 *		native NAND R/B pins (those which can be muxed to the NAND
 *		Controller)
 * @RB_GPIO:	use a simple GPIO to handle Ready/Busy status. The Ready/Busy
 *		pin of the NAND flash chip must be connected to a GPIO capable
 *		pin.
 */
enum sunxi_nand_rb_type {
	RB_NONE,	/* no R/B line: ->dev_ready() cannot be used */
	RB_NATIVE,	/* R/B routed to a controller-native pin */
	RB_GPIO,	/* R/B routed to a plain GPIO */
};
/*
 * Ready/Busy structure: stores information related to Ready/Busy detection
 *
 * @type:	the Ready/Busy detection mode
 * @info:	information related to the R/B detection mode. Either a gpio
 *		id or a native R/B id (those supported by the NAND controller).
 */
struct sunxi_nand_rb {
	enum sunxi_nand_rb_type type;
	union {
		int gpio;	/* valid when type == RB_GPIO */
		int nativeid;	/* valid when type == RB_NATIVE */
	} info;
};
/*
 * Chip Select structure: stores information related to NAND Chip Select
 *
 * @cs:	the NAND CS id used to communicate with a NAND Chip
 * @rb:	the Ready/Busy description
 */
struct sunxi_nand_chip_sel {
	u8 cs;			/* controller CS line (0..NFC_MAX_CS) */
	struct sunxi_nand_rb rb;
};
/*
 * sunxi HW ECC infos: stores information related to HW ECC support
 *
 * @mode:	the sunxi ECC mode field deduced from ECC requirements
 *		(written into NFC_ECC_MODE bits of NFC_REG_ECC_CTL)
 */
struct sunxi_nand_hw_ecc {
	int mode;
};
  201. /*
  202. * NAND chip structure: stores NAND chip device related information
  203. *
  204. * @node: used to store NAND chips into a list
  205. * @nand: base NAND chip structure
  206. * @mtd: base MTD structure
  207. * @clk_rate: clk_rate required for this NAND chip
  208. * @timing_cfg TIMING_CFG register value for this NAND chip
  209. * @selected: current active CS
  210. * @nsels: number of CS lines required by the NAND chip
  211. * @sels: array of CS lines descriptions
  212. */
  213. struct sunxi_nand_chip {
  214. struct list_head node;
  215. struct nand_chip nand;
  216. unsigned long clk_rate;
  217. u32 timing_cfg;
  218. u32 timing_ctl;
  219. int selected;
  220. int addr_cycles;
  221. u32 addr[2];
  222. int cmd_cycles;
  223. u8 cmd[2];
  224. int nsels;
  225. struct sunxi_nand_chip_sel sels[0];
  226. };
/* Convert a generic nand_chip pointer to its enclosing sunxi_nand_chip. */
static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
{
	return container_of(nand, struct sunxi_nand_chip, nand);
}
/*
 * NAND Controller structure: stores sunxi NAND controller information
 *
 * @controller:		base controller structure
 * @dev:		parent device (used to print error messages)
 * @regs:		NAND controller registers
 * @ahb_clk:		NAND Controller AHB clock
 * @mod_clk:		NAND Controller mod clock
 * @reset:		NAND Controller reset control
 * @assigned_cs:	bitmask describing already assigned CS lines
 * @clk_rate:		NAND controller current clock rate
 * @chips:		a list containing all the NAND chips attached to
 *			this NAND controller
 * @complete:		a completion object used to wait for NAND
 *			controller events
 * @dmac:		DMA channel used for controller <-> memory transfers
 */
struct sunxi_nfc {
	struct nand_hw_control controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *ahb_clk;
	struct clk *mod_clk;
	struct reset_control *reset;
	unsigned long assigned_cs;
	unsigned long clk_rate;
	struct list_head chips;
	struct completion complete;
	struct dma_chan *dmac;
};
/* Convert a generic nand_hw_control pointer to its enclosing sunxi_nfc. */
static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
{
	return container_of(ctrl, struct sunxi_nfc, controller);
}
/*
 * NFC interrupt handler: acks pending events and wakes up the waiter in
 * sunxi_nfc_wait_events() once every enabled event has fired.
 */
static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
{
	struct sunxi_nfc *nfc = dev_id;
	u32 st = readl(nfc->regs + NFC_REG_ST);
	u32 ien = readl(nfc->regs + NFC_REG_INT);

	/* None of the enabled events is pending: not our interrupt. */
	if (!(ien & st))
		return IRQ_NONE;

	/* All enabled events have now fired: signal the waiter. */
	if ((ien & st) == ien)
		complete(&nfc->complete);

	/* Ack the pending events (NOTE(review): assumes ST is W1C — confirm
	 * against the A10/A20 NFC manual), then disable the events that just
	 * fired while keeping the remaining ones enabled. */
	writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
	writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);

	return IRQ_HANDLED;
}
  276. static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
  277. bool use_polling, unsigned int timeout_ms)
  278. {
  279. int ret;
  280. if (events & ~NFC_INT_MASK)
  281. return -EINVAL;
  282. if (!timeout_ms)
  283. timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
  284. if (!use_polling) {
  285. init_completion(&nfc->complete);
  286. writel(events, nfc->regs + NFC_REG_INT);
  287. ret = wait_for_completion_timeout(&nfc->complete,
  288. msecs_to_jiffies(timeout_ms));
  289. writel(0, nfc->regs + NFC_REG_INT);
  290. } else {
  291. u32 status;
  292. ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
  293. (status & events) == events, 1,
  294. timeout_ms * 1000);
  295. }
  296. writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
  297. if (ret)
  298. dev_err(nfc->dev, "wait interrupt timedout\n");
  299. return ret;
  300. }
  301. static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
  302. {
  303. u32 status;
  304. int ret;
  305. ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
  306. !(status & NFC_CMD_FIFO_STATUS), 1,
  307. NFC_DEFAULT_TIMEOUT_MS * 1000);
  308. if (ret)
  309. dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
  310. return ret;
  311. }
  312. static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
  313. {
  314. u32 ctl;
  315. int ret;
  316. writel(0, nfc->regs + NFC_REG_ECC_CTL);
  317. writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
  318. ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
  319. !(ctl & NFC_RESET), 1,
  320. NFC_DEFAULT_TIMEOUT_MS * 1000);
  321. if (ret)
  322. dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
  323. return ret;
  324. }
/*
 * Map @buf for DMA and queue a slave transfer of @nchunks chunks of
 * @chunksize bytes each, switching the controller to DMA ("RAM method")
 * accesses.
 *
 * On success the descriptor has been submitted to the dmaengine queue;
 * NOTE(review): no dma_async_issue_pending() here, so the caller is
 * presumably responsible for starting the transfer — confirm at call sites.
 * Returns 0 on success or a negative error code; on failure the mapping
 * and NFC_RAM_METHOD flag are rolled back.
 */
static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
				    int chunksize, int nchunks,
				    enum dma_data_direction ddir,
				    struct scatterlist *sg)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct dma_async_tx_descriptor *dmad;
	enum dma_transfer_direction tdir;
	dma_cookie_t dmat;
	int ret;

	/* Translate the dma_data_direction into a dmaengine direction. */
	if (ddir == DMA_FROM_DEVICE)
		tdir = DMA_DEV_TO_MEM;
	else
		tdir = DMA_MEM_TO_DEV;

	sg_init_one(sg, buf, nchunks * chunksize);
	/* dma_map_sg() returns the number of mapped entries, 0 on error. */
	ret = dma_map_sg(nfc->dev, sg, 1, ddir);
	if (!ret)
		return -ENOMEM;

	dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
	if (!dmad) {
		ret = -EINVAL;
		goto err_unmap_buf;
	}

	/* Program the controller for a DMA transfer of the given geometry. */
	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);
	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
	writel(chunksize, nfc->regs + NFC_REG_CNT);

	dmat = dmaengine_submit(dmad);
	ret = dma_submit_error(dmat);
	if (ret)
		goto err_clr_dma_flag;

	return 0;

err_clr_dma_flag:
	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
	       nfc->regs + NFC_REG_CTL);

err_unmap_buf:
	dma_unmap_sg(nfc->dev, sg, 1, ddir);
	return ret;
}
  365. static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
  366. enum dma_data_direction ddir,
  367. struct scatterlist *sg)
  368. {
  369. struct nand_chip *nand = mtd_to_nand(mtd);
  370. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  371. dma_unmap_sg(nfc->dev, sg, 1, ddir);
  372. writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
  373. nfc->regs + NFC_REG_CTL);
  374. }
  375. static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
  376. {
  377. struct nand_chip *nand = mtd_to_nand(mtd);
  378. struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
  379. struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
  380. struct sunxi_nand_rb *rb;
  381. int ret;
  382. if (sunxi_nand->selected < 0)
  383. return 0;
  384. rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
  385. switch (rb->type) {
  386. case RB_NATIVE:
  387. ret = !!(readl(nfc->regs + NFC_REG_ST) &
  388. NFC_RB_STATE(rb->info.nativeid));
  389. break;
  390. case RB_GPIO:
  391. ret = gpio_get_value(rb->info.gpio);
  392. break;
  393. case RB_NONE:
  394. default:
  395. ret = 0;
  396. dev_err(nfc->dev, "cannot check R/B NAND status!\n");
  397. break;
  398. }
  399. return ret;
  400. }
/*
 * ->select_chip() hook: activate the CS described by @chip (or deselect
 * everything when @chip is negative) and apply that chip's page size,
 * R/B routing, timing and clock settings.
 */
static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	struct sunxi_nand_chip_sel *sel;
	u32 ctl;

	if (chip > 0 && chip >= sunxi_nand->nsels)
		return;

	/* Already selected: nothing to do. */
	if (chip == sunxi_nand->selected)
		return;

	/* Start from the current CTL value with the per-chip fields cleared. */
	ctl = readl(nfc->regs + NFC_REG_CTL) &
	      ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);

	if (chip >= 0) {
		sel = &sunxi_nand->sels[chip];

		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
		       NFC_PAGE_SHIFT(nand->page_shift);
		/* Only install ->dev_ready() when R/B can actually be read. */
		if (sel->rb.type == RB_NONE) {
			nand->dev_ready = NULL;
		} else {
			nand->dev_ready = sunxi_nfc_dev_ready;
			if (sel->rb.type == RB_NATIVE)
				ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
		}

		writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);

		/* Switch the mod clock only when the rate really changes. */
		if (nfc->clk_rate != sunxi_nand->clk_rate) {
			clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
			nfc->clk_rate = sunxi_nand->clk_rate;
		}
	}

	writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
	writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
	writel(ctl, nfc->regs + NFC_REG_CTL);

	sunxi_nand->selected = chip;
}
/*
 * ->read_buf() hook: read @len bytes from the NAND chip through the
 * controller's internal SRAM, in chunks of at most NFC_SRAM_SIZE bytes.
 * A NULL @buf still performs the transfers but discards the data.
 * Errors abort the loop silently (the hook has a void return).
 */
static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		cnt = min(len - offs, NFC_SRAM_SIZE);

		/* Don't queue a new transfer while one is still pending. */
		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
		if (ret)
			break;

		/* Data landed in SRAM0: copy it out to the caller's buffer. */
		if (buf)
			memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
				      cnt);
		offs += cnt;
	}
}
/*
 * ->write_buf() hook: write @len bytes from @buf to the NAND chip through
 * the controller's internal SRAM, in chunks of at most NFC_SRAM_SIZE
 * bytes. Errors abort the loop silently (the hook has a void return).
 */
static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;
	int cnt;
	int offs = 0;
	u32 tmp;

	while (len > offs) {
		cnt = min(len - offs, NFC_SRAM_SIZE);

		/* Don't queue a new transfer while one is still pending. */
		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
		if (ret)
			break;

		writel(cnt, nfc->regs + NFC_REG_CNT);
		/* Stage the chunk in SRAM0 before triggering the transfer. */
		memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
		      NFC_ACCESS_DIR;
		writel(tmp, nfc->regs + NFC_REG_CMD);

		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
		if (ret)
			break;

		offs += cnt;
	}
}
  488. static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
  489. {
  490. uint8_t ret;
  491. sunxi_nfc_read_buf(mtd, &ret, 1);
  492. return ret;
  493. }
/*
 * ->cmd_ctrl() hook: accumulate the command and address cycles issued by
 * the NAND core, then flush them to the controller as one NFC command when
 * the core terminates the sequence (NAND_CMD_NONE with the chip enabled
 * and neither CLE nor ALE asserted).
 */
static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
			       unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	int ret;

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return;

	if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
	    !(ctrl & (NAND_CLE | NAND_ALE))) {
		u32 cmd = 0;

		/* Nothing accumulated: nothing to flush. */
		if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
			return;

		/*
		 * First latched opcode becomes CMD1; a second one is sent
		 * as CMD2 through the RCMD_SET register. The post-decrement
		 * tests consume the counter (it is reset to 0 just below,
		 * so a transient negative value is harmless).
		 */
		if (sunxi_nand->cmd_cycles--)
			cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
		if (sunxi_nand->cmd_cycles--) {
			cmd |= NFC_SEND_CMD2;
			writel(sunxi_nand->cmd[1],
			       nfc->regs + NFC_REG_RCMD_SET);
		}

		sunxi_nand->cmd_cycles = 0;

		if (sunxi_nand->addr_cycles) {
			cmd |= NFC_SEND_ADR |
			       NFC_ADR_NUM(sunxi_nand->addr_cycles);
			writel(sunxi_nand->addr[0],
			       nfc->regs + NFC_REG_ADDR_LOW);
		}

		/* Address cycles 5..8 go through ADDR_HIGH. */
		if (sunxi_nand->addr_cycles > 4)
			writel(sunxi_nand->addr[1],
			       nfc->regs + NFC_REG_ADDR_HIGH);

		writel(cmd, nfc->regs + NFC_REG_CMD);
		sunxi_nand->addr[0] = 0;
		sunxi_nand->addr[1] = 0;
		sunxi_nand->addr_cycles = 0;
		sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
	}

	if (ctrl & NAND_CLE) {
		sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
	} else if (ctrl & NAND_ALE) {
		/* Pack address bytes little-endian into addr[0]/addr[1]. */
		sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
				dat << ((sunxi_nand->addr_cycles % 4) * 8);
		sunxi_nand->addr_cycles++;
	}
}
/*
 * These seed values have been extracted from Allwinner's BSP.
 * They are indexed by (page % number-of-pages-per-block) in
 * sunxi_nfc_randomizer_state() and loaded into NFC_RANDOM_SEED.
 */
static const u16 sunxi_nfc_randomizer_page_seeds[] = {
	0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
	0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
	0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
	0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
	0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
	0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
	0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
	0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
	0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
	0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
	0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
	0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
	0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
	0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
	0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
	0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
};
/*
 * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
 * have been generated using
 * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
 * the randomizer engine does internally before de/scrambling OOB data.
 *
 * Those tables are statically defined to avoid calculating randomizer state
 * at runtime.
 */
/* Pre-stepped seeds for a 512-byte ECC step size. */
static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
	0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
	0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
	0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
	0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
	0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
	0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
	0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
	0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
	0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
	0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
	0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
	0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
	0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
	0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
	0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
	0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
};
/* Pre-stepped seeds for a 1024-byte ECC step size (see comment above). */
static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
	0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
	0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
	0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
	0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
	0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
	0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
	0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
	0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
	0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
	0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
	0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
	0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
	0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
	0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
	0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
	0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
};
  604. static u16 sunxi_nfc_randomizer_step(u16 state, int count)
  605. {
  606. state &= 0x7fff;
  607. /*
  608. * This loop is just a simple implementation of a Fibonacci LFSR using
  609. * the x16 + x15 + 1 polynomial.
  610. */
  611. while (count--)
  612. state = ((state >> 1) |
  613. (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
  614. return state;
  615. }
/*
 * Return the randomizer seed to use for @page.
 *
 * The seed tables are indexed by the page position inside an erase block
 * (mtd_div_by_ws(erasesize) gives the number of pages per block, used as
 * the modulus). When @ecc is true the seed is taken from the ECC seed
 * table matching the ECC step size (512 or 1024 bytes), otherwise from
 * the page seed table.
 *
 * NOTE(review): the modulus is clamped against the page seed table only;
 * this assumes the ECC seed tables are at least as large — confirm.
 */
static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
{
	const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
	int mod = mtd_div_by_ws(mtd->erasesize, mtd);

	if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
		mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);

	if (ecc) {
		if (mtd->ecc_step_size == 512)
			seeds = sunxi_nfc_randomizer_ecc512_seeds;
		else
			seeds = sunxi_nfc_randomizer_ecc1024_seeds;
	}

	return seeds[page % mod];
}
  630. static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
  631. int page, bool ecc)
  632. {
  633. struct nand_chip *nand = mtd_to_nand(mtd);
  634. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  635. u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
  636. u16 state;
  637. if (!(nand->options & NAND_NEED_SCRAMBLING))
  638. return;
  639. ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
  640. state = sunxi_nfc_randomizer_state(mtd, page, ecc);
  641. ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
  642. writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
  643. }
  644. static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
  645. {
  646. struct nand_chip *nand = mtd_to_nand(mtd);
  647. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  648. if (!(nand->options & NAND_NEED_SCRAMBLING))
  649. return;
  650. writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
  651. nfc->regs + NFC_REG_ECC_CTL);
  652. }
  653. static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
  654. {
  655. struct nand_chip *nand = mtd_to_nand(mtd);
  656. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  657. if (!(nand->options & NAND_NEED_SCRAMBLING))
  658. return;
  659. writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
  660. nfc->regs + NFC_REG_ECC_CTL);
  661. }
  662. static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
  663. {
  664. u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
  665. bbm[0] ^= state;
  666. bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
  667. }
/*
 * Write @len bytes from @buf with the randomizer seeded for @page (and
 * @ecc selecting the seed table) and enabled for the transfer duration.
 */
static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
					   const uint8_t *buf, int len,
					   bool ecc, int page)
{
	sunxi_nfc_randomizer_config(mtd, page, ecc);
	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_write_buf(mtd, buf, len);
	sunxi_nfc_randomizer_disable(mtd);
}
/*
 * Read @len bytes into @buf with the randomizer seeded for @page (and
 * @ecc selecting the seed table) and enabled for the transfer duration.
 */
static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
					  int len, bool ecc, int page)
{
	sunxi_nfc_randomizer_config(mtd, page, ecc);
	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_read_buf(mtd, buf, len);
	sunxi_nfc_randomizer_disable(mtd);
}
  685. static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
  686. {
  687. struct nand_chip *nand = mtd_to_nand(mtd);
  688. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  689. struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
  690. u32 ecc_ctl;
  691. ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
  692. ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
  693. NFC_ECC_BLOCK_SIZE_MSK);
  694. ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
  695. NFC_ECC_PIPELINE;
  696. writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
  697. }
  698. static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
  699. {
  700. struct nand_chip *nand = mtd_to_nand(mtd);
  701. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  702. writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
  703. nfc->regs + NFC_REG_ECC_CTL);
  704. }
  705. static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
  706. {
  707. buf[0] = user_data;
  708. buf[1] = user_data >> 8;
  709. buf[2] = user_data >> 16;
  710. buf[3] = user_data >> 24;
  711. }
  712. static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
  713. {
  714. return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
  715. }
/*
 * Copy the 4 ECC-protected OOB bytes of chunk @step from the USER_DATA
 * register into @oob. When @bbm is set and the chip is scrambled, the
 * Bad Block Marker bytes are de-randomized in place.
 */
static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
						int step, bool bbm, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);

	sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
				   oob);

	/* De-randomize the Bad Block Marker. */
	if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
		sunxi_nfc_randomize_bbm(mtd, page, oob);
}
/*
 * Load the 4 ECC-protected OOB bytes of chunk @step into the USER_DATA
 * register. When @bbm is set and the chip is scrambled, the Bad Block
 * Marker bytes are randomized first (on a local copy, @oob is not
 * modified).
 */
static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
						const u8 *oob, int step,
						bool bbm, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	u8 user_data[4];

	/* Randomize the Bad Block Marker. */
	if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
		memcpy(user_data, oob, sizeof(user_data));
		sunxi_nfc_randomize_bbm(mtd, page, user_data);
		oob = user_data;
	}

	writel(sunxi_nfc_buf_to_user_data(oob),
	       nfc->regs + NFC_REG_USER_DATA(step));
}
  743. static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
  744. unsigned int *max_bitflips, int ret)
  745. {
  746. if (ret < 0) {
  747. mtd->ecc_stats.failed++;
  748. } else {
  749. mtd->ecc_stats.corrected += ret;
  750. *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
  751. }
  752. }
/*
 * Interpret the controller's ECC status for chunk @step.
 *
 * Returns -EBADMSG when the chunk is uncorrectable, otherwise the number
 * of corrected bitflips. When the controller reports a constant-pattern
 * chunk, @data and @oob (if non-NULL) are filled with that pattern and
 * *@erased is set for the all-0xff (erased) case.
 */
static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
				    int step, u32 status, bool *erased)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	u32 tmp;

	*erased = false;

	if (status & NFC_ECC_ERR(step))
		return -EBADMSG;

	if (status & NFC_ECC_PAT_FOUND(step)) {
		u8 pattern;

		/* Bit 0 of PAT_ID selects between the 0x00 and 0xff patterns. */
		if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
			pattern = 0x0;
		} else {
			pattern = 0xff;
			*erased = true;
		}

		if (data)
			memset(data, pattern, ecc->size);

		if (oob)
			memset(oob, pattern, ecc->bytes + 4);

		return 0;
	}

	/* No error, no pattern: report the corrected bitflip count. */
	tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));

	return NFC_ECC_ERR_CNT(step, tmp);
}
/*
 * Read and correct one ECC chunk in PIO mode.
 *
 * @data/@data_off and @oob/@oob_off give the destination buffers and the
 * in-page offsets of the chunk's data and protected OOB bytes. @cur_off
 * tracks the current read pointer so redundant RNDOUT commands are
 * avoided. @bbm tells whether this chunk carries the Bad Block Marker.
 *
 * Returns a negative error code on controller failure, 1 when the chunk
 * was consumed in raw mode (erased or rescued via the erased-page check),
 * 0 otherwise. Bitflip statistics are folded into @max_bitflips.
 */
static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
				       u8 *data, int data_off,
				       u8 *oob, int oob_off,
				       int *cur_off,
				       unsigned int *max_bitflips,
				       bool bbm, bool oob_required, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int raw_mode = 0;
	bool erased;
	int ret;

	/* Move the read pointer to the chunk data if needed. */
	if (*cur_off != data_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);

	/* Prefetch ecc->size bytes into the NFC SRAM (NULL: no copy-out). */
	sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);

	/* Jump to the OOB bytes if they don't follow the data directly. */
	if (data_off + ecc->size != oob_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	/* Trigger the ECC-checked data transfer. */
	sunxi_nfc_randomizer_enable(mtd);
	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
	sunxi_nfc_randomizer_disable(mtd);
	if (ret)
		return ret;

	*cur_off = oob_off + ecc->bytes + 4;

	ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
				       readl(nfc->regs + NFC_REG_ECC_ST),
				       &erased);
	if (erased)
		return 1;

	if (ret < 0) {
		/*
		 * Re-read the data with the randomizer disabled to identify
		 * bitflips in erased pages.
		 */
		if (nand->options & NAND_NEED_SCRAMBLING) {
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
			nand->read_buf(mtd, data, ecc->size);
		} else {
			memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
				      ecc->size);
		}

		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
		nand->read_buf(mtd, oob, ecc->bytes + 4);

		/* Treat an almost-blank chunk as erased with bitflips. */
		ret = nand_check_erased_ecc_chunk(data, ecc->size,
						  oob, ecc->bytes + 4,
						  NULL, 0, ecc->strength);
		if (ret >= 0)
			raw_mode = 1;
	} else {
		/* Correction succeeded: copy data out of the NFC SRAM. */
		memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);

		if (oob_required) {
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
			sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
						      true, page);

			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
							    bbm, page);
		}
	}

	sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);

	return raw_mode;
}
  846. static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
  847. u8 *oob, int *cur_off,
  848. bool randomize, int page)
  849. {
  850. struct nand_chip *nand = mtd_to_nand(mtd);
  851. struct nand_ecc_ctrl *ecc = &nand->ecc;
  852. int offset = ((ecc->bytes + 4) * ecc->steps);
  853. int len = mtd->oobsize - offset;
  854. if (len <= 0)
  855. return;
  856. if (!cur_off || *cur_off != offset)
  857. nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
  858. offset + mtd->writesize, -1);
  859. if (!randomize)
  860. sunxi_nfc_read_buf(mtd, oob + offset, len);
  861. else
  862. sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
  863. false, page);
  864. if (cur_off)
  865. *cur_off = mtd->oobsize + mtd->writesize;
  866. }
/*
 * Read @nchunks ECC chunks of a page through the DMA engine.
 *
 * The data is transferred in one DMA operation; OOB bytes and the
 * recovery of uncorrectable chunks still go through PIO (see the TODOs).
 * Returns the maximum number of bitflips seen in any chunk, or a
 * negative error code if the DMA transfer itself failed (callers use
 * this to fall back to PIO mode).
 */
static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
					    int oob_required, int page,
					    int nchunks)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	bool randomized = nand->options & NAND_NEED_SCRAMBLING;
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	unsigned int max_bitflips = 0;
	int ret, i, raw_mode = 0;
	struct scatterlist sg;
	u32 status;

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
				       DMA_FROM_DEVICE, &sg);
	if (ret)
		return ret;

	sunxi_nfc_hw_ecc_enable(mtd);
	sunxi_nfc_randomizer_config(mtd, page, false);
	sunxi_nfc_randomizer_enable(mtd);

	/* Let the NFC issue READSTART/RNDOUT sequences on its own. */
	writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
	       NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);

	dma_async_issue_pending(nfc->dmac);

	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
	if (ret)
		dmaengine_terminate_all(nfc->dmac);

	sunxi_nfc_randomizer_disable(mtd);
	sunxi_nfc_hw_ecc_disable(mtd);

	sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);

	if (ret)
		return ret;

	status = readl(nfc->regs + NFC_REG_ECC_ST);

	/* First pass: handle correctable chunks and protected OOB bytes. */
	for (i = 0; i < nchunks; i++) {
		int data_off = i * ecc->size;
		int oob_off = i * (ecc->bytes + 4);
		u8 *data = buf + data_off;
		u8 *oob = nand->oob_poi + oob_off;
		bool erased;

		ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
					       oob_required ? oob : NULL,
					       i, status, &erased);

		/* ECC errors are handled in the second loop. */
		if (ret < 0)
			continue;

		if (oob_required && !erased) {
			/* TODO: use DMA to retrieve OOB */
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
				      mtd->writesize + oob_off, -1);
			nand->read_buf(mtd, oob, ecc->bytes + 4);

			/* Chunk 0 carries the Bad Block Marker. */
			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
							    !i, page);
		}

		if (erased)
			raw_mode = 1;

		sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
	}

	/* Second pass: re-check uncorrectable chunks for erased pages. */
	if (status & NFC_ECC_ERR_MSK) {
		for (i = 0; i < nchunks; i++) {
			int data_off = i * ecc->size;
			int oob_off = i * (ecc->bytes + 4);
			u8 *data = buf + data_off;
			u8 *oob = nand->oob_poi + oob_off;

			if (!(status & NFC_ECC_ERR(i)))
				continue;

			/*
			 * Re-read the data with the randomizer disabled to
			 * identify bitflips in erased pages.
			 */
			if (randomized) {
				/* TODO: use DMA to read page in raw mode */
				nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
					      data_off, -1);
				nand->read_buf(mtd, data, ecc->size);
			}

			/* TODO: use DMA to retrieve OOB */
			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
				      mtd->writesize + oob_off, -1);
			nand->read_buf(mtd, oob, ecc->bytes + 4);

			ret = nand_check_erased_ecc_chunk(data, ecc->size,
							  oob, ecc->bytes + 4,
							  NULL, 0,
							  ecc->strength);
			if (ret >= 0)
				raw_mode = 1;

			sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
		}
	}

	if (oob_required)
		sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
						NULL, !raw_mode,
						page);

	return max_bitflips;
}
/*
 * Write one ECC chunk (data + protected OOB bytes) in PIO mode.
 *
 * @cur_off tracks the current write pointer so redundant RNDIN commands
 * are avoided. @bbm tells whether this chunk carries the Bad Block
 * Marker (which must be randomized separately).
 *
 * Returns 0 on success or a negative error code.
 */
static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
					const u8 *data, int data_off,
					const u8 *oob, int oob_off,
					int *cur_off, bool bbm,
					int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	int ret;

	/* Move the write pointer to the chunk data if needed. */
	if (data_off != *cur_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);

	sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);

	/* Jump to the OOB bytes if they don't follow the data directly. */
	if (data_off + ecc->size != oob_off)
		nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);

	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
	if (ret)
		return ret;

	sunxi_nfc_randomizer_enable(mtd);
	sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);

	/* Trigger the ECC-generating write transfer. */
	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
	       NFC_ACCESS_DIR | NFC_ECC_OP,
	       nfc->regs + NFC_REG_CMD);

	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
	sunxi_nfc_randomizer_disable(mtd);
	if (ret)
		return ret;

	*cur_off = oob_off + ecc->bytes + 4;

	return 0;
}
  994. static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
  995. u8 *oob, int *cur_off,
  996. int page)
  997. {
  998. struct nand_chip *nand = mtd_to_nand(mtd);
  999. struct nand_ecc_ctrl *ecc = &nand->ecc;
  1000. int offset = ((ecc->bytes + 4) * ecc->steps);
  1001. int len = mtd->oobsize - offset;
  1002. if (len <= 0)
  1003. return;
  1004. if (!cur_off || *cur_off != offset)
  1005. nand->cmdfunc(mtd, NAND_CMD_RNDIN,
  1006. offset + mtd->writesize, -1);
  1007. sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
  1008. if (cur_off)
  1009. *cur_off = mtd->oobsize + mtd->writesize;
  1010. }
  1011. static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
  1012. struct nand_chip *chip, uint8_t *buf,
  1013. int oob_required, int page)
  1014. {
  1015. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1016. unsigned int max_bitflips = 0;
  1017. int ret, i, cur_off = 0;
  1018. bool raw_mode = false;
  1019. sunxi_nfc_hw_ecc_enable(mtd);
  1020. for (i = 0; i < ecc->steps; i++) {
  1021. int data_off = i * ecc->size;
  1022. int oob_off = i * (ecc->bytes + 4);
  1023. u8 *data = buf + data_off;
  1024. u8 *oob = chip->oob_poi + oob_off;
  1025. ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
  1026. oob_off + mtd->writesize,
  1027. &cur_off, &max_bitflips,
  1028. !i, oob_required, page);
  1029. if (ret < 0)
  1030. return ret;
  1031. else if (ret)
  1032. raw_mode = true;
  1033. }
  1034. if (oob_required)
  1035. sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
  1036. !raw_mode, page);
  1037. sunxi_nfc_hw_ecc_disable(mtd);
  1038. return max_bitflips;
  1039. }
  1040. static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
  1041. struct nand_chip *chip, u8 *buf,
  1042. int oob_required, int page)
  1043. {
  1044. int ret;
  1045. ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
  1046. chip->ecc.steps);
  1047. if (ret >= 0)
  1048. return ret;
  1049. /* Fallback to PIO mode */
  1050. chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
  1051. return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
  1052. }
  1053. static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
  1054. struct nand_chip *chip,
  1055. u32 data_offs, u32 readlen,
  1056. u8 *bufpoi, int page)
  1057. {
  1058. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1059. int ret, i, cur_off = 0;
  1060. unsigned int max_bitflips = 0;
  1061. sunxi_nfc_hw_ecc_enable(mtd);
  1062. chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
  1063. for (i = data_offs / ecc->size;
  1064. i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
  1065. int data_off = i * ecc->size;
  1066. int oob_off = i * (ecc->bytes + 4);
  1067. u8 *data = bufpoi + data_off;
  1068. u8 *oob = chip->oob_poi + oob_off;
  1069. ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
  1070. oob,
  1071. oob_off + mtd->writesize,
  1072. &cur_off, &max_bitflips, !i,
  1073. false, page);
  1074. if (ret < 0)
  1075. return ret;
  1076. }
  1077. sunxi_nfc_hw_ecc_disable(mtd);
  1078. return max_bitflips;
  1079. }
  1080. static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
  1081. struct nand_chip *chip,
  1082. u32 data_offs, u32 readlen,
  1083. u8 *buf, int page)
  1084. {
  1085. int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
  1086. int ret;
  1087. ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
  1088. if (ret >= 0)
  1089. return ret;
  1090. /* Fallback to PIO mode */
  1091. chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
  1092. return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
  1093. buf, page);
  1094. }
  1095. static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
  1096. struct nand_chip *chip,
  1097. const uint8_t *buf, int oob_required,
  1098. int page)
  1099. {
  1100. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1101. int ret, i, cur_off = 0;
  1102. sunxi_nfc_hw_ecc_enable(mtd);
  1103. for (i = 0; i < ecc->steps; i++) {
  1104. int data_off = i * ecc->size;
  1105. int oob_off = i * (ecc->bytes + 4);
  1106. const u8 *data = buf + data_off;
  1107. const u8 *oob = chip->oob_poi + oob_off;
  1108. ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
  1109. oob_off + mtd->writesize,
  1110. &cur_off, !i, page);
  1111. if (ret)
  1112. return ret;
  1113. }
  1114. if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
  1115. sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
  1116. &cur_off, page);
  1117. sunxi_nfc_hw_ecc_disable(mtd);
  1118. return 0;
  1119. }
  1120. static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
  1121. struct nand_chip *chip,
  1122. u32 data_offs, u32 data_len,
  1123. const u8 *buf, int oob_required,
  1124. int page)
  1125. {
  1126. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1127. int ret, i, cur_off = 0;
  1128. sunxi_nfc_hw_ecc_enable(mtd);
  1129. for (i = data_offs / ecc->size;
  1130. i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
  1131. int data_off = i * ecc->size;
  1132. int oob_off = i * (ecc->bytes + 4);
  1133. const u8 *data = buf + data_off;
  1134. const u8 *oob = chip->oob_poi + oob_off;
  1135. ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
  1136. oob_off + mtd->writesize,
  1137. &cur_off, !i, page);
  1138. if (ret)
  1139. return ret;
  1140. }
  1141. sunxi_nfc_hw_ecc_disable(mtd);
  1142. return 0;
  1143. }
  1144. static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
  1145. struct nand_chip *chip,
  1146. const u8 *buf,
  1147. int oob_required,
  1148. int page)
  1149. {
  1150. struct nand_chip *nand = mtd_to_nand(mtd);
  1151. struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
  1152. struct nand_ecc_ctrl *ecc = &nand->ecc;
  1153. struct scatterlist sg;
  1154. int ret, i;
  1155. ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
  1156. if (ret)
  1157. return ret;
  1158. ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
  1159. DMA_TO_DEVICE, &sg);
  1160. if (ret)
  1161. goto pio_fallback;
  1162. for (i = 0; i < ecc->steps; i++) {
  1163. const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
  1164. sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
  1165. }
  1166. sunxi_nfc_hw_ecc_enable(mtd);
  1167. sunxi_nfc_randomizer_config(mtd, page, false);
  1168. sunxi_nfc_randomizer_enable(mtd);
  1169. writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
  1170. nfc->regs + NFC_REG_RCMD_SET);
  1171. dma_async_issue_pending(nfc->dmac);
  1172. writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
  1173. NFC_DATA_TRANS | NFC_ACCESS_DIR,
  1174. nfc->regs + NFC_REG_CMD);
  1175. ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
  1176. if (ret)
  1177. dmaengine_terminate_all(nfc->dmac);
  1178. sunxi_nfc_randomizer_disable(mtd);
  1179. sunxi_nfc_hw_ecc_disable(mtd);
  1180. sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
  1181. if (ret)
  1182. return ret;
  1183. if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
  1184. /* TODO: use DMA to transfer extra OOB bytes ? */
  1185. sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
  1186. NULL, page);
  1187. return 0;
  1188. pio_fallback:
  1189. return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
  1190. }
  1191. static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
  1192. struct nand_chip *chip,
  1193. uint8_t *buf, int oob_required,
  1194. int page)
  1195. {
  1196. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1197. unsigned int max_bitflips = 0;
  1198. int ret, i, cur_off = 0;
  1199. bool raw_mode = false;
  1200. sunxi_nfc_hw_ecc_enable(mtd);
  1201. for (i = 0; i < ecc->steps; i++) {
  1202. int data_off = i * (ecc->size + ecc->bytes + 4);
  1203. int oob_off = data_off + ecc->size;
  1204. u8 *data = buf + (i * ecc->size);
  1205. u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
  1206. ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
  1207. oob_off, &cur_off,
  1208. &max_bitflips, !i,
  1209. oob_required,
  1210. page);
  1211. if (ret < 0)
  1212. return ret;
  1213. else if (ret)
  1214. raw_mode = true;
  1215. }
  1216. if (oob_required)
  1217. sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
  1218. !raw_mode, page);
  1219. sunxi_nfc_hw_ecc_disable(mtd);
  1220. return max_bitflips;
  1221. }
  1222. static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
  1223. struct nand_chip *chip,
  1224. const uint8_t *buf,
  1225. int oob_required, int page)
  1226. {
  1227. struct nand_ecc_ctrl *ecc = &chip->ecc;
  1228. int ret, i, cur_off = 0;
  1229. sunxi_nfc_hw_ecc_enable(mtd);
  1230. for (i = 0; i < ecc->steps; i++) {
  1231. int data_off = i * (ecc->size + ecc->bytes + 4);
  1232. int oob_off = data_off + ecc->size;
  1233. const u8 *data = buf + (i * ecc->size);
  1234. const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
  1235. ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
  1236. oob, oob_off, &cur_off,
  1237. false, page);
  1238. if (ret)
  1239. return ret;
  1240. }
  1241. if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
  1242. sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
  1243. &cur_off, page);
  1244. sunxi_nfc_hw_ecc_disable(mtd);
  1245. return 0;
  1246. }
/*
 * ecc.read_oob() implementation: read the whole page through the
 * ECC-aware read_page() path to obtain the protected OOB bytes. The
 * cached page index is invalidated because databuf is reused as the
 * bounce buffer.
 */
static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
					    struct nand_chip *chip,
					    int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	chip->pagebuf = -1;

	return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
}
  1255. static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
  1256. struct nand_chip *chip,
  1257. int page)
  1258. {
  1259. int ret, status;
  1260. chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
  1261. chip->pagebuf = -1;
  1262. memset(chip->buffers->databuf, 0xff, mtd->writesize);
  1263. ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
  1264. if (ret)
  1265. return ret;
  1266. /* Send command to program the OOB data */
  1267. chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
  1268. status = chip->waitfunc(mtd, chip);
  1269. return status & NAND_STATUS_FAIL ? -EIO : 0;
  1270. }
/*
 * Cycle-count lookup tables for the tWB and tRHW timing fields; the slot
 * index returned by _sunxi_nand_lookup_timing() is what gets programmed
 * into NFC_TIMING_CFG.
 */
static const s32 tWB_lut[] = {6, 12, 16, 20};
static const s32 tRHW_lut[] = {4, 8, 12, 20};
  1273. static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
  1274. u32 clk_period)
  1275. {
  1276. u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
  1277. int i;
  1278. for (i = 0; i < lut_size; i++) {
  1279. if (clk_cycles <= lut[i])
  1280. return i;
  1281. }
  1282. /* Doesn't fit */
  1283. return -EINVAL;
  1284. }
/* Convenience wrapper deducing the LUT size from the array itself. */
#define sunxi_nand_lookup_timing(l, p, c) \
	_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
  1287. static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
  1288. const struct nand_data_interface *conf,
  1289. bool check_only)
  1290. {
  1291. struct nand_chip *nand = mtd_to_nand(mtd);
  1292. struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
  1293. struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
  1294. const struct nand_sdr_timings *timings;
  1295. u32 min_clk_period = 0;
  1296. s32 tWB, tADL, tWHR, tRHW, tCAD;
  1297. long real_clk_rate;
  1298. timings = nand_get_sdr_timings(conf);
  1299. if (IS_ERR(timings))
  1300. return -ENOTSUPP;
  1301. /* T1 <=> tCLS */
  1302. if (timings->tCLS_min > min_clk_period)
  1303. min_clk_period = timings->tCLS_min;
  1304. /* T2 <=> tCLH */
  1305. if (timings->tCLH_min > min_clk_period)
  1306. min_clk_period = timings->tCLH_min;
  1307. /* T3 <=> tCS */
  1308. if (timings->tCS_min > min_clk_period)
  1309. min_clk_period = timings->tCS_min;
  1310. /* T4 <=> tCH */
  1311. if (timings->tCH_min > min_clk_period)
  1312. min_clk_period = timings->tCH_min;
  1313. /* T5 <=> tWP */
  1314. if (timings->tWP_min > min_clk_period)
  1315. min_clk_period = timings->tWP_min;
  1316. /* T6 <=> tWH */
  1317. if (timings->tWH_min > min_clk_period)
  1318. min_clk_period = timings->tWH_min;
  1319. /* T7 <=> tALS */
  1320. if (timings->tALS_min > min_clk_period)
  1321. min_clk_period = timings->tALS_min;
  1322. /* T8 <=> tDS */
  1323. if (timings->tDS_min > min_clk_period)
  1324. min_clk_period = timings->tDS_min;
  1325. /* T9 <=> tDH */
  1326. if (timings->tDH_min > min_clk_period)
  1327. min_clk_period = timings->tDH_min;
  1328. /* T10 <=> tRR */
  1329. if (timings->tRR_min > (min_clk_period * 3))
  1330. min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
  1331. /* T11 <=> tALH */
  1332. if (timings->tALH_min > min_clk_period)
  1333. min_clk_period = timings->tALH_min;
  1334. /* T12 <=> tRP */
  1335. if (timings->tRP_min > min_clk_period)
  1336. min_clk_period = timings->tRP_min;
  1337. /* T13 <=> tREH */
  1338. if (timings->tREH_min > min_clk_period)
  1339. min_clk_period = timings->tREH_min;
  1340. /* T14 <=> tRC */
  1341. if (timings->tRC_min > (min_clk_period * 2))
  1342. min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
  1343. /* T15 <=> tWC */
  1344. if (timings->tWC_min > (min_clk_period * 2))
  1345. min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
  1346. /* T16 - T19 + tCAD */
  1347. if (timings->tWB_max > (min_clk_period * 20))
  1348. min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
  1349. if (timings->tADL_min > (min_clk_period * 32))
  1350. min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
  1351. if (timings->tWHR_min > (min_clk_period * 32))
  1352. min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
  1353. if (timings->tRHW_min > (min_clk_period * 20))
  1354. min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
  1355. tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
  1356. min_clk_period);
  1357. if (tWB < 0) {
  1358. dev_err(nfc->dev, "unsupported tWB\n");
  1359. return tWB;
  1360. }
  1361. tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
  1362. if (tADL > 3) {
  1363. dev_err(nfc->dev, "unsupported tADL\n");
  1364. return -EINVAL;
  1365. }
  1366. tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
  1367. if (tWHR > 3) {
  1368. dev_err(nfc->dev, "unsupported tWHR\n");
  1369. return -EINVAL;
  1370. }
  1371. tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
  1372. min_clk_period);
  1373. if (tRHW < 0) {
  1374. dev_err(nfc->dev, "unsupported tRHW\n");
  1375. return tRHW;
  1376. }
  1377. if (check_only)
  1378. return 0;
  1379. /*
  1380. * TODO: according to ONFI specs this value only applies for DDR NAND,
  1381. * but Allwinner seems to set this to 0x7. Mimic them for now.
  1382. */
  1383. tCAD = 0x7;
  1384. /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
  1385. chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
  1386. /* Convert min_clk_period from picoseconds to nanoseconds */
  1387. min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
  1388. /*
  1389. * Unlike what is stated in Allwinner datasheet, the clk_rate should
  1390. * be set to (1 / min_clk_period), and not (2 / min_clk_period).
  1391. * This new formula was verified with a scope and validated by
  1392. * Allwinner engineers.
  1393. */
  1394. chip->clk_rate = NSEC_PER_SEC / min_clk_period;
  1395. real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
  1396. /*
  1397. * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
  1398. * output cycle timings shall be used if the host drives tRC less than
  1399. * 30 ns.
  1400. */
  1401. min_clk_period = NSEC_PER_SEC / real_clk_rate;
  1402. chip->timing_ctl = ((min_clk_period * 2) < 30) ?
  1403. NFC_TIMING_CTL_EDO : 0;
  1404. return 0;
  1405. }
  1406. static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
  1407. struct mtd_oob_region *oobregion)
  1408. {
  1409. struct nand_chip *nand = mtd_to_nand(mtd);
  1410. struct nand_ecc_ctrl *ecc = &nand->ecc;
  1411. if (section >= ecc->steps)
  1412. return -ERANGE;
  1413. oobregion->offset = section * (ecc->bytes + 4) + 4;
  1414. oobregion->length = ecc->bytes;
  1415. return 0;
  1416. }
  1417. static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
  1418. struct mtd_oob_region *oobregion)
  1419. {
  1420. struct nand_chip *nand = mtd_to_nand(mtd);
  1421. struct nand_ecc_ctrl *ecc = &nand->ecc;
  1422. if (section > ecc->steps)
  1423. return -ERANGE;
  1424. /*
  1425. * The first 2 bytes are used for BB markers, hence we
  1426. * only have 2 bytes available in the first user data
  1427. * section.
  1428. */
  1429. if (!section && ecc->mode == NAND_ECC_HW) {
  1430. oobregion->offset = 2;
  1431. oobregion->length = 2;
  1432. return 0;
  1433. }
  1434. oobregion->offset = section * (ecc->bytes + 4);
  1435. if (section < ecc->steps)
  1436. oobregion->length = 4;
  1437. else
  1438. oobregion->offset = mtd->oobsize - oobregion->offset;
  1439. return 0;
  1440. }
/* OOB layout: interleaved per-chunk ECC bytes and 4-byte free sections. */
static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
	.ecc = sunxi_nand_ooblayout_ecc,
	.free = sunxi_nand_ooblayout_free,
};
/*
 * Common ECC setup shared by the HW and HW-syndrome ECC modes.
 *
 * Derives (or, with NAND_ECC_MAXIMIZE, maximizes) the ECC chunk size and
 * strength, maps the strength onto one of the controller-supported values
 * in strengths[], computes the per-chunk ECC byte count and verifies it
 * fits in the OOB area. On success a struct sunxi_nand_hw_ecc holding the
 * selected mode index is stored in ecc->priv (released later by
 * sunxi_nand_hw_common_ecc_ctrl_cleanup()).
 *
 * Returns 0 on success, -EINVAL for an unsupported chunk size or an OOB
 * area too small, -ENOTSUPP when the strength cannot be matched, -ENOMEM
 * on allocation failure.
 */
static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
					      struct nand_ecc_ctrl *ecc,
					      struct device_node *np)
{
	/* Strengths supported by the engine; the index is the mode value. */
	static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
	struct sunxi_nand_hw_ecc *data;
	int nsectors;
	int ret;
	int i;

	if (ecc->options & NAND_ECC_MAXIMIZE) {
		int bytes;

		/* Maximize: 1k chunks, spend all spare OOB bytes on ECC. */
		ecc->size = 1024;
		nsectors = mtd->writesize / ecc->size;

		/* Reserve 2 bytes for the BBM */
		bytes = (mtd->oobsize - 2) / nsectors;

		/* 4 non-ECC bytes are added before each ECC bytes section */
		bytes -= 4;

		/* and bytes has to be even. */
		if (bytes % 2)
			bytes--;

		/* Strongest correction the available bytes can encode. */
		ecc->strength = bytes * 8 / fls(8 * ecc->size);

		/* Round down to the nearest supported strength. */
		for (i = 0; i < ARRAY_SIZE(strengths); i++) {
			if (strengths[i] > ecc->strength)
				break;
		}

		if (!i)
			ecc->strength = 0;
		else
			ecc->strength = strengths[i - 1];
	}

	/* The engine only supports 512- and 1024-byte ECC chunks. */
	if (ecc->size != 512 && ecc->size != 1024)
		return -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Prefer 1k ECC chunk over 512 ones */
	if (ecc->size == 512 && mtd->writesize > 512) {
		ecc->size = 1024;
		ecc->strength *= 2;
	}

	/* Add ECC info retrieval from DT */
	for (i = 0; i < ARRAY_SIZE(strengths); i++) {
		if (ecc->strength <= strengths[i])
			break;
	}

	if (i >= ARRAY_SIZE(strengths)) {
		dev_err(nfc->dev, "unsupported strength\n");
		ret = -ENOTSUPP;
		goto err;
	}

	/* i is the value later programmed into the ECC mode field. */
	data->mode = i;

	/* HW ECC always request ECC bytes for 1024 bytes blocks */
	ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);

	/* HW ECC always work with even numbers of ECC bytes */
	ecc->bytes = ALIGN(ecc->bytes, 2);

	nsectors = mtd->writesize / ecc->size;

	/* Each sector needs its ECC bytes plus 4 free bytes in OOB. */
	if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
		ret = -EINVAL;
		goto err;
	}

	ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
	ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
	mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
	ecc->priv = data;

	return 0;

err:
	kfree(data);

	return ret;
}
/* Free the sunxi_nand_hw_ecc data allocated by the common ECC init. */
static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
{
	kfree(ecc->priv);
}
  1521. static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
  1522. struct nand_ecc_ctrl *ecc,
  1523. struct device_node *np)
  1524. {
  1525. struct nand_chip *nand = mtd_to_nand(mtd);
  1526. struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
  1527. struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
  1528. int ret;
  1529. ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
  1530. if (ret)
  1531. return ret;
  1532. if (nfc->dmac) {
  1533. ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
  1534. ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
  1535. ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
  1536. nand->options |= NAND_USE_BOUNCE_BUFFER;
  1537. } else {
  1538. ecc->read_page = sunxi_nfc_hw_ecc_read_page;
  1539. ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
  1540. ecc->write_page = sunxi_nfc_hw_ecc_write_page;
  1541. }
  1542. /* TODO: support DMA for raw accesses and subpage write */
  1543. ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
  1544. ecc->read_oob_raw = nand_read_oob_std;
  1545. ecc->write_oob_raw = nand_write_oob_std;
  1546. ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
  1547. return 0;
  1548. }
  1549. static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
  1550. struct nand_ecc_ctrl *ecc,
  1551. struct device_node *np)
  1552. {
  1553. int ret;
  1554. ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
  1555. if (ret)
  1556. return ret;
  1557. ecc->prepad = 4;
  1558. ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
  1559. ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
  1560. ecc->read_oob_raw = nand_read_oob_syndrome;
  1561. ecc->write_oob_raw = nand_write_oob_syndrome;
  1562. return 0;
  1563. }
  1564. static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
  1565. {
  1566. switch (ecc->mode) {
  1567. case NAND_ECC_HW:
  1568. case NAND_ECC_HW_SYNDROME:
  1569. sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
  1570. break;
  1571. case NAND_ECC_NONE:
  1572. default:
  1573. break;
  1574. }
  1575. }
  1576. static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
  1577. struct device_node *np)
  1578. {
  1579. struct nand_chip *nand = mtd_to_nand(mtd);
  1580. int ret;
  1581. if (!ecc->size) {
  1582. ecc->size = nand->ecc_step_ds;
  1583. ecc->strength = nand->ecc_strength_ds;
  1584. }
  1585. if (!ecc->size || !ecc->strength)
  1586. return -EINVAL;
  1587. switch (ecc->mode) {
  1588. case NAND_ECC_HW:
  1589. ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
  1590. if (ret)
  1591. return ret;
  1592. break;
  1593. case NAND_ECC_HW_SYNDROME:
  1594. ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
  1595. if (ret)
  1596. return ret;
  1597. break;
  1598. case NAND_ECC_NONE:
  1599. case NAND_ECC_SOFT:
  1600. break;
  1601. default:
  1602. return -EINVAL;
  1603. }
  1604. return 0;
  1605. }
  1606. static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
  1607. struct device_node *np)
  1608. {
  1609. struct sunxi_nand_chip *chip;
  1610. struct mtd_info *mtd;
  1611. struct nand_chip *nand;
  1612. int nsels;
  1613. int ret;
  1614. int i;
  1615. u32 tmp;
  1616. if (!of_get_property(np, "reg", &nsels))
  1617. return -EINVAL;
  1618. nsels /= sizeof(u32);
  1619. if (!nsels) {
  1620. dev_err(dev, "invalid reg property size\n");
  1621. return -EINVAL;
  1622. }
  1623. chip = devm_kzalloc(dev,
  1624. sizeof(*chip) +
  1625. (nsels * sizeof(struct sunxi_nand_chip_sel)),
  1626. GFP_KERNEL);
  1627. if (!chip) {
  1628. dev_err(dev, "could not allocate chip\n");
  1629. return -ENOMEM;
  1630. }
  1631. chip->nsels = nsels;
  1632. chip->selected = -1;
  1633. for (i = 0; i < nsels; i++) {
  1634. ret = of_property_read_u32_index(np, "reg", i, &tmp);
  1635. if (ret) {
  1636. dev_err(dev, "could not retrieve reg property: %d\n",
  1637. ret);
  1638. return ret;
  1639. }
  1640. if (tmp > NFC_MAX_CS) {
  1641. dev_err(dev,
  1642. "invalid reg value: %u (max CS = 7)\n",
  1643. tmp);
  1644. return -EINVAL;
  1645. }
  1646. if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
  1647. dev_err(dev, "CS %d already assigned\n", tmp);
  1648. return -EINVAL;
  1649. }
  1650. chip->sels[i].cs = tmp;
  1651. if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
  1652. tmp < 2) {
  1653. chip->sels[i].rb.type = RB_NATIVE;
  1654. chip->sels[i].rb.info.nativeid = tmp;
  1655. } else {
  1656. ret = of_get_named_gpio(np, "rb-gpios", i);
  1657. if (ret >= 0) {
  1658. tmp = ret;
  1659. chip->sels[i].rb.type = RB_GPIO;
  1660. chip->sels[i].rb.info.gpio = tmp;
  1661. ret = devm_gpio_request(dev, tmp, "nand-rb");
  1662. if (ret)
  1663. return ret;
  1664. ret = gpio_direction_input(tmp);
  1665. if (ret)
  1666. return ret;
  1667. } else {
  1668. chip->sels[i].rb.type = RB_NONE;
  1669. }
  1670. }
  1671. }
  1672. nand = &chip->nand;
  1673. /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
  1674. nand->chip_delay = 200;
  1675. nand->controller = &nfc->controller;
  1676. /*
  1677. * Set the ECC mode to the default value in case nothing is specified
  1678. * in the DT.
  1679. */
  1680. nand->ecc.mode = NAND_ECC_HW;
  1681. nand_set_flash_node(nand, np);
  1682. nand->select_chip = sunxi_nfc_select_chip;
  1683. nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
  1684. nand->read_buf = sunxi_nfc_read_buf;
  1685. nand->write_buf = sunxi_nfc_write_buf;
  1686. nand->read_byte = sunxi_nfc_read_byte;
  1687. nand->setup_data_interface = sunxi_nfc_setup_data_interface;
  1688. mtd = nand_to_mtd(nand);
  1689. mtd->dev.parent = dev;
  1690. ret = nand_scan_ident(mtd, nsels, NULL);
  1691. if (ret)
  1692. return ret;
  1693. if (nand->bbt_options & NAND_BBT_USE_FLASH)
  1694. nand->bbt_options |= NAND_BBT_NO_OOB;
  1695. if (nand->options & NAND_NEED_SCRAMBLING)
  1696. nand->options |= NAND_NO_SUBPAGE_WRITE;
  1697. nand->options |= NAND_SUBPAGE_READ;
  1698. ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
  1699. if (ret) {
  1700. dev_err(dev, "ECC init failed: %d\n", ret);
  1701. return ret;
  1702. }
  1703. ret = nand_scan_tail(mtd);
  1704. if (ret) {
  1705. dev_err(dev, "nand_scan_tail failed: %d\n", ret);
  1706. return ret;
  1707. }
  1708. ret = mtd_device_register(mtd, NULL, 0);
  1709. if (ret) {
  1710. dev_err(dev, "failed to register mtd device: %d\n", ret);
  1711. nand_release(mtd);
  1712. return ret;
  1713. }
  1714. list_add_tail(&chip->node, &nfc->chips);
  1715. return 0;
  1716. }
  1717. static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
  1718. {
  1719. struct device_node *np = dev->of_node;
  1720. struct device_node *nand_np;
  1721. int nchips = of_get_child_count(np);
  1722. int ret;
  1723. if (nchips > 8) {
  1724. dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
  1725. return -EINVAL;
  1726. }
  1727. for_each_child_of_node(np, nand_np) {
  1728. ret = sunxi_nand_chip_init(dev, nfc, nand_np);
  1729. if (ret) {
  1730. of_node_put(nand_np);
  1731. return ret;
  1732. }
  1733. }
  1734. return 0;
  1735. }
  1736. static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
  1737. {
  1738. struct sunxi_nand_chip *chip;
  1739. while (!list_empty(&nfc->chips)) {
  1740. chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
  1741. node);
  1742. nand_release(nand_to_mtd(&chip->nand));
  1743. sunxi_nand_ecc_cleanup(&chip->nand.ecc);
  1744. list_del(&chip->node);
  1745. }
  1746. }
/*
 * Probe the NFC: map the register window, acquire the IRQ, the "ahb" and
 * "mod" clocks and the optional "ahb" reset line, reset the controller,
 * optionally set up the "rxtx" DMA channel, then instantiate all child
 * NAND chips.
 *
 * Returns 0 on success or a negative errno; resources acquired so far are
 * released on the error paths via the goto cleanup chain below.
 */
static int sunxi_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	struct sunxi_nfc *nfc;
	int irq;
	int ret;

	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = dev;
	nand_hw_control_init(&nfc->controller);
	INIT_LIST_HEAD(&nfc->chips);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, r);
	if (IS_ERR(nfc->regs))
		return PTR_ERR(nfc->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to retrieve irq\n");
		return irq;
	}

	nfc->ahb_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(nfc->ahb_clk)) {
		dev_err(dev, "failed to retrieve ahb clk\n");
		return PTR_ERR(nfc->ahb_clk);
	}

	ret = clk_prepare_enable(nfc->ahb_clk);
	if (ret)
		return ret;

	nfc->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(nfc->mod_clk)) {
		dev_err(dev, "failed to retrieve mod clk\n");
		ret = PTR_ERR(nfc->mod_clk);
		goto out_ahb_clk_unprepare;
	}

	ret = clk_prepare_enable(nfc->mod_clk);
	if (ret)
		goto out_ahb_clk_unprepare;

	/* The reset line is optional: -ENOENT just means "none wired". */
	nfc->reset = devm_reset_control_get_optional(dev, "ahb");
	if (!IS_ERR(nfc->reset)) {
		ret = reset_control_deassert(nfc->reset);
		if (ret) {
			dev_err(dev, "reset err %d\n", ret);
			goto out_mod_clk_unprepare;
		}
	} else if (PTR_ERR(nfc->reset) != -ENOENT) {
		ret = PTR_ERR(nfc->reset);
		goto out_mod_clk_unprepare;
	}

	/* Reset the controller before touching it; see sunxi_nfc_rst(). */
	ret = sunxi_nfc_rst(nfc);
	if (ret)
		goto out_ahb_reset_reassert;

	/* Zero NFC_REG_INT before installing the IRQ handler. */
	writel(0, nfc->regs + NFC_REG_INT);
	ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
			       0, "sunxi-nand", nfc);
	if (ret)
		goto out_ahb_reset_reassert;

	/* DMA is optional: fall back to PIO when no channel is available. */
	nfc->dmac = dma_request_slave_channel(dev, "rxtx");
	if (nfc->dmac) {
		struct dma_slave_config dmac_cfg = { };

		/* Both directions target the NFC data FIFO register. */
		dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
		dmac_cfg.dst_addr = dmac_cfg.src_addr;
		dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
		dmac_cfg.src_maxburst = 4;
		dmac_cfg.dst_maxburst = 4;
		dmaengine_slave_config(nfc->dmac, &dmac_cfg);
	} else {
		dev_warn(dev, "failed to request rxtx DMA channel\n");
	}

	platform_set_drvdata(pdev, nfc);

	ret = sunxi_nand_chips_init(dev, nfc);
	if (ret) {
		dev_err(dev, "failed to init nand chips\n");
		goto out_release_dmac;
	}

	return 0;

out_release_dmac:
	if (nfc->dmac)
		dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
	if (!IS_ERR(nfc->reset))
		reset_control_assert(nfc->reset);
out_mod_clk_unprepare:
	clk_disable_unprepare(nfc->mod_clk);
out_ahb_clk_unprepare:
	clk_disable_unprepare(nfc->ahb_clk);

	return ret;
}
/*
 * Tear down the NFC in reverse probe order: unregister all chips, assert
 * the (optional) reset line, release the DMA channel and gate both clocks.
 */
static int sunxi_nfc_remove(struct platform_device *pdev)
{
	struct sunxi_nfc *nfc = platform_get_drvdata(pdev);

	sunxi_nand_chips_cleanup(nfc);

	if (!IS_ERR(nfc->reset))
		reset_control_assert(nfc->reset);

	if (nfc->dmac)
		dma_release_channel(nfc->dmac);
	clk_disable_unprepare(nfc->mod_clk);
	clk_disable_unprepare(nfc->ahb_clk);

	return 0;
}
/* DT compatibles handled by this driver. */
static const struct of_device_id sunxi_nfc_ids[] = {
	{ .compatible = "allwinner,sun4i-a10-nand" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);

/* Platform driver glue; module init/exit generated below. */
static struct platform_driver sunxi_nfc_driver = {
	.driver = {
		.name = "sunxi_nand",
		.of_match_table = sunxi_nfc_ids,
	},
	.probe = sunxi_nfc_probe,
	.remove = sunxi_nfc_remove,
};
module_platform_driver(sunxi_nfc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
MODULE_ALIAS("platform:sunxi_nand");