denali.c

  1. /*
  2. * Copyright (C) 2014 Panasonic Corporation
  3. * Copyright (C) 2013-2014, Altera Corporation <www.altera.com>
  4. * Copyright (C) 2009-2010, Intel Corporation and its suppliers.
  5. *
  6. * SPDX-License-Identifier: GPL-2.0+
  7. */
  8. #include <common.h>
  9. #include <malloc.h>
  10. #include <nand.h>
  11. #include <linux/errno.h>
  12. #include <asm/io.h>
  13. #include "denali.h"
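/*
 * NAND_DEFAULT_TIMINGS (-1) keeps the timings detected during chip
 * identification; a value of 0-5 forces that ONFI timing mode via
 * nand_onfi_timing_set() in denali_nand_timing_set().
 */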
  14. #define NAND_DEFAULT_TIMINGS -1
  15. static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
  16. /*
  17. * We define a macro here that combines all interrupts this driver uses into
  18. * a single constant value, for convenience.
  19. */
  20. #define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
  21. INTR_STATUS__ECC_TRANSACTION_DONE | \
  22. INTR_STATUS__ECC_ERR | \
  23. INTR_STATUS__PROGRAM_FAIL | \
  24. INTR_STATUS__LOAD_COMP | \
  25. INTR_STATUS__PROGRAM_COMP | \
  26. INTR_STATUS__TIME_OUT | \
  27. INTR_STATUS__ERASE_FAIL | \
  28. INTR_STATUS__RST_COMP | \
  29. INTR_STATUS__ERASE_COMP | \
  30. INTR_STATUS__ECC_UNCOR_ERR | \
  31. INTR_STATUS__INT_ACT | \
  32. INTR_STATUS__LOCKED_BLK)
  33. /*
34. * indicates whether the internal value for the flash bank
35. * is valid
  36. */
  37. #define CHIP_SELECT_INVALID -1
  38. #define SUPPORT_8BITECC 1
  39. /*
40. * this helper allows us to convert from an MTD structure to our own
  41. * device context (denali) structure.
  42. */
  43. static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
  44. {
  45. return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
  46. }
  47. /*
  48. * These constants are defined by the driver to enable common driver
  49. * configuration options.
  50. */
  51. #define SPARE_ACCESS 0x41
  52. #define MAIN_ACCESS 0x42
  53. #define MAIN_SPARE_ACCESS 0x43
  54. #define PIPELINE_ACCESS 0x2000
  55. #define DENALI_UNLOCK_START 0x10
  56. #define DENALI_UNLOCK_END 0x11
  57. #define DENALI_LOCK 0x21
  58. #define DENALI_LOCK_TIGHT 0x31
  59. #define DENALI_BUFFER_LOAD 0x60
  60. #define DENALI_BUFFER_WRITE 0x62
  61. #define DENALI_READ 0
  62. #define DENALI_WRITE 0x100
  63. /* types of device accesses. We can issue commands and get status */
  64. #define COMMAND_CYCLE 0
  65. #define ADDR_CYCLE 1
  66. #define STATUS_CYCLE 2
  67. /*
  68. * this is a helper macro that allows us to
  69. * format the bank into the proper bits for the controller
  70. */
  71. #define BANK(x) ((x) << 24)
  72. /* Interrupts are cleared by writing a 1 to the appropriate status bit */
  73. static inline void clear_interrupt(struct denali_nand_info *denali,
  74. uint32_t irq_mask)
  75. {
  76. uint32_t intr_status_reg;
  77. intr_status_reg = INTR_STATUS(denali->flash_bank);
  78. writel(irq_mask, denali->flash_reg + intr_status_reg);
  79. }
  80. static uint32_t read_interrupt_status(struct denali_nand_info *denali)
  81. {
  82. uint32_t intr_status_reg;
  83. intr_status_reg = INTR_STATUS(denali->flash_bank);
  84. return readl(denali->flash_reg + intr_status_reg);
  85. }
  86. static void clear_interrupts(struct denali_nand_info *denali)
  87. {
  88. uint32_t status;
  89. status = read_interrupt_status(denali);
  90. clear_interrupt(denali, status);
  91. denali->irq_status = 0;
  92. }
  93. static void denali_irq_enable(struct denali_nand_info *denali,
  94. uint32_t int_mask)
  95. {
  96. int i;
  97. for (i = 0; i < denali->max_banks; ++i)
  98. writel(int_mask, denali->flash_reg + INTR_EN(i));
  99. }
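/*
 * Poll the interrupt status register for the requested bits. U-Boot runs
 * without interrupt handlers, so this busy-waits (1,000,000 x 1 us, roughly
 * one second) before reporting a timeout.
 */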
  100. static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
  101. {
  102. unsigned long timeout = 1000000;
  103. uint32_t intr_status;
  104. do {
  105. intr_status = read_interrupt_status(denali) & DENALI_IRQ_ALL;
  106. if (intr_status & irq_mask) {
  107. denali->irq_status &= ~irq_mask;
  108. /* our interrupt was detected */
  109. break;
  110. }
  111. udelay(1);
  112. timeout--;
  113. } while (timeout != 0);
  114. if (timeout == 0) {
  115. /* timeout */
  116. printf("Denali timeout with interrupt status %08x\n",
  117. read_interrupt_status(denali));
  118. intr_status = 0;
  119. }
  120. return intr_status;
  121. }
  122. /*
  123. * Certain operations for the denali NAND controller use an indexed mode to
  124. * read/write data. The operation is performed by writing the address value
  125. * of the command to the device memory followed by the data. This function
  126. * abstracts this common operation.
  127. */
  128. static void index_addr(struct denali_nand_info *denali,
  129. uint32_t address, uint32_t data)
  130. {
  131. writel(address, denali->flash_mem + INDEX_CTRL_REG);
  132. writel(data, denali->flash_mem + INDEX_DATA_REG);
  133. }
  134. /* Perform an indexed read of the device */
  135. static void index_addr_read_data(struct denali_nand_info *denali,
  136. uint32_t address, uint32_t *pdata)
  137. {
  138. writel(address, denali->flash_mem + INDEX_CTRL_REG);
  139. *pdata = readl(denali->flash_mem + INDEX_DATA_REG);
  140. }
  141. /*
  142. * We need to buffer some data for some of the NAND core routines.
143. * These helpers manage that buffering.
  144. */
  145. static void reset_buf(struct denali_nand_info *denali)
  146. {
  147. denali->buf.head = 0;
  148. denali->buf.tail = 0;
  149. }
  150. static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
  151. {
  152. denali->buf.buf[denali->buf.tail++] = byte;
  153. }
  154. /* resets a specific device connected to the core */
  155. static void reset_bank(struct denali_nand_info *denali)
  156. {
  157. uint32_t irq_status;
  158. uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT;
  159. clear_interrupts(denali);
  160. writel(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
  161. irq_status = wait_for_irq(denali, irq_mask);
  162. if (irq_status & INTR_STATUS__TIME_OUT)
  163. debug("reset bank failed.\n");
  164. }
  165. /* Reset the flash controller */
  166. static uint32_t denali_nand_reset(struct denali_nand_info *denali)
  167. {
  168. int i;
  169. for (i = 0; i < denali->max_banks; i++)
  170. writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
  171. denali->flash_reg + INTR_STATUS(i));
  172. for (i = 0; i < denali->max_banks; i++) {
  173. writel(1 << i, denali->flash_reg + DEVICE_RESET);
  174. while (!(readl(denali->flash_reg + INTR_STATUS(i)) &
  175. (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
  176. if (readl(denali->flash_reg + INTR_STATUS(i)) &
  177. INTR_STATUS__TIME_OUT)
  178. debug("NAND Reset operation timed out on bank"
  179. " %d\n", i);
  180. }
  181. for (i = 0; i < denali->max_banks; i++)
  182. writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
  183. denali->flash_reg + INTR_STATUS(i));
  184. return 0;
  185. }
  186. /*
  187. * this routine calculates the ONFI timing values for a given mode and
  188. * programs the clocking register accordingly. The mode is determined by
  189. * the get_onfi_nand_para routine.
  190. */
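/*
 * The t* tables below hold ONFI timing parameters in nanoseconds, indexed by
 * timing mode 0-5; they are converted into controller clock counts via CLK_X.
 */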
  191. static void nand_onfi_timing_set(struct denali_nand_info *denali,
  192. uint16_t mode)
  193. {
  194. uint32_t trea[6] = {40, 30, 25, 20, 20, 16};
  195. uint32_t trp[6] = {50, 25, 17, 15, 12, 10};
  196. uint32_t treh[6] = {30, 15, 15, 10, 10, 7};
  197. uint32_t trc[6] = {100, 50, 35, 30, 25, 20};
  198. uint32_t trhoh[6] = {0, 15, 15, 15, 15, 15};
  199. uint32_t trloh[6] = {0, 0, 0, 0, 5, 5};
  200. uint32_t tcea[6] = {100, 45, 30, 25, 25, 25};
  201. uint32_t tadl[6] = {200, 100, 100, 100, 70, 70};
  202. uint32_t trhw[6] = {200, 100, 100, 100, 100, 100};
  203. uint32_t trhz[6] = {200, 100, 100, 100, 100, 100};
  204. uint32_t twhr[6] = {120, 80, 80, 60, 60, 60};
  205. uint32_t tcs[6] = {70, 35, 25, 25, 20, 15};
  206. uint32_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
  207. uint32_t dv_window = 0;
  208. uint32_t en_lo, en_hi;
  209. uint32_t acc_clks;
  210. uint32_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
  211. en_lo = DIV_ROUND_UP(trp[mode], CLK_X);
  212. en_hi = DIV_ROUND_UP(treh[mode], CLK_X);
  213. if ((en_hi * CLK_X) < (treh[mode] + 2))
  214. en_hi++;
  215. if ((en_lo + en_hi) * CLK_X < trc[mode])
  216. en_lo += DIV_ROUND_UP((trc[mode] - (en_lo + en_hi) * CLK_X),
  217. CLK_X);
  218. if ((en_lo + en_hi) < CLK_MULTI)
  219. en_lo += CLK_MULTI - en_lo - en_hi;
  220. while (dv_window < 8) {
  221. data_invalid_rhoh = en_lo * CLK_X + trhoh[mode];
  222. data_invalid_rloh = (en_lo + en_hi) * CLK_X + trloh[mode];
  223. data_invalid = data_invalid_rhoh < data_invalid_rloh ?
  224. data_invalid_rhoh : data_invalid_rloh;
  225. dv_window = data_invalid - trea[mode];
  226. if (dv_window < 8)
  227. en_lo++;
  228. }
  229. acc_clks = DIV_ROUND_UP(trea[mode], CLK_X);
  230. while (acc_clks * CLK_X - trea[mode] < 3)
  231. acc_clks++;
  232. if (data_invalid - acc_clks * CLK_X < 2)
  233. debug("%s, Line %d: Warning!\n", __FILE__, __LINE__);
  234. addr_2_data = DIV_ROUND_UP(tadl[mode], CLK_X);
  235. re_2_we = DIV_ROUND_UP(trhw[mode], CLK_X);
  236. re_2_re = DIV_ROUND_UP(trhz[mode], CLK_X);
  237. we_2_re = DIV_ROUND_UP(twhr[mode], CLK_X);
  238. cs_cnt = DIV_ROUND_UP((tcs[mode] - trp[mode]), CLK_X);
  239. if (cs_cnt == 0)
  240. cs_cnt = 1;
  241. if (tcea[mode]) {
  242. while (cs_cnt * CLK_X + trea[mode] < tcea[mode])
  243. cs_cnt++;
  244. }
  245. /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
  246. if (readl(denali->flash_reg + MANUFACTURER_ID) == 0 &&
  247. readl(denali->flash_reg + DEVICE_ID) == 0x88)
  248. acc_clks = 6;
  249. writel(acc_clks, denali->flash_reg + ACC_CLKS);
  250. writel(re_2_we, denali->flash_reg + RE_2_WE);
  251. writel(re_2_re, denali->flash_reg + RE_2_RE);
  252. writel(we_2_re, denali->flash_reg + WE_2_RE);
  253. writel(addr_2_data, denali->flash_reg + ADDR_2_DATA);
  254. writel(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
  255. writel(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
  256. writel(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
  257. }
  258. /* queries the NAND device to see what ONFI modes it supports. */
  259. static uint32_t get_onfi_nand_para(struct denali_nand_info *denali)
  260. {
  261. int i;
  262. /*
263. * we need not reset here because the driver has already
264. * reset all the banks
  265. */
  266. if (!(readl(denali->flash_reg + ONFI_TIMING_MODE) &
  267. ONFI_TIMING_MODE__VALUE))
  268. return -EIO;
  269. for (i = 5; i > 0; i--) {
  270. if (readl(denali->flash_reg + ONFI_TIMING_MODE) &
  271. (0x01 << i))
  272. break;
  273. }
  274. nand_onfi_timing_set(denali, i);
  275. /*
  276. * By now, all the ONFI devices we know support the page cache
  277. * rw feature. So here we enable the pipeline_rw_ahead feature
  278. */
  279. return 0;
  280. }
  281. static void get_samsung_nand_para(struct denali_nand_info *denali,
  282. uint8_t device_id)
  283. {
  284. if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
  285. /* Set timing register values according to datasheet */
  286. writel(5, denali->flash_reg + ACC_CLKS);
  287. writel(20, denali->flash_reg + RE_2_WE);
  288. writel(12, denali->flash_reg + WE_2_RE);
  289. writel(14, denali->flash_reg + ADDR_2_DATA);
  290. writel(3, denali->flash_reg + RDWR_EN_LO_CNT);
  291. writel(2, denali->flash_reg + RDWR_EN_HI_CNT);
  292. writel(2, denali->flash_reg + CS_SETUP_CNT);
  293. }
  294. }
  295. static void get_toshiba_nand_para(struct denali_nand_info *denali)
  296. {
  297. uint32_t tmp;
  298. /*
  299. * Workaround to fix a controller bug which reports a wrong
300. * spare area size for some kinds of Toshiba NAND devices
  301. */
  302. if ((readl(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
  303. (readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
  304. writel(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
  305. tmp = readl(denali->flash_reg + DEVICES_CONNECTED) *
  306. readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
  307. writel(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
  308. }
  309. }
  310. static void get_hynix_nand_para(struct denali_nand_info *denali,
  311. uint8_t device_id)
  312. {
  313. uint32_t main_size, spare_size;
  314. switch (device_id) {
  315. case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
  316. case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
  317. writel(128, denali->flash_reg + PAGES_PER_BLOCK);
  318. writel(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
  319. writel(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
  320. main_size = 4096 *
  321. readl(denali->flash_reg + DEVICES_CONNECTED);
  322. spare_size = 224 *
  323. readl(denali->flash_reg + DEVICES_CONNECTED);
  324. writel(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
  325. writel(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
  326. writel(0, denali->flash_reg + DEVICE_WIDTH);
  327. break;
  328. default:
  329. debug("Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
  330. "Will use default parameter values instead.\n",
  331. device_id);
  332. }
  333. }
  334. /*
  335. * determines how many NAND chips are connected to the controller. Note for
  336. * Intel CE4100 devices we don't support more than one device.
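 * Each bank is probed with a READ ID (0x90) sequence through the indexed
 * interface; a bank is counted as used when its first ID byte matches bank 0.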
  337. */
  338. static void find_valid_banks(struct denali_nand_info *denali)
  339. {
  340. uint32_t id[denali->max_banks];
  341. int i;
  342. denali->total_used_banks = 1;
  343. for (i = 0; i < denali->max_banks; i++) {
  344. index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
  345. index_addr(denali, MODE_11 | (i << 24) | 1, 0);
  346. index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);
  347. if (i == 0) {
  348. if (!(id[i] & 0x0ff))
  349. break;
  350. } else {
  351. if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
  352. denali->total_used_banks++;
  353. else
  354. break;
  355. }
  356. }
  357. }
  358. /*
  359. * Use the configuration feature register to determine the maximum number of
  360. * banks that the hardware supports.
  361. */
  362. static void detect_max_banks(struct denali_nand_info *denali)
  363. {
  364. uint32_t features = readl(denali->flash_reg + FEATURES);
  365. /*
  366. * Read the revision register, so we can calculate the max_banks
  367. * properly: the encoding changed from rev 5.0 to 5.1
  368. */
  369. u32 revision = MAKE_COMPARABLE_REVISION(
  370. readl(denali->flash_reg + REVISION));
  371. if (revision < REVISION_5_1)
  372. denali->max_banks = 2 << (features & FEATURES__N_BANKS);
  373. else
  374. denali->max_banks = 1 << (features & FEATURES__N_BANKS);
  375. }
  376. static void detect_partition_feature(struct denali_nand_info *denali)
  377. {
  378. /*
379. * On the MRST platform, denali->fwblks represents the
380. * number of blocks occupied by firmware.
381. * The firmware lives in a protected partition that the MTD
382. * driver has no permission to access, so tell the driver how
383. * many blocks it must not touch.
  384. */
  385. if (readl(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
  386. if ((readl(denali->flash_reg + PERM_SRC_ID(1)) &
  387. PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
  388. denali->fwblks =
  389. ((readl(denali->flash_reg + MIN_MAX_BANK(1)) &
  390. MIN_MAX_BANK__MIN_VALUE) *
  391. denali->blksperchip)
  392. +
  393. (readl(denali->flash_reg + MIN_BLK_ADDR(1)) &
  394. MIN_BLK_ADDR__VALUE);
  395. } else {
  396. denali->fwblks = SPECTRA_START_BLOCK;
  397. }
  398. } else {
  399. denali->fwblks = SPECTRA_START_BLOCK;
  400. }
  401. }
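/*
 * Identify the chip with READ ID and program ONFI or vendor-specific timing
 * registers accordingly.
 */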
  402. static uint32_t denali_nand_timing_set(struct denali_nand_info *denali)
  403. {
  404. uint32_t id_bytes[8], addr;
  405. uint8_t maf_id, device_id;
  406. int i;
  407. /*
408. * Use the READ ID command to get the device ID and other parameters.
409. * For some NAND chips, the controller cannot report the correct
410. * device ID by reading the DEVICE_ID register
  411. */
  412. addr = MODE_11 | BANK(denali->flash_bank);
  413. index_addr(denali, addr | 0, 0x90);
  414. index_addr(denali, addr | 1, 0);
  415. for (i = 0; i < 8; i++)
  416. index_addr_read_data(denali, addr | 2, &id_bytes[i]);
  417. maf_id = id_bytes[0];
  418. device_id = id_bytes[1];
  419. if (readl(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
  420. ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
  421. if (get_onfi_nand_para(denali))
  422. return -EIO;
  423. } else if (maf_id == 0xEC) { /* Samsung NAND */
  424. get_samsung_nand_para(denali, device_id);
  425. } else if (maf_id == 0x98) { /* Toshiba NAND */
  426. get_toshiba_nand_para(denali);
  427. } else if (maf_id == 0xAD) { /* Hynix NAND */
  428. get_hynix_nand_para(denali, device_id);
  429. }
  430. find_valid_banks(denali);
  431. detect_partition_feature(denali);
  432. /*
  433. * If the user specified to override the default timings
  434. * with a specific ONFI mode, we apply those changes here.
  435. */
  436. if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
  437. nand_onfi_timing_set(denali, onfi_timing_mode);
  438. return 0;
  439. }
  440. /*
  441. * validation function to verify that the controlling software is making
  442. * a valid request
  443. */
  444. static inline bool is_flash_bank_valid(int flash_bank)
  445. {
  446. return flash_bank >= 0 && flash_bank < 4;
  447. }
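/*
 * Disable the global interrupt output, clear any stale status bits and
 * enable the interrupt sources this driver polls for.
 */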
  448. static void denali_irq_init(struct denali_nand_info *denali)
  449. {
  450. uint32_t int_mask;
  451. int i;
  452. /* Disable global interrupts */
  453. writel(0, denali->flash_reg + GLOBAL_INT_ENABLE);
  454. int_mask = DENALI_IRQ_ALL;
  455. /* Clear all status bits */
  456. for (i = 0; i < denali->max_banks; ++i)
  457. writel(0xFFFF, denali->flash_reg + INTR_STATUS(i));
  458. denali_irq_enable(denali, int_mask);
  459. }
  460. /*
461. * This helper function sets up the registers for ECC and for whether
462. * the spare area will be transferred.
  463. */
  464. static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
  465. bool transfer_spare)
  466. {
  467. int ecc_en_flag, transfer_spare_flag;
  468. /* set ECC, transfer spare bits if needed */
  469. ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
  470. transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
  471. /* Enable spare area/ECC per user's request. */
  472. writel(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
  473. /* applicable for MAP01 only */
  474. writel(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
  475. }
  476. /*
  477. * sends a pipeline command operation to the controller. See the Denali NAND
  478. * controller's user guide for more information (section 4.2.3.6).
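 * The access type and the pipeline command are issued as MODE_10 indexed
 * writes; the final MODE_01 write selects the data port used for the
 * following data transfer.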
  479. */
  480. static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
  481. bool ecc_en, bool transfer_spare,
  482. int access_type, int op)
  483. {
  484. uint32_t addr, cmd, irq_status;
  485. static uint32_t page_count = 1;
  486. setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
  487. clear_interrupts(denali);
  488. addr = BANK(denali->flash_bank) | denali->page;
489. /* setup the access type */
  490. cmd = MODE_10 | addr;
  491. index_addr(denali, cmd, access_type);
  492. /* setup the pipeline command */
  493. index_addr(denali, cmd, 0x2000 | op | page_count);
  494. cmd = MODE_01 | addr;
  495. writel(cmd, denali->flash_mem + INDEX_CTRL_REG);
  496. if (op == DENALI_READ) {
  497. /* wait for command to be accepted */
  498. irq_status = wait_for_irq(denali, INTR_STATUS__LOAD_COMP);
  499. if (irq_status == 0)
  500. return -EIO;
  501. }
  502. return 0;
  503. }
  504. /* helper function that simply writes a buffer to the flash */
  505. static int write_data_to_flash_mem(struct denali_nand_info *denali,
  506. const uint8_t *buf, int len)
  507. {
  508. uint32_t *buf32;
  509. int i;
  510. /*
  511. * verify that the len is a multiple of 4.
  512. * see comment in read_data_from_flash_mem()
  513. */
  514. BUG_ON((len % 4) != 0);
  515. /* write the data to the flash memory */
  516. buf32 = (uint32_t *)buf;
  517. for (i = 0; i < len / 4; i++)
  518. writel(*buf32++, denali->flash_mem + INDEX_DATA_REG);
519. return i * 4; /* intent is to return the number of bytes written */
  520. }
  521. /* helper function that simply reads a buffer from the flash */
  522. static int read_data_from_flash_mem(struct denali_nand_info *denali,
  523. uint8_t *buf, int len)
  524. {
  525. uint32_t *buf32;
  526. int i;
  527. /*
528. * we assume that len is a multiple of 4; if not, it is better to
529. * catch it here than to chase random failures later.
530. * The assumption holds because this function is designed to read
531. * flash pages, whose sizes are multiples of 4 bytes.
  532. */
  533. BUG_ON((len % 4) != 0);
  534. /* transfer the data from the flash */
  535. buf32 = (uint32_t *)buf;
  536. for (i = 0; i < len / 4; i++)
  537. *buf32++ = readl(denali->flash_mem + INDEX_DATA_REG);
  538. return i * 4; /* intent is to return the number of bytes read */
  539. }
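/* select main-area-only transfers for the current bank and page */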
  540. static void denali_mode_main_access(struct denali_nand_info *denali)
  541. {
  542. uint32_t addr, cmd;
  543. addr = BANK(denali->flash_bank) | denali->page;
  544. cmd = MODE_10 | addr;
  545. index_addr(denali, cmd, MAIN_ACCESS);
  546. }
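/* select main + spare area transfers for the current bank and page */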
  547. static void denali_mode_main_spare_access(struct denali_nand_info *denali)
  548. {
  549. uint32_t addr, cmd;
  550. addr = BANK(denali->flash_bank) | denali->page;
  551. cmd = MODE_10 | addr;
  552. index_addr(denali, cmd, MAIN_SPARE_ACCESS);
  553. }
  554. /* writes OOB data to the device */
  555. static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
  556. {
  557. struct denali_nand_info *denali = mtd_to_denali(mtd);
  558. uint32_t irq_status;
  559. uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
  560. INTR_STATUS__PROGRAM_FAIL;
  561. int status = 0;
  562. denali->page = page;
  563. if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
  564. DENALI_WRITE) == 0) {
  565. write_data_to_flash_mem(denali, buf, mtd->oobsize);
  566. /* wait for operation to complete */
  567. irq_status = wait_for_irq(denali, irq_mask);
  568. if (irq_status == 0) {
  569. dev_err(denali->dev, "OOB write failed\n");
  570. status = -EIO;
  571. }
  572. } else {
  573. printf("unable to send pipeline command\n");
  574. status = -EIO;
  575. }
  576. return status;
  577. }
  578. /* reads OOB data from the device */
  579. static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
  580. {
  581. struct denali_nand_info *denali = mtd_to_denali(mtd);
  582. uint32_t irq_mask = INTR_STATUS__LOAD_COMP;
  583. uint32_t irq_status, addr, cmd;
  584. denali->page = page;
  585. if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
  586. DENALI_READ) == 0) {
  587. read_data_from_flash_mem(denali, buf, mtd->oobsize);
  588. /*
589. * wait for the command to be accepted;
590. * we can always use the status0 bit as the
591. * mask is identical for each bank.
  592. */
  593. irq_status = wait_for_irq(denali, irq_mask);
  594. if (irq_status == 0)
  595. printf("page on OOB timeout %d\n", denali->page);
  596. /*
  597. * We set the device back to MAIN_ACCESS here as I observed
  598. * instability with the controller if you do a block erase
  599. * and the last transaction was a SPARE_ACCESS. Block erase
  600. * is reliable (according to the MTD test infrastructure)
  601. * if you are in MAIN_ACCESS.
  602. */
  603. addr = BANK(denali->flash_bank) | denali->page;
  604. cmd = MODE_10 | addr;
  605. index_addr(denali, cmd, MAIN_ACCESS);
  606. }
  607. }
  608. /*
609. * this function examines a buffer to see if it contains only 0xFF,
610. * which indicates that it is part of an erased region of flash.
  611. */
  612. static bool is_erased(uint8_t *buf, int len)
  613. {
  614. int i;
  615. for (i = 0; i < len; i++)
  616. if (buf[i] != 0xFF)
  617. return false;
  618. return true;
  619. }
620. /* programs the controller to enable or disable DMA transfers */
  621. static void denali_enable_dma(struct denali_nand_info *denali, bool en)
  622. {
  623. writel(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
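/* read back to make sure the enable/disable write has taken effect */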
  624. readl(denali->flash_reg + DMA_ENABLE);
  625. }
626. /* sets up the HW to perform the data DMA */
  627. static void denali_setup_dma(struct denali_nand_info *denali, int op)
  628. {
  629. uint32_t mode;
  630. const int page_count = 1;
  631. uint64_t addr = (unsigned long)denali->buf.dma_buf;
  632. flush_dcache_range(addr, addr + sizeof(denali->buf.dma_buf));
  633. /* For Denali controller that is 64 bit bus IP core */
  634. #ifdef CONFIG_SYS_NAND_DENALI_64BIT
  635. mode = MODE_10 | BANK(denali->flash_bank) | denali->page;
  636. /* DMA is a three step process */
  637. /* 1. setup transfer type, interrupt when complete,
  638. burst len = 64 bytes, the number of pages */
  639. index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);
  640. /* 2. set memory low address bits 31:0 */
  641. index_addr(denali, mode, addr);
643. /* 3. set memory high address bits 63:32 */
  643. index_addr(denali, mode, addr >> 32);
  644. #else
  645. mode = MODE_10 | BANK(denali->flash_bank);
  646. /* DMA is a four step process */
  647. /* 1. setup transfer type and # of pages */
  648. index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
  649. /* 2. set memory high address bits 23:8 */
  650. index_addr(denali, mode | (((addr >> 16) & 0xffff) << 8), 0x2200);
  651. /* 3. set memory low address bits 23:8 */
  652. index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
  653. /* 4. interrupt when complete, burst len = 64 bytes */
  654. index_addr(denali, mode | 0x14000, 0x2400);
  655. #endif
  656. }
  657. /* Common DMA function */
  658. static uint32_t denali_dma_configuration(struct denali_nand_info *denali,
  659. uint32_t ops, bool raw_xfer,
  660. uint32_t irq_mask, int oob_required)
  661. {
  662. uint32_t irq_status = 0;
  663. /* setup_ecc_for_xfer(bool ecc_en, bool transfer_spare) */
  664. setup_ecc_for_xfer(denali, !raw_xfer, oob_required);
  665. /* clear any previous interrupt flags */
  666. clear_interrupts(denali);
  667. /* enable the DMA */
  668. denali_enable_dma(denali, true);
  669. /* setup the DMA */
  670. denali_setup_dma(denali, ops);
  671. /* wait for operation to complete */
  672. irq_status = wait_for_irq(denali, irq_mask);
673. /* if an ECC fault happens, a delay seems to be needed before turning
674. * off DMA; otherwise the controller goes into a non-responsive state */
  675. if (irq_status & INTR_STATUS__ECC_UNCOR_ERR)
  676. udelay(100);
  677. /* disable the DMA */
  678. denali_enable_dma(denali, false);
  679. return irq_status;
  680. }
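/*
 * Common page-program path: copy the payload (plus OOB for raw writes) into
 * the DMA buffer, run the DMA write and map failures to NAND_STATUS_FAIL.
 */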
  681. static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
  682. const uint8_t *buf, bool raw_xfer, int oob_required)
  683. {
  684. struct denali_nand_info *denali = mtd_to_denali(mtd);
  685. uint32_t irq_status = 0;
  686. uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
  687. denali->status = 0;
  688. /* copy buffer into DMA buffer */
  689. memcpy(denali->buf.dma_buf, buf, mtd->writesize);
  690. /* need extra memcpy for raw transfer */
  691. if (raw_xfer)
  692. memcpy(denali->buf.dma_buf + mtd->writesize,
  693. chip->oob_poi, mtd->oobsize);
  694. /* setting up DMA */
  695. irq_status = denali_dma_configuration(denali, DENALI_WRITE, raw_xfer,
  696. irq_mask, oob_required);
697. /* if a timeout happens, error out */
  698. if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) {
  699. debug("DMA timeout for denali write_page\n");
  700. denali->status = NAND_STATUS_FAIL;
  701. return -EIO;
  702. }
  703. if (irq_status & INTR_STATUS__LOCKED_BLK) {
  704. debug("Failed as write to locked block\n");
  705. denali->status = NAND_STATUS_FAIL;
  706. return -EIO;
  707. }
  708. return 0;
  709. }
  710. /* NAND core entry points */
  711. /*
  712. * this is the callback that the NAND core calls to write a page. Since
  713. * writing a page with ECC or without is similar, all the work is done
  714. * by write_page above.
  715. */
  716. static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  717. const uint8_t *buf, int oob_required, int page)
  718. {
  719. struct denali_nand_info *denali = mtd_to_denali(mtd);
  720. /*
  721. * for regular page writes, we let HW handle all the ECC
  722. * data written to the device.
  723. */
  724. if (oob_required)
  725. /* switch to main + spare access */
  726. denali_mode_main_spare_access(denali);
  727. else
  728. /* switch to main access only */
  729. denali_mode_main_access(denali);
  730. return write_page(mtd, chip, buf, false, oob_required);
  731. }
  732. /*
  733. * This is the callback that the NAND core calls to write a page without ECC.
  734. * raw access is similar to ECC page writes, so all the work is done in the
  735. * write_page() function above.
  736. */
  737. static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  738. const uint8_t *buf, int oob_required,
  739. int page)
  740. {
  741. struct denali_nand_info *denali = mtd_to_denali(mtd);
  742. /*
  743. * for raw page writes, we want to disable ECC and simply write
  744. * whatever data is in the buffer.
  745. */
  746. if (oob_required)
  747. /* switch to main + spare access */
  748. denali_mode_main_spare_access(denali);
  749. else
  750. /* switch to main access only */
  751. denali_mode_main_access(denali);
  752. return write_page(mtd, chip, buf, true, oob_required);
  753. }
  754. static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
  755. int page)
  756. {
  757. return write_oob_data(mtd, chip->oob_poi, page);
  758. }
759. /* raw reads include the ECC bytes and the entire spare area */
  760. static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  761. uint8_t *buf, int oob_required, int page)
  762. {
  763. struct denali_nand_info *denali = mtd_to_denali(mtd);
  764. uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP;
  765. if (denali->page != page) {
  766. debug("Missing NAND_CMD_READ0 command\n");
  767. return -EIO;
  768. }
  769. if (oob_required)
  770. /* switch to main + spare access */
  771. denali_mode_main_spare_access(denali);
  772. else
  773. /* switch to main access only */
  774. denali_mode_main_access(denali);
  775. /* setting up the DMA where ecc_enable is false */
  776. irq_status = denali_dma_configuration(denali, DENALI_READ, true,
  777. irq_mask, oob_required);
778. /* if a timeout happens, error out */
  779. if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) {
  780. debug("DMA timeout for denali_read_page_raw\n");
  781. return -EIO;
  782. }
783. /* split the DMA buffer contents into the data and OOB destination buffers */
  784. memcpy(chip->oob_poi, (denali->buf.dma_buf + mtd->writesize),
  785. mtd->oobsize);
  786. memcpy(buf, denali->buf.dma_buf, mtd->writesize);
  787. return 0;
  788. }
  789. static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
  790. uint8_t *buf, int oob_required, int page)
  791. {
  792. struct denali_nand_info *denali = mtd_to_denali(mtd);
  793. uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP;
  794. if (denali->page != page) {
  795. debug("Missing NAND_CMD_READ0 command\n");
  796. return -EIO;
  797. }
  798. if (oob_required)
  799. /* switch to main + spare access */
  800. denali_mode_main_spare_access(denali);
  801. else
  802. /* switch to main access only */
  803. denali_mode_main_access(denali);
  804. /* setting up the DMA where ecc_enable is true */
  805. irq_status = denali_dma_configuration(denali, DENALI_READ, false,
  806. irq_mask, oob_required);
  807. memcpy(buf, denali->buf.dma_buf, mtd->writesize);
808. /* check for any ECC error */
  809. if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) {
810. /* is the ECC error caused by an erased page? check using read_page_raw */
  811. debug(" Uncorrected ECC detected\n");
  812. denali_read_page_raw(mtd, chip, buf, oob_required,
  813. denali->page);
  814. if (is_erased(buf, mtd->writesize) == true &&
  815. is_erased(chip->oob_poi, mtd->oobsize) == true) {
  816. debug(" ECC error cause by erased block\n");
  817. /* false alarm, return the 0xFF */
  818. } else {
  819. return -EBADMSG;
  820. }
  821. }
  822. memcpy(buf, denali->buf.dma_buf, mtd->writesize);
  823. return 0;
  824. }
  825. static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
  826. int page)
  827. {
  828. read_oob_data(mtd, chip->oob_poi, page);
  829. return 0;
  830. }
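/* read a single byte from the device through a MODE_11 data cycle */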
  831. static uint8_t denali_read_byte(struct mtd_info *mtd)
  832. {
  833. struct denali_nand_info *denali = mtd_to_denali(mtd);
  834. uint32_t addr, result;
  835. addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
  836. index_addr_read_data(denali, addr | 2, &result);
  837. return (uint8_t)result & 0xFF;
  838. }
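/*
 * read len bytes through repeated MODE_11 data cycles, buffering them
 * before copying to the caller
 */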
  839. static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  840. {
  841. struct denali_nand_info *denali = mtd_to_denali(mtd);
  842. uint32_t i, addr, result;
  843. /* delay for tR (data transfer from Flash array to data register) */
  844. udelay(25);
845. /* ensure the device has completed; otherwise more delay and polling is needed */
  846. wait_for_irq(denali, INTR_STATUS__INT_ACT);
  847. addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
  848. for (i = 0; i < len; i++) {
  849. index_addr_read_data(denali, (uint32_t)addr | 2, &result);
  850. write_byte_to_buf(denali, result);
  851. }
  852. memcpy(buf, denali->buf.buf, len);
  853. }
  854. static void denali_select_chip(struct mtd_info *mtd, int chip)
  855. {
  856. struct denali_nand_info *denali = mtd_to_denali(mtd);
  857. denali->flash_bank = chip;
  858. }
  859. static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
  860. {
  861. struct denali_nand_info *denali = mtd_to_denali(mtd);
  862. int status = denali->status;
  863. denali->status = 0;
  864. return status;
  865. }
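/*
 * erase the block containing 'page': issue the MODE_10 erase op (0x1) and
 * wait for ERASE_COMP or ERASE_FAIL
 */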
  866. static int denali_erase(struct mtd_info *mtd, int page)
  867. {
  868. struct denali_nand_info *denali = mtd_to_denali(mtd);
  869. uint32_t cmd, irq_status;
  870. clear_interrupts(denali);
871. /* issue the erase operation for this bank and page */
  872. cmd = MODE_10 | BANK(denali->flash_bank) | page;
  873. index_addr(denali, cmd, 0x1);
  874. /* wait for erase to complete or failure to occur */
  875. irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
  876. INTR_STATUS__ERASE_FAIL);
  877. if (irq_status & INTR_STATUS__ERASE_FAIL ||
  878. irq_status & INTR_STATUS__LOCKED_BLK)
  879. return NAND_STATUS_FAIL;
  880. return 0;
  881. }
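/* translate NAND core commands into indexed MODE_10/MODE_11 register accesses */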
  882. static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
  883. int page)
  884. {
  885. struct denali_nand_info *denali = mtd_to_denali(mtd);
  886. uint32_t addr;
  887. switch (cmd) {
  888. case NAND_CMD_PAGEPROG:
  889. break;
  890. case NAND_CMD_STATUS:
  891. addr = MODE_11 | BANK(denali->flash_bank);
  892. index_addr(denali, addr | 0, cmd);
  893. break;
  894. case NAND_CMD_READID:
  895. case NAND_CMD_PARAM:
  896. reset_buf(denali);
  897. /*
898. * sometimes the manufacturer ID read from the register is not right,
899. * e.g. on some Micron MT29F32G08QAA MLC NAND chips,
900. * so here we send the READID cmd to the NAND instead
  901. */
  902. addr = MODE_11 | BANK(denali->flash_bank);
  903. index_addr(denali, addr | 0, cmd);
  904. index_addr(denali, addr | 1, col & 0xFF);
  905. if (cmd == NAND_CMD_PARAM)
  906. udelay(50);
  907. break;
  908. case NAND_CMD_RNDOUT:
  909. addr = MODE_11 | BANK(denali->flash_bank);
  910. index_addr(denali, addr | 0, cmd);
  911. index_addr(denali, addr | 1, col & 0xFF);
  912. index_addr(denali, addr | 1, col >> 8);
  913. index_addr(denali, addr | 0, NAND_CMD_RNDOUTSTART);
  914. break;
  915. case NAND_CMD_READ0:
  916. case NAND_CMD_SEQIN:
  917. denali->page = page;
  918. break;
  919. case NAND_CMD_RESET:
  920. reset_bank(denali);
  921. break;
  922. case NAND_CMD_READOOB:
  923. /* TODO: Read OOB data */
  924. break;
  925. case NAND_CMD_ERASE1:
  926. /*
927. * only single-block erase is supported, not multi-block erase,
928. * as that would cross planes and the software would need complex
929. * calculations to identify the block count per plane
  930. */
  931. denali_erase(mtd, page);
  932. break;
  933. case NAND_CMD_ERASE2:
  934. /* nothing to do here as it was done during NAND_CMD_ERASE1 */
  935. break;
  936. case NAND_CMD_UNLOCK1:
  937. addr = MODE_10 | BANK(denali->flash_bank) | page;
  938. index_addr(denali, addr | 0, DENALI_UNLOCK_START);
  939. break;
  940. case NAND_CMD_UNLOCK2:
  941. addr = MODE_10 | BANK(denali->flash_bank) | page;
  942. index_addr(denali, addr | 0, DENALI_UNLOCK_END);
  943. break;
  944. case NAND_CMD_LOCK:
  945. addr = MODE_10 | BANK(denali->flash_bank);
  946. index_addr(denali, addr | 0, DENALI_LOCK);
  947. break;
  948. default:
  949. printf(": unsupported command received 0x%x\n", cmd);
  950. break;
  951. }
  952. }
  953. /* end NAND core entry points */
  954. /* Initialization code to bring the device up to a known good state */
  955. static void denali_hw_init(struct denali_nand_info *denali)
  956. {
  957. /*
958. * tell the controller how many bytes to skip before writing the
959. * ECC code in the OOB area; this space normally holds the bad block marker
  960. */
  961. writel(CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES,
  962. denali->flash_reg + SPARE_AREA_SKIP_BYTES);
  963. detect_max_banks(denali);
  964. denali_nand_reset(denali);
  965. writel(0x0F, denali->flash_reg + RB_PIN_ENABLED);
  966. writel(CHIP_EN_DONT_CARE__FLAG,
  967. denali->flash_reg + CHIP_ENABLE_DONT_CARE);
  968. writel(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
969. /* these registers should be given their values at init time */
  970. writel(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
  971. writel(1, denali->flash_reg + ECC_ENABLE);
  972. denali_nand_timing_set(denali);
  973. denali_irq_init(denali);
  974. }
  975. static struct nand_ecclayout nand_oob;
  976. static int denali_init(struct denali_nand_info *denali)
  977. {
  978. struct mtd_info *mtd = nand_to_mtd(&denali->nand);
  979. int ret;
  980. denali_hw_init(denali);
  981. mtd->name = "denali-nand";
  982. mtd->owner = THIS_MODULE;
  983. /* register the driver with the NAND core subsystem */
  984. denali->nand.select_chip = denali_select_chip;
  985. denali->nand.cmdfunc = denali_cmdfunc;
  986. denali->nand.read_byte = denali_read_byte;
  987. denali->nand.read_buf = denali_read_buf;
  988. denali->nand.waitfunc = denali_waitfunc;
  989. /*
990. * scan for NAND devices attached to the controller;
991. * this is the first stage in a two-step process to register
992. * with the NAND subsystem
  993. */
  994. if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
  995. ret = -ENXIO;
  996. goto fail;
  997. }
  998. #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
999. /* check whether the flash has a BBT (located at the end of flash); as we
1000. * use NAND_BBT_NO_OOB, the BBT page will start with
1001. * bbt_pattern, and a mirror pattern is kept as well */
  1002. denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
  1003. /*
1004. * We are using main + spare with ECC support. As the BBT needs ECC
1005. * support, we must ensure the BBT code does not write the BBT pattern
1006. * to the OOB area. All BBT info is stored in the data area with ECC.
  1007. */
  1008. denali->nand.bbt_options |= NAND_BBT_NO_OOB;
  1009. #endif
  1010. denali->nand.ecc.mode = NAND_ECC_HW;
  1011. denali->nand.ecc.size = CONFIG_NAND_DENALI_ECC_SIZE;
  1012. /* no subpage writes on denali */
  1013. denali->nand.options |= NAND_NO_SUBPAGE_WRITE;
  1014. /*
1015. * Tell the driver the ECC strength. This register may already be set
1016. * correctly, so we read the value out.
  1017. */
  1018. denali->nand.ecc.strength = readl(denali->flash_reg + ECC_CORRECTION);
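/*
 * ECC bytes per code word: BCH over a 512-byte sector needs 13 parity bits
 * per bit of correction (14 bits for a 1024-byte sector), rounded up to
 * 16-bit words and expressed in bytes. For example, strength 8 with a
 * 512-byte sector gives (8 * 13 + 15) / 16 * 2 = 14 bytes.
 */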
  1019. switch (denali->nand.ecc.size) {
  1020. case 512:
  1021. denali->nand.ecc.bytes =
  1022. (denali->nand.ecc.strength * 13 + 15) / 16 * 2;
  1023. break;
  1024. case 1024:
  1025. denali->nand.ecc.bytes =
  1026. (denali->nand.ecc.strength * 14 + 15) / 16 * 2;
  1027. break;
  1028. default:
  1029. pr_err("Unsupported ECC size\n");
  1030. ret = -EINVAL;
  1031. goto fail;
  1032. }
  1033. nand_oob.eccbytes = denali->nand.ecc.bytes;
  1034. denali->nand.ecc.layout = &nand_oob;
  1035. writel(mtd->erasesize / mtd->writesize,
  1036. denali->flash_reg + PAGES_PER_BLOCK);
  1037. writel(denali->nand.options & NAND_BUSWIDTH_16 ? 1 : 0,
  1038. denali->flash_reg + DEVICE_WIDTH);
  1039. writel(mtd->writesize,
  1040. denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
  1041. writel(mtd->oobsize,
  1042. denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
  1043. if (readl(denali->flash_reg + DEVICES_CONNECTED) == 0)
  1044. writel(1, denali->flash_reg + DEVICES_CONNECTED);
  1045. /* override the default operations */
  1046. denali->nand.ecc.read_page = denali_read_page;
  1047. denali->nand.ecc.read_page_raw = denali_read_page_raw;
  1048. denali->nand.ecc.write_page = denali_write_page;
  1049. denali->nand.ecc.write_page_raw = denali_write_page_raw;
  1050. denali->nand.ecc.read_oob = denali_read_oob;
  1051. denali->nand.ecc.write_oob = denali_write_oob;
  1052. if (nand_scan_tail(mtd)) {
  1053. ret = -ENXIO;
  1054. goto fail;
  1055. }
  1056. ret = nand_register(0, mtd);
  1057. fail:
  1058. return ret;
  1059. }
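/*
 * Allocate the driver context, point it at the controller register and data
 * windows, and run the common initialisation.
 */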
  1060. static int __board_nand_init(void)
  1061. {
  1062. struct denali_nand_info *denali;
  1063. denali = kzalloc(sizeof(*denali), GFP_KERNEL);
  1064. if (!denali)
  1065. return -ENOMEM;
  1066. /*
  1067. * In the future, these base addresses should be taken from
  1068. * Device Tree or platform data.
  1069. */
  1070. denali->flash_reg = (void __iomem *)CONFIG_SYS_NAND_REGS_BASE;
  1071. denali->flash_mem = (void __iomem *)CONFIG_SYS_NAND_DATA_BASE;
  1072. return denali_init(denali);
  1073. }
  1074. void board_nand_init(void)
  1075. {
  1076. if (__board_nand_init() < 0)
  1077. pr_warn("Failed to initialize Denali NAND controller.\n");
  1078. }