/*
 * Copyright (C) 2012 Altera Corporation <www.altera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  - Neither the name of the Altera Corporation nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <common.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <wait_bit.h>
#include <spi.h>
#include <bouncebuf.h>
#include "cadence_qspi.h"

#define CQSPI_REG_POLL_US			1 /* 1us */
#define CQSPI_REG_RETRY				10000
#define CQSPI_POLL_IDLE_RETRY			3

#define CQSPI_FIFO_WIDTH			4

#define CQSPI_REG_SRAM_THRESHOLD_WORDS		50

/* Transfer mode */
#define CQSPI_INST_TYPE_SINGLE			0
#define CQSPI_INST_TYPE_DUAL			1
#define CQSPI_INST_TYPE_QUAD			2

#define CQSPI_STIG_DATA_LEN_MAX			8

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4

#define CQSPI_REG_SRAM_FILL_THRESHOLD	\
	((CQSPI_REG_SRAM_SIZE_WORD / 2) * CQSPI_FIFO_WIDTH)

/****************************************************************************
 * Controller's configuration and status register (offset from QSPI_BASE)
 ****************************************************************************/
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE			BIT(0)
#define CQSPI_REG_CONFIG_CLK_POL		BIT(1)
#define CQSPI_REG_CONFIG_CLK_PHA		BIT(2)
#define CQSPI_REG_CONFIG_DIRECT			BIT(7)
#define CQSPI_REG_CONFIG_DECODE			BIT(9)
#define CQSPI_REG_CONFIG_XIP_IMM		BIT(18)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_RD_DATA_CAPTURE		0x10
#define CQSPI_REG_RD_DATA_CAPTURE_BYPASS	BIT(0)
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB	1
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTRD_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTRD_DONE		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS		BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_OPCODE_MASK		0xFF

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTWR_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTWR_DONE		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_IS_IDLE(base)						\
	((readl(base + CQSPI_REG_CONFIG) >>				\
		CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)

#define CQSPI_GET_RD_SRAM_LEVEL(reg_base)				\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>			\
	CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)

#define CQSPI_GET_WR_SRAM_LEVEL(reg_base)				\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>			\
	CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)

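/*
 * Pack a 3- or 4-byte big-endian address from the command buffer into a
 * single register value. Illustrative values: an address buffer of
 * {0xAA, 0xBB, 0xCC} with addr_width 3 yields 0xAABBCC, and
 * {0xAA, 0xBB, 0xCC, 0xDD} with addr_width 4 yields 0xAABBCCDD.
 */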
static unsigned int cadence_qspi_apb_cmd2addr(const unsigned char *addr_buf,
	unsigned int addr_width)
{
	unsigned int addr;

	addr = (addr_buf[0] << 16) | (addr_buf[1] << 8) | addr_buf[2];

	if (addr_width == 4)
		addr = (addr << 8) | addr_buf[3];

	return addr;
}

void cadence_qspi_apb_controller_enable(void *reg_base)
{
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

void cadence_qspi_apb_controller_disable(void *reg_base)
{
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

/* Return 1 if idle, otherwise return 0 (busy). */
static unsigned int cadence_qspi_wait_idle(void *reg_base)
{
	unsigned int start, count = 0;
	/* timeout in unit of ms */
	unsigned int timeout = 5000;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (CQSPI_REG_IS_IDLE(reg_base))
			count++;
		else
			count = 0;

		/*
		 * Treat the controller as truly idle only after reading back
		 * the idle status several times in a row.
		 */
		if (count >= CQSPI_POLL_IDLE_RETRY)
			return 1;
	}

	/* Timed out, still busy. */
	printf("QSPI: controller still busy after %u ms timeout\n", timeout);
	return 0;
}

void cadence_qspi_apb_readdata_capture(void *reg_base,
	unsigned int bypass, unsigned int delay)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	if (bypass)
		reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
	else
		reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;

	reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_config_baudrate_div(void *reg_base,
	unsigned int ref_clk_hz, unsigned int sclk_hz)
{
	unsigned int reg;
	unsigned int div;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);

	/*
	 * The baud_div field in the config reg is 4 bits, and the ref clock is
	 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
	 * SPI clock rate is less than or equal to the requested clock rate.
	 */
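	/*
	 * Illustrative numbers: with a 400 MHz reference clock and a
	 * requested 50 MHz SPI clock, DIV_ROUND_UP(400 MHz, 100 MHz) - 1 = 3,
	 * giving an actual rate of 400 MHz / (2 * 4) = 50 MHz.
	 */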
	div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;

	/* ensure the baud rate doesn't exceed the max value */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK)
		div = CQSPI_REG_CONFIG_BAUD_MASK;

	debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
	      ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));

	reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);

	if (mode & SPI_CPOL)
		reg |= CQSPI_REG_CONFIG_CLK_POL;
	if (mode & SPI_CPHA)
		reg |= CQSPI_REG_CONFIG_CLK_PHA;

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_chipselect(void *reg_base,
	unsigned int chip_select, unsigned int decoder_enable)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	debug("%s : chipselect %d decode %d\n", __func__, chip_select,
	      decoder_enable);

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	/* decoder */
	if (decoder_enable) {
		reg |= CQSPI_REG_CONFIG_DECODE;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE;
		/* Convert CS to the one-hot, active-low encoding used
		 * without an external decoder:
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_delay(void *reg_base,
	unsigned int ref_clk, unsigned int sclk_hz,
	unsigned int tshsl_ns, unsigned int tsd2d_ns,
	unsigned int tchsh_ns, unsigned int tslch_ns)
{
	unsigned int ref_clk_ns;
	unsigned int sclk_ns;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	/* Convert to ns. */
	ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);

	/* Convert to ns. */
	sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);

	/* The controller adds additional delay to that programmed in the reg */
	if (tshsl_ns >= sclk_ns + ref_clk_ns)
		tshsl_ns -= sclk_ns + ref_clk_ns;
	if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
		tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
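	/*
	 * The register fields below are counted in ref-clock cycles. As an
	 * illustrative example, with a 400 MHz reference clock
	 * (ref_clk_ns = 3) a 30 ns tslch requirement rounds up to 10 cycles.
	 */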
	tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
	tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
	tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
	tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);

	reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
			<< CQSPI_REG_DELAY_TSHSL_LSB);
	reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
			<< CQSPI_REG_DELAY_TCHSH_LSB);
	reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
			<< CQSPI_REG_DELAY_TSLCH_LSB);
	reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
			<< CQSPI_REG_DELAY_TSD2D_LSB);
	writel(reg, reg_base + CQSPI_REG_DELAY);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_controller_init(struct cadence_spi_platdata *plat)
{
	unsigned reg;

	cadence_qspi_apb_controller_disable(plat->regbase);

	/* Configure the device size and address bytes */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	/* Clear the previous value */
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);

	/* Configure the remap address register, no remap */
	writel(0, plat->regbase + CQSPI_REG_REMAP);

	/* Indirect mode configurations */
	writel(plat->sram_size / 2, plat->regbase + CQSPI_REG_SRAMPARTITION);
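	/*
	 * Note: the partition register reserves that portion of the internal
	 * SRAM for the indirect read datapath, leaving the remainder for
	 * indirect writes; splitting it evenly is used here as a balanced
	 * default.
	 */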
	/* Disable all interrupts */
	writel(0, plat->regbase + CQSPI_REG_IRQMASK);

	cadence_qspi_apb_controller_enable(plat->regbase);
}

static int cadence_qspi_apb_exec_flash_cmd(void *reg_base,
	unsigned int reg)
{
	unsigned int retry = CQSPI_REG_RETRY;

	/* Write the CMDCTRL without start execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execute */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	while (retry--) {
		reg = readl(reg_base + CQSPI_REG_CMDCTRL);
		if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
			break;
		udelay(1);
	}

	/* Check whether the command is still in progress (i.e. we timed out). */
	if (reg & CQSPI_REG_CMDCTRL_INPROGRESS) {
		printf("QSPI: flash command execution timeout\n");
		return -EIO;
	}

	/* Polling QSPI idle status. */
	if (!cadence_qspi_wait_idle(reg_base))
		return -EIO;

	return 0;
}

/* For commands such as RDID and RDSR. */
int cadence_qspi_apb_command_read(void *reg_base,
	unsigned int cmdlen, const u8 *cmdbuf, unsigned int rxlen,
	u8 *rxbuf)
{
	unsigned int reg;
	unsigned int read_len;
	int status;

	if (!cmdlen || rxlen > CQSPI_STIG_DATA_LEN_MAX || rxbuf == NULL) {
		printf("QSPI: Invalid input arguments cmdlen %d rxlen %d\n",
		       cmdlen, rxlen);
		return -EINVAL;
	}

	reg = cmdbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
	if (status != 0)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (rxlen > 4) ? 4 : rxlen;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (rxlen > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = rxlen - read_len;
		memcpy(rxbuf, &reg, read_len);
	}
	return 0;
}

/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
int cadence_qspi_apb_command_write(void *reg_base, unsigned int cmdlen,
	const u8 *cmdbuf, unsigned int txlen, const u8 *txbuf)
{
	unsigned int reg = 0;
	unsigned int addr_value;
	unsigned int wr_data;
	unsigned int wr_len;

	if (!cmdlen || cmdlen > 5 || txlen > 8 || cmdbuf == NULL) {
		printf("QSPI: Invalid input arguments cmdlen %d txlen %d\n",
		       cmdlen, txlen);
		return -EINVAL;
	}

	reg |= cmdbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (cmdlen == 4 || cmdlen == 5) {
		/* Command with address */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		/* Number of address bytes, encoded as (count - 1). */
		reg |= ((cmdlen - 2) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
		/* Get address */
		addr_value = cadence_qspi_apb_cmd2addr(&cmdbuf[1],
			cmdlen >= 5 ? 4 : 3);

		writel(addr_value, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (txlen) {
		/* writing data = yes */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

		wr_len = txlen > 4 ? 4 : txlen;
		memcpy(&wr_data, txbuf, wr_len);
		writel(wr_data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (txlen > 4) {
			txbuf += wr_len;
			wr_len = txlen - wr_len;
			memcpy(&wr_data, txbuf, wr_len);
			writel(wr_data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	/* Execute the command */
	return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
}

/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
int cadence_qspi_apb_indirect_read_setup(struct cadence_spi_platdata *plat,
	unsigned int cmdlen, unsigned int rx_width, const u8 *cmdbuf)
{
	unsigned int reg;
	unsigned int rd_reg;
	unsigned int addr_value;
	unsigned int dummy_clk;
	unsigned int dummy_bytes;
	unsigned int addr_bytes;

	/*
	 * Identify addr_bytes. All NOR flash drivers use fast read, which
	 * always expects 1 command byte, 3 or 4 address bytes and 1 dummy
	 * byte, so cmdlen is 5 or 6. Only the Ramtron FRAM uses normal read
	 * (which needs no dummy byte); NOR flash is unlikely to use normal
	 * read because of the performance penalty.
	 */
	if (cmdlen >= 5)
		/* to cater for fast read: cmd + addr + dummy */
		addr_bytes = cmdlen - 2;
	else
		/* for normal read (only Ramtron as of now) */
		addr_bytes = cmdlen - 1;
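	/*
	 * Example (illustrative): a fast read (0x0B) with a 3-byte address
	 * arrives as cmdlen = 5 (opcode + 3 addr + 1 dummy), so addr_bytes
	 * is 3 and dummy_bytes below works out to 1.
	 */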
	/* Setup the indirect trigger address */
	writel((u32)plat->ahbbase,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	rd_reg = cmdbuf[0] << CQSPI_REG_RD_INSTR_OPCODE_LSB;

	if (rx_width & SPI_RX_QUAD)
		/* Instruction and address at DQ0, data at DQ0-3. */
		rd_reg |= CQSPI_INST_TYPE_QUAD
			<< CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	/* Get address */
	addr_value = cadence_qspi_apb_cmd2addr(&cmdbuf[1], addr_bytes);
	writel(addr_value, plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

	/* The remaining length is dummy bytes. */
	dummy_bytes = cmdlen - addr_bytes - 1;
	if (dummy_bytes) {
		if (dummy_bytes > CQSPI_DUMMY_BYTES_MAX)
			dummy_bytes = CQSPI_DUMMY_BYTES_MAX;

		rd_reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
#if defined(CONFIG_SPL_SPI_XIP) && defined(CONFIG_SPL_BUILD)
		writel(0x0, plat->regbase + CQSPI_REG_MODE_BIT);
#else
		writel(0xFF, plat->regbase + CQSPI_REG_MODE_BIT);
#endif

		/* Convert to clock cycles. */
		dummy_clk = dummy_bytes * CQSPI_DUMMY_CLKS_PER_BYTE;
		/* Subtract the mode byte (8 clocks). */
		dummy_clk -= CQSPI_DUMMY_CLKS_PER_BYTE;

		if (dummy_clk)
			rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(rd_reg, plat->regbase + CQSPI_REG_RD_INSTR);

	/* set device size */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (addr_bytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_platdata *plat)
{
	u32 reg = readl(plat->regbase + CQSPI_REG_SDRAMLEVEL);
	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static int cadence_qspi_wait_for_data(struct cadence_spi_platdata *plat)
{
	unsigned int timeout = 10000;
	u32 reg;

	while (timeout--) {
		reg = cadence_qspi_get_rd_sram_level(plat);
		if (reg)
			return reg;
		udelay(1);
	}

	return -ETIMEDOUT;
}

int cadence_qspi_apb_indirect_read_execute(struct cadence_spi_platdata *plat,
	unsigned int n_rx, u8 *rxbuf)
{
	unsigned int remaining = n_rx;
	unsigned int bytes_to_read = 0;
	struct bounce_buffer bb;
	u8 *bb_rxbuf;
	int ret;

	writel(n_rx, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);

	/* Start the indirect read transfer */
	writel(CQSPI_REG_INDIRECTRD_START,
	       plat->regbase + CQSPI_REG_INDIRECTRD);

	ret = bounce_buffer_start(&bb, (void *)rxbuf, n_rx, GEN_BB_WRITE);
	if (ret)
		return ret;
	bb_rxbuf = bb.bounce_buffer;

	while (remaining > 0) {
		ret = cadence_qspi_wait_for_data(plat);
		if (ret < 0) {
			printf("Indirect read timed out (%i)\n", ret);
			goto failrd;
		}

		bytes_to_read = ret;
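		/*
		 * The value returned above is the read SRAM fill level in
		 * 32-bit FIFO words; the loop below converts it to bytes and
		 * clamps it to the amount still outstanding.
		 */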
		while (bytes_to_read != 0) {
			bytes_to_read *= CQSPI_FIFO_WIDTH;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			readsl(plat->ahbbase, bb_rxbuf, bytes_to_read >> 2);
			if (bytes_to_read % 4)
				readsb(plat->ahbbase,
				       bb_rxbuf + rounddown(bytes_to_read, 4),
				       bytes_to_read % 4);
			bb_rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cadence_qspi_get_rd_sram_level(plat);
		}
	}

	/* Check indirect done status */
	ret = wait_for_bit("QSPI", plat->regbase + CQSPI_REG_INDIRECTRD,
			   CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTRD);
	bounce_buffer_stop(&bb);

	return 0;

failrd:
	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTRD);
	bounce_buffer_stop(&bb);
	return ret;
}

/* Opcode + Address (3/4 bytes) */
int cadence_qspi_apb_indirect_write_setup(struct cadence_spi_platdata *plat,
	unsigned int cmdlen, const u8 *cmdbuf)
{
	unsigned int reg;
	unsigned int addr_bytes = cmdlen > 4 ? 4 : 3;

	if (cmdlen < 4 || cmdbuf == NULL) {
		printf("QSPI: Invalid input argument, len %d cmdbuf 0x%08x\n",
		       cmdlen, (unsigned int)cmdbuf);
		return -EINVAL;
	}
	/* Setup the indirect trigger address */
	writel((u32)plat->ahbbase,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	reg = cmdbuf[0] << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	writel(reg, plat->regbase + CQSPI_REG_WR_INSTR);

	/* Setup write address. */
	reg = cadence_qspi_apb_cmd2addr(&cmdbuf[1], addr_bytes);
	writel(reg, plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (addr_bytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

int cadence_qspi_apb_indirect_write_execute(struct cadence_spi_platdata *plat,
	unsigned int n_tx, const u8 *txbuf)
{
	unsigned int page_size = plat->page_size;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;
	struct bounce_buffer bb;
	u8 *bb_txbuf;

	/*
	 * Handle non-4-byte aligned accesses via bounce buffer to
	 * avoid data abort.
	 */
	ret = bounce_buffer_start(&bb, (void *)txbuf, n_tx, GEN_BB_READ);
	if (ret)
		return ret;
	bb_txbuf = bb.bounce_buffer;

	/* Configure the indirect write transfer bytes */
	writel(n_tx, plat->regbase + CQSPI_REG_INDIRECTWRBYTES);

	/* Start the indirect write transfer */
	writel(CQSPI_REG_INDIRECTWR_START,
	       plat->regbase + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		write_bytes = remaining > page_size ? page_size : remaining;
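		/*
		 * Push at most one flash page per iteration, then wait for
		 * the write SRAM level to drain back to zero before queueing
		 * the next chunk.
		 */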
		writesl(plat->ahbbase, bb_txbuf, write_bytes >> 2);
		if (write_bytes % 4)
			writesb(plat->ahbbase,
				bb_txbuf + rounddown(write_bytes, 4),
				write_bytes % 4);

		ret = wait_for_bit("QSPI", plat->regbase + CQSPI_REG_SDRAMLEVEL,
				   CQSPI_REG_SDRAMLEVEL_WR_MASK <<
				   CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
		if (ret) {
			printf("Indirect write timed out (%i)\n", ret);
			goto failwr;
		}

		bb_txbuf += write_bytes;
		remaining -= write_bytes;
	}

	/* Check indirect done status */
	ret = wait_for_bit("QSPI", plat->regbase + CQSPI_REG_INDIRECTWR,
			   CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect write completion error (%i)\n", ret);
		goto failwr;
	}
	bounce_buffer_stop(&bb);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTWR);

	return 0;

failwr:
	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	bounce_buffer_stop(&bb);
	return ret;
}

void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
{
	unsigned int reg;

	/* enter XiP mode immediately and enable direct mode */
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	reg |= CQSPI_REG_CONFIG_DIRECT;
	reg |= CQSPI_REG_CONFIG_XIP_IMM;
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	/* keep the XiP mode */
	writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);

	/* Enable mode bit at devrd */
	reg = readl(reg_base + CQSPI_REG_RD_INSTR);
	reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
}