/*
 * sun9i dram controller initialisation
 *
 * (C) Copyright 2007-2015
 * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
 * Jerry Wang <wangflord@allwinnertech.com>
 *
 * (C) Copyright 2016 Theobroma Systems Design und Consulting GmbH
 * Philipp Tomsich <philipp.tomsich@theobroma-systems.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
  13. #include <common.h>
  14. #include <dm.h>
  15. #include <errno.h>
  16. #include <ram.h>
  17. #include <asm/io.h>
  18. #include <asm/arch/clock.h>
  19. #include <asm/arch/dram.h>
  20. #include <asm/arch/sys_proto.h>
  21. DECLARE_GLOBAL_DATA_PTR;
  22. #define DRAM_CLK (CONFIG_DRAM_CLK * 1000000)
/*
 * The following amounts to an extensive rewrite of the code received from
 * Allwinner as part of the open-source bootloader release (refer to
 * https://github.com/allwinner-zh/bootloader.git) and augments the upstream
 * sources (which act as the primary reference point for the inner workings
 * of the 'underdocumented' DRAM controller in the A80) using the following
 * documentation for other memory controllers based on the (Synopsys)
 * Designware IP (DDR memory protocol controller and DDR PHY)
 *  * TI Keystone II Architecture: DDR3 Memory Controller, User's Guide
 *    Document 'SPRUHN7C', Oct 2013 (revised March 2015)
 *  * Xilinx Zynq UltraScale+ MPSoC Register Reference
 *    document ug1087 (v1.0)
 * Note that the Zynq documentation provides a very close match for the DDR
 * memory protocol controller (and provides a very good guide to the rounding
 * rules for various timings), whereas the TI Keystone II document should be
 * referred to for DDR PHY specifics only.
 *
 * The DRAM controller in the A80 runs at half the frequency of the DDR PHY
 * (i.e. the rules for MEMC_FREQ_RATIO=2 from the Zynq documentation apply).
 *
 * Known limitations
 * =================
 * In the current state, the following features are not fully supported and
 * a number of simplifying assumptions have been made:
 *  1) Only DDR3 support is implemented, as our test platform (the A80-Q7
 *     module) is designed to accommodate DDR3/DDR3L.
 *  2) Only 2T-mode has been implemented and tested.
 *  3) The controller supports two different clocking strategies (PLL6 can
 *     either be 2*CK or CK/2)... we only support the 2*CK clock at this
 *     time and haven't verified whether the alternative clocking strategy
 *     works.  If you are interested in porting this over/testing this,
 *     please refer to cases where bit 0 of 'dram_tpr8' is tested in the
 *     original code from Allwinner.
 *  4) Support for 2 ranks per controller is not implemented (as we don't
 *     have the hardware to test it).
 *
 * Future directions
 * =================
 * The driver should be driven from a device-tree based configuration that
 * can dynamically provide the necessary timing parameters (i.e. target
 * frequency and speed-bin information)---the data structures used in the
 * calculation of the timing parameters are already designed to capture
 * similar information as the device tree would provide.
 *
 * To enable a device-tree based configuration of the sun9i platform, we
 * will need to enable CONFIG_TPL and bootstrap in 3 stages: initially
 * into SRAM A1 (40KB) and next into SRAM A2 (160KB)---which would be the
 * stage to initialise the platform via the device-tree---before having
 * the full U-Boot run from DDR.
 */
/*
 * A number of DDR3 timings are given as "the greater of a fixed number of
 * clock cycles (CK) or nanoseconds".  We express these using a structure
 * that holds a cycle count and a duration in picoseconds (so we can model
 * sub-ns timings, such as 7.5ns, without losing precision or resorting to
 * rounding up early).
 */
struct dram_sun9i_timing {
	u32 ck;		/* minimum number of clock cycles (nCK) */
	u32 ps;		/* minimum duration in picoseconds */
};
/*
 * One CL/CWL entry of a speed-bin table: the pair (CL, CWL) is selected
 * when the operating clock period tCK satisfies tCKmin <= tCK < tCKmax
 * (see the lookup loop in mctl_channel_init).
 */
struct dram_sun9i_cl_cwl_timing {
	u32 CL;		/* CAS latency, in nCK (programmed via DDR3_MR0_CL) */
	u32 CWL;	/* CAS write latency, in nCK (programmed via DDR3_MR2_TWL) */
	u32 tCKmin;	/* in ps */
	u32 tCKmax;	/* in ps */
};
/*
 * DRAM geometry and the JEDEC timing set used to program the memory
 * controller and PHY for one configuration.
 */
struct dram_sun9i_para {
	u32 dram_type;		/* e.g. DRAM_TYPE_DDR3 */

	u8 bus_width;		/* data bus width in bits (16 or 32) */
	u8 chan;		/* number of channels (2 selects dual-channel) */
	u8 rank;		/* ranks per channel */
	u8 rows;		/* row address width */
	u16 page_size;		/* page size (fed to MCTL_CR_PAGE_SIZE) */

	/* Timing information for each speed-bin */
	struct dram_sun9i_cl_cwl_timing *cl_cwl_table;
	u32 cl_cwl_numentries;

	/*
	 * For the timings, we try to keep the order and grouping used in
	 * JEDEC Standard No. 79-3F
	 */

	/* timings */
	u32 tREFI;		/* in ns */
	u32 tRFC;		/* in ns */

	u32 tRAS;		/* in ps */

	/* command and address timing */
	u32 tDLLK;		/* in nCK */
	struct dram_sun9i_timing tRTP;
	struct dram_sun9i_timing tWTR;
	u32 tWR;		/* in ns (converted with NS2CYCLES_FLOOR) */
	u32 tMRD;		/* in nCK */
	struct dram_sun9i_timing tMOD;
	u32 tRCD;		/* in ps */
	u32 tRP;		/* in ps */
	u32 tRC;		/* in ps */
	u32 tCCD;		/* in nCK */
	struct dram_sun9i_timing tRRD;
	u32 tFAW;		/* in ps */

	/* calibration timing */
	/* struct dram_sun9i_timing tZQinit; */
	struct dram_sun9i_timing tZQoper;
	struct dram_sun9i_timing tZQCS;

	/* reset timing */
	/* struct dram_sun9i_timing tXPR; */

	/* self-refresh timings */
	struct dram_sun9i_timing tXS;
	u32 tXSDLL;		/* in nCK */
	/* struct dram_sun9i_timing tCKESR; */
	struct dram_sun9i_timing tCKSRE;
	struct dram_sun9i_timing tCKSRX;

	/* power-down timings */
	struct dram_sun9i_timing tXP;
	struct dram_sun9i_timing tXPDLL;
	struct dram_sun9i_timing tCKE;

	/* write leveling timings */
	u32 tWLMRD;		/* min, in nCK */
	/* u32 tWLDQSEN;	min, in nCK */
	u32 tWLO;		/* max, in ns; NOTE(review): the code converts
				 * this with PS2CYCLES_FLOOR -- confirm units */
	/* u32 tWLOE;		max, in ns */

	/* u32 tCKDPX;		in nCK */
	/* u32 tCKCSX;		in nCK */
};
static void mctl_sys_init(void);

/* Fields of the controller's transaction scheduler (SCHED) register */
#define SCHED_RDWR_IDLE_GAP(n)            ((n & 0xff) << 24)
#define SCHED_GO2CRITICAL_HYSTERESIS(n)   ((n & 0xff) << 16)
#define SCHED_LPR_NUM_ENTRIES(n)          ((n & 0xff) << 8)
#define SCHED_PAGECLOSE                   (1 << 2)
#define SCHED_PREFER_WRITE                (1 << 1)
#define SCHED_FORCE_LOW_PRI_N             (1 << 0)

/* Value programmed into SCHED by mctl_ctl_sched_init() */
#define SCHED_CONFIG		(SCHED_RDWR_IDLE_GAP(0xf) | \
				 SCHED_GO2CRITICAL_HYSTERESIS(0x80) | \
				 SCHED_LPR_NUM_ENTRIES(0x20) | \
				 SCHED_FORCE_LOW_PRI_N)

/* QoS/performance register values (PERFHPR/PERFLPR/PERFWR 0 and 1) */
#define PERFHPR0_CONFIG                   0x0000001f
#define PERFHPR1_CONFIG                   0x1f00001f
#define PERFLPR0_CONFIG                   0x000000ff
#define PERFLPR1_CONFIG                   0x0f0000ff
#define PERFWR0_CONFIG                    0x000000ff
#define PERFWR1_CONFIG                    0x0f0001ff
  163. static void mctl_ctl_sched_init(unsigned long base)
  164. {
  165. struct sunxi_mctl_ctl_reg *mctl_ctl =
  166. (struct sunxi_mctl_ctl_reg *)base;
  167. /* Needs to be done before the global clk enable... */
  168. writel(SCHED_CONFIG, &mctl_ctl->sched);
  169. writel(PERFHPR0_CONFIG, &mctl_ctl->perfhpr0);
  170. writel(PERFHPR1_CONFIG, &mctl_ctl->perfhpr1);
  171. writel(PERFLPR0_CONFIG, &mctl_ctl->perflpr0);
  172. writel(PERFLPR1_CONFIG, &mctl_ctl->perflpr1);
  173. writel(PERFWR0_CONFIG, &mctl_ctl->perfwr0);
  174. writel(PERFWR1_CONFIG, &mctl_ctl->perfwr1);
  175. }
/*
 * Bring up the clock and reset infrastructure for the DRAM controller
 * complex: set PLL6 to 2*CK, cycle the MCTL AHB reset/gate, program the
 * per-controller schedulers (before the global clock enable), switch the
 * DRAM clock to PLL_DDR and finally enable the per-channel clocks.
 *
 * The exact ordering of the register accesses below is significant for
 * the hardware bring-up; do not reorder.
 */
static void mctl_sys_init(void)
{
	struct sunxi_ccm_reg * const ccm =
		(struct sunxi_ccm_reg *)SUNXI_CCM_BASE;
	struct sunxi_mctl_com_reg * const mctl_com =
		(struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;

	/* 2*CK clocking strategy: PLL6 runs at twice the DRAM clock */
	debug("Setting PLL6 to %d\n", DRAM_CLK * 2);
	clock_set_pll6(DRAM_CLK * 2);

	/* Original dram init code which may come in handy later
	 ********************************************************
	clock_set_pll6(use_2channelPLL ? (DRAM_CLK * 2) :
			(DRAM_CLK / 2), false);

	if ((para->dram_clk <= 400)|((para->dram_tpr8 & 0x1)==0)) {
		* PLL6 should be 2*CK *
		* ccm_setup_pll6_ddr_clk(PLL6_DDR_CLK); *
		ccm_setup_pll6_ddr_clk((1000000 * (para->dram_clk) * 2), 0);
	} else {
		* PLL6 should be CK/2 *
		ccm_setup_pll6_ddr_clk((1000000 * (para->dram_clk) / 2), 1);
	}

	if (para->dram_tpr13 & (0xf<<18)) {
		*
		* bit21:bit18=0001:pll swing 0.4
		* bit21:bit18=0010:pll swing 0.3
		* bit21:bit18=0100:pll swing 0.2
		* bit21:bit18=1000:pll swing 0.1
		*
		dram_dbg("DRAM fre extend open !\n");
		reg_val=mctl_read_w(CCM_PLL6_DDR_REG);
		reg_val&=(0x1<<16);
		reg_val=reg_val>>16;

		if(para->dram_tpr13 & (0x1<<18))
		{
			mctl_write_w(CCM_PLL_BASE + 0x114,
				(0x3333U|(0x3<<17)|(reg_val<<19)|(0x120U<<20)|
				(0x2U<<29)|(0x1U<<31)));
		}
		else if(para->dram_tpr13 & (0x1<<19))
		{
			mctl_write_w(CCM_PLL_BASE + 0x114,
				(0x6666U|(0x3U<<17)|(reg_val<<19)|(0xD8U<<20)|
				(0x2U<<29)|(0x1U<<31)));
		}
		else if(para->dram_tpr13 & (0x1<<20))
		{
			mctl_write_w(CCM_PLL_BASE + 0x114,
				(0x9999U|(0x3U<<17)|(reg_val<<19)|(0x90U<<20)|
				(0x2U<<29)|(0x1U<<31)));
		}
		else if(para->dram_tpr13 & (0x1<<21))
		{
			mctl_write_w(CCM_PLL_BASE + 0x114,
				(0xccccU|(0x3U<<17)|(reg_val<<19)|(0x48U<<20)|
				(0x2U<<29)|(0x1U<<31)));
		}

		//frequency extend open
		reg_val = mctl_read_w(CCM_PLL6_DDR_REG);
		reg_val |= ((0x1<<24)|(0x1<<30));
		mctl_write_w(CCM_PLL6_DDR_REG, reg_val);

		while(mctl_read_w(CCM_PLL6_DDR_REG) & (0x1<<30));
	}

	aw_delay(0x20000); //make some delay
	********************************************************
	*/

	/* assert mctl reset */
	clrbits_le32(&ccm->ahb_reset0_cfg, 1 << AHB_RESET_OFFSET_MCTL);
	/* stop mctl clock */
	clrbits_le32(&ccm->ahb_gate0, 1 << AHB_GATE_OFFSET_MCTL);

	sdelay(2000);

	/* deassert mctl reset */
	setbits_le32(&ccm->ahb_reset0_cfg, 1 << AHB_RESET_OFFSET_MCTL);
	/* enable mctl clock */
	setbits_le32(&ccm->ahb_gate0, 1 << AHB_GATE_OFFSET_MCTL);

	/* set up the transactions scheduling before enabling the global clk */
	mctl_ctl_sched_init(SUNXI_DRAM_CTL0_BASE);
	mctl_ctl_sched_init(SUNXI_DRAM_CTL1_BASE);
	sdelay(1000);
	debug("2\n");

	/* (3 << 12): PLL_DDR */
	writel((3 << 12) | (1 << 16), &ccm->dram_clk_cfg);
	/* bit 16 is the update/"sdrclk cfg" bit: wait for it to self-clear */
	do {
		debug("Waiting for DRAM_CLK_CFG\n");
		sdelay(10000);
	} while (readl(&ccm->dram_clk_cfg) & (1 << 16));
	/* bit 31: DRAM clock gating on */
	setbits_le32(&ccm->dram_clk_cfg, (1 << 31));

	/* TODO: we only support the common case ... i.e. 2*CK */
	setbits_le32(&mctl_com->ccr, (1 << 14) | (1 << 30));
	writel(2, &mctl_com->rmcr);	/* controller clock is PLL6/4 */

	sdelay(2000);

	/* Original dram init code which may come in handy later
	 ********************************************************
	if ((para->dram_clk <= 400) | ((para->dram_tpr8 & 0x1) == 0)) {
		* PLL6 should be 2*CK *
		* gating 2 channel pll *
		reg_val = mctl_read_w(MC_CCR);
		reg_val |= ((0x1 << 14) | (0x1U << 30));
		mctl_write_w(MC_CCR, reg_val);
		mctl_write_w(MC_RMCR, 0x2); * controller clock use pll6/4 *
	} else {
		* enable 2 channel pll *
		reg_val = mctl_read_w(MC_CCR);
		reg_val &= ~((0x1 << 14) | (0x1U << 30));
		mctl_write_w(MC_CCR, reg_val);
		mctl_write_w(MC_RMCR, 0x0); * controller clock use pll6 *
	}

	reg_val = mctl_read_w(MC_CCR);
	reg_val &= ~((0x1<<15)|(0x1U<<31));
	mctl_write_w(MC_CCR, reg_val);
	aw_delay(20);
	//aw_delay(0x10);
	********************************************************
	*/

	/* cycle the per-channel clock enables: both off, then on again */
	clrbits_le32(&mctl_com->ccr, MCTL_CCR_CH0_CLK_EN | MCTL_CCR_CH1_CLK_EN);
	sdelay(1000);

	setbits_le32(&mctl_com->ccr, MCTL_CCR_CH0_CLK_EN);
	/* TODO if (para->chan == 2) */
	setbits_le32(&mctl_com->ccr, MCTL_CCR_CH1_CLK_EN);
}
  294. static void mctl_com_init(struct dram_sun9i_para *para)
  295. {
  296. struct sunxi_mctl_com_reg * const mctl_com =
  297. (struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
  298. /* TODO: hard-wired for DDR3 now */
  299. writel(((para->chan == 2) ? MCTL_CR_CHANNEL_DUAL :
  300. MCTL_CR_CHANNEL_SINGLE)
  301. | MCTL_CR_DRAMTYPE_DDR3 | MCTL_CR_BANK(1)
  302. | MCTL_CR_ROW(para->rows)
  303. | ((para->bus_width == 32) ? MCTL_CR_BUSW32 : MCTL_CR_BUSW16)
  304. | MCTL_CR_PAGE_SIZE(para->page_size) | MCTL_CR_RANK(para->rank),
  305. &mctl_com->cr);
  306. debug("CR: %d\n", readl(&mctl_com->cr));
  307. }
  308. static u32 mctl_channel_init(u32 ch_index, struct dram_sun9i_para *para)
  309. {
  310. struct sunxi_mctl_ctl_reg *mctl_ctl;
  311. struct sunxi_mctl_phy_reg *mctl_phy;
  312. u32 CL = 0;
  313. u32 CWL = 0;
  314. u16 mr[4] = { 0, };
  315. #define PS2CYCLES_FLOOR(n) ((n * CONFIG_DRAM_CLK) / 1000000)
  316. #define PS2CYCLES_ROUNDUP(n) ((n * CONFIG_DRAM_CLK + 999999) / 1000000)
  317. #define NS2CYCLES_FLOOR(n) ((n * CONFIG_DRAM_CLK) / 1000)
  318. #define NS2CYCLES_ROUNDUP(n) ((n * CONFIG_DRAM_CLK + 999) / 1000)
  319. #define MAX(a, b) ((a) > (b) ? (a) : (b))
  320. /*
  321. * Convert the values to cycle counts (nCK) from what is provided
  322. * by the definition of each speed bin.
  323. */
  324. /* const u32 tREFI = NS2CYCLES_FLOOR(para->tREFI); */
  325. const u32 tREFI = NS2CYCLES_FLOOR(para->tREFI);
  326. const u32 tRFC = NS2CYCLES_ROUNDUP(para->tRFC);
  327. const u32 tRCD = PS2CYCLES_ROUNDUP(para->tRCD);
  328. const u32 tRP = PS2CYCLES_ROUNDUP(para->tRP);
  329. const u32 tRC = PS2CYCLES_ROUNDUP(para->tRC);
  330. const u32 tRAS = PS2CYCLES_ROUNDUP(para->tRAS);
  331. /* command and address timing */
  332. const u32 tDLLK = para->tDLLK;
  333. const u32 tRTP = MAX(para->tRTP.ck, PS2CYCLES_ROUNDUP(para->tRTP.ps));
  334. const u32 tWTR = MAX(para->tWTR.ck, PS2CYCLES_ROUNDUP(para->tWTR.ps));
  335. const u32 tWR = NS2CYCLES_FLOOR(para->tWR);
  336. const u32 tMRD = para->tMRD;
  337. const u32 tMOD = MAX(para->tMOD.ck, PS2CYCLES_ROUNDUP(para->tMOD.ps));
  338. const u32 tCCD = para->tCCD;
  339. const u32 tRRD = MAX(para->tRRD.ck, PS2CYCLES_ROUNDUP(para->tRRD.ps));
  340. const u32 tFAW = PS2CYCLES_ROUNDUP(para->tFAW);
  341. /* calibration timings */
  342. /* const u32 tZQinit = MAX(para->tZQinit.ck,
  343. PS2CYCLES_ROUNDUP(para->tZQinit.ps)); */
  344. const u32 tZQoper = MAX(para->tZQoper.ck,
  345. PS2CYCLES_ROUNDUP(para->tZQoper.ps));
  346. const u32 tZQCS = MAX(para->tZQCS.ck,
  347. PS2CYCLES_ROUNDUP(para->tZQCS.ps));
  348. /* reset timing */
  349. /* const u32 tXPR = MAX(para->tXPR.ck,
  350. PS2CYCLES_ROUNDUP(para->tXPR.ps)); */
  351. /* power-down timings */
  352. const u32 tXP = MAX(para->tXP.ck, PS2CYCLES_ROUNDUP(para->tXP.ps));
  353. const u32 tXPDLL = MAX(para->tXPDLL.ck,
  354. PS2CYCLES_ROUNDUP(para->tXPDLL.ps));
  355. const u32 tCKE = MAX(para->tCKE.ck, PS2CYCLES_ROUNDUP(para->tCKE.ps));
  356. /*
  357. * self-refresh timings (keep below power-down timings, as tCKESR
  358. * needs to be calculated based on the nCK value of tCKE)
  359. */
  360. const u32 tXS = MAX(para->tXS.ck, PS2CYCLES_ROUNDUP(para->tXS.ps));
  361. const u32 tXSDLL = para->tXSDLL;
  362. const u32 tCKSRE = MAX(para->tCKSRE.ck,
  363. PS2CYCLES_ROUNDUP(para->tCKSRE.ps));
  364. const u32 tCKESR = tCKE + 1;
  365. const u32 tCKSRX = MAX(para->tCKSRX.ck,
  366. PS2CYCLES_ROUNDUP(para->tCKSRX.ps));
  367. /* write leveling timings */
  368. const u32 tWLMRD = para->tWLMRD;
  369. /* const u32 tWLDQSEN = para->tWLDQSEN; */
  370. const u32 tWLO = PS2CYCLES_FLOOR(para->tWLO);
  371. /* const u32 tWLOE = PS2CYCLES_FLOOR(para->tWLOE); */
  372. const u32 tRASmax = tREFI * 9;
  373. int i;
  374. for (i = 0; i < para->cl_cwl_numentries; ++i) {
  375. const u32 tCK = 1000000 / CONFIG_DRAM_CLK;
  376. if ((para->cl_cwl_table[i].tCKmin <= tCK) &&
  377. (tCK < para->cl_cwl_table[i].tCKmax)) {
  378. CL = para->cl_cwl_table[i].CL;
  379. CWL = para->cl_cwl_table[i].CWL;
  380. debug("found CL/CWL: CL = %d, CWL = %d\n", CL, CWL);
  381. break;
  382. }
  383. }
  384. if ((CL == 0) && (CWL == 0)) {
  385. printf("failed to find valid CL/CWL for operating point %d MHz\n",
  386. CONFIG_DRAM_CLK);
  387. return 0;
  388. }
  389. if (ch_index == 0) {
  390. mctl_ctl = (struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL0_BASE;
  391. mctl_phy = (struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY0_BASE;
  392. } else {
  393. mctl_ctl = (struct sunxi_mctl_ctl_reg *)SUNXI_DRAM_CTL1_BASE;
  394. mctl_phy = (struct sunxi_mctl_phy_reg *)SUNXI_DRAM_PHY1_BASE;
  395. }
  396. if (para->dram_type == DRAM_TYPE_DDR3) {
  397. mr[0] = DDR3_MR0_PPD_FAST_EXIT | DDR3_MR0_WR(tWR) |
  398. DDR3_MR0_CL(CL);
  399. mr[1] = DDR3_MR1_RTT120OHM;
  400. mr[2] = DDR3_MR2_TWL(CWL);
  401. mr[3] = 0;
  402. /*
  403. * DRAM3 initialisation requires holding CKE LOW for
  404. * at least 500us prior to starting the initialisation
  405. * sequence and at least 10ns after driving CKE HIGH
  406. * before the initialisation sequence may be started).
  407. *
  408. * Refer to Micron document "TN-41-07: DDR3 Power-Up,
  409. * Initialization, and Reset DDR3 Initialization
  410. * Routine" for details).
  411. */
  412. writel(MCTL_INIT0_POST_CKE_x1024(1) |
  413. MCTL_INIT0_PRE_CKE_x1024(
  414. (500 * CONFIG_DRAM_CLK + 1023) / 1024), /* 500us */
  415. &mctl_ctl->init[0]);
  416. writel(MCTL_INIT1_DRAM_RSTN_x1024(1),
  417. &mctl_ctl->init[1]);
  418. /* INIT2 is not used for DDR3 */
  419. writel(MCTL_INIT3_MR(mr[0]) | MCTL_INIT3_EMR(mr[1]),
  420. &mctl_ctl->init[3]);
  421. writel(MCTL_INIT4_EMR2(mr[2]) | MCTL_INIT4_EMR3(mr[3]),
  422. &mctl_ctl->init[4]);
  423. writel(MCTL_INIT5_DEV_ZQINIT_x32(512 / 32), /* 512 cycles */
  424. &mctl_ctl->init[5]);
  425. } else {
  426. /* !!! UNTESTED !!! */
  427. /*
  428. * LPDDR2 and/or LPDDR3 require a 200us minimum delay
  429. * after driving CKE HIGH in the initialisation sequence.
  430. */
  431. writel(MCTL_INIT0_POST_CKE_x1024(
  432. (200 * CONFIG_DRAM_CLK + 1023) / 1024),
  433. &mctl_ctl->init[0]);
  434. writel(MCTL_INIT1_DRAM_RSTN_x1024(1),
  435. &mctl_ctl->init[1]);
  436. writel(MCTL_INIT2_IDLE_AFTER_RESET_x32(
  437. (CONFIG_DRAM_CLK + 31) / 32) /* 1us */
  438. | MCTL_INIT2_MIN_STABLE_CLOCK_x1(5), /* 5 cycles */
  439. &mctl_ctl->init[2]);
  440. writel(MCTL_INIT3_MR(mr[1]) | MCTL_INIT3_EMR(mr[2]),
  441. &mctl_ctl->init[3]);
  442. writel(MCTL_INIT4_EMR2(mr[3]),
  443. &mctl_ctl->init[4]);
  444. writel(MCTL_INIT5_DEV_ZQINIT_x32(
  445. (CONFIG_DRAM_CLK + 31) / 32) /* 1us */
  446. | MCTL_INIT5_MAX_AUTO_INIT_x1024(
  447. (10 * CONFIG_DRAM_CLK + 1023) / 1024),
  448. &mctl_ctl->init[5]);
  449. }
  450. /* (DDR3) We always use a burst-length of 8. */
  451. #define MCTL_BL 8
  452. /* wr2pre: WL + BL/2 + tWR */
  453. #define WR2PRE (MCTL_BL/2 + CWL + tWTR)
  454. /* wr2rd = CWL + BL/2 + tWTR */
  455. #define WR2RD (MCTL_BL/2 + CWL + tWTR)
  456. /*
  457. * rd2wr = RL + BL/2 + 2 - WL (for DDR3)
  458. * rd2wr = RL + BL/2 + RU(tDQSCKmax/tCK) + 1 - WL (for LPDDR2/LPDDR3)
  459. */
  460. #define RD2WR (CL + MCTL_BL/2 + 2 - CWL)
  461. #define MCTL_PHY_TRTW 0
  462. #define MCTL_PHY_TRTODT 0
  463. #define MCTL_DIV2(n) ((n + 1)/2)
  464. #define MCTL_DIV32(n) (n/32)
  465. #define MCTL_DIV1024(n) (n/1024)
  466. writel((MCTL_DIV2(WR2PRE) << 24) | (MCTL_DIV2(tFAW) << 16) |
  467. (MCTL_DIV1024(tRASmax) << 8) | (MCTL_DIV2(tRAS) << 0),
  468. &mctl_ctl->dramtmg[0]);
  469. writel((MCTL_DIV2(tXP) << 16) | (MCTL_DIV2(tRTP) << 8) |
  470. (MCTL_DIV2(tRC) << 0),
  471. &mctl_ctl->dramtmg[1]);
  472. writel((MCTL_DIV2(CWL) << 24) | (MCTL_DIV2(CL) << 16) |
  473. (MCTL_DIV2(RD2WR) << 8) | (MCTL_DIV2(WR2RD) << 0),
  474. &mctl_ctl->dramtmg[2]);
  475. /*
  476. * Note: tMRW is located at bit 16 (and up) in DRAMTMG3...
  477. * this is only relevant for LPDDR2/LPDDR3
  478. */
  479. writel((MCTL_DIV2(tMRD) << 12) | (MCTL_DIV2(tMOD) << 0),
  480. &mctl_ctl->dramtmg[3]);
  481. writel((MCTL_DIV2(tRCD) << 24) | (MCTL_DIV2(tCCD) << 16) |
  482. (MCTL_DIV2(tRRD) << 8) | (MCTL_DIV2(tRP) << 0),
  483. &mctl_ctl->dramtmg[4]);
  484. writel((MCTL_DIV2(tCKSRX) << 24) | (MCTL_DIV2(tCKSRE) << 16) |
  485. (MCTL_DIV2(tCKESR) << 8) | (MCTL_DIV2(tCKE) << 0),
  486. &mctl_ctl->dramtmg[5]);
  487. /* These timings are relevant for LPDDR2/LPDDR3 only */
  488. /* writel((MCTL_TCKDPDE << 24) | (MCTL_TCKDPX << 16) |
  489. (MCTL_TCKCSX << 0), &mctl_ctl->dramtmg[6]); */
  490. /* printf("DRAMTMG7 reset value: 0x%x\n",
  491. readl(&mctl_ctl->dramtmg[7])); */
  492. /* DRAMTMG7 reset value: 0x202 */
  493. /* DRAMTMG7 should contain t_ckpde and t_ckpdx: check reset values!!! */
  494. /* printf("DRAMTMG8 reset value: 0x%x\n",
  495. readl(&mctl_ctl->dramtmg[8])); */
  496. /* DRAMTMG8 reset value: 0x44 */
  497. writel((MCTL_DIV32(tXSDLL) << 0), &mctl_ctl->dramtmg[8]);
  498. writel((MCTL_DIV32(tREFI) << 16) | (MCTL_DIV2(tRFC) << 0),
  499. &mctl_ctl->rfshtmg);
  500. if (para->dram_type == DRAM_TYPE_DDR3) {
  501. writel((2 << 24) | ((MCTL_DIV2(CL) - 2) << 16) |
  502. (1 << 8) | ((MCTL_DIV2(CWL) - 2) << 0),
  503. &mctl_ctl->dfitmg[0]);
  504. } else {
  505. /* TODO */
  506. }
  507. /* TODO: handle the case of the write latency domain going to 0 ... */
  508. /*
  509. * Disable dfi_init_complete_en (the triggering of the SDRAM
  510. * initialisation when the PHY initialisation completes).
  511. */
  512. clrbits_le32(&mctl_ctl->dfimisc, MCTL_DFIMISC_DFI_INIT_COMPLETE_EN);
  513. /* Disable the automatic generation of DLL calibration requests */
  514. setbits_le32(&mctl_ctl->dfiupd[0], MCTL_DFIUPD0_DIS_AUTO_CTRLUPD);
  515. /* A80-Q7: 2T, 1 rank, DDR3, full-32bit-DQ */
  516. /* TODO: make 2T and BUSWIDTH configurable */
  517. writel(MCTL_MSTR_DEVICETYPE(para->dram_type) |
  518. MCTL_MSTR_BURSTLENGTH(para->dram_type) |
  519. MCTL_MSTR_ACTIVERANKS(para->rank) |
  520. MCTL_MSTR_2TMODE | MCTL_MSTR_BUSWIDTH32,
  521. &mctl_ctl->mstr);
  522. if (para->dram_type == DRAM_TYPE_DDR3) {
  523. writel(MCTL_ZQCTRL0_TZQCL(MCTL_DIV2(tZQoper)) |
  524. (MCTL_DIV2(tZQCS)), &mctl_ctl->zqctrl[0]);
  525. /*
  526. * TODO: is the following really necessary as the bottom
  527. * half should already be 0x100 and the upper half should
  528. * be ignored for a DDR3 device???
  529. */
  530. writel(MCTL_ZQCTRL1_TZQSI_x1024(0x100),
  531. &mctl_ctl->zqctrl[1]);
  532. } else {
  533. writel(MCTL_ZQCTRL0_TZQCL(0x200) | MCTL_ZQCTRL0_TZQCS(0x40),
  534. &mctl_ctl->zqctrl[0]);
  535. writel(MCTL_ZQCTRL1_TZQRESET(0x28) |
  536. MCTL_ZQCTRL1_TZQSI_x1024(0x100),
  537. &mctl_ctl->zqctrl[1]);
  538. }
  539. /* Assert dfi_init_complete signal */
  540. setbits_le32(&mctl_ctl->dfimisc, MCTL_DFIMISC_DFI_INIT_COMPLETE_EN);
  541. /* Disable auto-refresh */
  542. setbits_le32(&mctl_ctl->rfshctl3, MCTL_RFSHCTL3_DIS_AUTO_REFRESH);
  543. /* PHY initialisation */
  544. /* TODO: make 2T and 8-bank mode configurable */
  545. writel(MCTL_PHY_DCR_BYTEMASK | MCTL_PHY_DCR_2TMODE |
  546. MCTL_PHY_DCR_DDR8BNK | MCTL_PHY_DRAMMODE_DDR3,
  547. &mctl_phy->dcr);
  548. /* For LPDDR2 or LPDDR3, set DQSGX to 0 before training. */
  549. if (para->dram_type != DRAM_TYPE_DDR3)
  550. clrbits_le32(&mctl_phy->dsgcr, (3 << 6));
  551. writel(mr[0], &mctl_phy->mr0);
  552. writel(mr[1], &mctl_phy->mr1);
  553. writel(mr[2], &mctl_phy->mr2);
  554. writel(mr[3], &mctl_phy->mr3);
  555. /*
  556. * The DFI PHY is running at full rate. We thus use the actual
  557. * timings in clock cycles here.
  558. */
  559. writel((tRC << 26) | (tRRD << 22) | (tRAS << 16) |
  560. (tRCD << 12) | (tRP << 8) | (tWTR << 4) | (tRTP << 0),
  561. &mctl_phy->dtpr[0]);
  562. writel((tMRD << 0) | ((tMOD - 12) << 2) | (tFAW << 5) |
  563. (tRFC << 11) | (tWLMRD << 20) | (tWLO << 26),
  564. &mctl_phy->dtpr[1]);
  565. writel((tXS << 0) | (MAX(tXP, tXPDLL) << 10) |
  566. (tCKE << 15) | (tDLLK << 19) |
  567. (MCTL_PHY_TRTODT << 29) | (MCTL_PHY_TRTW << 30) |
  568. (((tCCD - 4) & 0x1) << 31),
  569. &mctl_phy->dtpr[2]);
  570. /* tDQSCK and tDQSCKmax are used LPDDR2/LPDDR3 */
  571. /* writel((tDQSCK << 0) | (tDQSCKMAX << 3), &mctl_phy->dtpr[3]); */
  572. /*
  573. * We use the same values used by Allwinner's Boot0 for the PTR
  574. * (PHY timing register) configuration that is tied to the PHY
  575. * implementation.
  576. */
  577. writel(0x42C21590, &mctl_phy->ptr[0]);
  578. writel(0xD05612C0, &mctl_phy->ptr[1]);
  579. if (para->dram_type == DRAM_TYPE_DDR3) {
  580. const unsigned int tdinit0 = 500 * CONFIG_DRAM_CLK; /* 500us */
  581. const unsigned int tdinit1 = (360 * CONFIG_DRAM_CLK + 999) /
  582. 1000; /* 360ns */
  583. const unsigned int tdinit2 = 200 * CONFIG_DRAM_CLK; /* 200us */
  584. const unsigned int tdinit3 = CONFIG_DRAM_CLK; /* 1us */
  585. writel((tdinit1 << 20) | tdinit0, &mctl_phy->ptr[3]);
  586. writel((tdinit3 << 18) | tdinit2, &mctl_phy->ptr[4]);
  587. } else {
  588. /* LPDDR2 or LPDDR3 */
  589. const unsigned int tdinit0 = (100 * CONFIG_DRAM_CLK + 999) /
  590. 1000; /* 100ns */
  591. const unsigned int tdinit1 = 200 * CONFIG_DRAM_CLK; /* 200us */
  592. const unsigned int tdinit2 = 22 * CONFIG_DRAM_CLK; /* 11us */
  593. const unsigned int tdinit3 = 2 * CONFIG_DRAM_CLK; /* 2us */
  594. writel((tdinit1 << 20) | tdinit0, &mctl_phy->ptr[3]);
  595. writel((tdinit3 << 18) | tdinit2, &mctl_phy->ptr[4]);
  596. }
  597. /* TEST ME */
  598. writel(0x00203131, &mctl_phy->acmdlr);
  599. /* TODO: can we enable this for 2 ranks, even when we don't know yet */
  600. writel(MCTL_DTCR_DEFAULT | MCTL_DTCR_RANKEN(para->rank),
  601. &mctl_phy->dtcr);
  602. /* TODO: half width */
  603. debug("DX2GCR0 reset: 0x%x\n", readl(&mctl_phy->dx[2].gcr[0]));
  604. writel(0x7C000285, &mctl_phy->dx[2].gcr[0]);
  605. writel(0x7C000285, &mctl_phy->dx[3].gcr[0]);
  606. clrsetbits_le32(&mctl_phy->zq[0].pr, 0xff,
  607. (CONFIG_DRAM_ZQ >> 0) & 0xff); /* CK/CA */
  608. clrsetbits_le32(&mctl_phy->zq[1].pr, 0xff,
  609. (CONFIG_DRAM_ZQ >> 8) & 0xff); /* DX0/DX1 */
  610. clrsetbits_le32(&mctl_phy->zq[2].pr, 0xff,
  611. (CONFIG_DRAM_ZQ >> 16) & 0xff); /* DX2/DX3 */
  612. /* TODO: make configurable & implement non-ODT path */
  613. if (1) {
  614. int lane;
  615. for (lane = 0; lane < 4; ++lane) {
  616. clrbits_le32(&mctl_phy->dx[lane].gcr[2], 0xffff);
  617. clrbits_le32(&mctl_phy->dx[lane].gcr[3],
  618. (0x3<<12) | (0x3<<4));
  619. }
  620. } else {
  621. /* TODO: check */
  622. int lane;
  623. for (lane = 0; lane < 4; ++lane) {
  624. clrsetbits_le32(&mctl_phy->dx[lane].gcr[2], 0xffff,
  625. 0xaaaa);
  626. if (para->dram_type == DRAM_TYPE_DDR3)
  627. setbits_le32(&mctl_phy->dx[lane].gcr[3],
  628. (0x3<<12) | (0x3<<4));
  629. else
  630. setbits_le32(&mctl_phy->dx[lane].gcr[3],
  631. 0x00000012);
  632. }
  633. }
  634. writel(0x04058D02, &mctl_phy->zq[0].cr); /* CK/CA */
  635. writel(0x04058D02, &mctl_phy->zq[1].cr); /* DX0/DX1 */
  636. writel(0x04058D02, &mctl_phy->zq[2].cr); /* DX2/DX3 */
  637. /* Disable auto-refresh prior to data training */
  638. setbits_le32(&mctl_ctl->rfshctl3, MCTL_RFSHCTL3_DIS_AUTO_REFRESH);
  639. setbits_le32(&mctl_phy->dsgcr, 0xf << 24); /* unclear what this is... */
  640. /* TODO: IODDRM (IO DDR-MODE) for DDR3L */
  641. clrsetbits_le32(&mctl_phy->pgcr[1],
  642. MCTL_PGCR1_ZCKSEL_MASK,
  643. MCTL_PGCR1_IODDRM_DDR3 | MCTL_PGCR1_INHVT_EN);
  644. setbits_le32(&mctl_phy->pllcr, 0x3 << 19); /* PLL frequency select */
  645. /* TODO: single-channel PLL mode??? missing */
  646. setbits_le32(&mctl_phy->pllcr,
  647. MCTL_PLLGCR_PLL_BYPASS | MCTL_PLLGCR_PLL_POWERDOWN);
  648. /* setbits_le32(&mctl_phy->pir, MCTL_PIR_PLL_BYPASS); included below */
  649. /* Disable VT compensation */
  650. clrbits_le32(&mctl_phy->pgcr[0], 0x3f);
  651. /* TODO: "other" PLL mode ... 0x20000 seems to be the PLL Bypass */
  652. if (para->dram_type == DRAM_TYPE_DDR3)
  653. clrsetbits_le32(&mctl_phy->pir, MCTL_PIR_MASK, 0x20df3);
  654. else
  655. clrsetbits_le32(&mctl_phy->pir, MCTL_PIR_MASK, 0x2c573);
  656. sdelay(10000); /* XXX necessary? */
  657. /* Wait for the INIT bit to clear itself... */
  658. while ((readl(&mctl_phy->pir) & MCTL_PIR_INIT) != MCTL_PIR_INIT) {
  659. /* not done yet -- keep spinning */
  660. debug("MCTL_PIR_INIT not set\n");
  661. sdelay(1000);
  662. /* TODO: implement timeout */
  663. }
  664. /* TODO: not used --- there's a "2rank debug" section here */
  665. /* Original dram init code which may come in handy later
  666. ********************************************************
  667. * LPDDR2 and LPDDR3 *
  668. if ((para->dram_type) == 6 || (para->dram_type) == 7) {
  669. reg_val = mctl_read_w(P0_DSGCR + ch_offset);
  670. reg_val &= (~(0x3<<6)); * set DQSGX to 1 *
  671. reg_val |= (0x1<<6); * dqs gate extend *
  672. mctl_write_w(P0_DSGCR + ch_offset, reg_val);
  673. dram_dbg("DQS Gate Extend Enable!\n", ch_index);
  674. }
  675. * Disable ZCAL after initial--for nand dma debug--20140330 by YSZ *
  676. if (para->dram_tpr13 & (0x1<<31)) {
  677. reg_val = mctl_read_w(P0_ZQ0CR + ch_offset);
  678. reg_val |= (0x7<<11);
  679. mctl_write_w(P0_ZQ0CR + ch_offset, reg_val);
  680. }
  681. ********************************************************
  682. */
  683. /*
  684. * TODO: more 2-rank support
  685. * (setting the "dqs gate delay to average between 2 rank")
  686. */
  687. /* check if any errors are set */
  688. if (readl(&mctl_phy->pgsr[0]) & MCTL_PGSR0_ERRORS) {
  689. debug("Channel %d unavailable!\n", ch_index);
  690. return 0;
  691. } else{
  692. /* initial OK */
  693. debug("Channel %d OK!\n", ch_index);
  694. /* return 1; */
  695. }
  696. while ((readl(&mctl_ctl->stat) & 0x1) != 0x1) {
  697. debug("Waiting for INIT to be done (controller to come up into 'normal operating' mode\n");
  698. sdelay(100000);
  699. /* init not done */
  700. /* TODO: implement time-out */
  701. }
  702. debug("done\n");
/* "DDR is controlled by controller" */
  704. clrbits_le32(&mctl_phy->pgcr[3], (1 << 25));
  705. /* TODO: is the following necessary? */
  706. debug("DFIMISC before writing 0: 0x%x\n", readl(&mctl_ctl->dfimisc));
  707. writel(0, &mctl_ctl->dfimisc);
  708. /* Enable auto-refresh */
  709. clrbits_le32(&mctl_ctl->rfshctl3, MCTL_RFSHCTL3_DIS_AUTO_REFRESH);
  710. debug("channel_init complete\n");
  711. return 1;
  712. }
  713. signed int DRAMC_get_dram_size(void)
  714. {
  715. struct sunxi_mctl_com_reg * const mctl_com =
  716. (struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
  717. unsigned int reg_val;
  718. unsigned int dram_size;
  719. unsigned int temp;
  720. reg_val = readl(&mctl_com->cr);
  721. temp = (reg_val >> 8) & 0xf; /* page size code */
  722. dram_size = (temp - 6); /* (1 << dram_size) * 512Bytes */
  723. temp = (reg_val >> 4) & 0xf; /* row width code */
  724. dram_size += (temp + 1); /* (1 << dram_size) * 512Bytes */
  725. temp = (reg_val >> 2) & 0x3; /* bank number code */
  726. dram_size += (temp + 2); /* (1 << dram_size) * 512Bytes */
  727. temp = reg_val & 0x3; /* rank number code */
  728. dram_size += temp; /* (1 << dram_size) * 512Bytes */
  729. temp = (reg_val >> 19) & 0x1; /* channel number code */
  730. dram_size += temp; /* (1 << dram_size) * 512Bytes */
  731. dram_size = dram_size - 11; /* (1 << dram_size) MBytes */
  732. return 1 << dram_size;
  733. }
  734. unsigned long sunxi_dram_init(void)
  735. {
  736. struct sunxi_mctl_com_reg * const mctl_com =
  737. (struct sunxi_mctl_com_reg *)SUNXI_DRAM_COM_BASE;
  738. struct dram_sun9i_cl_cwl_timing cl_cwl[] = {
  739. { .CL = 5, .CWL = 5, .tCKmin = 3000, .tCKmax = 3300 },
  740. { .CL = 6, .CWL = 5, .tCKmin = 2500, .tCKmax = 3300 },
  741. { .CL = 8, .CWL = 6, .tCKmin = 1875, .tCKmax = 2500 },
  742. { .CL = 10, .CWL = 7, .tCKmin = 1500, .tCKmax = 1875 },
  743. { .CL = 11, .CWL = 8, .tCKmin = 1250, .tCKmax = 1500 }
  744. };
  745. /* Set initial parameters, these get modified by the autodetect code */
  746. struct dram_sun9i_para para = {
  747. .dram_type = DRAM_TYPE_DDR3,
  748. .bus_width = 32,
  749. .chan = 2,
  750. .rank = 1,
  751. /* .rank = 2, */
  752. .page_size = 4096,
  753. /* .rows = 16, */
  754. .rows = 15,
  755. /* CL/CWL table for the speed bin */
  756. .cl_cwl_table = cl_cwl,
  757. .cl_cwl_numentries = sizeof(cl_cwl) /
  758. sizeof(struct dram_sun9i_cl_cwl_timing),
  759. /* timings */
  760. .tREFI = 7800, /* 7.8us (up to 85 degC) */
  761. .tRFC = 260, /* 260ns for 4GBit devices */
  762. /* 350ns @ 8GBit */
  763. .tRCD = 13750,
  764. .tRP = 13750,
  765. .tRC = 48750,
  766. .tRAS = 35000,
  767. .tDLLK = 512,
  768. .tRTP = { .ck = 4, .ps = 7500 },
  769. .tWTR = { .ck = 4, .ps = 7500 },
  770. .tWR = 15,
  771. .tMRD = 4,
  772. .tMOD = { .ck = 12, .ps = 15000 },
  773. .tCCD = 4,
  774. .tRRD = { .ck = 4, .ps = 7500 },
  775. .tFAW = 40,
  776. /* calibration timing */
  777. /* .tZQinit = { .ck = 512, .ps = 640000 }, */
  778. .tZQoper = { .ck = 256, .ps = 320000 },
  779. .tZQCS = { .ck = 64, .ps = 80000 },
  780. /* reset timing */
  781. /* .tXPR = { .ck = 5, .ps = 10000 }, */
  782. /* self-refresh timings */
  783. .tXS = { .ck = 5, .ps = 10000 },
  784. .tXSDLL = 512,
  785. .tCKSRE = { .ck = 5, .ps = 10000 },
  786. .tCKSRX = { .ck = 5, .ps = 10000 },
  787. /* power-down timings */
  788. .tXP = { .ck = 3, .ps = 6000 },
  789. .tXPDLL = { .ck = 10, .ps = 24000 },
  790. .tCKE = { .ck = 3, .ps = 5000 },
  791. /* write leveling timings */
  792. .tWLMRD = 40,
  793. /* .tWLDQSEN = 25, */
  794. .tWLO = 7500,
  795. /* .tWLOE = 2000, */
  796. };
  797. /*
  798. * Disable A80 internal 240 ohm resistor.
  799. *
  800. * This code sequence is adapated from Allwinner's Boot0 (see
  801. * https://github.com/allwinner-zh/bootloader.git), as there
  802. * is no documentation for these two registers in the R_PRCM
  803. * block.
  804. */
  805. setbits_le32(SUNXI_PRCM_BASE + 0x1e0, (0x3 << 8));
  806. writel(0, SUNXI_PRCM_BASE + 0x1e8);
  807. mctl_sys_init();
  808. if (!mctl_channel_init(0, &para))
  809. return 0;
  810. /* dual-channel */
  811. if (!mctl_channel_init(1, &para)) {
  812. /* disable channel 1 */
  813. clrsetbits_le32(&mctl_com->cr, MCTL_CR_CHANNEL_MASK,
  814. MCTL_CR_CHANNEL_SINGLE);
  815. /* disable channel 1 global clock */
  816. clrbits_le32(&mctl_com->cr, MCTL_CCR_CH1_CLK_EN);
  817. }
  818. mctl_com_init(&para);
  819. /* return the proper RAM size */
  820. return DRAMC_get_dram_size() << 20;
  821. }