clocks-common.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920
  1. /*
  2. *
  3. * Clock initialization for OMAP4
  4. *
  5. * (C) Copyright 2010
  6. * Texas Instruments, <www.ti.com>
  7. *
  8. * Aneesh V <aneesh@ti.com>
  9. *
  10. * Based on previous work by:
  11. * Santosh Shilimkar <santosh.shilimkar@ti.com>
  12. * Rajendra Nayak <rnayak@ti.com>
  13. *
  14. * SPDX-License-Identifier: GPL-2.0+
  15. */
  16. #include <common.h>
  17. #include <i2c.h>
  18. #include <asm/omap_common.h>
  19. #include <asm/gpio.h>
  20. #include <asm/arch/clock.h>
  21. #include <asm/arch/sys_proto.h>
  22. #include <asm/utils.h>
  23. #include <asm/omap_gpio.h>
  24. #include <asm/emif.h>
#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 * (so compile printf()/puts() calls in this file away otherwise)
 */
#define printf(fmt, args...)
#define puts(s)
#endif
/*
 * sys_clk frequencies in Hz, indexed by the value returned from
 * get_sys_clk_index() (CM_SYS_CLKSEL - 1). Only 7 of the 8 slots are
 * initialized; the last element is implicitly zero.
 */
const u32 sys_clk_array[8] = {
	12000000,	       /* 12 MHz */
	20000000,	       /* 20 MHz */
	16800000,	       /* 16.8 MHz */
	19200000,	       /* 19.2 MHz */
	26000000,	       /* 26 MHz */
	27000000,	       /* 27 MHz */
	38400000,	       /* 38.4 MHz */
};
  42. static inline u32 __get_sys_clk_index(void)
  43. {
  44. s8 ind;
  45. /*
  46. * For ES1 the ROM code calibration of sys clock is not reliable
  47. * due to hw issue. So, use hard-coded value. If this value is not
  48. * correct for any board over-ride this function in board file
  49. * From ES2.0 onwards you will get this information from
  50. * CM_SYS_CLKSEL
  51. */
  52. if (omap_revision() == OMAP4430_ES1_0)
  53. ind = OMAP_SYS_CLK_IND_38_4_MHZ;
  54. else {
  55. /* SYS_CLKSEL - 1 to match the dpll param array indices */
  56. ind = (readl((*prcm)->cm_sys_clksel) &
  57. CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
  58. }
  59. return ind;
  60. }
/* Weak alias so board files can override the sys clk index detection */
u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));
  63. u32 get_sys_clk_freq(void)
  64. {
  65. u8 index = get_sys_clk_index();
  66. return sys_clk_array[index];
  67. }
  68. void setup_post_dividers(u32 const base, const struct dpll_params *params)
  69. {
  70. struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
  71. /* Setup post-dividers */
  72. if (params->m2 >= 0)
  73. writel(params->m2, &dpll_regs->cm_div_m2_dpll);
  74. if (params->m3 >= 0)
  75. writel(params->m3, &dpll_regs->cm_div_m3_dpll);
  76. if (params->m4_h11 >= 0)
  77. writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
  78. if (params->m5_h12 >= 0)
  79. writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
  80. if (params->m6_h13 >= 0)
  81. writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
  82. if (params->m7_h14 >= 0)
  83. writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
  84. if (params->h21 >= 0)
  85. writel(params->h21, &dpll_regs->cm_div_h21_dpll);
  86. if (params->h22 >= 0)
  87. writel(params->h22, &dpll_regs->cm_div_h22_dpll);
  88. if (params->h23 >= 0)
  89. writel(params->h23, &dpll_regs->cm_div_h23_dpll);
  90. if (params->h24 >= 0)
  91. writel(params->h24, &dpll_regs->cm_div_h24_dpll);
  92. }
  93. static inline void do_bypass_dpll(u32 const base)
  94. {
  95. struct dpll_regs *dpll_regs = (struct dpll_regs *)base;
  96. clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
  97. CM_CLKMODE_DPLL_DPLL_EN_MASK,
  98. DPLL_EN_FAST_RELOCK_BYPASS <<
  99. CM_CLKMODE_DPLL_EN_SHIFT);
  100. }
  101. static inline void wait_for_bypass(u32 const base)
  102. {
  103. struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
  104. if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
  105. LDELAY)) {
  106. printf("Bypassing DPLL failed %x\n", base);
  107. }
  108. }
  109. static inline void do_lock_dpll(u32 const base)
  110. {
  111. struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
  112. clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
  113. CM_CLKMODE_DPLL_DPLL_EN_MASK,
  114. DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
  115. }
  116. static inline void wait_for_lock(u32 const base)
  117. {
  118. struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
  119. if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
  120. &dpll_regs->cm_idlest_dpll, LDELAY)) {
  121. printf("DPLL locking failed for %x\n", base);
  122. hang();
  123. }
  124. }
  125. inline u32 check_for_lock(u32 const base)
  126. {
  127. struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
  128. u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;
  129. return lock;
  130. }
  131. const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
  132. {
  133. u32 sysclk_ind = get_sys_clk_index();
  134. return &dpll_data->mpu[sysclk_ind];
  135. }
  136. const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
  137. {
  138. u32 sysclk_ind = get_sys_clk_index();
  139. return &dpll_data->core[sysclk_ind];
  140. }
  141. const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
  142. {
  143. u32 sysclk_ind = get_sys_clk_index();
  144. return &dpll_data->per[sysclk_ind];
  145. }
  146. const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
  147. {
  148. u32 sysclk_ind = get_sys_clk_index();
  149. return &dpll_data->iva[sysclk_ind];
  150. }
  151. const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
  152. {
  153. u32 sysclk_ind = get_sys_clk_index();
  154. return &dpll_data->usb[sysclk_ind];
  155. }
  156. const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
  157. {
  158. #ifdef CONFIG_SYS_OMAP_ABE_SYSCK
  159. u32 sysclk_ind = get_sys_clk_index();
  160. return &dpll_data->abe[sysclk_ind];
  161. #else
  162. return dpll_data->abe;
  163. #endif
  164. }
  165. static const struct dpll_params *get_ddr_dpll_params
  166. (struct dplls const *dpll_data)
  167. {
  168. u32 sysclk_ind = get_sys_clk_index();
  169. if (!dpll_data->ddr)
  170. return NULL;
  171. return &dpll_data->ddr[sysclk_ind];
  172. }
  173. #ifdef CONFIG_DRIVER_TI_CPSW
  174. static const struct dpll_params *get_gmac_dpll_params
  175. (struct dplls const *dpll_data)
  176. {
  177. u32 sysclk_ind = get_sys_clk_index();
  178. if (!dpll_data->gmac)
  179. return NULL;
  180. return &dpll_data->gmac[sysclk_ind];
  181. }
  182. #endif
/*
 * do_setup_dpll() - program M/N and post-dividers of one DPLL and
 * optionally lock it.
 * @base:   base address of the DPLL register block
 * @params: multiplier/divider settings; NULL means nothing to program
 * @lock:   DPLL_LOCK to lock after setup, DPLL_NO_LOCK to leave in bypass
 * @dpll:   name used only in diagnostic messages
 */
static void do_setup_dpll(u32 const base, const struct dpll_params *params,
			  u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The Dpll has already been locked by rom code using CH.
		 * Check if M,N are matching with Ideal nominal opp values.
		 * If matches, skip the rest otherwise relock.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s Dpll locked, but not for ideal M = %d,"
			      "N = %d values, current values are M = %d,"
			      "N= %d" , dpll, params->m, params->n,
			      M, N);
		} else {
			/* Dpll locked with ideal values for nominal opps. */
			debug("\n %s Dpll already locked with ideal"
			      "nominal opp values", dpll);
			/* Still re-enter bypass so dividers can be set */
			bypass_dpll(base);
			goto setup_post_dividers;
		}
	}

	/* M and N must only be changed while the DPLL is in bypass */
	bypass_dpll(base);

	/* Set M & N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Lock */
	if (lock)
		do_lock_dpll(base);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}
  228. u32 omap_ddr_clk(void)
  229. {
  230. u32 ddr_clk, sys_clk_khz, omap_rev, divider;
  231. const struct dpll_params *core_dpll_params;
  232. omap_rev = omap_revision();
  233. sys_clk_khz = get_sys_clk_freq() / 1000;
  234. core_dpll_params = get_core_dpll_params(*dplls_data);
  235. debug("sys_clk %d\n ", sys_clk_khz * 1000);
  236. /* Find Core DPLL locked frequency first */
  237. ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
  238. (core_dpll_params->n + 1);
  239. if (omap_rev < OMAP5430_ES1_0) {
  240. /*
  241. * DDR frequency is PHY_ROOT_CLK/2
  242. * PHY_ROOT_CLK = Fdpll/2/M2
  243. */
  244. divider = 4;
  245. } else {
  246. /*
  247. * DDR frequency is PHY_ROOT_CLK
  248. * PHY_ROOT_CLK = Fdpll/2/M2
  249. */
  250. divider = 2;
  251. }
  252. ddr_clk = ddr_clk / divider / core_dpll_params->m2;
  253. ddr_clk *= 1000; /* convert to Hz */
  254. debug("ddr_clk %d\n ", ddr_clk);
  255. return ddr_clk;
  256. }
/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0 : 600 MHz
 * 4430 ES2.x : 792 MHz (OPP Turbo)
 * 4460       : 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;

	omap_rev = omap_revision();
	/*
	 * DCC and clock divider settings for 4460.
	 * DCC is required, if more than a certain frequency is required.
	 * For, 4460 > 1GHZ.
	 *      5430 > 1.4GHZ.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		/* Bypass first: dividers/DCC must not change while locked */
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		/* Keep DCC disabled (frequencies here stay below 1 GHz) */
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			     CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}
#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
/*
 * setup_usb_dpll() - program the sigma-delta divider for jitter
 * correction, then set up and lock the USB DPLL.
 */
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;
	/*
	 * USB:
	 * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
	 * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)]* CLKINP / 250)
	 *      - where CLKINP is sys_clk in MHz
	 * Use CLKINP in KHz and adjust the denominator accordingly so
	 * that we have enough accuracy and at the same time no overflow
	 */
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
	num += den - 1;		/* round the division up (ceiling) */
	sd_div = num / den;
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now setup the dpll with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif
/*
 * setup_dplls() - configure and (where appropriate) lock all SoC DPLLs:
 * CORE, PER, MPU, optionally USB, DDR and GMAC.
 */
static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);	/* default - safest */
	/*
	 * Do not lock the core DPLL now. Just set it up.
	 * Core DPLL will be locked after setting up EMIF
	 * using the FREQ_UPDATE method(freq_update_core())
	 */
	if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
	    EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_LOCK, "core");

	/* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	       (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	       (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* lock PER dpll */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
		      params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
	setup_usb_dpll();
#endif
	/* DDR and GMAC getters may return NULL; do_setup_dpll handles it */
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}
  365. u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
  366. {
  367. u32 offset_code;
  368. volt_offset -= pmic->base_offset;
  369. offset_code = (volt_offset + pmic->step - 1) / pmic->step;
  370. /*
  371. * Offset codes 1-6 all give the base voltage in Palmas
  372. * Offset code 0 switches OFF the SMPS
  373. */
  374. return offset_code + pmic->start_code;
  375. }
/*
 * do_scale_vcore() - program one PMIC voltage register.
 * @vcore_reg: PMIC register address for the rail
 * @volt_mv:   target voltage in mV; 0 means "rail not controlled", skip
 * @pmic:      PMIC descriptor (bus init, write hook, step/offset data)
 *
 * When pmic->gpio_en is set, the PMIC uses a GPIO to select between
 * SET0/SET1 registers: the GPIO is driven low while SET1 is programmed,
 * then high to switch the rail to the new value.
 */
void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
	int ret = 0;

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();
	/* See if we can first get the GPIO if needed */
	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
		       pmic->gpio, ret);
		return;
	}

	/* Pull the GPIO low to select SET0 register, while we program SET1 */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);

	/* convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
	      offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);

	/* Raise the GPIO to switch the rail over to the SET1 value */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
}
  405. int __weak get_voltrail_opp(int rail_offset)
  406. {
  407. /*
  408. * By default return OPP_NOM for all voltage rails.
  409. */
  410. return OPP_NOM;
  411. }
  412. static u32 optimize_vcore_voltage(struct volts const *v, int opp)
  413. {
  414. u32 val;
  415. if (!v->value[opp])
  416. return 0;
  417. if (!v->efuse.reg[opp])
  418. return v->value[opp];
  419. switch (v->efuse.reg_bits) {
  420. case 16:
  421. val = readw(v->efuse.reg[opp]);
  422. break;
  423. case 32:
  424. val = readl(v->efuse.reg[opp]);
  425. break;
  426. default:
  427. printf("Error: efuse 0x%08x bits=%d unknown\n",
  428. v->efuse.reg[opp], v->efuse.reg_bits);
  429. return v->value[opp];
  430. }
  431. if (!val) {
  432. printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
  433. v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
  434. return v->value[opp];
  435. }
  436. debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
  437. __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
  438. val);
  439. return val;
  440. }
#ifdef CONFIG_IODELAY_RECALIBRATION
/*
 * Weak no-op default; boards that need IO delay recalibration after
 * VDD_CORE_L scaling override this with their pad configuration.
 */
void __weak recalibrate_iodelay(void)
{
}
#endif
/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	/*
	 * Treat vcores_data as a flat array of struct volts members and
	 * walk every rail: scale it via the efuse, then collapse rails
	 * that share a PMIC SMPS into a single (maximum) setting.
	 */
	for (i=0; i<(sizeof(struct vcores_data)/sizeof(struct volts)); i++) {
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to see
				 * if we have a group and find the max voltage,
				 * which is set to the first occurrence of the
				 * particular SMPS; the other group voltages are
				 * zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	/* Program each rail; zeroed group members are skipped inside */
	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}
  570. static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
  571. {
  572. clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
  573. enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
  574. debug("Enable clock domain - %x\n", clkctrl_reg);
  575. }
  576. static inline void disable_clock_domain(u32 const clkctrl_reg)
  577. {
  578. clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
  579. CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
  580. CD_CLKCTRL_CLKTRCTRL_SHIFT);
  581. debug("Disable clock domain - %x\n", clkctrl_reg);
  582. }
  583. static inline void wait_for_clk_enable(u32 clkctrl_addr)
  584. {
  585. u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
  586. u32 bound = LDELAY;
  587. while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
  588. (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
  589. clkctrl = readl(clkctrl_addr);
  590. idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
  591. MODULE_CLKCTRL_IDLEST_SHIFT;
  592. if (--bound == 0) {
  593. printf("Clock enable failed for 0x%x idlest 0x%x\n",
  594. clkctrl_addr, clkctrl);
  595. return;
  596. }
  597. }
  598. }
  599. static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
  600. u32 wait_for_enable)
  601. {
  602. clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
  603. enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
  604. debug("Enable clock module - %x\n", clkctrl_addr);
  605. if (wait_for_enable)
  606. wait_for_clk_enable(clkctrl_addr);
  607. }
  608. static inline void wait_for_clk_disable(u32 clkctrl_addr)
  609. {
  610. u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
  611. u32 bound = LDELAY;
  612. while ((idlest != MODULE_CLKCTRL_IDLEST_DISABLED)) {
  613. clkctrl = readl(clkctrl_addr);
  614. idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
  615. MODULE_CLKCTRL_IDLEST_SHIFT;
  616. if (--bound == 0) {
  617. printf("Clock disable failed for 0x%x idlest 0x%x\n",
  618. clkctrl_addr, clkctrl);
  619. return;
  620. }
  621. }
  622. }
  623. static inline void disable_clock_module(u32 const clkctrl_addr,
  624. u32 wait_for_disable)
  625. {
  626. clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
  627. MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
  628. MODULE_CLKCTRL_MODULEMODE_SHIFT);
  629. debug("Disable clock module - %x\n", clkctrl_addr);
  630. if (wait_for_disable)
  631. wait_for_clk_disable(clkctrl_addr);
  632. }
/*
 * freq_update_core() - lock the CORE DPLL via the SHADOW_FREQ_CONFIG1
 * (FREQ_UPDATE) hardware handshake, which updates EMIF clocks safely
 * while SDRAM is in use. Hangs on handshake timeout.
 */
void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);
	/* Put EMIF clock domain in sw wakeup mode */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
			    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	/* Request DLL reset + DPLL lock + M2 divider in one shadow write */
	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
	    SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
	    SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
			SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
	    SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	/* Hardware clears FREQ_UPDATE when the transition completes */
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			   (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!");
		hang();
	}

	/*
	 * Putting EMIF in HW_AUTO is seen to be causing issues with
	 * EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
	 * in OMAP5430 ES1.0 silicon
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put EMIF clock domain back in hw auto mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}
  670. void bypass_dpll(u32 const base)
  671. {
  672. do_bypass_dpll(base);
  673. wait_for_bypass(base);
  674. }
  675. void lock_dpll(u32 const base)
  676. {
  677. do_lock_dpll(base);
  678. wait_for_lock(base);
  679. }
  680. static void setup_clocks_for_console(void)
  681. {
  682. /* Do not add any spl_debug prints in this function */
  683. clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
  684. CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
  685. CD_CLKCTRL_CLKTRCTRL_SHIFT);
  686. /* Enable all UARTs - console will be on one of them */
  687. clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
  688. MODULE_CLKCTRL_MODULEMODE_MASK,
  689. MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
  690. MODULE_CLKCTRL_MODULEMODE_SHIFT);
  691. clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
  692. MODULE_CLKCTRL_MODULEMODE_MASK,
  693. MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
  694. MODULE_CLKCTRL_MODULEMODE_SHIFT);
  695. clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
  696. MODULE_CLKCTRL_MODULEMODE_MASK,
  697. MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
  698. MODULE_CLKCTRL_MODULEMODE_SHIFT);
  699. clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
  700. MODULE_CLKCTRL_MODULEMODE_MASK,
  701. MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
  702. MODULE_CLKCTRL_MODULEMODE_SHIFT);
  703. clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
  704. CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
  705. CD_CLKCTRL_CLKTRCTRL_SHIFT);
  706. }
  707. void do_enable_clocks(u32 const *clk_domains,
  708. u32 const *clk_modules_hw_auto,
  709. u32 const *clk_modules_explicit_en,
  710. u8 wait_for_enable)
  711. {
  712. u32 i, max = 100;
  713. /* Put the clock domains in SW_WKUP mode */
  714. for (i = 0; (i < max) && clk_domains[i]; i++) {
  715. enable_clock_domain(clk_domains[i],
  716. CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
  717. }
  718. /* Clock modules that need to be put in HW_AUTO */
  719. for (i = 0; (i < max) && clk_modules_hw_auto[i]; i++) {
  720. enable_clock_module(clk_modules_hw_auto[i],
  721. MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
  722. wait_for_enable);
  723. };
  724. /* Clock modules that need to be put in SW_EXPLICIT_EN mode */
  725. for (i = 0; (i < max) && clk_modules_explicit_en[i]; i++) {
  726. enable_clock_module(clk_modules_explicit_en[i],
  727. MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
  728. wait_for_enable);
  729. };
  730. /* Put the clock domains in HW_AUTO mode now */
  731. for (i = 0; (i < max) && clk_domains[i]; i++) {
  732. enable_clock_domain(clk_domains[i],
  733. CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
  734. }
  735. }
  736. void do_disable_clocks(u32 const *clk_domains,
  737. u32 const *clk_modules_disable,
  738. u8 wait_for_disable)
  739. {
  740. u32 i, max = 100;
  741. /* Clock modules that need to be put in SW_DISABLE */
  742. for (i = 0; (i < max) && clk_modules_disable[i]; i++)
  743. disable_clock_module(clk_modules_disable[i],
  744. wait_for_disable);
  745. /* Put the clock domains in SW_SLEEP mode */
  746. for (i = 0; (i < max) && clk_domains[i]; i++)
  747. disable_clock_domain(clk_domains[i]);
  748. }
  749. /**
  750. * setup_early_clocks() - Setup early clocks needed for SoC
  751. *
  752. * Setup clocks for console, SPL basic initialization clocks and initialize
  753. * the timer. This is invoked prior prcm_init.
  754. */
  755. void setup_early_clocks(void)
  756. {
  757. switch (omap_hw_init_context()) {
  758. case OMAP_INIT_CONTEXT_SPL:
  759. case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
  760. case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
  761. setup_clocks_for_console();
  762. enable_basic_clocks();
  763. timer_init();
  764. /* Fall through */
  765. }
  766. }
  767. void prcm_init(void)
  768. {
  769. switch (omap_hw_init_context()) {
  770. case OMAP_INIT_CONTEXT_SPL:
  771. case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
  772. case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
  773. scale_vcores(*omap_vcores);
  774. setup_dplls();
  775. setup_warmreset_time();
  776. break;
  777. default:
  778. break;
  779. }
  780. if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
  781. enable_basic_uboot_clocks();
  782. }
  783. void gpi2c_init(void)
  784. {
  785. static int gpi2c = 1;
  786. if (gpi2c) {
  787. i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
  788. CONFIG_SYS_OMAP24_I2C_SLAVE);
  789. gpi2c = 0;
  790. }
  791. }