/*
 * Copyright (c) 2016 Google, Inc
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 * Based on code from coreboot src/soc/intel/broadwell/cpu.c
 */

#include <common.h>
#include <dm.h>
#include <cpu.h>
#include <asm/cpu.h>
#include <asm/cpu_x86.h>
#include <asm/cpu_common.h>
#include <asm/intel_regs.h>
#include <asm/msr.h>
#include <asm/post.h>
#include <asm/turbo.h>
#include <asm/arch/cpu.h>
#include <asm/arch/pch.h>
#include <asm/arch/rcb.h>

/* Needed for the gd-> accesses below */
DECLARE_GLOBAL_DATA_PTR;

struct cpu_broadwell_priv {
	bool ht_disabled;
};
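
/*
 * Note on the tables below: POWER_LIMIT_1_TIME uses Intel's RAPL
 * time-window encoding (bits 4:0 hold an exponent Y, bits 6:5 a fraction
 * Z, window = 2^Y * (1 + Z/4) * time_unit). With the typical ~0.977 ms
 * time unit this yields the second values listed here.
 */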

/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
	[0] = 0x00,
	[1] = 0x0a,
	[2] = 0x0b,
	[3] = 0x4b,
	[4] = 0x0c,
	[5] = 0x2c,
	[6] = 0x4c,
	[7] = 0x6c,
	[8] = 0x0d,
	[10] = 0x2d,
	[12] = 0x4d,
	[14] = 0x6d,
	[16] = 0x0e,
	[20] = 0x2e,
	[24] = 0x4e,
	[28] = 0x6e,
	[32] = 0x0f,
	[40] = 0x2f,
	[48] = 0x4f,
	[56] = 0x6f,
	[64] = 0x10,
	[80] = 0x30,
	[96] = 0x50,
	[112] = 0x70,
	[128] = 0x11,
};

/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
	[0x00] = 0,
	[0x0a] = 1,
	[0x0b] = 2,
	[0x4b] = 3,
	[0x0c] = 4,
	[0x2c] = 5,
	[0x4c] = 6,
	[0x6c] = 7,
	[0x0d] = 8,
	[0x2d] = 10,
	[0x4d] = 12,
	[0x6d] = 14,
	[0x0e] = 16,
	[0x2e] = 20,
	[0x4e] = 24,
	[0x6e] = 28,
	[0x0f] = 32,
	[0x2f] = 40,
	[0x4f] = 48,
	[0x6f] = 56,
	[0x10] = 64,
	[0x30] = 80,
	[0x50] = 96,
	[0x70] = 112,
	[0x11] = 128,
};

int arch_cpu_init_dm(void)
{
	struct udevice *dev;
	int ret;

	/* Start up the LPC so we have serial */
	ret = uclass_first_device(UCLASS_LPC, &dev);
	if (ret)
		return ret;
	if (!dev)
		return -ENODEV;

	ret = cpu_set_flex_ratio_to_tdp_nominal();
	if (ret)
		return ret;

	return 0;
}

void set_max_freq(void)
{
	msr_t msr, perf_ctl, platform_info;

	/* Check for configurable TDP option */
	platform_info = msr_read(MSR_PLATFORM_INFO);
	if ((platform_info.hi >> 1) & 3) {
		/* Set to nominal TDP ratio */
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = msr_read(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	perf_ctl.hi = 0;

	msr_write(IA32_PERF_CTL, perf_ctl);
	debug("CPU: frequency set to %d MHz\n",
	      ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

int arch_cpu_init(void)
{
	post_code(POST_CPU_INIT);

	return x86_cpu_init_f();
}

int print_cpuinfo(void)
{
	char processor_name[CPU_MAX_NAME_LEN];
	const char *name;
	int ret;

	set_max_freq();

	ret = cpu_common_init();
	if (ret)
		return ret;
	gd->arch.pei_boot_mode = PEI_BOOT_NONE;

	/* Print processor name */
	name = cpu_get_name(processor_name);
	printf("CPU: %s\n", name);

	return 0;
}

/*
 * The core 100MHz BCLK is disabled in deeper c-states. One needs to
 * calibrate the 100MHz BCLK against the 24MHz BCLK to restore the clocks
 * properly when a core is woken up.
 */
static int pcode_ready(void)
{
	int wait_count;
	const int delay_step = 10;

	wait_count = 0;
	do {
		if (!(readl(MCHBAR_REG(BIOS_MAILBOX_INTERFACE)) &
		      MAILBOX_RUN_BUSY))
			return 0;
		wait_count += delay_step;
		udelay(delay_step);
	} while (wait_count < 1000);

	return -ETIMEDOUT;
}

static u32 pcode_mailbox_read(u32 command)
{
	int ret;

	ret = pcode_ready();
	if (ret) {
		debug("PCODE: mailbox timeout on wait ready\n");
		return ret;
	}

	/* Send command and start transaction */
	writel(command | MAILBOX_RUN_BUSY, MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

	ret = pcode_ready();
	if (ret) {
		debug("PCODE: mailbox timeout on completion\n");
		return ret;
	}

	/* Read mailbox */
	return readl(MCHBAR_REG(BIOS_MAILBOX_DATA));
}

static int pcode_mailbox_write(u32 command, u32 data)
{
	int ret;

	ret = pcode_ready();
	if (ret) {
		debug("PCODE: mailbox timeout on wait ready\n");
		return ret;
	}

	writel(data, MCHBAR_REG(BIOS_MAILBOX_DATA));

	/* Send command and start transaction */
	writel(command | MAILBOX_RUN_BUSY, MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

	ret = pcode_ready();
	if (ret) {
		debug("PCODE: mailbox timeout on completion\n");
		return ret;
	}

	return 0;
}

/* @dev is the CPU device */
static void initialize_vr_config(struct udevice *dev)
{
	int ramp, min_vid;
	msr_t msr;

	debug("Initializing VR config\n");

	/* Configure VR_CURRENT_CONFIG */
	msr = msr_read(MSR_VR_CURRENT_CONFIG);
	/*
	 * Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only valid
	 * on ULT systems
	 */
	msr.hi &= 0xc0000000;
	msr.hi |= (0x01 << (52 - 32)); /* PSI3 threshold -  1A */
	msr.hi |= (0x05 << (42 - 32)); /* PSI2 threshold -  5A */
	msr.hi |= (0x14 << (32 - 32)); /* PSI1 threshold - 20A */
	msr.hi |= (1 << (62 - 32));    /* Enable PSI4 */
	/* Leave the max instantaneous current limit (12:0) to default */
	msr_write(MSR_VR_CURRENT_CONFIG, msr);

	/* Configure VR_MISC_CONFIG MSR */
	msr = msr_read(MSR_VR_MISC_CONFIG);
	/* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format */
	msr.hi &= ~(0x3ff << (40 - 32));
	msr.hi |= (0x200 << (40 - 32)); /* 1.0 */
	/* Set IOUT_OFFSET to 0 */
	msr.hi &= ~0xff;
	/* Set entry ramp rate to slow */
	msr.hi &= ~(1 << (51 - 32));
	/* Enable decay mode on C-state entry */
	msr.hi |= (1 << (52 - 32));
	/* Set the slow ramp rate */
	msr.hi &= ~(0x3 << (53 - 32));
	/* Configure the C-state exit ramp rate */
	ramp = fdtdec_get_int(gd->fdt_blob, dev->of_offset, "intel,slow-ramp",
			      -1);
	if (ramp != -1) {
		/* Configured slow ramp rate */
		msr.hi |= ((ramp & 0x3) << (53 - 32));
		/* Set exit ramp rate to slow */
		msr.hi &= ~(1 << (50 - 32));
	} else {
		/* Fast ramp rate / 4 */
		msr.hi |= (0x01 << (53 - 32));
		/* Set exit ramp rate to fast */
		msr.hi |= (1 << (50 - 32));
	}
	/* Set MIN_VID (31:24) to allow CPU to have full control */
	msr.lo &= ~0xff000000;
	min_vid = fdtdec_get_int(gd->fdt_blob, dev->of_offset, "intel,min-vid",
				 0);
	msr.lo |= (min_vid & 0xff) << 24;
	msr_write(MSR_VR_MISC_CONFIG, msr);

	/* Configure VR_MISC_CONFIG2 MSR */
	msr = msr_read(MSR_VR_MISC_CONFIG2);
	msr.lo &= ~0xffff;
	/*
	 * Allow CPU to control minimum voltage completely (15:8) and
	 * set the fast ramp voltage in 10mV steps
	 */
	if (cpu_get_family_model() == BROADWELL_FAMILY_ULT)
		msr.lo |= 0x006a; /* 1.56V */
	else
		msr.lo |= 0x006f; /* 1.60V */
	msr_write(MSR_VR_MISC_CONFIG2, msr);

	/* Set C9/C10 VCC Min */
	pcode_mailbox_write(MAILBOX_BIOS_CMD_WRITE_C9C10_VOLTAGE, 0x1f1f);
}

static int calibrate_24mhz_bclk(void)
{
	int err_code;
	int ret;

	ret = pcode_ready();
	if (ret)
		return ret;

	/* A non-zero value initiates the PCODE calibration */
	writel(~0, MCHBAR_REG(BIOS_MAILBOX_DATA));
	writel(MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL,
	       MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

	ret = pcode_ready();
	if (ret)
		return ret;

	err_code = readl(MCHBAR_REG(BIOS_MAILBOX_INTERFACE)) & 0xff;

	debug("PCODE: 24MHz BCLK calibration response: %d\n", err_code);

	/* Read the calibrated value */
	writel(MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION,
	       MCHBAR_REG(BIOS_MAILBOX_INTERFACE));

	ret = pcode_ready();
	if (ret)
		return ret;

	debug("PCODE: 24MHz BCLK calibration value: 0x%08x\n",
	      readl(MCHBAR_REG(BIOS_MAILBOX_DATA)));

	return 0;
}

static void configure_pch_power_sharing(void)
{
	u32 pch_power, pch_power_ext, pmsync, pmsync2;
	int i;

	/* Read PCH Power levels from PCODE */
	pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
	pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);

	debug("PCH Power: PCODE Levels 0x%08x 0x%08x\n", pch_power,
	      pch_power_ext);

	pmsync = readl(RCB_REG(PMSYNC_CONFIG));
	pmsync2 = readl(RCB_REG(PMSYNC_CONFIG2));

	/*
	 * Program PMSYNC_TPR_CONFIG PCH power limit values
	 * pmsync[0:4]   = mailbox[0:5]
	 * pmsync[8:12]  = mailbox[6:11]
	 * pmsync[16:20] = mailbox[12:17]
	 */
	for (i = 0; i < 3; i++) {
		u32 level = pch_power & 0x3f;

		pch_power >>= 6;
		pmsync &= ~(0x1f << (i * 8));
		pmsync |= (level & 0x1f) << (i * 8);
	}
	writel(pmsync, RCB_REG(PMSYNC_CONFIG));

	/*
	 * Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
	 * pmsync2[0:4]   = mailbox[23:18]
	 * pmsync2[8:12]  = mailbox_ext[6:11]
	 * pmsync2[16:20] = mailbox_ext[12:17]
	 * pmsync2[24:28] = mailbox_ext[18:22]
	 */
	pmsync2 &= ~0x1f;
	pmsync2 |= pch_power & 0x1f;

	for (i = 1; i < 4; i++) {
		u32 level = pch_power_ext & 0x3f;

		pch_power_ext >>= 6;
		pmsync2 &= ~(0x1f << (i * 8));
		pmsync2 |= (level & 0x1f) << (i * 8);
	}
	writel(pmsync2, RCB_REG(PMSYNC_CONFIG2));
}

static int bsp_init_before_ap_bringup(struct udevice *dev)
{
	int ret;

	initialize_vr_config(dev);
	ret = calibrate_24mhz_bclk();
	if (ret)
		return ret;
	configure_pch_power_sharing();

	return 0;
}

int cpu_config_tdp_levels(void)
{
	msr_t platform_info;

	/* Bits 34:33 indicate how many levels supported */
	platform_info = msr_read(MSR_PLATFORM_INFO);

	return (platform_info.hi >> 1) & 3;
}

static void set_max_ratio(void)
{
	msr_t msr, perf_ctl;

	perf_ctl.hi = 0;

	/* Check for configurable TDP option */
	if (turbo_get_state() == TURBO_ENABLED) {
		msr = msr_read(MSR_NHM_TURBO_RATIO_LIMIT);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
	} else {
		/* Platform Info bits 15:8 give max ratio */
		msr = msr_read(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	}
	msr_write(IA32_PERF_CTL, perf_ctl);

	debug("cpu: frequency set to %d\n",
	      ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}

int broadwell_init(struct udevice *dev)
{
	struct cpu_broadwell_priv *priv = dev_get_priv(dev);
	int num_threads;
	int num_cores;
	msr_t msr;
	int ret;

	msr = msr_read(CORE_THREAD_COUNT_MSR);
	num_threads = (msr.lo >> 0) & 0xffff;
	num_cores = (msr.lo >> 16) & 0xffff;
	debug("CPU has %u cores, %u threads enabled\n", num_cores,
	      num_threads);

	priv->ht_disabled = num_threads == num_cores;

	ret = bsp_init_before_ap_bringup(dev);
	if (ret)
		return ret;

	set_max_ratio();

	return ret;
}

static void configure_mca(void)
{
	msr_t msr;
	const unsigned int mcg_cap_msr = 0x179;
	int i;
	int num_banks;

	msr = msr_read(mcg_cap_msr);
	num_banks = msr.lo & 0xff;
	msr.lo = 0;
	msr.hi = 0;
	/*
	 * TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank
	 */
	for (i = 0; i < num_banks; i++)
		msr_write(MSR_IA32_MC0_STATUS + (i * 4), msr);
}

static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = msr_read(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	msr_write(MSR_PIC_MSG_CONTROL, msr);
}

static void configure_c_states(void)
{
	msr_t msr;

	msr = msr_read(MSR_PMG_CST_CONFIG_CONTROL);
	msr.lo |= (1 << 31);	/* Timed MWAIT Enable */
	msr.lo |= (1 << 30);	/* Package c-state Undemotion Enable */
	msr.lo |= (1 << 29);	/* Package c-state Demotion Enable */
	msr.lo |= (1 << 28);	/* C1 Auto Undemotion Enable */
	msr.lo |= (1 << 27);	/* C3 Auto Undemotion Enable */
	msr.lo |= (1 << 26);	/* C1 Auto Demotion Enable */
	msr.lo |= (1 << 25);	/* C3 Auto Demotion Enable */
	msr.lo &= ~(1 << 10);	/* Disable IO MWAIT redirection */
	/* The deepest package c-state defaults to factory-configured value */
	msr_write(MSR_PMG_CST_CONFIG_CONTROL, msr);

	msr = msr_read(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	/* Enable P-state HW_ALL coordination */
	msr_write(MSR_MISC_PWR_MGMT, msr);

	msr = msr_read(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	/* Enable Energy Perf Bias MSR 0x1b0 */
	msr.lo |= (1 << 1);	/* C1E Enable */
	msr.lo |= (1 << 0);	/* Bi-directional PROCHOT# */
	msr_write(MSR_POWER_CTL, msr);
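
	/*
	 * The interrupt-response-time-limit MSRs written below hold a valid
	 * bit, a time-unit select and a 10-bit latency value; IRTL_1024_NS
	 * selects the 1024 ns granularity assumed for all six limits here.
	 */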

	/* C-state Interrupt Response Latency Control 0 - package C3 latency */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
	msr_write(MSR_C_STATE_LATENCY_CONTROL_0, msr);

	/* C-state Interrupt Response Latency Control 1 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
	msr_write(MSR_C_STATE_LATENCY_CONTROL_1, msr);

	/* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
	msr_write(MSR_C_STATE_LATENCY_CONTROL_2, msr);

	/* C-state Interrupt Response Latency Control 3 - package C8 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_3_LIMIT;
	msr_write(MSR_C_STATE_LATENCY_CONTROL_3, msr);

	/* C-state Interrupt Response Latency Control 4 - package C9 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_4_LIMIT;
	msr_write(MSR_C_STATE_LATENCY_CONTROL_4, msr);

	/* C-state Interrupt Response Latency Control 5 - package C10 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_5_LIMIT;
	msr_write(MSR_C_STATE_LATENCY_CONTROL_5, msr);
}

static void configure_misc(void)
{
	msr_t msr;

	msr = msr_read(MSR_IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	/* Fast String enable */
	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	/* Enhanced SpeedStep Enable */
	msr_write(MSR_IA32_MISC_ENABLE, msr);

	/* Disable thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	msr_write(MSR_IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	msr_write(MSR_IA32_PACKAGE_THERM_INTERRUPT, msr);
}

static void configure_thermal_target(struct udevice *dev)
{
	int tcc_offset;
	msr_t msr;

	tcc_offset = fdtdec_get_int(gd->fdt_blob, dev->of_offset,
				    "intel,tcc-offset", 0);

	/* Set TCC activation offset if supported */
	msr = msr_read(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && tcc_offset) {
		msr = msr_read(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (tcc_offset & 0xf) << 24;
		msr_write(MSR_TEMPERATURE_TARGET, msr);
	}
}

static void configure_dca_cap(void)
{
	struct cpuid_result cpuid_regs;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	cpuid_regs = cpuid(1);
	if (cpuid_regs.ecx & (1 << 18)) {
		msr = msr_read(MSR_IA32_PLATFORM_DCA_CAP);
		msr.lo |= 1;
		msr_write(MSR_IA32_PLATFORM_DCA_CAP, msr);
	}
}

static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;
	int ecx;

	/* Determine if energy efficient policy is supported */
	ecx = cpuid_ecx(0x6);
	if (!(ecx & (1 << 3)))
		return;

	/* Energy Policy is bits 3:0 */
	msr = msr_read(MSR_IA32_ENERGY_PERFORMANCE_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	msr_write(MSR_IA32_ENERGY_PERFORMANCE_BIAS, msr);

	debug("cpu: energy policy set to %u\n", policy);
}

/* All CPUs including BSP will run the following function */
static void cpu_core_init(struct udevice *dev)
{
	/* Clear out pending MCEs */
	configure_mca();

	/* Enable the local cpu apics */
	enable_lapic_tpr();

	/* Configure C States */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Thermal throttle activation offset */
	configure_thermal_target(dev);

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Enable Turbo */
	turbo_enable();
}

/*
 * Configure processor power limits if possible
 * This must be done AFTER set of BIOS_RESET_CPL
 */
void cpu_set_power_limits(int power_limit_1_time)
{
	msr_t msr;
	msr_t limit;
	unsigned power_unit;
	unsigned tdp, min_power, max_power, max_time;
	u8 power_limit_1_val;

	msr = msr_read(MSR_PLATFORM_INFO);
	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		power_limit_1_time = 28;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;

	/* Get units */
	msr = msr_read(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 2 << ((msr.lo & 0xf) - 1);

	/* Get power defaults for this SKU */
	msr = msr_read(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	debug("CPU TDP: %u Watts\n", tdp / power_unit);

	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];

	/* Set long term power limit to TDP */
	limit.lo = 0;
	limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;

	/* Set short term power limit to 1.25 * TDP */
	limit.hi = 0;
	limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_EN;
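
	/*
	 * For example (assuming the common 1/8 W power unit), a 15 W SKU
	 * reports tdp = 120 units, so PL1 is programmed to 120 units (15 W)
	 * and PL2 to 150 units (18.75 W).
	 */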

	/* Power limit 2 time is only programmable on server SKU */
	msr_write(MSR_PKG_POWER_LIMIT, limit);

	/* Set power limit values in MCHBAR as well */
	writel(limit.lo, MCHBAR_REG(MCH_PKG_POWER_LIMIT_LO));
	writel(limit.hi, MCHBAR_REG(MCH_PKG_POWER_LIMIT_HI));

	/* Set DDR RAPL power limit by copying from MMIO to MSR */
	msr.lo = readl(MCHBAR_REG(MCH_DDR_POWER_LIMIT_LO));
	msr.hi = readl(MCHBAR_REG(MCH_DDR_POWER_LIMIT_HI));
	msr_write(MSR_DDR_RAPL_LIMIT, msr);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		msr = msr_read(MSR_CONFIG_TDP_NOMINAL);
		limit.hi = 0;
		limit.lo = msr.lo & 0xff;
		msr_write(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}

static int broadwell_get_info(struct udevice *dev, struct cpu_info *info)
{
	msr_t msr;

	msr = msr_read(IA32_PERF_CTL);
	info->cpu_freq = ((msr.lo >> 8) & 0xff) * BROADWELL_BCLK * 1000000;
	info->features = 1 << CPU_FEAT_L1_CACHE | 1 << CPU_FEAT_MMU |
		1 << CPU_FEAT_UCODE | 1 << CPU_FEAT_DEVICE_ID;

	return 0;
}

static int broadwell_get_count(struct udevice *dev)
{
	return 4;
}

static int cpu_x86_broadwell_probe(struct udevice *dev)
{
	if (dev->seq == 0) {
		cpu_core_init(dev);
		return broadwell_init(dev);
	}

	return 0;
}

static const struct cpu_ops cpu_x86_broadwell_ops = {
	.get_desc	= cpu_x86_get_desc,
	.get_info	= broadwell_get_info,
	.get_count	= broadwell_get_count,
	.get_vendor	= cpu_x86_get_vendor,
};

static const struct udevice_id cpu_x86_broadwell_ids[] = {
	{ .compatible = "intel,core-i3-gen5" },
	{ }
};

U_BOOT_DRIVER(cpu_x86_broadwell_drv) = {
	.name		= "cpu_x86_broadwell",
	.id		= UCLASS_CPU,
	.of_match	= cpu_x86_broadwell_ids,
	.bind		= cpu_x86_bind,
	.probe		= cpu_x86_broadwell_probe,
	.ops		= &cpu_x86_broadwell_ops,
	.priv_auto_alloc_size	= sizeof(struct cpu_broadwell_priv),
};