/*
 * (C) Copyright 2008-2011
 * Graeme Russ, <graeme.russ@gmail.com>
 *
 * (C) Copyright 2002
 * Daniel Engström, Omicron Ceti AB, <daniel@omicron.se>
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Marius Groeger <mgroeger@sysgo.de>
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Alex Zuepke <azu@sysgo.de>
 *
 * Part of this file is adapted from coreboot
 * src/arch/x86/lib/cpu.c
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <command.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <syscon.h>
#include <asm/control_regs.h>
#include <asm/coreboot_tables.h>
#include <asm/cpu.h>
#include <asm/lapic.h>
#include <asm/microcode.h>
#include <asm/mp.h>
#include <asm/mrccache.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/post.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
#include <asm/tables.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Constructor for a conventional segment GDT (or LDT) entry
 * This is a macro so it can be used in initialisers
 */
#define GDT_ENTRY(flags, base, limit)			\
	((((base) & 0xff000000ULL) << (56-24)) |	\
	 (((flags) & 0x0000f0ffULL) << 40) |		\
	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
	 (((base) & 0x00ffffffULL) << 16) |		\
	 (((limit) & 0x0000ffffULL)))
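
/*
 * As a reading aid: GDT_ENTRY(0xc09b, 0, 0xfffff) encodes the flat 32-bit
 * code segment used below. Access byte 0x9b = present, DPL 0, code,
 * execute/read; flag nibble 0xc = 4KiB granularity, 32-bit default size;
 * base 0 with limit 0xfffff covers the full 4GB address space.
 */

/*
 * Descriptor-table pointer in the memory-operand format expected by lgdt:
 * a 16-bit limit followed by a 32-bit linear base, hence __packed.
 */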
struct gdt_ptr {
	u16 len;
	u32 ptr;
} __packed;

struct cpu_device_id {
	unsigned vendor;
	unsigned device;
};

struct cpuinfo_x86 {
	uint8_t x86;		/* CPU family */
	uint8_t x86_vendor;	/* CPU vendor */
	uint8_t x86_model;
	uint8_t x86_mask;
};

/*
 * List of cpu vendor strings along with their normalized
 * id values.
 */
static const struct {
	int vendor;
	const char *name;
} x86_vendors[] = {
	{ X86_VENDOR_INTEL,     "GenuineIntel", },
	{ X86_VENDOR_CYRIX,     "CyrixInstead", },
	{ X86_VENDOR_AMD,       "AuthenticAMD", },
	{ X86_VENDOR_UMC,       "UMC UMC UMC ", },
	{ X86_VENDOR_NEXGEN,    "NexGenDriven", },
	{ X86_VENDOR_CENTAUR,   "CentaurHauls", },
	{ X86_VENDOR_RISE,      "RiseRiseRise", },
	{ X86_VENDOR_TRANSMETA, "GenuineTMx86", },
	{ X86_VENDOR_TRANSMETA, "TransmetaCPU", },
	{ X86_VENDOR_NSC,       "Geode by NSC", },
	{ X86_VENDOR_SIS,       "SiS SiS SiS ", },
};

static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL]     = "Intel",
	[X86_VENDOR_CYRIX]     = "Cyrix",
	[X86_VENDOR_AMD]       = "AMD",
	[X86_VENDOR_UMC]       = "UMC",
	[X86_VENDOR_NEXGEN]    = "NexGen",
	[X86_VENDOR_CENTAUR]   = "Centaur",
	[X86_VENDOR_RISE]      = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC]       = "NSC",
	[X86_VENDOR_SIS]       = "SiS",
};
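
/*
 * Load a data-segment register with a selector for the given GDT entry
 * index. The selector is the descriptor's byte offset within the table,
 * i.e. index * X86_GDT_ENTRY_SIZE (8 bytes per descriptor), with RPL 0
 * and the GDT table indicator.
 */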
static void load_ds(u32 segment)
{
	asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_es(u32 segment)
{
	asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_fs(u32 segment)
{
	asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_gs(u32 segment)
{
	asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_ss(u32 segment)
{
	asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_gdt(const u64 *boot_gdt, u16 num_entries)
{
	struct gdt_ptr gdt;

	gdt.len = (num_entries * X86_GDT_ENTRY_SIZE) - 1;
	gdt.ptr = (ulong)boot_gdt;

	asm volatile("lgdtl %0\n" : : "m" (gdt));
}
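
/*
 * Install U-Boot's working GDT and reload the segment registers. Note that
 * FS is based at new_gd->arch.gd_addr so that the global data pointer can
 * be fetched through the FS segment.
 */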
void arch_setup_gd(gd_t *new_gd)
{
	u64 *gdt_addr;

	gdt_addr = new_gd->arch.gdt;

	/*
	 * CS: code, read/execute, 4 GB, base 0
	 *
	 * Some OSes (e.g. VxWorks) require GDT entry 1 to be the 32-bit CS
	 */
	gdt_addr[X86_GDT_ENTRY_UNUSED] = GDT_ENTRY(0xc09b, 0, 0xfffff);
	gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);

	/* DS: data, read/write, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);

	/* FS: data, read/write, 4 GB, base (Global Data Pointer) */
	new_gd->arch.gd_addr = new_gd;
	gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
		     (ulong)&new_gd->arch.gd_addr, 0xfffff);

	/* 16-bit CS: code, read/execute, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff);

	/* 16-bit DS: data, read/write, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x0093, 0, 0x0ffff);

	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_CS] = GDT_ENTRY(0x809b, 0, 0xfffff);
	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_DS] = GDT_ENTRY(0x8093, 0, 0xfffff);

	load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_FS);
}

#ifdef CONFIG_HAVE_FSP
/*
 * Set up the FSP execution environment GDT
 *
 * Per the Intel FSP external architecture specification, before calling
 * any FSP APIs we need to make sure the system is in flat 32-bit mode and
 * that both the code and data selectors have the full 4GB access range.
 * Here we reuse the GDT we set up in arch/x86/cpu/start16.S, and reload
 * the segment registers.
 */
void setup_fsp_gdt(void)
{
	load_gdt((const u64 *)(gdt_rom + CONFIG_RESET_SEG_START), 4);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
}
#endif

int __weak x86_cleanup_before_linux(void)
{
#ifdef CONFIG_BOOTSTAGE_STASH
	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR,
			CONFIG_BOOTSTAGE_STASH_SIZE);
#endif

	return 0;
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide */
	return (unsigned char)(test >> 8) == 0x02;
}

/*
 * Detect a NexGen CPU running without BIOS hypercode new enough
 * to have CPUID. (Thanks to Herbert Oppmann)
 */
static int deep_magic_nexgen_probe(void)
{
	int ret;

	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx");
	return ret;
}

static bool has_cpuid(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
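
/* Bit 12 of CPUID.01H:EDX is the MTRR feature flag */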
static bool has_mtrr(void)
{
	return cpuid_edx(0x00000001) & (1 << 12) ? true : false;
}
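
/*
 * CPUID leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX (in that
 * order) and the maximum supported standard leaf in EAX.
 */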
static int build_vendor_name(char *vendor_name)
{
	unsigned int *name_as_ints = (unsigned int *)vendor_name;
	struct cpuid_result result;

	result = cpuid(0x00000000);
	name_as_ints[0] = result.ebx;
	name_as_ints[1] = result.edx;
	name_as_ints[2] = result.ecx;

	return result.eax;
}

static void identify_cpu(struct cpu_device_id *cpu)
{
	char vendor_name[16];
	int i;

	vendor_name[0] = '\0'; /* Unset */
	cpu->device = 0; /* fix gcc 4.4.4 warning */

	/* Find the id and vendor_name */
	if (!has_cpuid()) {
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			cpu->device = 0x00000400; /* 486 */
		else
			cpu->device = 0x00000300; /* 386 */
		if ((cpu->device == 0x00000400) && test_cyrix_52div()) {
			memcpy(vendor_name, "CyrixInstead", 13);
			/* If we ever care we can enable cpuid here */
		}
		/* Detect NexGen with old hypercode */
		else if (deep_magic_nexgen_probe())
			memcpy(vendor_name, "NexGenDriven", 13);
	}
	if (has_cpuid()) {
		int cpuid_level;

		cpuid_level = build_vendor_name(vendor_name);
		vendor_name[12] = '\0';

		/* Intel-defined flags: level 0x00000001 */
		if (cpuid_level >= 0x00000001) {
			cpu->device = cpuid_eax(0x00000001);
		} else {
			/* Having only CPUID level 0 is unheard of */
			cpu->device = 0x00000400;
		}
	}
	cpu->vendor = X86_VENDOR_UNKNOWN;
	for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
		if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
			cpu->vendor = x86_vendors[i].vendor;
			break;
		}
	}
}
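
/*
 * Decode the family/model/stepping fields from the CPUID leaf 1 EAX value.
 * As a worked example, tfms 0x000306a9 (Ivy Bridge) decodes to family 6,
 * model 0x3a, stepping 9: for family 6 and above, the extended model
 * nibble (bits 19:16) is prepended to the base model.
 */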
static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
{
	c->x86 = (tfms >> 8) & 0xf;
	c->x86_model = (tfms >> 4) & 0xf;
	c->x86_mask = tfms & 0xf;
	if (c->x86 == 0xf)
		c->x86 += (tfms >> 20) & 0xff;
	if (c->x86 >= 0x6)
		c->x86_model += ((tfms >> 16) & 0xf) << 4;
}
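
/*
 * The 0x0fff0ff0 mask keeps the extended family/model fields (bits 27:16)
 * and the base family/model fields (bits 11:4), dropping the stepping and
 * the processor-type/reserved bits.
 */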
u32 cpu_get_family_model(void)
{
	return gd->arch.x86_device & 0x0fff0ff0;
}

u32 cpu_get_stepping(void)
{
	return gd->arch.x86_mask;
}

int x86_cpu_init_f(void)
{
	const u32 em_rst = ~X86_CR0_EM;
	const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;

	if (ll_boot_init()) {
		/* initialize FPU, reset EM, set MP and NE */
		asm ("fninit\n" \
		     "movl %%cr0, %%eax\n" \
		     "andl %0, %%eax\n" \
		     "orl %1, %%eax\n" \
		     "movl %%eax, %%cr0\n" \
		     : : "i" (em_rst), "i" (mp_ne_set) : "eax");
	}

	/* identify CPU via cpuid and store the decoded info into gd->arch */
	if (has_cpuid()) {
		struct cpu_device_id cpu;
		struct cpuinfo_x86 c;

		identify_cpu(&cpu);
		get_fms(&c, cpu.device);
		gd->arch.x86 = c.x86;
		gd->arch.x86_vendor = cpu.vendor;
		gd->arch.x86_model = c.x86_model;
		gd->arch.x86_mask = c.x86_mask;
		gd->arch.x86_device = cpu.device;

		gd->arch.has_mtrr = has_mtrr();
	}
	/* Don't allow PCI region 3 to use memory in the 2-4GB memory hole */
	gd->pci_ram_top = 0x80000000U;

	/* Configure fixed range MTRRs for some legacy regions */
	if (gd->arch.has_mtrr) {
		u64 mtrr_cap;

		mtrr_cap = native_read_msr(MTRR_CAP_MSR);
		if (mtrr_cap & MTRR_CAP_FIX) {
			/* Mark the VGA RAM area as uncacheable */
			native_write_msr(MTRR_FIX_16K_A0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE),
					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE));

			/*
			 * Mark the PCI ROM area as cacheable to improve ROM
			 * execution performance.
			 */
			native_write_msr(MTRR_FIX_4K_C0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_C8000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_D0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_D8000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));

			/* Enable the fixed range MTRRs */
			msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN);
		}
	}

#ifdef CONFIG_I8254_TIMER
	/* Set up the i8254 timer if required */
	i8254_init();
#endif

	return 0;
}
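
/*
 * Caching is controlled by the CD (cache disable) and NW (not
 * write-through) bits in CR0; wbinvd flushes and invalidates the caches
 * around the change.
 */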
void x86_enable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
	write_cr0(cr0);
	wbinvd();
}
void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));

void x86_disable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 |= X86_CR0_NW | X86_CR0_CD;
	wbinvd();
	write_cr0(cr0);
	wbinvd();
}
void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));

int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));

int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("resetting ...\n");

	/* wait 50 ms */
	udelay(50000);
	disable_interrupts();
	reset_cpu(0);

	/*NOTREACHED*/
	return 0;
}

void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}

__weak void reset_cpu(ulong addr)
{
	/* Do a hard reset through the chipset's reset control register */
	outb(SYS_RST | RST_CPU, IO_PORT_RESET);
	for (;;)
		cpu_hlt();
}

void x86_full_reset(void)
{
	outb(FULL_RST | SYS_RST | RST_CPU, IO_PORT_RESET);
}

int dcache_status(void)
{
	return !(read_cr0() & X86_CR0_CD);
}

/* Define these functions to allow ehci-hcd to function */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
	enable_caches();
}

void dcache_disable(void)
{
	disable_caches();
}

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 1;
}
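
/*
 * In the constants below, 0x00000020 is CR4.PAE (bit 5) and 0x80000000 is
 * CR0.PG (bit 31).
 */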
void cpu_enable_paging_pae(ulong cr3)
{
	__asm__ __volatile__(
		/* Load the page table address */
		"movl	%0, %%cr3\n"
		/* Enable pae */
		"movl	%%cr4, %%eax\n"
		"orl	$0x00000020, %%eax\n"
		"movl	%%eax, %%cr4\n"
		/* Enable paging */
		"movl	%%cr0, %%eax\n"
		"orl	$0x80000000, %%eax\n"
		"movl	%%eax, %%cr0\n"
		:
		: "r" (cr3)
		: "eax");
}

void cpu_disable_paging_pae(void)
{
	/* Turn off paging */
	__asm__ __volatile__ (
		/* Disable paging */
		"movl	%%cr0, %%eax\n"
		"andl	$0x7fffffff, %%eax\n"
		"movl	%%eax, %%cr0\n"
		/* Disable pae */
		"movl	%%cr4, %%eax\n"
		"andl	$0xffffffdf, %%eax\n"
		"movl	%%eax, %%cr4\n"
		:
		:
		: "eax");
}

static bool can_detect_long_mode(void)
{
	return cpuid_eax(0x80000000) > 0x80000000UL;
}
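
/* Bit 29 of CPUID.80000001H:EDX is the Long Mode (LM) feature flag */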
static bool has_long_mode(void)
{
	return cpuid_edx(0x80000001) & (1 << 29) ? true : false;
}

int cpu_has_64bit(void)
{
	return has_cpuid() && can_detect_long_mode() &&
		has_long_mode();
}

const char *cpu_vendor_name(int vendor)
{
	const char *name;

	name = "<invalid cpu vendor>";
	if ((vendor < (ARRAY_SIZE(x86_vendor_name))) &&
	    (x86_vendor_name[vendor] != 0))
		name = x86_vendor_name[vendor];

	return name;
}
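
/* CPUID leaves 0x80000002..0x80000004 return the 48-byte CPU brand string */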
char *cpu_get_name(char *name)
{
	unsigned int *name_as_ints = (unsigned int *)name;
	struct cpuid_result regs;
	char *ptr;
	int i;

	/* Three leaves, 16 bytes each: 48 bytes in total */
	for (i = 0; i < 3; i++) {
		regs = cpuid(0x80000002 + i);
		name_as_ints[i * 4 + 0] = regs.eax;
		name_as_ints[i * 4 + 1] = regs.ebx;
		name_as_ints[i * 4 + 2] = regs.ecx;
		name_as_ints[i * 4 + 3] = regs.edx;
	}
	name[CPU_MAX_NAME_LEN - 1] = '\0';

	/* Skip leading spaces. */
	ptr = name;
	while (*ptr == ' ')
		ptr++;

	return ptr;
}

int default_print_cpuinfo(void)
{
	printf("CPU: %s, vendor %s, device %xh\n",
	       cpu_has_64bit() ? "x86_64" : "x86",
	       cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);

	return 0;
}

#define PAGETABLE_SIZE		(6 * 4096)

/**
 * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
 *
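 * Entry flags used below (as a reading aid): +7 on a table entry is
 * Present | Read/Write | User; 0x183 on a level-2 entry is Present |
 * Read/Write | Page Size (2MiB) | Global.
 *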
 * @pgtable: Pointer to a 24KiB block of memory
 */
static void build_pagetable(uint32_t *pgtable)
{
	uint i;

	memset(pgtable, '\0', PAGETABLE_SIZE);

	/* Level 4 needs a single entry */
	pgtable[0] = (ulong)&pgtable[1024] + 7;

	/* Level 3 has one 64-bit entry for each GiB of memory */
	for (i = 0; i < 4; i++)
		pgtable[1024 + i * 2] = (ulong)&pgtable[2048] + 0x1000 * i + 7;

	/* Level 2 has 2048 64-bit entries, each representing 2MiB */
	for (i = 0; i < 2048; i++)
		pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
}
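
/*
 * Note: cpu_call64() is not expected to return on success, so reaching
 * the code after it means the jump to 64-bit mode failed, hence -EFAULT.
 */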
int cpu_jump_to_64bit(ulong setup_base, ulong target)
{
	uint32_t *pgtable;

	pgtable = memalign(4096, PAGETABLE_SIZE);
	if (!pgtable)
		return -ENOMEM;

	build_pagetable(pgtable);
	cpu_call64((ulong)pgtable, setup_base, target);
	free(pgtable);

	return -EFAULT;
}

void show_boot_progress(int val)
{
	outb(val, POST_PORT);
}

#ifndef CONFIG_SYS_COREBOOT
/*
 * Implement a weak default function for boards that optionally
 * need to clean up the system before jumping to the kernel.
 */
__weak void board_final_cleanup(void)
{
}

int last_stage_init(void)
{
	write_tables();

	board_final_cleanup();

	return 0;
}
#endif

#ifdef CONFIG_SMP
static int enable_smis(struct udevice *cpu, void *unused)
{
	return 0;
}

static struct mp_flight_record mp_steps[] = {
	MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
	/* Wait for APs to finish initialization before proceeding */
	MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
};

static int x86_mp_init(void)
{
	struct mp_params mp_params;

	mp_params.parallel_microcode_load = 0;
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);
	mp_params.microcode_pointer = 0;

	if (mp_init(&mp_params)) {
		printf("Warning: MP init failure\n");
		return -EIO;
	}

	return 0;
}
#endif

static int x86_init_cpus(void)
{
#ifdef CONFIG_SMP
	debug("Init additional CPUs\n");
	x86_mp_init();
#else
	struct udevice *dev;

	/*
	 * This causes the cpu-x86 driver to be probed.
	 * We don't check the return value here, since we want to allow
	 * boards that have not been converted to the CPU uclass driver
	 * to boot.
	 */
	uclass_first_device(UCLASS_CPU, &dev);
#endif

	return 0;
}

int cpu_init_r(void)
{
	struct udevice *dev;
	int ret;

	if (!ll_boot_init())
		return 0;

	ret = x86_init_cpus();
	if (ret)
		return ret;

	/*
	 * Set up the northbridge, PCH and LPC if available. Note that these
	 * may have had some limited pre-relocation init if they were probed
	 * before relocation, but this is post relocation.
	 */
	uclass_first_device(UCLASS_NORTHBRIDGE, &dev);
	uclass_first_device(UCLASS_PCH, &dev);
	uclass_first_device(UCLASS_LPC, &dev);

	/* Set up pin control if available */
	ret = syscon_get_by_driver_data(X86_SYSCON_PINCONF, &dev);
	debug("%s, pinctrl=%p, ret=%d\n", __func__, dev, ret);

	return 0;
}

#ifndef CONFIG_EFI_STUB
int reserve_arch(void)
{
#ifdef CONFIG_ENABLE_MRC_CACHE
	mrccache_reserve();
#endif

#ifdef CONFIG_SEABIOS
	high_table_reserve();
#endif

	return 0;
}
#endif