/*
 * x86 FPU boot time init code:
 */
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/cmdline.h>

#include <linux/sched.h>
#include <linux/init.h>

/*
 * Initialize the TS bit in CR0 according to the style of context-switches
 * we are using:
 */
static void fpu__init_cpu_ctx_switch(void)
{
        if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
                stts();
        else
                clts();
}
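
/*
 * Background note: stts() sets CR0::TS, so the first FPU instruction a
 * task executes afterwards traps with a #NM (device-not-available)
 * fault, giving the kernel a hook to lazily restore that task's FPU
 * state. clts() clears TS, which is what eager switching wants: FPU
 * state is switched unconditionally, so FPU instructions must not
 * fault.
 */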

/*
 * Initialize the registers found in all CPUs, CR0 and CR4:
 */
static void fpu__init_cpu_generic(void)
{
        unsigned long cr0;
        unsigned long cr4_mask = 0;

        if (boot_cpu_has(X86_FEATURE_FXSR))
                cr4_mask |= X86_CR4_OSFXSR;
        if (boot_cpu_has(X86_FEATURE_XMM))
                cr4_mask |= X86_CR4_OSXMMEXCPT;
        if (cr4_mask)
                cr4_set_bits(cr4_mask);

        cr0 = read_cr0();
        cr0 &= ~(X86_CR0_TS | X86_CR0_EM);      /* clear TS and EM */
        if (!boot_cpu_has(X86_FEATURE_FPU))
                cr0 |= X86_CR0_EM;
        write_cr0(cr0);

        /* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION
        if (!boot_cpu_has(X86_FEATURE_FPU))
                fpstate_init_soft(&current->thread.fpu.state.soft);
        else
#endif
        asm volatile ("fninit");
}
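
/*
 * Background note on the control bits touched above: CR4::OSFXSR tells
 * the CPU that the OS uses FXSAVE/FXRSTOR and enables SSE instructions;
 * CR4::OSXMMEXCPT enables the #XM exception for unmasked SSE
 * floating-point errors. CR0::EM, when set, makes every x87 instruction
 * trap with #NM, which is how math emulation gets wired up on FPU-less
 * CPUs.
 */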

/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 */
void fpu__init_cpu(void)
{
        fpu__init_cpu_generic();
        fpu__init_cpu_xstate();
        fpu__init_cpu_ctx_switch();
}

/*
 * The earliest FPU detection code.
 *
 * Set the X86_FEATURE_FPU CPU-capability bit based on
 * trying to execute an actual sequence of FPU instructions:
 */
static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
        unsigned long cr0;
        u16 fsw, fcw;

        fsw = fcw = 0xffff;

        cr0 = read_cr0();
        cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
        write_cr0(cr0);

        if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
                asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
                             : "+m" (fsw), "+m" (fcw));

                if (fsw == 0 && (fcw & 0x103f) == 0x003f)
                        set_cpu_cap(c, X86_FEATURE_FPU);
                else
                        clear_cpu_cap(c, X86_FEATURE_FPU);
        }

#ifndef CONFIG_MATH_EMULATION
        if (!boot_cpu_has(X86_FEATURE_FPU)) {
                pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
                for (;;)
                        asm volatile("hlt");
        }
#endif
}
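
/*
 * Background note on the probe values: after FNINIT a working FPU
 * reports a status word of 0 and a control word of 0x037f, and
 * 0x037f & 0x103f == 0x003f, so the check above passes exactly when
 * real FPU hardware responded. Without an FPU the memory operands are
 * expected to keep their 0xffff preset and the test fails.
 */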

/*
 * Boot time FPU feature detection code:
 */
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);

static void __init fpu__init_system_mxcsr(void)
{
        unsigned int mask = 0;

        if (boot_cpu_has(X86_FEATURE_FXSR)) {
                /* Static because GCC does not get 16-byte stack alignment right: */
                static struct fxregs_state fxregs __initdata;

                asm volatile("fxsave %0" : "+m" (fxregs));

                mask = fxregs.mxcsr_mask;

                /*
                 * If zero then use the default features mask,
                 * which has all features set, except the
                 * denormals-are-zero feature bit:
                 */
                if (mask == 0)
                        mask = 0x0000ffbf;
        }
        mxcsr_feature_mask &= mask;
}
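
/*
 * Background note: FXSAVE stores the CPU's MXCSR_MASK at offset 28 of
 * the fxregs area; a value of 0 there means the CPU predates the field
 * and the architectural default of 0x0000ffbf applies, i.e. all MXCSR
 * bits except bit 6, the denormals-are-zero (DAZ) bit.
 */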

/*
 * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 */
static void __init fpu__init_system_generic(void)
{
        /*
         * Set up the legacy init FPU context. (xstate init might overwrite this
         * with a more modern format, if the CPU supports it.)
         */
        fpstate_init(&init_fpstate);

        fpu__init_system_mxcsr();
}

/*
 * Size of the FPU context state. All tasks in the system use the
 * same context size, regardless of what portion they use.
 * This is inherent to the XSAVE architecture which puts all state
 * components into a single, contiguous memory block:
 */
unsigned int fpu_kernel_xstate_size;
EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);

/* Get alignment of the TYPE. */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
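
/*
 * Example: in 'struct { char x; TYPE test; }' the compiler pads after
 * 'x' so that 'test' lands on TYPE's natural alignment, hence the
 * offset of 'test' equals that alignment. For TYPE == u64 on x86-64
 * this evaluates to 8.
 */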

/*
 * Enforce that 'MEMBER' is the last field of 'TYPE'.
 *
 * Align the computed size with alignment of the TYPE,
 * because that's how C aligns structs.
 */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
        BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
                                           TYPE_ALIGN(TYPE)))
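
/*
 * Example: given 'struct s { u32 a; u16 b; };' (size 8, alignment 4),
 * offsetofend(struct s, b) == 6 and ALIGN(6, 4) == 8 == sizeof, so
 * CHECK_MEMBER_AT_END_OF(struct s, b) compiles cleanly, while checking
 * 'a' instead (ALIGN(4, 4) == 4 != 8) would trip the BUILD_BUG_ON().
 */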

/*
 * We append the 'struct fpu' to the task_struct:
 */
static void __init fpu__init_task_struct_size(void)
{
        int task_size = sizeof(struct task_struct);

        /*
         * Subtract off the static size of the register state.
         * It potentially has a bunch of padding.
         */
        task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);

        /*
         * Add back the dynamically-calculated register state
         * size.
         */
        task_size += fpu_kernel_xstate_size;

        /*
         * We dynamically size 'struct fpu', so we require that
         * it be at the end of 'thread_struct' and that
         * 'thread_struct' be at the end of 'task_struct'. If
         * you hit a compile error here, check the structure to
         * see if something got added to the end.
         */
        CHECK_MEMBER_AT_END_OF(struct fpu, state);
        CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
        CHECK_MEMBER_AT_END_OF(struct task_struct, thread);

        arch_task_struct_size = task_size;
}
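
/*
 * Illustrative arithmetic (numbers are made up): if sizeof(struct
 * task_struct) were 9088 bytes with a 4096-byte static fpu.state at
 * its tail, and the boot CPU only needed a 2624-byte XSAVE area, then
 * arch_task_struct_size would become 9088 - 4096 + 2624 = 7616, and
 * every task allocation saves the difference.
 */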

/*
 * Set up the user and kernel xstate sizes based on the legacy FPU context size.
 *
 * We set this up first, and later it will be overwritten by
 * fpu__init_system_xstate() if the CPU knows about xstates.
 */
static void __init fpu__init_system_xstate_size_legacy(void)
{
        static int on_boot_cpu __initdata = 1;

        WARN_ON_FPU(!on_boot_cpu);
        on_boot_cpu = 0;

        /*
         * Note that xstate sizes might be overwritten later during
         * fpu__init_system_xstate().
         */
        if (!boot_cpu_has(X86_FEATURE_FPU)) {
                /*
                 * Disable xsave as we do not support it if i387
                 * emulation is enabled.
                 */
                setup_clear_cpu_cap(X86_FEATURE_XSAVE);
                setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
                fpu_kernel_xstate_size = sizeof(struct swregs_state);
        } else {
                if (boot_cpu_has(X86_FEATURE_FXSR))
                        fpu_kernel_xstate_size =
                                sizeof(struct fxregs_state);
                else
                        fpu_kernel_xstate_size =
                                sizeof(struct fregs_state);
        }

        fpu_user_xstate_size = fpu_kernel_xstate_size;
}

/*
 * FPU context switching strategies:
 *
 * Contrary to popular belief, we don't do lazy FPU saves, due to the
 * task migration complications it brings on SMP - we only do
 * lazy FPU restores.
 *
 * 'lazy' is the traditional strategy, which is based on setting
 * CR0::TS to 1 during context-switch (instead of doing a full
 * restore of the FPU state), which causes the first FPU instruction
 * after the context switch (whenever it is executed) to fault - at
 * which point we lazily restore the FPU state into FPU registers.
 *
 * Tasks are of course under no obligation to execute FPU instructions,
 * so it can easily happen that another context-switch occurs without
 * a single FPU instruction being executed. If we eventually switch
 * back to the original task (that still owns the FPU) then we have
 * not only saved the restores along the way, but we also have the
 * FPU ready to be used for the original task.
 *
 * 'lazy' is deprecated because it's almost never a performance win
 * and it's much more complicated than 'eager'.
 *
 * 'eager' switching is the default on all CPUs: there we switch the FPU
 * state during every context switch, regardless of whether the task
 * has used FPU instructions in that time slice or not. This is done
 * because modern FPU context saving instructions are able to optimize
 * state saving and restoration in hardware: they can detect both
 * unused and untouched FPU state and optimize accordingly.
 *
 * [ Note that even in 'lazy' mode we might optimize context switches
 *   to use 'eager' restores, if we detect that a task is using the FPU
 *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
 */
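
/*
 * Lazy restore in a nutshell (a simplified sketch of the flow the
 * comment above describes):
 *
 *   context switch away:  stts()     -> CR0::TS = 1, FPU regs untouched
 *   task issues FPU insn: #NM fault  -> trap into the kernel
 *   #NM handler:          clts(); restore the task's FPU registers
 *   further FPU insns:    run at full speed, no more faults
 */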

static enum { ENABLE, DISABLE } eagerfpu = ENABLE;

/*
 * Find supported xfeatures based on cpu features and command-line input.
 * This must be called after fpu__init_parse_early_param() is called and
 * xfeatures_mask is enumerated.
 */
u64 __init fpu__get_supported_xfeatures_mask(void)
{
        /* Support all xfeatures known to us */
        if (eagerfpu != DISABLE)
                return XCNTXT_MASK;

        /* Warn about the xfeatures that get disabled in non-eagerfpu mode */
        if (xfeatures_mask & XFEATURE_MASK_EAGER) {
                pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
                       xfeatures_mask & XFEATURE_MASK_EAGER);
        }

        /* Return a mask that masks out all features requiring eagerfpu mode */
        return ~XFEATURE_MASK_EAGER;
}

/*
 * Disable features dependent on eagerfpu.
 */
static void __init fpu__clear_eager_fpu_features(void)
{
        setup_clear_cpu_cap(X86_FEATURE_MPX);
}

/*
 * Pick the FPU context switching strategy:
 *
 * When eagerfpu is not disabled on the command line, we ensure it is
 * ENABLE if either of the following is true:
 *
 * (1) the cpu has xsaveopt, as it has the optimization and doing eager
 *     FPU switching has a relatively low cost compared to a plain xsave;
 * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
 *     switching. Should the kernel boot with noxsaveopt, we support MPX
 *     with eager FPU switching at a higher cost.
 */
static void __init fpu__init_system_ctx_switch(void)
{
        static bool on_boot_cpu __initdata = 1;

        WARN_ON_FPU(!on_boot_cpu);
        on_boot_cpu = 0;

        WARN_ON_FPU(current->thread.fpu.fpstate_active);

        if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
                eagerfpu = ENABLE;

        if (xfeatures_mask & XFEATURE_MASK_EAGER)
                eagerfpu = ENABLE;

        if (eagerfpu == ENABLE)
                setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);

        printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n",
               eagerfpu == ENABLE ? "eager" : "lazy");
}
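
/*
 * Summary of how the above resolves (derived from the code, shown here
 * for clarity):
 *
 *   default (no 'eagerfpu=off')                 -> eager
 *   'eagerfpu=off', no eager xfeatures left     -> lazy
 *   'eagerfpu=off', eager xfeatures enumerated  -> forced back to eager
 */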

/*
 * We parse fpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init fpu__init_parse_early_param(void)
{
        if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
                eagerfpu = DISABLE;
                fpu__clear_eager_fpu_features();
        }

        if (cmdline_find_option_bool(boot_command_line, "no387"))
                setup_clear_cpu_cap(X86_FEATURE_FPU);

        if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
                setup_clear_cpu_cap(X86_FEATURE_FXSR);
                setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
                setup_clear_cpu_cap(X86_FEATURE_XMM);
        }

        if (cmdline_find_option_bool(boot_command_line, "noxsave"))
                fpu__xstate_clear_all_cpu_caps();

        if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
                setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

        if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
                setup_clear_cpu_cap(X86_FEATURE_XSAVES);
}
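
/*
 * Example (hypothetical) kernel command line exercising two of the
 * options parsed above:
 *
 *   ... eagerfpu=off noxsaveopt ...
 *
 * which would select lazy context switches (unless an eager xfeature
 * forces eager mode back on) and clear X86_FEATURE_XSAVEOPT.
 */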

/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes:
 */
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
        fpu__init_parse_early_param();
        fpu__init_system_early_generic(c);

        /*
         * The FPU has to be operational for some of the
         * later FPU init activities:
         */
        fpu__init_cpu();

        /*
         * But don't leave CR0::TS set yet, as some of the FPU setup
         * methods depend on being able to execute FPU instructions
         * that will fault on a set TS, such as the FXSAVE in
         * fpu__init_system_mxcsr().
         */
        clts();

        fpu__init_system_generic();
        fpu__init_system_xstate_size_legacy();
        fpu__init_system_xstate();
        fpu__init_task_struct_size();

        fpu__init_system_ctx_switch();
}