kexec.h

#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE 0
# define VA_CONTROL_PAGE 1
# define PA_PGD 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
#else
# define PA_CONTROL_PAGE 0
# define VA_CONTROL_PAGE 1
# define PA_TABLE_PAGE 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE 2048

#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can
 * return, i.e. the highest page that is mapped directly into kernel
 * memory, so that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE 4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
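
/*
 * Illustrative sketch (not part of the real header; the helper name is
 * made up): a loader-side sanity check against the limits above. The
 * actual checks live in the generic kexec code and differ in detail.
 */
static inline int kexec_example_dest_addr_ok(unsigned long paddr)
{
        /* Destination pages must sit below the per-arch ceiling. */
        return paddr <= KEXEC_DESTINATION_MEMORY_LIMIT;
}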

/* Memory to backup during crash kdump */
#define KEXEC_BACKUP_SRC_START (0UL)
#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */

/*
 * The CPU does not save ss and sp on the stack if execution is already
 * running in kernel mode at the time the NMI occurs. This code fixes
 * that up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
                                      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
        newregs->sp = (unsigned long)&(oldregs->sp);
        asm volatile("xorl %%eax, %%eax\n\t"
                     "movw %%ss, %%ax\n\t"
                     : "=a"(newregs->ss));
#endif
}
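
/*
 * Note on the &oldregs->sp trick above (explanatory, not from the
 * original source): a trap taken while already in kernel mode on
 * 32-bit pushes no ss/esp, so the hardware frame, and hence pt_regs,
 * ends at the sp member. The interrupted code's %esp is therefore the
 * address of that never-pushed slot, which is exactly &oldregs->sp.
 */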

/*
 * This function is responsible for capturing the register state if we
 * are coming in via panic; otherwise it just fixes up ss and sp if we
 * are coming in via a kernel-mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
                                    struct pt_regs *oldregs)
{
        if (oldregs) {
                memcpy(newregs, oldregs, sizeof(*newregs));
                crash_fixup_ss_esp(newregs, oldregs);
        } else {
#ifdef CONFIG_X86_32
                asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
                asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
                asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
                asm volatile("movl %%esi,%0" : "=m"(newregs->si));
                asm volatile("movl %%edi,%0" : "=m"(newregs->di));
                asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
                asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
                asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
                asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
                asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
                asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
                asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
                asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
                asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
                asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
                asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
                asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
                asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
                asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
                asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
                asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
                asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
                asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
                asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
                asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
                asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
                asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
                asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
                asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
                asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
                asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
                asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif
                newregs->ip = (unsigned long)current_text_addr();
        }
}
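
/*
 * Minimal usage sketch (the caller name is made up; the pattern is
 * modelled on the kdump code in arch/x86/kernel/crash.c): the crash
 * path snapshots registers from the exception frame when one exists,
 * or from the live CPU when oldregs is NULL.
 */
static inline void kexec_example_snapshot_regs(struct pt_regs *oldregs)
{
        struct pt_regs fixed_regs;

        crash_setup_regs(&fixed_regs, oldregs);
        /* fixed_regs would now be written into the per-cpu ELF notes. */
}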

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
                unsigned long control_page,
                unsigned long start_address,
                unsigned int has_pae,
                unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
                unsigned long page_list,
                unsigned long start_address,
                unsigned int preserve_context);
#endif
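
/*
 * Illustrative 64-bit call-site sketch, modelled on machine_kexec() in
 * arch/x86/kernel/machine_kexec_64.c; the helper name and parameter
 * list are inventions for the example (the real caller pulls head,
 * start and preserve_context out of struct kimage, and needs
 * <asm/io.h> for virt_to_phys()). It shows what the PA_*/VA_* indices
 * at the top of this file are for: slots in the page_list array that
 * relocate_kernel() consumes.
 */
#ifdef CONFIG_X86_64
static inline unsigned long
kexec_example_relocate(unsigned long head, unsigned long start,
                       unsigned int preserve_context,
                       void *control_page,
                       unsigned long table_page_phys,
                       unsigned long swap_page_phys)
{
        unsigned long page_list[PAGES_NR];

        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
        page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
        page_list[PA_TABLE_PAGE] = table_page_phys;   /* identity-map tables */
        page_list[PA_SWAP_PAGE] = swap_page_phys;     /* used by preserve_context */

        /* Does not return unless preserve_context was requested. */
        return relocate_kernel(head, (unsigned long)page_list,
                               start, preserve_context);
}
#endif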

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
        pgd_t *pgd;
#ifdef CONFIG_X86_PAE
        pmd_t *pmd0;
        pmd_t *pmd1;
#endif
        pte_t *pte0;
        pte_t *pte1;
};
#else
struct kimage_arch {
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /* Details of backup region */
        unsigned long backup_src_start;
        unsigned long backup_src_sz;

        /* Physical address of backup segment */
        unsigned long backup_load_addr;

        /* Core ELF header buffer */
        void *elf_headers;
        unsigned long elf_headers_sz;
        unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */
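
/*
 * Sketch of how the kdump loader might fill the backup fields above,
 * modelled on crash_load_segments() in arch/x86/kernel/crash.c (the
 * helper name is made up; verify the exact size arithmetic against
 * that file). kdump saves the low 640K, which the capture kernel
 * would otherwise scribble over.
 */
#ifdef CONFIG_X86_64
static inline void kexec_example_init_backup(struct kimage_arch *arch)
{
        arch->backup_src_start = KEXEC_BACKUP_SRC_START;
        arch->backup_src_sz = KEXEC_BACKUP_SRC_END - KEXEC_BACKUP_SRC_START;
}
#endif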

#ifdef CONFIG_X86_64
/*
 * The number and order of the elements in this structure must match
 * those in arch/x86/purgatory/entry64.S. If you make a change here,
 * make the corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
        uint64_t rax;
        uint64_t rcx;
        uint64_t rdx;
        uint64_t rbx;
        uint64_t rsp;
        uint64_t rbp;
        uint64_t rsi;
        uint64_t rdi;
        uint64_t r8;
        uint64_t r9;
        uint64_t r10;
        uint64_t r11;
        uint64_t r12;
        uint64_t r13;
        uint64_t r14;
        uint64_t r15;
        uint64_t rip;
};
#endif
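
/*
 * Loader-side sketch (assumed flow, based on the kexec_file_load()
 * loaders such as arch/x86/kernel/kexec-bzimage64.c; the helper and
 * its parameters are illustrative): the loader fills a
 * struct kexec_entry64_regs that purgatory's entry64.S restores just
 * before jumping to the new kernel. Per the 64-bit boot protocol,
 * %rsi must point at boot_params on entry.
 */
#ifdef CONFIG_X86_64
static inline void
kexec_example_setup_entry64_regs(struct kexec_entry64_regs *regs,
                                 unsigned long bootparams_phys,
                                 unsigned long kernel_entry,
                                 unsigned long stack_top)
{
        regs->rsi = bootparams_phys;    /* %rsi = &boot_params at entry */
        regs->rsp = stack_top;          /* stack for the entry stub */
        regs->rip = kernel_entry;       /* where purgatory finally jumps */
}
#endif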

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);
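
/*
 * Registration sketch, modelled on kvm-intel (arch/x86/kvm/vmx.c): a
 * hypervisor module publishes its VMCLEAR callback through the RCU
 * pointer above so the crash path can flush loaded VMCSs before kdump.
 * The callback body and names here are illustrative;
 * <linux/rcupdate.h> provides rcu_assign_pointer().
 */
static void example_crash_vmclear_cb(void)
{
        /* VMCLEAR every VMCS loaded on this CPU (hypervisor-specific). */
}

static inline void example_register_crash_vmclear(void)
{
        rcu_assign_pointer(crash_vmclear_loaded_vmcss,
                           example_crash_vmclear_cb);
}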

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */