hibernate_64.c

/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>

#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

/* Physical address of the temporary top-level page table built for restore. */
unsigned long temp_level4_pgt __visible;

/* Address of the safe page that the low-level restore code is copied to. */
unsigned long relocated_restore_code __visible;
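
/*
 * Map the page containing the image kernel's entry point into the temporary
 * page tables: a single 2M executable mapping of jump_address_phys at the
 * virtual address restore_jump_address.
 */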
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */
	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	set_pgd(pgd + pgd_index(restore_jump_address),
		__pgd(__pa(pud) | _KERNPG_TABLE));

	return 0;
}
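
/* Page table allocation callback passed to kernel_ident_mapping_init(). */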
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}
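
/*
 * Build the temporary page tables used during resume: a text mapping for the
 * final jump into the image kernel plus an identity mapping of every range
 * recorded in pfn_mapped[], all allocated from safe pages.  The physical
 * address of the resulting top-level table is saved in temp_level4_pgt.
 */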
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page = alloc_pgt_page,
		.pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
		.offset = __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);
	return 0;
}
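
/*
 * Copy core_restore_code to a safe page covered by the identity mapping, so
 * that it keeps running while the rest of the kernel is overwritten with
 * image data, and clear _PAGE_NX from whatever mapping level (PUD, PMD or
 * PTE) covers that page so the relocated code can be executed.
 */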
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	pud_t *pud;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	pud = pud_offset(pgd, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
	} else {
		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

		if (pmd_large(*pmd)) {
			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		} else {
			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
		}
	}
	__flush_tlb_all();

	return 0;
}
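
/*
 * Prepare the temporary page tables, relocate the restore code and hand
 * control to restore_image(); if that succeeds, execution continues in the
 * restored image kernel rather than returning here.
 */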
int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
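
/*
 * Arch-specific data stored in the hibernation image header by
 * arch_hibernation_header_save() and read back by
 * arch_hibernation_header_restore().
 */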
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC 0x123456789ABCDEF0UL

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size of the data that can be stored at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = (unsigned long)&restore_registers;
	rdr->jump_address_phys = __pa_symbol(&restore_registers);
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}