efi.h

#ifndef _ASM_EFI_H
#define _ASM_EFI_H

#include <asm/cpufeature.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/neon.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
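
/*
 * Discovers the UEFI system table and memory map that the stub records in
 * the device tree, and reserves the regions described there. Compiles away
 * to nothing when EFI support is not configured.
 */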
#ifdef CONFIG_EFI
extern void efi_init(void);
#else
#define efi_init()
#endif
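
/*
 * Helpers for building the private runtime-services page tables: map the
 * region described by @md into @mm, or update the permissions of an
 * existing mapping (e.g. when the memory attributes table is applied).
 */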
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
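
/*
 * Hooks invoked by the generic EFI runtime wrappers around every runtime
 * service call: claim the NEON/FP registers (the firmware is allowed to
 * use them) and switch to the EFI page tables before the call, then undo
 * both afterwards.
 */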
#define arch_efi_call_virt_setup() \
({ \
        kernel_neon_begin(); \
        efi_virtmap_load(); \
})

#define arch_efi_call_virt(p, f, args...) \
({ \
        efi_##f##_t *__f; \
        __f = p->f; \
        __f(args); \
})

#define arch_efi_call_virt_teardown() \
({ \
        efi_virtmap_unload(); \
        kernel_neon_end(); \
})
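
/*
 * Illustrative sketch only (not part of this header): the generic wrapper
 * code is expected to bracket each firmware call with the hooks above,
 * roughly as follows, where 'rt' stands for the firmware's runtime
 * services table and the argument list is whatever the service takes:
 *
 *      arch_efi_call_virt_setup();
 *      status = arch_efi_call_virt(rt, get_time, tm, tc);
 *      arch_efi_call_virt_teardown();
 *
 * i.e. every runtime service runs with the kernel owning NEON and with
 * the EFI virtual mappings live.
 */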

/* DAIF flags that must not change across a runtime service call */
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

/* arch specific definitions used by the stub code */

/*
 * AArch64 requires the DTB to be 8-byte aligned and placed within the first
 * 512 MiB from the start of the kernel, and it may not cross a 2 MiB
 * boundary. We set the alignment to 2 MiB so we know it won't cross one.
 */
#define EFI_FDT_ALIGN   SZ_2M   /* used by allocate_new_fdt_and_exit_boot() */
#define MAX_FDT_OFFSET  SZ_512M
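
/*
 * Boot-time calls made by the shared stub code go through the sys_table_arg
 * pointer that the libstub routines pass around; the arm64 stub always runs
 * in 64-bit mode, so efi_is_64bit() is a constant.
 */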
#define efi_call_early(f, ...)          sys_table_arg->boottime->f(__VA_ARGS__)
#define __efi_call_early(f, ...)        f(__VA_ARGS__)
#define efi_is_64bit()                  (true)

#define alloc_screen_info(x...)         &screen_info
#define free_screen_info(x...)

static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}
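
/*
 * Allocations made by the stub are aligned to 64 KiB so that they never
 * share a granule with other memory regions when the kernel is configured
 * for 64 KiB pages.
 */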
#define EFI_ALLOC_ALIGN         SZ_64K

/*
 * On ARM systems, virtually remapped UEFI runtime services are set up in two
 * distinct stages:
 * - The stub retrieves the final version of the memory map from UEFI,
 *   populates the virt_addr fields and calls the SetVirtualAddressMap()
 *   [SVAM] runtime service to communicate the new mapping to the firmware.
 *   (Note that the new mapping is not live at this time.)
 * - During an early initcall(), the EFI system table is permanently remapped
 *   and the virtual remapping of the UEFI Runtime Services regions is loaded
 *   into a private set of page tables. If this all succeeds, the Runtime
 *   Services are enabled and the EFI_RUNTIME_SERVICES bit is set.
 */
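
/*
 * Point TTBR0 at @mm's page tables for the duration of a runtime services
 * call, taking care to keep the TTBR0-PAN bookkeeping (saved ttbr0)
 * consistent with whatever is installed in the hardware register.
 */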
static inline void efi_set_pgd(struct mm_struct *mm)
{
        __switch_mm(mm);

        if (system_uses_ttbr0_pan()) {
                if (mm != current->active_mm) {
                        /*
                         * Update the current thread's saved ttbr0 since it is
                         * restored as part of a return from exception. Set
                         * the hardware TTBR0_EL1 using cpu_switch_mm()
                         * directly to enable potential errata workarounds.
                         */
                        update_saved_ttbr0(current, mm);
                        cpu_switch_mm(mm->pgd, mm);
                } else {
                        /*
                         * Defer the switch to the current thread's TTBR0_EL1
                         * until uaccess_enable(). Restore the current
                         * thread's saved ttbr0 corresponding to its active_mm
                         * (if different from init_mm).
                         */
                        cpu_set_reserved_ttbr0();
                        if (current->active_mm != &init_mm)
                                update_saved_ttbr0(current, current->active_mm);
                }
        }
}
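
/* Switch to and away from the private EFI page tables around runtime calls */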
void efi_virtmap_load(void);
void efi_virtmap_unload(void);

#endif /* _ASM_EFI_H */