page.h

#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
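/*
 * pfn_mapped[] records the page-frame ranges that have been added to the
 * kernel direct mapping; nr_pfn_mapped is the number of valid entries.
 */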
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;
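
/*
 * Arch hooks for clearing/copying pages that will be mapped into user space.
 * x86 caches are physically indexed, so there is no aliasing to handle: the
 * user virtual address and struct page arguments are simply ignored.
 */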
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
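
/*
 * Allocate a zeroed highmem page for a user mapping at @vaddr in @vma.
 * Defining __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE tells the generic highmem
 * code to use this version instead of its fallback.
 */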
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
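
/*
 * __pa() translates a kernel virtual address to a physical address.
 * __pa_nodebug() performs the same translation without the sanity checks
 * done when CONFIG_DEBUG_VIRTUAL is enabled.
 */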
#ifndef __pa
#define __pa(x)		__phys_addr((unsigned long)(x))
#endif

#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))

/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic. */
/*
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during __pa() calculation and can optimize it unexpectedly.
 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
 * case properly. Once all supported versions of gcc understand it, we can
 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
 */
#define __pa_symbol(x) \
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
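
/*
 * __va() is the inverse of __pa() for direct-mapped memory: it returns the
 * kernel virtual address of a physical address by adding PAGE_OFFSET.
 */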
#ifndef __va
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif

#define __boot_va(x)		__va(x)
#define __boot_pa(x)		__pa(x)

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
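/* pfn_to_kaddr() gives the direct-mapping virtual address of a page frame. */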
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
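
/*
 * Example (illustrative only): for a buffer in the kernel direct map,
 * e.g. one returned by kmalloc(),
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = __pa(buf);		physical address of buf
 *	struct page *pg = virt_to_page(buf);	struct page backing buf
 *
 * virt_addr_valid(buf) is true here, so virt_to_page(buf) is valid.
 */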

#endif	/* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif	/* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */