/*
 * pgalloc.h — x86 page-table page allocation/free helpers.
 */
  1. #ifndef _ASM_X86_PGALLOC_H
  2. #define _ASM_X86_PGALLOC_H
  3. #include <linux/threads.h>
  4. #include <linux/mm.h> /* for struct page */
  5. #include <linux/pagemap.h>
  6. static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
  7. #ifdef CONFIG_PARAVIRT
  8. #include <asm/paravirt.h>
  9. #else
  10. #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)
  11. static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
  12. static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
  13. static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
  14. static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
  15. unsigned long start, unsigned long count) {}
  16. static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
  17. static inline void paravirt_release_pte(unsigned long pfn) {}
  18. static inline void paravirt_release_pmd(unsigned long pfn) {}
  19. static inline void paravirt_release_pud(unsigned long pfn) {}
  20. #endif
  21. /*
  22. * Flags to use when allocating a user page table page.
  23. */
  24. extern gfp_t __userpte_alloc_gfp;
  25. /*
  26. * Allocate and free page tables.
  27. */
  28. extern pgd_t *pgd_alloc(struct mm_struct *);
  29. extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  30. extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
  31. extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
  32. /* Should really implement gc for free page table pages. This could be
  33. done with a reference count in struct page. */
  34. static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  35. {
  36. BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
  37. free_page((unsigned long)pte);
  38. }
  39. static inline void pte_free(struct mm_struct *mm, struct page *pte)
  40. {
  41. pgtable_page_dtor(pte);
  42. __free_page(pte);
  43. }
  44. extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
  45. static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
  46. unsigned long address)
  47. {
  48. ___pte_free_tlb(tlb, pte);
  49. }
  50. static inline void pmd_populate_kernel(struct mm_struct *mm,
  51. pmd_t *pmd, pte_t *pte)
  52. {
  53. paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
  54. set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
  55. }
  56. static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  57. struct page *pte)
  58. {
  59. unsigned long pfn = page_to_pfn(pte);
  60. paravirt_alloc_pte(mm, pfn);
  61. set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
  62. }
  63. #define pmd_pgtable(pmd) pmd_page(pmd)
  64. #if CONFIG_PGTABLE_LEVELS > 2
  65. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  66. {
  67. struct page *page;
  68. gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
  69. if (mm == &init_mm)
  70. gfp &= ~__GFP_ACCOUNT;
  71. page = alloc_pages(gfp, 0);
  72. if (!page)
  73. return NULL;
  74. if (!pgtable_pmd_page_ctor(page)) {
  75. __free_pages(page, 0);
  76. return NULL;
  77. }
  78. return (pmd_t *)page_address(page);
  79. }
  80. static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
  81. {
  82. BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
  83. pgtable_pmd_page_dtor(virt_to_page(pmd));
  84. free_page((unsigned long)pmd);
  85. }
  86. extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
  87. static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
  88. unsigned long address)
  89. {
  90. ___pmd_free_tlb(tlb, pmd);
  91. }
  92. #ifdef CONFIG_X86_PAE
  93. extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
  94. #else /* !CONFIG_X86_PAE */
  95. static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  96. {
  97. paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
  98. set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
  99. }
  100. #endif /* CONFIG_X86_PAE */
  101. #if CONFIG_PGTABLE_LEVELS > 3
  102. static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
  103. {
  104. paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
  105. set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
  106. }
  107. static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
  108. {
  109. gfp_t gfp = GFP_KERNEL_ACCOUNT;
  110. if (mm == &init_mm)
  111. gfp &= ~__GFP_ACCOUNT;
  112. return (pud_t *)get_zeroed_page(gfp);
  113. }
  114. static inline void pud_free(struct mm_struct *mm, pud_t *pud)
  115. {
  116. BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
  117. free_page((unsigned long)pud);
  118. }
  119. extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
  120. static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
  121. unsigned long address)
  122. {
  123. ___pud_free_tlb(tlb, pud);
  124. }
  125. #endif /* CONFIG_PGTABLE_LEVELS > 3 */
  126. #endif /* CONFIG_PGTABLE_LEVELS > 2 */
  127. #endif /* _ASM_X86_PGALLOC_H */