/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables.  Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables.  Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes.  Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0.  When memory is
 * low and a pmd table or page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher layer code, so that
 * the failure is recognized later on.  Linux does not seem to handle
 * these failures very well, though.  The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.  The
 * layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
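
/*
 * Illustrative sketch (not part of the original header): assuming the
 * 4 kB page configuration defined below (PGD_ORDER 1, PMD_ORDER 0,
 * PTE_ORDER 0, pud folded), a 40-bit virtual address decodes as:
 *
 *	bits 39..30	pgd index  (10 bits, PTRS_PER_PGD == 1024)
 *	bits 29..21	pmd index  ( 9 bits, PTRS_PER_PMD == 512)
 *	bits 20..12	pte index  ( 9 bits, PTRS_PER_PTE == 512)
 *	bits 11..0	byte offset within the 4 kB page
 *
 * Other page sizes shift these boundaries; the per-size values follow
 * from the PMD_SHIFT/PGDIR_SHIFT formulas below.
 */
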
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
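
/*
 * Worked example (illustrative, not in the original header): with the
 * pmd folded and 16 kB pages (PAGE_SHIFT == 14, PTE_ORDER == 0):
 *
 *	PGDIR_SHIFT  = 14 + 14 + 0 - 3 = 25
 *	PGDIR_SIZE   = 1UL << 25 = 32 MB mapped per pgd entry
 *	PTRS_PER_PGD = 16384 / 8 = 2048 entries (11 bits of index)
 *
 * which yields the 25 + 11 = 36 bits of virtual address space quoted
 * in the comment below.
 */
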
/*
 * For 4kB page size we use a 3 level page tree and an 8kB top-level
 * page directory, which permits us to map 40 bits of virtual address
 * space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page tree for a total of 42 bits
 * of virtual address space.
 */
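
/*
 * Illustrative derivations of the figures above (not in the original
 * header), using PTRS_PER_x = (PAGE_SIZE << x_ORDER) / 8:
 *
 *	4kB,  3 levels: 12 + 9 (pte) + 9 (pmd) + 10 (order-1 pgd) = 40 bits
 *	8kB,  3 levels: 13 + 10 + 10 + 10                         = 43 bits (8TB)
 *	16kB, 2 levels: 14 + 11 (pte) + 11 (pgd)                  = 36 bits
 *	64kB, 2 levels: 16 + 13 (pte) + 13 (pgd)                  = 42 bits
 */
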
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER	1
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER	0
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER	1
#else
#define PGD_ORDER	0
#endif
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER	0
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	0
#define PTE_ORDER	0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER	0
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER	0
#else
#define PMD_ORDER	aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER	0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#define FIRST_USER_ADDRESS	0UL
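
/*
 * Example values (illustrative only): with CONFIG_PAGE_SIZE_4KB,
 * PTRS_PER_PGD = (4096 << 1) / 8 = 1024, while PTRS_PER_PMD and
 * PTRS_PER_PTE are both 4096 / 8 = 512.  USER_PTRS_PER_PGD depends on
 * TASK_SIZE64 for the configured user address-space size.
 */
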
/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
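
/*
 * Worked example (illustrative): with the 4 kB configuration,
 * PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE
 *	= 1024 * 512 * 512 * 4096 = 1UL << 40,
 * so on a CPU with cpu_vmbits >= 40 the vmalloc window ends at
 * MAP_BASE + (1UL << 40) - (1UL << 32).
 */
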
#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
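
/*
 * Sketch (illustrative, not in the original header): pfn_pte() packs a
 * page frame number above _PFN_SHIFT and ORs in the protection bits, so
 * as long as the pgprot bits live below _PFN_SHIFT the round trip
 *
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 *
 * holds for any valid pfn.
 */
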
#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
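
/*
 * Illustrative sketch (not part of this header): a software walk of a
 * kernel virtual address using the helpers above, assuming the 4-level
 * API of this header's vintage where pud_offset() is supplied by the
 * folded-pud generic header.  Error handling is omitted.
 *
 *	static pte_t *walk_kernel_address(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */
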
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
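
/*
 * Worked example (illustrative): __swp_entry(3, 0x1234) yields
 * (3 << 16) | (0x1234 << 24) == 0x0000001234030000, from which
 * __swp_type() recovers 3 and __swp_offset() recovers 0x1234.
 */
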
#endif /* _ASM_PGTABLE_64_H */