#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
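
/*
 * Illustrative use (a sketch, not part of this header): a driver
 * mapping MMIO into userspace would typically strip cacheability
 * before calling remap_pfn_range(); vma, pfn and size here are
 * assumed to come from the caller.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */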
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}
#endif	/* CONFIG_HAVE_ARCH_SOFT_DIRTY */
/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
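
/*
 * Illustrative sketch: on a CPU without NX support, _PAGE_NX is clear
 * in __supported_pte_mask, so a *present* pgprot carrying NX loses it
 * here, while a non-present encoding (e.g. a swap entry) passes
 * through untouched:
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX))
 *		-> _PAGE_PRESENT		(NX masked off)
 *	massage_pgprot(__pgprot(_PAGE_NX))
 *		-> _PAGE_NX			(not present, left as-is)
 */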
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
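
/*
 * Illustrative sketch (names are hypothetical): pte_modify() keeps
 * everything in _PAGE_CHG_MASK (the PFN plus bits such as
 * dirty/accessed) and takes the remaining protection bits from
 * newprot. Making an existing pte read-only could look like:
 *
 *	pte_t old = *ptep;
 *	pte_t new = pte_modify(old, PAGE_READONLY);
 *	// pte_pfn(new) == pte_pfn(old); RW/NX now follow PAGE_READONLY
 */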
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
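
/*
 * Illustrative sketch: the rules above only reject weakening an
 * uncached/write-through request into a more cacheable return type;
 * going the other way is fine. With paddr/size assumed tracked:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_UC_MINUS,
 *			       _PAGE_CACHE_MODE_WB);	// 0: UC- -> WB denied
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WB,
 *			       _PAGE_CACHE_MODE_UC_MINUS); // 1: allowed
 */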
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif	/* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
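
/*
 * Illustrative arithmetic (a sketch; the concrete numbers assume
 * x86-64 4-level paging with 4 KiB pages: PAGE_SHIFT = 12,
 * PMD_SHIFT = 21, PTRS_PER_PTE = PTRS_PER_PMD = 512):
 *
 *	addr = 0xffff888000201000
 *	pte_index(addr) = (addr >> 12) & 511 = 1
 *	pmd_index(addr) = (addr >> 21) & 511 = 1
 */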
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
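
/*
 * Illustrative walk (a sketch, assuming a 4-level configuration and a
 * present, non-huge kernel mapping; real callers must check each level
 * with p*d_none()/p*d_bad() and honour huge entries):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */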
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
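
/*
 * Illustrative use (a sketch; "pgd" here stands for a freshly
 * allocated pgd page, an assumption for the example): copying the
 * kernel half of swapper_pg_dir into a new pgd, pgd_ctor()-style.
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */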
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
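
/*
 * Illustrative arithmetic (assuming 4 KiB base pages, so PTE_SHIFT = 9,
 * and the usual PG_LEVEL_4K = 1 / PG_LEVEL_2M = 2 numbering):
 *
 *	page_level_shift(PG_LEVEL_4K) = 3 + 1 * 9 = 12  ->  4 KiB
 *	page_level_shift(PG_LEVEL_2M) = 3 + 2 * 9 = 21  ->  2 MiB
 *	page_level_mask(PG_LEVEL_2M)  = ~(2 MiB - 1)
 */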
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
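
/*
 * Illustrative bit layout (a sketch): PKRU holds two bits per pkey,
 * AD at bit 2*pkey and WD at bit 2*pkey + 1. With pkru = 0x8 (WD set
 * for pkey 1, all other keys fully open):
 *
 *	__pkru_allows_read(0x8, 1)  -> true   (AD bit, bit 2, is clear)
 *	__pkru_allows_write(0x8, 1) -> false  (WD bit, bit 3, is set)
 */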
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */
#endif	/* _ASM_X86_PGTABLE_H */