/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macros below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)		asm ("tlbi " #op)
#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)

#define __tlbi(op, ...)			__TLBI_N(op, ##__VA_ARGS__, 1, 0)
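
/*
 * For illustration, the argument-counting trick above expands as
 * follows ("vmalle1is" takes no operand, "vale1is" takes one):
 *
 *	__tlbi(vmalle1is)	-> __TLBI_N(vmalle1is, 1, 0)
 *				-> __TLBI_0(vmalle1is, 1)
 *				-> asm ("tlbi vmalle1is")
 *	__tlbi(vale1is, addr)	-> __TLBI_N(vale1is, addr, 1, 0)
 *				-> __TLBI_1(vale1is, addr)
 *				-> asm ("tlbi vale1is, %0" : : "r" (addr))
 *
 * The ## before __VA_ARGS__ drops the preceding comma when no extra
 * argument is given, so "n" binds to 0 in the zero-argument case and
 * to 1 in the one-argument case.
 */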

/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call.  Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vma_struct describing address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vma	- vma_struct describing address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
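
/*
 * The local variant above uses the non-shareable forms (vmalle1 and
 * dsb nsh), so it only affects the TLB of the calling CPU, whereas
 * flush_tlb_all() uses the inner-shareable forms (vmalle1is and
 * dsb ish), which broadcast the invalidation to every CPU in the
 * inner-shareable domain.
 */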

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	dsb(ish);
}
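
/*
 * For the address-based TLBI operations, the register operand packs
 * both the page and the ASID: bits [43:0] hold VA[55:12] (hence the
 * ">> 12", which is independent of PAGE_SIZE) and bits [63:48] hold
 * the ASID (hence the "<< 48").
 */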

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
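
/*
 * Worked example: with 4KB pages (PAGE_SHIFT == 12), MAX_TLB_RANGE is
 * 1024 << 12 = 4MB, so any range larger than 1024 pages is flushed by
 * ASID (or entirely, for kernel ranges) instead of page by page.
 */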

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			__tlbi(vale1is, addr);
		else
			__tlbi(vae1is, addr);
	}
	dsb(ish);
}
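
/*
 * The loop variable is in units of 4KB granules (from the ">> 12"
 * encoding), so advancing by one page means adding
 * PAGE_SIZE >> 12 == 1 << (PAGE_SHIFT - 12) each iteration. The
 * last_level flag selects the leaf-only variant (vale1is), which may
 * leave cached intermediate (walk cache) entries intact; vae1is
 * invalidates entries at all levels.
 */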

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
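
/*
 * vaae1is invalidates entries for the address across all ASIDs, since
 * kernel mappings are global rather than tagged with a particular
 * ASID. The trailing isb() ensures the invalidation is seen by the
 * instruction stream before any subsequent access to the flushed
 * kernel range.
 */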

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	dsb(ish);
}
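
/*
 * Note that the non-leaf variant (vae1is) is used here rather than
 * vale1is: unlike the leaf-only form it also drops cached
 * intermediate (walk cache) entries for the address, which is exactly
 * what is needed when a pgd/pud/pmd entry has changed.
 */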

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */