#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3

#include <asm/current.h>
#include <asm/mcfmmu.h>
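
/*
 * Three MMU flavours are handled below: ColdFire flushes by writing
 * MMUOR_CNL to the MMUOR register, the 68040/68060 use the pflush,
 * pflushan and pflusha instructions, and the 68020/68030 use the older
 * pflush forms that take explicit function-code masks.
 *
 * On the '040/'060, pflush matches ATC entries against the function code
 * held in %dfc, which set_fs() programs on m68k, so the set_fs() dance
 * around the pflush below selects whether the supervisor or the user
 * entry for the address gets flushed.
 */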
static inline void flush_tlb_kernel_page(void *addr)
{
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
                mm_segment_t old_fs = get_fs();

                set_fs(KERNEL_DS);
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflush (%0)\n\t"
                                     ".chip 68k"
                                     : : "a" (addr));
                set_fs(old_fs);
        } else if (CPU_IS_020_OR_030)
                __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
/*
 * Flush all user-space ATC entries.
 */
static inline void __flush_tlb(void)
{
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflushan\n\t"
                                     ".chip 68k");
        } else if (CPU_IS_020_OR_030) {
                __asm__ __volatile__("pflush #0,#4");
        }
}
static inline void __flush_tlb040_one(unsigned long addr)
{
        __asm__ __volatile__(".chip 68040\n\t"
                             "pflush (%0)\n\t"
                             ".chip 68k"
                             : : "a" (addr));
}

static inline void __flush_tlb_one(unsigned long addr)
{
        if (CPU_IS_COLDFIRE)
                mmu_write(MMUOR, MMUOR_CNL);
        else if (CPU_IS_040_OR_060)
                __flush_tlb040_one(addr);
        else if (CPU_IS_020_OR_030)
                __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()
/*
 * Flush all ATC entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflusha\n\t"
                                     ".chip 68k");
        } else if (CPU_IS_020_OR_030) {
                __asm__ __volatile__("pflusha");
        }
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        if (vma->vm_mm == current->active_mm) {
                mm_segment_t old_fs = get_fs();

                set_fs(USER_DS);
                __flush_tlb_one(addr);
                set_fs(old_fs);
        }
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb();
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_all();
}
#else /* CONFIG_SUN3 */
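
/*
 * The sun3 MMU has no flush instructions, so mappings are torn down by
 * hand: each of the eight hardware contexts has a segment map that maps
 * virtual segments to PMEGs (page map entry groups), and a mapping is
 * "flushed" by writing SUN3_INVALID_PMEG into the corresponding segment
 * map entry.  The pmeg_* arrays below mirror the allocation state of
 * each PMEG in software and are cleared along with the hardware entries.
 */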
/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
        unsigned long addr;
        unsigned char ctx, oldctx;

        oldctx = sun3_get_context();
        /* Invalidate every user segment in every context. */
        for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
                for (ctx = 0; ctx < 8; ctx++) {
                        sun3_put_context(ctx);
                        sun3_put_segmap(addr, SUN3_INVALID_PMEG);
                }
        }
        sun3_put_context(oldctx);

        /* Erase all of the userspace pmeg maps; we've clobbered them
           all anyway. */
        for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
                if (pmeg_alloc[addr] == 1) {
                        pmeg_alloc[addr] = 0;
                        pmeg_ctx[addr] = 0;
                        pmeg_vaddr[addr] = 0;
                }
        }
}
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned char oldctx;
        unsigned char seg;
        unsigned long i;

        oldctx = sun3_get_context();
        sun3_put_context(mm->context);

        for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
                seg = sun3_get_segmap(i);
                if (seg == SUN3_INVALID_PMEG)
                        continue;

                sun3_put_segmap(i, SUN3_INVALID_PMEG);
                pmeg_alloc[seg] = 0;
                pmeg_ctx[seg] = 0;
                pmeg_vaddr[seg] = 0;
        }

        sun3_put_context(oldctx);
}
/* Flush a single TLB page.  In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        unsigned char oldctx;
        unsigned char i;

        oldctx = sun3_get_context();
        sun3_put_context(vma->vm_mm->context);
        addr &= ~SUN3_PMEG_MASK;
        if ((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG) {
                pmeg_alloc[i] = 0;
                pmeg_ctx[i] = 0;
                pmeg_vaddr[i] = 0;
                sun3_put_segmap(addr, SUN3_INVALID_PMEG);
        }
        sun3_put_context(oldctx);
}
/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned char seg, oldctx;

        start &= ~SUN3_PMEG_MASK;

        oldctx = sun3_get_context();
        sun3_put_context(mm->context);

        while (start < end) {
                if ((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
                        goto next;
                if (pmeg_ctx[seg] == mm->context) {
                        pmeg_alloc[seg] = 0;
                        pmeg_ctx[seg] = 0;
                        pmeg_vaddr[seg] = 0;
                }
                sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
                start += SUN3_PMEG_SIZE;
        }
        sun3_put_context(oldctx);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_all();
}
/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
        sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

#endif /* CONFIG_SUN3 */
#else /* !CONFIG_MMU */
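
/*
 * Without an MMU there are no ATC entries to flush.  None of these
 * should ever be reached on a nommu kernel, so they simply BUG().
 */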
/*
 * Flush all user-space ATC entries.
 */
static inline void __flush_tlb(void)
{
        BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
        BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * Flush all ATC entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
        BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        BUG();
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
        BUG();
}

#endif /* CONFIG_MMU */

#endif /* _M68K_TLBFLUSH_H */