#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void *);	/* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void);	/* flushes data-cache only (all processors) */
void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
#else
#define flush_data_cache()		flush_data_cache_local(NULL)
#define flush_instruction_cache()	flush_instruction_cache_local(NULL)
#endif
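
/* On UP systems the cross-CPU variants above collapse to the local flush;
 * the NULL argument fills the info pointer that presumably lets the
 * *_local() routines double as SMP cross-call callbacks. */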

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);
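
/* The *_asm() helpers above are the low-level flush primitives; like
 * flush_dcache_page_asm() further down, they presumably live in assembly
 * (pacache.S). The rest of this header builds the generic cacheflush API
 * on top of them. */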

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#define flush_kernel_dcache_range(start, size) \
	flush_kernel_dcache_range_asm((start), (start) + (size))

void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);

#define flush_cache_vmap(start, end)	flush_cache_all()
#define flush_cache_vunmap(start, end)	flush_cache_all()
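
/* A vmap can alias any physical page at a fresh virtual address, and the
 * caches are virtually indexed, so flushing everything (above) is the
 * only safe, if heavy-handed, choice. */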

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)
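
/* PA-RISC has split instruction and data caches, so freshly written code
 * must be pushed out of the D-cache before the I-cache is flushed; both
 * macros below flush in that order. */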
#define flush_icache_page(vma, page) do { \
	flush_kernel_dcache_page(page); \
	flush_kernel_icache_page(page_address(page)); \
} while (0)

#define flush_icache_range(s, e) do { \
	flush_kernel_dcache_range_asm(s, e); \
	flush_kernel_icache_range_asm(s, e); \
} while (0)
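
/* Used by access_process_vm() and ptrace to copy data via another
 * process's page. The target mapping is flushed first so stale lines at
 * the user alias don't mask the copy; copy_to_user_page() also flushes
 * the kernel alias after the memcpy() so the newly written data (e.g. an
 * inserted breakpoint) actually reaches the page. */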
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, \
				      (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* Defined in pacache.S, exported in cache.c, used by flush_anon_page() below. */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
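
/* An anonymous page may only be mapped at a user-space alias, so it is
 * flushed by physical address via flush_dcache_page_asm(); preemption is
 * disabled around it, presumably so the CPU-local flush cannot be
 * migrated part-way through. */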
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page)) {
		flush_tlb_page(vma, vmaddr);
		preempt_disable();
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
		preempt_enable();
	}
}

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP
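
/* PA-RISC has no highmem, so every page is permanently mapped and kmap()
 * reduces to page_address(). The work is on the unmap side: the kernel
 * alias is flushed so a user mapping of the same page, at a different
 * index in the virtually-indexed cache, sees what the kernel wrote. */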
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	flush_kernel_dcache_page_addr(addr);
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)		kmap_atomic(pfn_to_page(pfn))

#endif /* _PARISC_CACHEFLUSH_H */