#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00100000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
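
/*
 * Illustrative sketch, not part of this header: how the masks and
 * shifts above decode a raw preempt_count value. The sample value is
 * hypothetical.
 */
#if 0
static void decode_preempt_count_example(void)
{
	/* e.g. a hardirq nested over a softirq over preempt depth 2 */
	unsigned long pc = 0x00010102;

	unsigned long preempt = (pc & PREEMPT_MASK) >> PREEMPT_SHIFT; /* 2 */
	unsigned long softirq = (pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT; /* 1 */
	unsigned long hardirq = (pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT; /* 1 */
	unsigned long nmi     = (pc & NMI_MASK)     >> NMI_SHIFT;     /* 0 */
}
#endif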

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()		- We're in (hard) IRQ context
 * in_softirq()		- We're processing a softirq or have BH disabled
 * in_interrupt()	- We're in NMI, hardirq, or softirq context, or have BH disabled
 * in_serving_softirq()	- We're currently processing a softirq
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)
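
/*
 * Illustrative sketch, not part of this header: what the predicates
 * above report from a hypothetical (non-threaded) hardirq handler;
 * irqreturn_t and IRQ_HANDLED would come from <linux/interrupt.h>.
 * irq_enter() adds HARDIRQ_OFFSET before the handler runs, so in_irq()
 * and in_interrupt() are nonzero here while in_serving_softirq() is not.
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
	WARN_ON(!in_irq());		/* hardirq count is nonzero */
	WARN_ON(!in_interrupt());	/* ...so irq_count() is too */
	WARN_ON(in_serving_softirq());	/* but not inside a softirq handler */
	return IRQ_HANDLED;
}
#endif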

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET	(SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
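
/*
 * Illustrative sketch, not part of this header: the pairing described
 * above. The lock is hypothetical; the comments show the bookkeeping
 * each call performs on preempt_count.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_bh_locking(void)
{
	spin_lock_bh(&example_lock);	/* += SOFTIRQ_LOCK_OFFSET */
	/* ... softirqs and preemption disabled ... */
	spin_unlock(&example_lock);	/* -= PREEMPT_LOCK_OFFSET */
	local_bh_enable();		/* -= SOFTIRQ_DISABLE_OFFSET */
}
#endif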

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
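
/*
 * Illustrative sketch, not part of this header: why in_atomic() must
 * not be used in drivers. Without CONFIG_PREEMPT_COUNT, spin_lock()
 * leaves preempt_count untouched, so the check below passes even
 * though sleeping under the (hypothetical) lock is a bug.
 */
#if 0
	spin_lock(&example_lock);
	if (!in_atomic())	/* true on !CONFIG_PREEMPT_COUNT kernels */
		msleep(10);	/* sleeping with a spinlock held: deadlock risk */
	spin_unlock(&example_lock);
#endif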

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that the compiler cannot move things like
 * get_user/put_user, which can cause faults and scheduling, into our
 * preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
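
/*
 * Illustrative sketch, not part of this header: the classic use of
 * preempt_disable()/preempt_enable() to keep a task on one CPU while
 * it touches per-CPU data. The per-CPU variable is hypothetical;
 * DEFINE_PER_CPU and __this_cpu_inc come from <linux/percpu.h>.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_counter);

static void bump_example_counter(void)
{
	preempt_disable();		/* no migration from here... */
	__this_cpu_inc(example_counter);
	preempt_enable();		/* ...to here; may reschedule */
}
#endif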

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)

#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts. sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
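
/*
 * Illustrative sketch, not part of this header: embedding a
 * preempt_notifier in a private structure and recovering it with
 * container_of(), as the comment above suggests. The vcpu structure
 * and callbacks are hypothetical; it assumes preempt_notifier_inc()
 * was called once at init time to enable the notifier machinery.
 */
#if 0
struct example_vcpu {
	struct preempt_notifier pn;
	/* ... */
};

static void example_sched_in(struct preempt_notifier *notifier, int cpu)
{
	struct example_vcpu *vcpu = container_of(notifier, struct example_vcpu, pn);
	/* about to run on @cpu; reload vcpu state (irqs enabled here) */
}

static void example_sched_out(struct preempt_notifier *notifier,
			      struct task_struct *next)
{
	struct example_vcpu *vcpu = container_of(notifier, struct example_vcpu, pn);
	/* just preempted by @next; save vcpu state (rq lock held, irqs off) */
}

static struct preempt_ops example_ops = {
	.sched_in	= example_sched_in,
	.sched_out	= example_sched_out,
};

static void example_attach(struct example_vcpu *vcpu)
{
	preempt_notifier_init(&vcpu->pn, &example_ops);
	preempt_notifier_register(&vcpu->pn);	/* registers for current task */
}
#endif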

#endif

#endif /* __LINUX_PREEMPT_H */