/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
/* CPU hotplug: hand the do_timer duty over / tear down the dead CPU's tick */
extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
/* No generic clockevents layer: every tick management hook is a no-op */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
/* Stop/restart the tick devices across a suspend transition */
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif
#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
/* Architectures may override this to keep the tick alive for arch reasons */
# ifndef arch_needs_cpu
#  define arch_needs_cpu() (0)
# endif
# else
static inline void tick_irq_enter(void) { }
#endif

/* Pull the broadcast duty off a CPU that went offline */
#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif
/*
 * enum tick_broadcast_mode - mode argument for tick_broadcast_control()
 * @TICK_BROADCAST_OFF:   turn broadcasting off for the calling CPU
 * @TICK_BROADCAST_ON:    turn broadcasting on for the calling CPU
 * @TICK_BROADCAST_FORCE: force broadcasting on (presumably even when the
 *                        device could handle it itself — confirm in core)
 */
enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

/*
 * enum tick_broadcast_state - state argument for
 * tick_broadcast_oneshot_control(): enter/leave broadcast mode around idle.
 */
enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
/* No clockevents: entering broadcast mode always "succeeds" trivially */
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif
/* Enable the broadcast mechanism for the calling CPU */
static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}

/* Disable the broadcast mechanism for the calling CPU */
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}

/* Force broadcasting on for the calling CPU */
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}

/*
 * Enter broadcast mode before going idle.
 * Returns the value of tick_broadcast_oneshot_control(); nonzero return
 * means the CPU could not enter broadcast mode — caller must not sleep.
 */
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}

/* Leave broadcast mode after idle; return value intentionally ignored */
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
/*
 * Bits tracking why a CPU cannot stop its tick (tick dependencies).
 * Each bit has a matching TICK_DEP_MASK_* for bitmask operations.
 */
enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3
};

#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern int tick_nohz_tick_stopped(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
/*
 * NOTE(review): tick_nohz_irq_exit intentionally has no stub here;
 * callers must guard it with tick_nohz_enabled/config — confirm at call sites.
 */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }

/* With a periodic tick the next event is always one tick period away */
static inline ktime_t tick_nohz_get_sleep_length(void)
{
	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

	return len;
}
/* -1 tells callers that fine grained idle time accounting is unavailable */
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
extern cpumask_var_t tick_nohz_full_mask;
extern cpumask_var_t housekeeping_mask;

/*
 * Is full dynticks globally enabled?  The context tracking check lets the
 * compiler/static key optimize the off case.
 */
static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_is_enabled())
		return false;

	return tick_nohz_full_running;
}
  132. static inline bool tick_nohz_full_cpu(int cpu)
  133. {
  134. if (!tick_nohz_full_enabled())
  135. return false;
  136. return cpumask_test_cpu(cpu, tick_nohz_full_mask);
  137. }
/* OR the full dynticks CPUs into @mask (no-op when nohz_full is disabled) */
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
{
	if (tick_nohz_full_enabled())
		cpumask_or(mask, mask, tick_nohz_full_mask);
}

/* Pick any online CPU allowed to perform housekeeping duties */
static inline int housekeeping_any_cpu(void)
{
	return cpumask_any_and(housekeeping_mask, cpu_online_mask);
}

/* Set/clear a tick dependency globally, per CPU, per task or per signal */
extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct signal_struct *signal,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);
/*
 * The below are tick_nohz_dep_set()/tick_nohz_dep_clear() style wrappers
 * that optimize the off-case on top of static keys: when full dynticks is
 * disabled (or @cpu is not a nohz_full CPU) they compile down to nothing.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}

static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}

static inline void tick_dep_set_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(signal, bit);
}

static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}
extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
#else /* !CONFIG_NO_HZ_FULL */
/*
 * Without full dynticks every CPU does housekeeping, no CPU is a
 * nohz_full CPU, and all tick dependency operations are no-ops.
 */
static inline int housekeeping_any_cpu(void)
{
	return smp_processor_id();
}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_set_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
#endif
/*
 * CPUs allowed to perform housekeeping duties.  With nohz_full active this
 * is the dedicated housekeeping set, otherwise every possible CPU.
 */
static inline const struct cpumask *housekeeping_cpumask(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		return housekeeping_mask;
#endif
	return cpu_possible_mask;
}

/* Is @cpu allowed to perform housekeeping duties? */
static inline bool is_housekeeping_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		return cpumask_test_cpu(cpu, housekeeping_mask);
#endif
	return true;
}

/* Restrict @t to the housekeeping CPUs (no-op when nohz_full is off) */
static inline void housekeeping_affine(struct task_struct *t)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled())
		set_cpus_allowed_ptr(t, housekeeping_mask);
#endif
}
  255. static inline void tick_nohz_task_switch(void)
  256. {
  257. if (tick_nohz_full_enabled())
  258. __tick_nohz_task_switch();
  259. }
  260. #endif