vgtod.h

#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif

/*
 * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
 * so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
        unsigned seq;

        int vclock_mode;
        cycle_t cycle_last;
        cycle_t mask;
        u32 mult;
        u32 shift;

        /* open coded 'struct timespec' */
        u64 wall_time_snsec;
        gtod_long_t wall_time_sec;

        gtod_long_t monotonic_time_sec;
        u64 monotonic_time_snsec;
        gtod_long_t wall_time_coarse_sec;
        gtod_long_t wall_time_coarse_nsec;
        gtod_long_t monotonic_time_coarse_sec;
        gtod_long_t monotonic_time_coarse_nsec;

        int tz_minuteswest;
        int tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
        return READ_ONCE(vclocks_used) & (1 << vclock);
}
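
/*
 * Usage sketch (illustrative, not part of the original header): the
 * timekeeping update path marks a vclock mode as used roughly like this
 * before exposing it through vclock_mode, so that vclock_was_used() can
 * later report whether userspace may ever have relied on that mode.
 * example_mark_vclock_used() is a hypothetical helper added here only to
 * show the intended pairing with vclock_was_used().
 */
static inline void example_mark_vclock_used(int vclock)
{
        WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock));
}
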
static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
        unsigned ret;

repeat:
        ret = ACCESS_ONCE(s->seq);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb();
        return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
                                  unsigned start)
{
        smp_rmb();
        return unlikely(s->seq != start);
}

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
        ++s->seq;
        smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
        smp_wmb();
        ++s->seq;
}
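
/*
 * Usage sketch (illustrative, not part of the original header): how a vDSO
 * reader is expected to consume the seqcount above, modeled loosely on the
 * do_realtime() path in the x86 vDSO.  It retries until it sees a consistent
 * snapshot (even, unchanged seq), then scales the clocksource delta by mult
 * and shift.  example_read_wall_time() and the "cycles" argument are
 * hypothetical stand-ins for the real clocksource read.
 */
static inline void example_read_wall_time(const struct vsyscall_gtod_data *s,
                                          u64 cycles, u64 *sec, u64 *ns)
{
        unsigned seq;

        do {
                seq = gtod_read_begin(s);
                *sec = s->wall_time_sec;
                /* wall_time_snsec holds nanoseconds left-shifted by s->shift */
                *ns = s->wall_time_snsec;
                *ns += ((cycles - s->cycle_last) & s->mask) * s->mult;
                *ns >>= s->shift;
        } while (unlikely(gtod_read_retry(s, seq)));

        /* a real caller would then fold full seconds from *ns into *sec */
}
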
#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
        unsigned int p;

        /*
         * Load per CPU data from GDT.  LSL is faster than RDTSCP and
         * works on all CPUs.  This is volatile so that it orders
         * correctly wrt barrier() and to keep gcc from cleverly
         * hoisting it out of the calling function.
         */
        asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

        return p;
}
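
/*
 * Usage sketch (illustrative, not part of the original header): the vDSO
 * getcpu() implementation splits the LSL result into a CPU number and a
 * NUMA node, with the low 12 bits (VGETCPU_CPU_MASK) holding the CPU.
 * example_decode_cpu_node() is a hypothetical helper shown only to make
 * that encoding explicit.
 */
static inline void example_decode_cpu_node(unsigned int *cpu, unsigned int *node)
{
        unsigned int p = __getcpu();

        if (cpu)
                *cpu = p & VGETCPU_CPU_MASK;
        if (node)
                *node = p >> 12;
}
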
#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */