main.h

/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>	/* assert() is used by the non-x86 cpu_relax() fallback */
#include <stdbool.h>
#include <stdlib.h>	/* _Exit() is used by the non-x86 wait_cycles() fallback */

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include "x86intrin.h"

/* Busy-wait for roughly the given number of TSC cycles. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#else
/* No cycle counter available on this architecture: bail out if ever used. */
static inline void wait_cycles(unsigned long long cycles)
{
	(void)cycles;
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
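
/*
 * Illustrative sketch, not part of the original header: how vmexit()/vmentry()
 * could bracket a simulated guest->host notification so the benchmark charges
 * the configured transition cost. example_notify_host() is a hypothetical name
 * used only for this example.
 */
static inline void example_notify_host(void)
{
	vmexit();	/* charge the cost of leaving guest mode */
	/* ... the host-side notification handling would run here ... */
	vmentry();	/* charge the cost of resuming the guest */
}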

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;
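
/*
 * Illustrative sketch, not part of the original header: the rough shape of a
 * guest-side iteration built from the declarations above (the real loop lives
 * in main.c). The head index and buffer pointers are placeholder values, and
 * example_guest_step() is a hypothetical name used only for this example.
 */
static inline void example_guest_step(void)
{
	unsigned len;
	void *buf;

	/* Post a buffer and notify the host that work is available. */
	if (add_inbuf(0, "Buffer", "Data") == 0)
		kick_available();

	/* Wait for a completion; enable_call() re-checks the used ring
	 * after re-arming notifications so a completion is not missed. */
	while (used_empty() && enable_call())
		wait_for_call();
	disable_call();

	/* Reclaim the completed buffer. */
	get_buf(&len, &buf);
}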

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
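
/*
 * Companion sketch, not part of the original header: the rough shape of a
 * host-side iteration built from the declarations above (the real loop lives
 * in main.c). example_host_step() is a hypothetical name used only for this
 * example.
 */
static inline void example_host_step(void)
{
	unsigned len;
	void *buf;

	/* Wait for the guest to post a buffer. */
	if (do_sleep) {
		/* Sleep only when enable_kick() confirms the ring is still empty. */
		while (avail_empty() && enable_kick())
			wait_for_kick();
		disable_kick();
	} else {
		while (avail_empty())
			busy_wait();
	}

	/* Consume one buffer and signal completion back to the guest. */
	if (use_buf(&len, &buf))
		call_used();
}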

/*
 * Not using __ATOMIC_SEQ_CST since the GCC docs say such fences only
 * synchronize with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
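
/*
 * Illustrative sketch, not part of the original header: the intended pairing
 * of smp_release()/smp_acquire() when publishing a value through a ready
 * flag. example_publish()/example_consume() and their parameters are
 * hypothetical names used only for this example.
 */
static inline void example_publish(unsigned *data, volatile unsigned *ready,
				   unsigned val)
{
	*data = val;		/* write the payload first */
	smp_release();		/* order the payload before the flag */
	*ready = 1;		/* then publish the flag */
}

static inline unsigned example_consume(unsigned *data, volatile unsigned *ready)
{
	while (!*ready)		/* spin until the producer's flag is visible */
		busy_wait();
	smp_acquire();		/* order the flag read before the payload read */
	return *data;
}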

#endif