main.h
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>	/* assert() in the cpu_relax() fallback below */
#include <stdbool.h>
#include <stdlib.h>	/* _Exit() in the wait_cycles() fallback below */

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include "x86intrin.h"

static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	asm volatile("0: brctg %0,0b" : : "d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
static inline void wait_cycles(unsigned long long cycles)
{
	/* unsupported architecture: bail out */
	_Exit(5);
}

#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0

#endif

static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
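
/*
 * When do_exit is set, vmexit()/vmentry() burn VMEXIT_CYCLES/VMENTRY_CYCLES,
 * presumably to approximate the cost of a guest/host transition around each
 * notification; with do_exit clear they are no-ops.
 */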

/* implemented by ring */
void alloc_ring(void);

/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);

/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);
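
/*
 * A rough sketch of how these hooks fit together (the actual benchmark
 * loops live in main.c, which is not shown here; the flow below is
 * illustrative only):
 *
 * guest side:
 *	post buffers with add_inbuf(), notify the host with kick_available(),
 *	and reap completions with get_buf(); disable_call()/enable_call()
 *	together with used_empty() let the guest suppress and then re-arm
 *	completion notifications before sleeping.
 *
 * host side:
 *	consume buffers with use_buf() and notify the guest with call_used();
 *	disable_kick()/enable_kick() and avail_empty() mirror the same
 *	suppress/re-arm pattern for kicks.
 */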

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
/* "rep; nop" is the encoding of the x86 PAUSE instruction */
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
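
/*
 * Typical polling idiom built on busy_wait() (an illustrative sketch, not
 * code taken from this file):
 *
 *	while (used_empty())
 *		busy_wait();
 *
 * i.e. spin until the other side makes progress, with cpu_relax() easing
 * pressure on a sibling hyperthread when do_relax is set.
 */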

/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb()	__sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
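
/*
 * Intended pairing (a sketch with hypothetical names, not code from this
 * file): the producer writes a descriptor and then publishes the shared
 * index,
 *
 *	ring[head] = desc;
 *	smp_release();
 *	*shared_idx = head + 1;
 *
 * while the consumer loads the index before reading the descriptor:
 *
 *	idx = *shared_idx;
 *	smp_acquire();
 *	desc = ring[idx - 1];
 */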

#endif