percpu-rwsem.h

#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

/*
 * Per-CPU reader-writer semaphore: readers take a cheap per-CPU fast path,
 * while a writer excludes all readers and pays the full synchronization cost.
 */

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>

struct percpu_rw_semaphore {
        unsigned __percpu *counters;    /* per-CPU count of active readers */
        bool locked;                    /* true while a writer holds the lock */
        struct mutex mtx;               /* serializes writers and slow-path readers */
};

#define light_mb()      barrier()
#define heavy_mb()      synchronize_sched_expedited()

static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
        rcu_read_lock_sched();
        if (unlikely(p->locked)) {
                /* Slow path: a writer is active, wait for it on the mutex. */
                rcu_read_unlock_sched();
                mutex_lock(&p->mtx);
                this_cpu_inc(*p->counters);
                mutex_unlock(&p->mtx);
                return;
        }
        /* Fast path: no writer, just bump this CPU's reader count. */
        this_cpu_inc(*p->counters);
        rcu_read_unlock_sched();
        light_mb(); /* A, between read of p->locked and read of data, paired with D */
}

static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
        light_mb(); /* B, between read of the data and write to p->counter, paired with C */
        this_cpu_dec(*p->counters);
}

/* Sum the reader counts of all possible CPUs. */
static inline unsigned __percpu_count(unsigned __percpu *counters)
{
        unsigned total = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));

        return total;
}

static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
        mutex_lock(&p->mtx);
        p->locked = true;
        synchronize_sched_expedited(); /* make sure that all readers exit the rcu_read_lock_sched region */
        /* Wait for the pre-existing readers to drain. */
        while (__percpu_count(p->counters))
                msleep(1);
        heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
}

static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
        heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
        p->locked = false;
        mutex_unlock(&p->mtx);
}

static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
{
        p->counters = alloc_percpu(unsigned);
        if (unlikely(!p->counters))
                return -ENOMEM;
        p->locked = false;
        mutex_init(&p->mtx);
        return 0;
}

static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
{
        free_percpu(p->counters);
        p->counters = NULL; /* catch use after free bugs */
}

#endif
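
For context, a minimal usage sketch follows; it is not part of the header. It assumes a hypothetical piece of read-mostly state (my_state) guarded by a hypothetical semaphore (my_rwsem): readers take the cheap per-CPU path, while the rare writer pays for synchronize_sched_expedited() and the counter drain.

/* Illustrative only: my_rwsem, my_state and the my_* functions are made-up names. */
static struct percpu_rw_semaphore my_rwsem;
static int my_state;

static int my_init(void)
{
        return percpu_init_rwsem(&my_rwsem);    /* may fail with -ENOMEM */
}

static int my_read(void)
{
        int v;

        percpu_down_read(&my_rwsem);    /* fast path: per-CPU increment */
        v = my_state;
        percpu_up_read(&my_rwsem);
        return v;
}

static void my_write(int v)
{
        percpu_down_write(&my_rwsem);   /* slow path: waits for all readers */
        my_state = v;
        percpu_up_write(&my_rwsem);
}

static void my_exit(void)
{
        percpu_free_rwsem(&my_rwsem);
}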