flex_proportions.h

/*
 * Floating proportions with flexible aging period
 *
 * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 */

#ifndef _LINUX_FLEX_PROPORTIONS_H
#define _LINUX_FLEX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>

/*
 * When a maximum proportion of some event type is specified, this is the
 * precision with which we allow limiting. Note that this creates an upper
 * bound on the number of events per period of roughly
 * ULLONG_MAX >> FPROP_FRAC_SHIFT.
 */
#define FPROP_FRAC_SHIFT 10
#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)
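
/*
 * Illustration (not part of the API): with FPROP_FRAC_SHIFT == 10 a
 * proportion is expressed in 1/1024ths, so a caller wanting to cap an
 * event type at roughly 25% of all events would pass a max_frac of
 * FPROP_FRAC_BASE / 4 (i.e. 256) to __fprop_inc_percpu_max() below.
 * This assumes max_frac is measured in units of 1/FPROP_FRAC_BASE, as
 * the implementation in lib/flex_proportions.c suggests.
 */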

/*
 * ---- Global proportion definitions ----
 */
struct fprop_global {
        /* Number of events in the current period */
        struct percpu_counter events;
        /* Current period */
        unsigned int period;
        /* Synchronization with period transitions */
        seqcount_t sequence;
};

int fprop_global_init(struct fprop_global *p);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
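
/*
 * Typical lifecycle of the global proportion domain (a minimal usage
 * sketch; the variable names are illustrative, not taken from an
 * in-tree user):
 *
 *      struct fprop_global fg;
 *      int err;
 *
 *      err = fprop_global_init(&fg);   (sets up the per-CPU event counter)
 *      if (err)
 *              return err;
 *      ...
 *      fprop_new_period(&fg, 1);       (declares one elapsed period,
 *                                       aging all past events)
 *      ...
 *      fprop_global_destroy(&fg);
 */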

/*
 * ---- SINGLE ----
 */
struct fprop_local_single {
        /* the local events counter */
        unsigned long events;
        /* Period in which we last updated events */
        unsigned int period;
        raw_spinlock_t lock;    /* Protect period and numerator */
};

#define INIT_FPROP_LOCAL_SINGLE(name)                           \
{       .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),            \
}

int fprop_local_init_single(struct fprop_local_single *pl);
void fprop_local_destroy_single(struct fprop_local_single *pl);
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
void fprop_fraction_single(struct fprop_global *p,
        struct fprop_local_single *pl, unsigned long *numerator,
        unsigned long *denominator);

static inline
void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
        unsigned long flags;

        local_irq_save(flags);
        __fprop_inc_single(p, pl);
        local_irq_restore(flags);
}
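
/*
 * Minimal usage sketch for the SINGLE (plain unsigned long) local counter;
 * "fg" is an initialized struct fprop_global as in the sketch above and
 * all names are illustrative:
 *
 *      struct fprop_local_single fl;
 *      unsigned long num, den;
 *
 *      fprop_local_init_single(&fl);
 *      fprop_inc_single(&fg, &fl);     (accounts one event to fl)
 *      fprop_fraction_single(&fg, &fl, &num, &den);
 *      (num/den now approximates fl's share of recent events)
 *      fprop_local_destroy_single(&fl);
 */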

/*
 * ---- PERCPU ----
 */
struct fprop_local_percpu {
        /* the local events counter */
        struct percpu_counter events;
        /* Period in which we last updated events */
        unsigned int period;
        raw_spinlock_t lock;    /* Protect period and numerator */
};

int fprop_local_init_percpu(struct fprop_local_percpu *pl);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
                            int max_frac);
void fprop_fraction_percpu(struct fprop_global *p,
        struct fprop_local_percpu *pl, unsigned long *numerator,
        unsigned long *denominator);

static inline
void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
        unsigned long flags;

        local_irq_save(flags);
        __fprop_inc_percpu(p, pl);
        local_irq_restore(flags);
}
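
/*
 * Per-CPU usage mirrors the SINGLE variant but scales better for
 * frequently updated counters; a minimal sketch with illustrative names
 * ("fg" is a struct fprop_global as in the sketches above):
 *
 *      struct fprop_local_percpu fl;
 *      unsigned long flags;
 *
 *      fprop_local_init_percpu(&fl);
 *      fprop_inc_percpu(&fg, &fl);
 *
 *      The __ variants leave IRQ handling to the caller:
 *      local_irq_save(flags);
 *      __fprop_inc_percpu_max(&fg, &fl, FPROP_FRAC_BASE / 2);
 *      local_irq_restore(flags);
 *
 *      fprop_local_destroy_percpu(&fl);
 *
 * FPROP_FRAC_BASE / 2 caps fl at roughly half of all events, assuming
 * max_frac is in 1/FPROP_FRAC_BASE units (see the note after
 * FPROP_FRAC_BASE above).
 */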

#endif