/*
 * Floating proportions with flexible aging period
 *
 * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 */
#ifndef _LINUX_FLEX_PROPORTIONS_H
#define _LINUX_FLEX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/gfp.h>
/*
 * When maximum proportion of some event type is specified, this is the
 * precision with which we allow limiting. Note that this creates an upper
 * bound on the number of events per period like
 * ULLONG_MAX >> FPROP_FRAC_SHIFT.
 */
#define FPROP_FRAC_SHIFT 10
#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)
/*
 * ---- Global proportion definitions ----
 */
/*
 * Shared per-event-domain state: the total event count for the current
 * aging period and the period counter itself.
 */
struct fprop_global {
	/* Number of events in the current period */
	struct percpu_counter events;
	/* Current period */
	unsigned int period;
	/* Synchronization with period transitions */
	seqcount_t sequence;
};

/* Initialize @p; @gfp is passed down for the percpu counter allocation. */
int fprop_global_init(struct fprop_global *p, gfp_t gfp);
/* Release resources held by @p (the percpu events counter). */
void fprop_global_destroy(struct fprop_global *p);
/*
 * Advance @p by @periods aging periods.
 * NOTE(review): the meaning of the bool return is not visible in this
 * header — confirm against the implementation before relying on it.
 */
bool fprop_new_period(struct fprop_global *p, int periods);
/*
 * ---- SINGLE ----
 *
 * Variant where the local event counter is a plain unsigned long,
 * serialized by the embedded raw spinlock.
 */
struct fprop_local_single {
	/* the local events counter */
	unsigned long events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};

/* Static initializer; only the spinlock needs explicit setup. */
#define INIT_FPROP_LOCAL_SINGLE(name)	\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int fprop_local_init_single(struct fprop_local_single *pl);
void fprop_local_destroy_single(struct fprop_local_single *pl);
/* Count one event for @pl; caller is responsible for IRQ safety
 * (see fprop_inc_single() below). */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
/* Return @pl's proportion of all events as *numerator / *denominator. */
void fprop_fraction_single(struct fprop_global *p,
	struct fprop_local_single *pl, unsigned long *numerator,
	unsigned long *denominator);
  53. static inline
  54. void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
  55. {
  56. unsigned long flags;
  57. local_irq_save(flags);
  58. __fprop_inc_single(p, pl);
  59. local_irq_restore(flags);
  60. }
/*
 * ---- PERCPU ----
 *
 * Variant where the local event counter is itself a percpu_counter,
 * avoiding a shared counter word on the increment fast path.
 */
struct fprop_local_percpu {
	/* the local events counter */
	struct percpu_counter events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};

/* Initialize @pl; @gfp is used for the percpu counter allocation. */
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
/* Count one event for @pl; caller is responsible for IRQ safety
 * (see fprop_inc_percpu() below). */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
/*
 * As __fprop_inc_percpu(), but limits the event type's proportion;
 * presumably @max_frac is in units of 1/FPROP_FRAC_BASE (see
 * FPROP_FRAC_SHIFT above) — confirm in the implementation.
 */
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
	int max_frac);
/* Return @pl's proportion of all events as *numerator / *denominator. */
void fprop_fraction_percpu(struct fprop_global *p,
	struct fprop_local_percpu *pl, unsigned long *numerator,
	unsigned long *denominator);
  79. static inline
  80. void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
  81. {
  82. unsigned long flags;
  83. local_irq_save(flags);
  84. __fprop_inc_percpu(p, pl);
  85. local_irq_restore(flags);
  86. }
#endif /* _LINUX_FLEX_PROPORTIONS_H */