/* kernel/res_counter.c */
/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */
#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
  15. void res_counter_init(struct res_counter *counter, struct res_counter *parent)
  16. {
  17. spin_lock_init(&counter->lock);
  18. counter->limit = RES_COUNTER_MAX;
  19. counter->soft_limit = RES_COUNTER_MAX;
  20. counter->parent = parent;
  21. }
  22. static u64 res_counter_uncharge_locked(struct res_counter *counter,
  23. unsigned long val)
  24. {
  25. if (WARN_ON(counter->usage < val))
  26. val = counter->usage;
  27. counter->usage -= val;
  28. return counter->usage;
  29. }
  30. static int res_counter_charge_locked(struct res_counter *counter,
  31. unsigned long val, bool force)
  32. {
  33. int ret = 0;
  34. if (counter->usage + val > counter->limit) {
  35. counter->failcnt++;
  36. ret = -ENOMEM;
  37. if (!force)
  38. return ret;
  39. }
  40. counter->usage += val;
  41. if (counter->usage > counter->max_usage)
  42. counter->max_usage = counter->usage;
  43. return ret;
  44. }
  45. static int __res_counter_charge(struct res_counter *counter, unsigned long val,
  46. struct res_counter **limit_fail_at, bool force)
  47. {
  48. int ret, r;
  49. unsigned long flags;
  50. struct res_counter *c, *u;
  51. r = ret = 0;
  52. *limit_fail_at = NULL;
  53. local_irq_save(flags);
  54. for (c = counter; c != NULL; c = c->parent) {
  55. spin_lock(&c->lock);
  56. r = res_counter_charge_locked(c, val, force);
  57. spin_unlock(&c->lock);
  58. if (r < 0 && !ret) {
  59. ret = r;
  60. *limit_fail_at = c;
  61. if (!force)
  62. break;
  63. }
  64. }
  65. if (ret < 0 && !force) {
  66. for (u = counter; u != c; u = u->parent) {
  67. spin_lock(&u->lock);
  68. res_counter_uncharge_locked(u, val);
  69. spin_unlock(&u->lock);
  70. }
  71. }
  72. local_irq_restore(flags);
  73. return ret;
  74. }
  75. int res_counter_charge(struct res_counter *counter, unsigned long val,
  76. struct res_counter **limit_fail_at)
  77. {
  78. return __res_counter_charge(counter, val, limit_fail_at, false);
  79. }
  80. int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
  81. struct res_counter **limit_fail_at)
  82. {
  83. return __res_counter_charge(counter, val, limit_fail_at, true);
  84. }
  85. u64 res_counter_uncharge_until(struct res_counter *counter,
  86. struct res_counter *top,
  87. unsigned long val)
  88. {
  89. unsigned long flags;
  90. struct res_counter *c;
  91. u64 ret = 0;
  92. local_irq_save(flags);
  93. for (c = counter; c != top; c = c->parent) {
  94. u64 r;
  95. spin_lock(&c->lock);
  96. r = res_counter_uncharge_locked(c, val);
  97. if (c == counter)
  98. ret = r;
  99. spin_unlock(&c->lock);
  100. }
  101. local_irq_restore(flags);
  102. return ret;
  103. }
  104. u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
  105. {
  106. return res_counter_uncharge_until(counter, NULL, val);
  107. }
  108. static inline unsigned long long *
  109. res_counter_member(struct res_counter *counter, int member)
  110. {
  111. switch (member) {
  112. case RES_USAGE:
  113. return &counter->usage;
  114. case RES_MAX_USAGE:
  115. return &counter->max_usage;
  116. case RES_LIMIT:
  117. return &counter->limit;
  118. case RES_FAILCNT:
  119. return &counter->failcnt;
  120. case RES_SOFT_LIMIT:
  121. return &counter->soft_limit;
  122. };
  123. BUG();
  124. return NULL;
  125. }
  126. ssize_t res_counter_read(struct res_counter *counter, int member,
  127. const char __user *userbuf, size_t nbytes, loff_t *pos,
  128. int (*read_strategy)(unsigned long long val, char *st_buf))
  129. {
  130. unsigned long long *val;
  131. char buf[64], *s;
  132. s = buf;
  133. val = res_counter_member(counter, member);
  134. if (read_strategy)
  135. s += read_strategy(*val, s);
  136. else
  137. s += sprintf(s, "%llu\n", *val);
  138. return simple_read_from_buffer((void __user *)userbuf, nbytes,
  139. pos, buf, s - buf);
  140. }
  141. #if BITS_PER_LONG == 32
  142. u64 res_counter_read_u64(struct res_counter *counter, int member)
  143. {
  144. unsigned long flags;
  145. u64 ret;
  146. spin_lock_irqsave(&counter->lock, flags);
  147. ret = *res_counter_member(counter, member);
  148. spin_unlock_irqrestore(&counter->lock, flags);
  149. return ret;
  150. }
  151. #else
  152. u64 res_counter_read_u64(struct res_counter *counter, int member)
  153. {
  154. return *res_counter_member(counter, member);
  155. }
  156. #endif
  157. int res_counter_memparse_write_strategy(const char *buf,
  158. unsigned long long *resp)
  159. {
  160. char *end;
  161. unsigned long long res;
  162. /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
  163. if (*buf == '-') {
  164. res = simple_strtoull(buf + 1, &end, 10);
  165. if (res != 1 || *end != '\0')
  166. return -EINVAL;
  167. *resp = RES_COUNTER_MAX;
  168. return 0;
  169. }
  170. res = memparse(buf, &end);
  171. if (*end != '\0')
  172. return -EINVAL;
  173. if (PAGE_ALIGN(res) >= res)
  174. res = PAGE_ALIGN(res);
  175. else
  176. res = RES_COUNTER_MAX;
  177. *resp = res;
  178. return 0;
  179. }