test_memcontrol.c

/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE

#include <linux/limits.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netdb.h>
#include <errno.h>

#include "../kselftest.h"
#include "cgroup_util.h"

/*
 * This test creates two nested cgroups with and without enabling
 * the memory controller.
 */
static int test_memcg_subtree_control(const char *root)
{
        /* Start as NULL so that cleanup is safe after early failures */
        char *parent = NULL, *child = NULL;
        char *parent2 = NULL, *child2 = NULL;
        int ret = KSFT_FAIL;
        char buf[PAGE_SIZE];

        /* Create two nested cgroups with the memory controller enabled */
        parent = cg_name(root, "memcg_test_0");
        child = cg_name(root, "memcg_test_0/memcg_test_1");
        if (!parent || !child)
                goto cleanup;

        if (cg_create(parent))
                goto cleanup;

        if (cg_write(parent, "cgroup.subtree_control", "+memory"))
                goto cleanup;

        if (cg_create(child))
                goto cleanup;

        if (cg_read_strstr(child, "cgroup.controllers", "memory"))
                goto cleanup;

        /* Create two nested cgroups without enabling memory controller */
        parent2 = cg_name(root, "memcg_test_1");
        child2 = cg_name(root, "memcg_test_1/memcg_test_1");
        if (!parent2 || !child2)
                goto cleanup;

        if (cg_create(parent2))
                goto cleanup;

        if (cg_create(child2))
                goto cleanup;

        if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
                goto cleanup;

        if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        if (child)
                cg_destroy(child);
        if (parent)
                cg_destroy(parent);
        free(parent);
        free(child);

        if (child2)
                cg_destroy(child2);
        if (parent2)
                cg_destroy(parent2);
        free(parent2);
        free(child2);

        return ret;
}
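/*
 * Allocate (and touch) 50M of anonymous memory inside the cgroup, then
 * check that memory.current and the "anon" entry of memory.stat roughly
 * match the allocated size.
 */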
static int alloc_anon_50M_check(const char *cgroup, void *arg)
{
        size_t size = MB(50);
        char *buf, *ptr;
        long anon, current;
        int ret = -1;

        buf = malloc(size);
        if (!buf)
                return -1;

        for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
                *ptr = 0;

        current = cg_read_long(cgroup, "memory.current");
        if (current < size)
                goto cleanup;

        if (!values_close(size, current, 3))
                goto cleanup;

        anon = cg_read_key_long(cgroup, "memory.stat", "anon ");
        if (anon < 0)
                goto cleanup;

        if (!values_close(anon, current, 3))
                goto cleanup;

        ret = 0;
cleanup:
        free(buf);
        return ret;
}
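/*
 * Create 50M of pagecache inside the cgroup via a temporary file, then
 * check that memory.current and the "file" entry of memory.stat reflect
 * the allocation.
 */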
static int alloc_pagecache_50M_check(const char *cgroup, void *arg)
{
        size_t size = MB(50);
        int ret = -1;
        long current, file;
        int fd;

        fd = get_temp_fd();
        if (fd < 0)
                return -1;

        if (alloc_pagecache(fd, size))
                goto cleanup;

        current = cg_read_long(cgroup, "memory.current");
        if (current < size)
                goto cleanup;

        file = cg_read_key_long(cgroup, "memory.stat", "file ");
        if (file < 0)
                goto cleanup;

        if (!values_close(file, current, 10))
                goto cleanup;

        ret = 0;

cleanup:
        close(fd);
        return ret;
}
/*
 * This test creates a memory cgroup, allocates
 * some anonymous memory and some pagecache
 * and checks memory.current and some memory.stat values.
 */
static int test_memcg_current(const char *root)
{
        int ret = KSFT_FAIL;
        long current;
        char *memcg;

        memcg = cg_name(root, "memcg_test");
        if (!memcg)
                goto cleanup;

        if (cg_create(memcg))
                goto cleanup;

        current = cg_read_long(memcg, "memory.current");
        if (current != 0)
                goto cleanup;

        if (cg_run(memcg, alloc_anon_50M_check, NULL))
                goto cleanup;

        if (cg_run(memcg, alloc_pagecache_50M_check, NULL))
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_destroy(memcg);
        free(memcg);
        return ret;
}
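/* Fill 50M of pagecache using the temporary file descriptor passed in arg */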
static int alloc_pagecache_50M(const char *cgroup, void *arg)
{
        int fd = (long)arg;

        return alloc_pagecache(fd, MB(50));
}
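/*
 * Same as alloc_pagecache_50M(), but the child keeps running (and thus
 * keeps its memory charged) until the parent test process exits.
 */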
static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
{
        int fd = (long)arg;
        int ppid = getppid();

        if (alloc_pagecache(fd, MB(50)))
                return -1;

        while (getppid() == ppid)
                sleep(1);

        return 0;
}
/*
 * First, this test creates the following hierarchy:
 * A       memory.min = 50M,  memory.max = 200M
 * A/B     memory.min = 50M,  memory.current = 50M
 * A/B/C   memory.min = 75M,  memory.current = 50M
 * A/B/D   memory.min = 25M,  memory.current = 50M
 * A/B/E   memory.min = 500M, memory.current = 0
 * A/B/F   memory.min = 0,    memory.current = 50M
 *
 * Usages are pagecache, but the test keeps a running
 * process in every leaf cgroup.
 * Then it creates A/G and creates significant
 * memory pressure in it.
 *
 * Then it checks actual memory usages and expects that:
 * A/B    memory.current ~= 50M
 * A/B/C  memory.current ~= 33M
 * A/B/D  memory.current ~= 17M
 * A/B/E  memory.current ~= 0
 *
 * After that it tries to allocate more than the unprotected
 * memory available in A, and checks that memory.min protects
 * pagecache even in this case.
 */
static int test_memcg_min(const char *root)
{
        int ret = KSFT_FAIL;
        char *parent[3] = {NULL};
        char *children[4] = {NULL};
        long c[4];
        int i, attempts;
        int fd;

        fd = get_temp_fd();
        if (fd < 0)
                goto cleanup;

        parent[0] = cg_name(root, "memcg_test_0");
        if (!parent[0])
                goto cleanup;

        parent[1] = cg_name(parent[0], "memcg_test_1");
        if (!parent[1])
                goto cleanup;

        parent[2] = cg_name(parent[0], "memcg_test_2");
        if (!parent[2])
                goto cleanup;

        if (cg_create(parent[0]))
                goto cleanup;

        if (cg_read_long(parent[0], "memory.min")) {
                ret = KSFT_SKIP;
                goto cleanup;
        }

        if (cg_write(parent[0], "cgroup.subtree_control", "+memory"))
                goto cleanup;

        if (cg_write(parent[0], "memory.max", "200M"))
                goto cleanup;

        if (cg_write(parent[0], "memory.swap.max", "0"))
                goto cleanup;

        if (cg_create(parent[1]))
                goto cleanup;

        if (cg_write(parent[1], "cgroup.subtree_control", "+memory"))
                goto cleanup;

        if (cg_create(parent[2]))
                goto cleanup;

        for (i = 0; i < ARRAY_SIZE(children); i++) {
                children[i] = cg_name_indexed(parent[1], "child_memcg", i);
                if (!children[i])
                        goto cleanup;

                if (cg_create(children[i]))
                        goto cleanup;

                if (i == 2)
                        continue;

                cg_run_nowait(children[i], alloc_pagecache_50M_noexit,
                              (void *)(long)fd);
        }

        if (cg_write(parent[0], "memory.min", "50M"))
                goto cleanup;
        if (cg_write(parent[1], "memory.min", "50M"))
                goto cleanup;
        if (cg_write(children[0], "memory.min", "75M"))
                goto cleanup;
        if (cg_write(children[1], "memory.min", "25M"))
                goto cleanup;
        if (cg_write(children[2], "memory.min", "500M"))
                goto cleanup;
        if (cg_write(children[3], "memory.min", "0"))
                goto cleanup;

        attempts = 0;
        while (!values_close(cg_read_long(parent[1], "memory.current"),
                             MB(150), 3)) {
                if (attempts++ > 5)
                        break;
                sleep(1);
        }

        if (cg_run(parent[2], alloc_anon, (void *)MB(148)))
                goto cleanup;

        if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
                goto cleanup;

        for (i = 0; i < ARRAY_SIZE(children); i++)
                c[i] = cg_read_long(children[i], "memory.current");

        if (!values_close(c[0], MB(33), 10))
                goto cleanup;

        if (!values_close(c[1], MB(17), 10))
                goto cleanup;

        if (!values_close(c[2], 0, 1))
                goto cleanup;

        if (!cg_run(parent[2], alloc_anon, (void *)MB(170)))
                goto cleanup;

        if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) {
                if (!children[i])
                        continue;

                cg_destroy(children[i]);
                free(children[i]);
        }

        for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) {
                if (!parent[i])
                        continue;

                cg_destroy(parent[i]);
                free(parent[i]);
        }

        close(fd);
        return ret;
}
/*
 * First, this test creates the following hierarchy:
 * A       memory.low = 50M,  memory.max = 200M
 * A/B     memory.low = 50M,  memory.current = 50M
 * A/B/C   memory.low = 75M,  memory.current = 50M
 * A/B/D   memory.low = 25M,  memory.current = 50M
 * A/B/E   memory.low = 500M, memory.current = 0
 * A/B/F   memory.low = 0,    memory.current = 50M
 *
 * Usages are pagecache.
 * Then it creates A/G and creates significant
 * memory pressure in it.
 *
 * Then it checks actual memory usages and expects that:
 * A/B    memory.current ~= 50M
 * A/B/C  memory.current ~= 33M
 * A/B/D  memory.current ~= 17M
 * A/B/E  memory.current ~= 0
 *
 * After that it tries to allocate more than the unprotected
 * memory available in A, and checks low and oom events in
 * memory.events.
 */
static int test_memcg_low(const char *root)
{
        int ret = KSFT_FAIL;
        char *parent[3] = {NULL};
        char *children[4] = {NULL};
        long low, oom;
        long c[4];
        int i;
        int fd;

        fd = get_temp_fd();
        if (fd < 0)
                goto cleanup;

        parent[0] = cg_name(root, "memcg_test_0");
        if (!parent[0])
                goto cleanup;

        parent[1] = cg_name(parent[0], "memcg_test_1");
        if (!parent[1])
                goto cleanup;

        parent[2] = cg_name(parent[0], "memcg_test_2");
        if (!parent[2])
                goto cleanup;

        if (cg_create(parent[0]))
                goto cleanup;

        if (cg_read_long(parent[0], "memory.low"))
                goto cleanup;

        if (cg_write(parent[0], "cgroup.subtree_control", "+memory"))
                goto cleanup;

        if (cg_write(parent[0], "memory.max", "200M"))
                goto cleanup;

        if (cg_write(parent[0], "memory.swap.max", "0"))
                goto cleanup;

        if (cg_create(parent[1]))
                goto cleanup;

        if (cg_write(parent[1], "cgroup.subtree_control", "+memory"))
                goto cleanup;

        if (cg_create(parent[2]))
                goto cleanup;

        for (i = 0; i < ARRAY_SIZE(children); i++) {
                children[i] = cg_name_indexed(parent[1], "child_memcg", i);
                if (!children[i])
                        goto cleanup;

                if (cg_create(children[i]))
                        goto cleanup;

                if (i == 2)
                        continue;

                if (cg_run(children[i], alloc_pagecache_50M, (void *)(long)fd))
                        goto cleanup;
        }

        if (cg_write(parent[0], "memory.low", "50M"))
                goto cleanup;
        if (cg_write(parent[1], "memory.low", "50M"))
                goto cleanup;
        if (cg_write(children[0], "memory.low", "75M"))
                goto cleanup;
        if (cg_write(children[1], "memory.low", "25M"))
                goto cleanup;
        if (cg_write(children[2], "memory.low", "500M"))
                goto cleanup;
        if (cg_write(children[3], "memory.low", "0"))
                goto cleanup;

        if (cg_run(parent[2], alloc_anon, (void *)MB(148)))
                goto cleanup;

        if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
                goto cleanup;

        for (i = 0; i < ARRAY_SIZE(children); i++)
                c[i] = cg_read_long(children[i], "memory.current");

        if (!values_close(c[0], MB(33), 10))
                goto cleanup;

        if (!values_close(c[1], MB(17), 10))
                goto cleanup;

        if (!values_close(c[2], 0, 1))
                goto cleanup;

        if (cg_run(parent[2], alloc_anon, (void *)MB(166))) {
                fprintf(stderr,
                        "memory.low prevented the anon memory allocation\n");
                goto cleanup;
        }

        for (i = 0; i < ARRAY_SIZE(children); i++) {
                oom = cg_read_key_long(children[i], "memory.events", "oom ");
                low = cg_read_key_long(children[i], "memory.events", "low ");

                if (oom)
                        goto cleanup;
                if (i < 2 && low <= 0)
                        goto cleanup;
                if (i >= 2 && low)
                        goto cleanup;
        }

        ret = KSFT_PASS;

cleanup:
        for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) {
                if (!children[i])
                        continue;

                cg_destroy(children[i]);
                free(children[i]);
        }

        for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) {
                if (!parent[i])
                        continue;

                cg_destroy(parent[i]);
                free(parent[i]);
        }

        close(fd);
        return ret;
}
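/*
 * Try to create 50M of pagecache inside a cgroup limited to 30M and
 * check that memory.current stays just below the 30M limit.
 */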
static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
{
        size_t size = MB(50);
        int ret = -1;
        long current;
        int fd;

        fd = get_temp_fd();
        if (fd < 0)
                return -1;

        if (alloc_pagecache(fd, size))
                goto cleanup;

        current = cg_read_long(cgroup, "memory.current");
        if (current <= MB(29) || current > MB(30))
                goto cleanup;

        ret = 0;

cleanup:
        close(fd);
        return ret;
}
/*
 * This test checks that memory.high limits the amount of
 * memory which can be consumed by either anonymous memory
 * or pagecache.
 */
static int test_memcg_high(const char *root)
{
        int ret = KSFT_FAIL;
        char *memcg;
        long high;

        memcg = cg_name(root, "memcg_test");
        if (!memcg)
                goto cleanup;

        if (cg_create(memcg))
                goto cleanup;

        if (cg_read_strcmp(memcg, "memory.high", "max\n"))
                goto cleanup;

        if (cg_write(memcg, "memory.swap.max", "0"))
                goto cleanup;

        if (cg_write(memcg, "memory.high", "30M"))
                goto cleanup;

        if (cg_run(memcg, alloc_anon, (void *)MB(100)))
                goto cleanup;

        if (!cg_run(memcg, alloc_pagecache_50M_check, NULL))
                goto cleanup;

        if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
                goto cleanup;

        high = cg_read_key_long(memcg, "memory.events", "high ");
        if (high <= 0)
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_destroy(memcg);
        free(memcg);
        return ret;
}
/*
 * This test checks that memory.max limits the amount of
 * memory which can be consumed by either anonymous memory
 * or pagecache.
 */
static int test_memcg_max(const char *root)
{
        int ret = KSFT_FAIL;
        char *memcg;
        long current, max;

        memcg = cg_name(root, "memcg_test");
        if (!memcg)
                goto cleanup;

        if (cg_create(memcg))
                goto cleanup;

        if (cg_read_strcmp(memcg, "memory.max", "max\n"))
                goto cleanup;

        if (cg_write(memcg, "memory.swap.max", "0"))
                goto cleanup;

        if (cg_write(memcg, "memory.max", "30M"))
                goto cleanup;

        /* Should be killed by OOM killer */
        if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
                goto cleanup;

        if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
                goto cleanup;

        current = cg_read_long(memcg, "memory.current");
        if (current > MB(30) || !current)
                goto cleanup;

        max = cg_read_key_long(memcg, "memory.events", "max ");
        if (max <= 0)
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_destroy(memcg);
        free(memcg);
        return ret;
}
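/*
 * Allocate (and touch) 50M of anonymous memory in a cgroup whose
 * memory.max is passed in arg, and check that the excess was pushed out
 * to swap: memory.current should sit near memory.max, and
 * memory.current + memory.swap.current should add up to roughly 50M.
 */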
static int alloc_anon_50M_check_swap(const char *cgroup, void *arg)
{
        long mem_max = (long)arg;
        size_t size = MB(50);
        char *buf, *ptr;
        long mem_current, swap_current;
        int ret = -1;

        buf = malloc(size);
        if (!buf)
                return -1;

        for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
                *ptr = 0;

        mem_current = cg_read_long(cgroup, "memory.current");
        if (!mem_current || !values_close(mem_current, mem_max, 3))
                goto cleanup;

        swap_current = cg_read_long(cgroup, "memory.swap.current");
        if (!swap_current ||
            !values_close(mem_current + swap_current, size, 3))
                goto cleanup;

        ret = 0;
cleanup:
        free(buf);
        return ret;
}
/*
 * This test checks that memory.swap.max limits the amount of
 * anonymous memory which can be swapped out.
 */
static int test_memcg_swap_max(const char *root)
{
        int ret = KSFT_FAIL;
        char *memcg;
        long max;

        if (!is_swap_enabled())
                return KSFT_SKIP;

        memcg = cg_name(root, "memcg_test");
        if (!memcg)
                goto cleanup;

        if (cg_create(memcg))
                goto cleanup;

        if (cg_read_long(memcg, "memory.swap.current")) {
                ret = KSFT_SKIP;
                goto cleanup;
        }

        if (cg_read_strcmp(memcg, "memory.max", "max\n"))
                goto cleanup;

        if (cg_read_strcmp(memcg, "memory.swap.max", "max\n"))
                goto cleanup;

        if (cg_write(memcg, "memory.swap.max", "30M"))
                goto cleanup;

        if (cg_write(memcg, "memory.max", "30M"))
                goto cleanup;

        /* Should be killed by OOM killer */
        if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
                goto cleanup;

        if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
                goto cleanup;

        if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
                goto cleanup;

        if (cg_run(memcg, alloc_anon_50M_check_swap, (void *)MB(30)))
                goto cleanup;

        max = cg_read_key_long(memcg, "memory.events", "max ");
        if (max <= 0)
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_destroy(memcg);
        free(memcg);
        return ret;
}
/*
 * This test disables swapping and tries to allocate anonymous memory
 * up to OOM. Then it checks for oom and oom_kill events in
 * memory.events.
 */
static int test_memcg_oom_events(const char *root)
{
        int ret = KSFT_FAIL;
        char *memcg;

        memcg = cg_name(root, "memcg_test");
        if (!memcg)
                goto cleanup;

        if (cg_create(memcg))
                goto cleanup;

        if (cg_write(memcg, "memory.max", "30M"))
                goto cleanup;

        if (cg_write(memcg, "memory.swap.max", "0"))
                goto cleanup;

        if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
                goto cleanup;

        if (cg_read_strcmp(memcg, "cgroup.procs", ""))
                goto cleanup;

        if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
                goto cleanup;

        if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_destroy(memcg);
        free(memcg);
        return ret;
}
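/*
 * Arguments passed to the forked TCP server: the port to listen on and
 * a pipe used to report the bind() result back to the parent.
 */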
struct tcp_server_args {
        unsigned short port;
        int ctl[2];
};
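/*
 * Forked TCP server: bind to the requested port, report success or the
 * bind() errno over the control pipe, then keep writing to the first
 * accepted client until the connection is reset.
 */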
static int tcp_server(const char *cgroup, void *arg)
{
        struct tcp_server_args *srv_args = arg;
        struct sockaddr_in6 saddr = { 0 };
        socklen_t slen = sizeof(saddr);
        int sk, client_sk, ctl_fd, yes = 1, ret = -1;

        close(srv_args->ctl[0]);
        ctl_fd = srv_args->ctl[1];

        saddr.sin6_family = AF_INET6;
        saddr.sin6_addr = in6addr_any;
        saddr.sin6_port = htons(srv_args->port);

        sk = socket(AF_INET6, SOCK_STREAM, 0);
        if (sk < 0)
                return ret;

        if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0)
                goto cleanup;

        if (bind(sk, (struct sockaddr *)&saddr, slen)) {
                write(ctl_fd, &errno, sizeof(errno));
                goto cleanup;
        }

        if (listen(sk, 1))
                goto cleanup;

        ret = 0;
        if (write(ctl_fd, &ret, sizeof(ret)) != sizeof(ret)) {
                ret = -1;
                goto cleanup;
        }

        client_sk = accept(sk, NULL, NULL);
        if (client_sk < 0)
                goto cleanup;

        ret = -1;
        for (;;) {
                uint8_t buf[0x100000];

                if (write(client_sk, buf, sizeof(buf)) <= 0) {
                        if (errno == ECONNRESET)
                                ret = 0;
                        break;
                }
        }

        close(client_sk);

cleanup:
        close(sk);
        return ret;
}
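/*
 * Connect to the server on localhost and keep reading from the socket,
 * comparing memory.current with the "sock" entry of memory.stat after
 * each read; pass as soon as the two values are reasonably close.
 */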
static int tcp_client(const char *cgroup, unsigned short port)
{
        const char server[] = "localhost";
        struct addrinfo *ai;
        char servport[6];
        int retries = 0x10; /* nice round number */
        int sk, ret;

        /* port is unsigned, so print it with %hu */
        snprintf(servport, sizeof(servport), "%hu", port);
        ret = getaddrinfo(server, servport, NULL, &ai);
        if (ret)
                return ret;

        sk = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
        if (sk < 0)
                goto free_ainfo;

        ret = connect(sk, ai->ai_addr, ai->ai_addrlen);
        if (ret < 0)
                goto close_sk;

        ret = KSFT_FAIL;
        while (retries--) {
                uint8_t buf[0x100000];
                long current, sock;

                if (read(sk, buf, sizeof(buf)) <= 0)
                        goto close_sk;

                current = cg_read_long(cgroup, "memory.current");
                sock = cg_read_key_long(cgroup, "memory.stat", "sock ");

                if (current < 0 || sock < 0)
                        goto close_sk;

                if (current < sock)
                        goto close_sk;

                if (values_close(current, sock, 10)) {
                        ret = KSFT_PASS;
                        break;
                }
        }

close_sk:
        close(sk);
free_ainfo:
        freeaddrinfo(ai);
        return ret;
}
/*
 * This test checks socket memory accounting.
 * The test forks a TCP server, which listens on a random port between
 * 1000 and 61000. Once it gets a client connection, it starts writing
 * to its socket.
 * The TCP client interleaves reads from the socket with checks of
 * whether memory.current and memory.stat.sock are similar.
 */
static int test_memcg_sock(const char *root)
{
        int bind_retries = 5, ret = KSFT_FAIL, pid, err;
        unsigned short port;
        char *memcg;

        memcg = cg_name(root, "memcg_test");
        if (!memcg)
                goto cleanup;

        if (cg_create(memcg))
                goto cleanup;

        while (bind_retries--) {
                struct tcp_server_args args;

                if (pipe(args.ctl))
                        goto cleanup;

                port = args.port = 1000 + rand() % 60000;

                pid = cg_run_nowait(memcg, tcp_server, &args);
                if (pid < 0)
                        goto cleanup;

                close(args.ctl[1]);
                if (read(args.ctl[0], &err, sizeof(err)) != sizeof(err))
                        goto cleanup;
                close(args.ctl[0]);

                if (!err)
                        break;
                if (err != EADDRINUSE)
                        goto cleanup;

                waitpid(pid, NULL, 0);
        }

        if (err == EADDRINUSE) {
                ret = KSFT_SKIP;
                goto cleanup;
        }

        if (tcp_client(memcg, port) != KSFT_PASS)
                goto cleanup;

        waitpid(pid, &err, 0);
        if (WEXITSTATUS(err))
                goto cleanup;

        if (cg_read_long(memcg, "memory.current") < 0)
                goto cleanup;

        if (cg_read_key_long(memcg, "memory.stat", "sock "))
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_destroy(memcg);
        free(memcg);
        return ret;
}
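/* Table of all tests: the T() macro pairs each test function with its name */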
#define T(x) { x, #x }
struct memcg_test {
        int (*fn)(const char *root);
        const char *name;
} tests[] = {
        T(test_memcg_subtree_control),
        T(test_memcg_current),
        T(test_memcg_min),
        T(test_memcg_low),
        T(test_memcg_high),
        T(test_memcg_max),
        T(test_memcg_oom_events),
        T(test_memcg_swap_max),
        T(test_memcg_sock),
};
#undef T
int main(int argc, char **argv)
{
        char root[PATH_MAX];
        int i, ret = EXIT_SUCCESS;

        if (cg_find_unified_root(root, sizeof(root)))
                ksft_exit_skip("cgroup v2 isn't mounted\n");

        /*
         * Check that memory controller is available:
         * memory is listed in cgroup.controllers
         */
        if (cg_read_strstr(root, "cgroup.controllers", "memory"))
                ksft_exit_skip("memory controller isn't available\n");

        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                switch (tests[i].fn(root)) {
                case KSFT_PASS:
                        ksft_test_result_pass("%s\n", tests[i].name);
                        break;
                case KSFT_SKIP:
                        ksft_test_result_skip("%s\n", tests[i].name);
                        break;
                default:
                        ret = EXIT_FAILURE;
                        ksft_test_result_fail("%s\n", tests[i].name);
                        break;
                }
        }

        return ret;
}