seccomp_bpf.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157
  1. /*
  2. * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
  3. * Use of this source code is governed by the GPLv2 license.
  4. *
  5. * Test code for seccomp bpf.
  6. */
  7. #include <asm/siginfo.h>
  8. #define __have_siginfo_t 1
  9. #define __have_sigval_t 1
  10. #define __have_sigevent_t 1
  11. #include <errno.h>
  12. #include <linux/filter.h>
  13. #include <sys/prctl.h>
  14. #include <sys/ptrace.h>
  15. #include <sys/types.h>
  16. #include <sys/user.h>
  17. #include <linux/prctl.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/seccomp.h>
  20. #include <poll.h>
  21. #include <pthread.h>
  22. #include <semaphore.h>
  23. #include <signal.h>
  24. #include <stddef.h>
  25. #include <stdbool.h>
  26. #include <string.h>
  27. #include <linux/elf.h>
  28. #include <sys/uio.h>
  29. #define _GNU_SOURCE
  30. #include <unistd.h>
  31. #include <sys/syscall.h>
  32. #include "test_harness.h"
  33. #ifndef PR_SET_PTRACER
  34. # define PR_SET_PTRACER 0x59616d61
  35. #endif
  36. #ifndef PR_SET_NO_NEW_PRIVS
  37. #define PR_SET_NO_NEW_PRIVS 38
  38. #define PR_GET_NO_NEW_PRIVS 39
  39. #endif
  40. #ifndef PR_SECCOMP_EXT
  41. #define PR_SECCOMP_EXT 43
  42. #endif
  43. #ifndef SECCOMP_EXT_ACT
  44. #define SECCOMP_EXT_ACT 1
  45. #endif
  46. #ifndef SECCOMP_EXT_ACT_TSYNC
  47. #define SECCOMP_EXT_ACT_TSYNC 1
  48. #endif
  49. #ifndef SECCOMP_MODE_STRICT
  50. #define SECCOMP_MODE_STRICT 1
  51. #endif
  52. #ifndef SECCOMP_MODE_FILTER
  53. #define SECCOMP_MODE_FILTER 2
  54. #endif
  55. #ifndef SECCOMP_RET_KILL
  56. #define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */
  57. #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
  58. #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
  59. #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
  60. #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
  61. /* Masks for the return value sections. */
  62. #define SECCOMP_RET_ACTION 0x7fff0000U
  63. #define SECCOMP_RET_DATA 0x0000ffffU
  64. struct seccomp_data {
  65. int nr;
  66. __u32 arch;
  67. __u64 instruction_pointer;
  68. __u64 args[6];
  69. };
  70. #endif
  71. #if __BYTE_ORDER == __LITTLE_ENDIAN
  72. #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
  73. #elif __BYTE_ORDER == __BIG_ENDIAN
  74. #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
  75. #else
  76. #error "wut? Unknown __BYTE_ORDER?!"
  77. #endif
  78. #define SIBLING_EXIT_UNKILLED 0xbadbeef
  79. #define SIBLING_EXIT_FAILURE 0xbadface
  80. #define SIBLING_EXIT_NEWPRIVS 0xbadfeed
  81. TEST(mode_strict_support)
  82. {
  83. long ret;
  84. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
  85. ASSERT_EQ(0, ret) {
  86. TH_LOG("Kernel does not support CONFIG_SECCOMP");
  87. }
  88. syscall(__NR_exit, 1);
  89. }
  90. TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
  91. {
  92. long ret;
  93. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
  94. ASSERT_EQ(0, ret) {
  95. TH_LOG("Kernel does not support CONFIG_SECCOMP");
  96. }
  97. syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
  98. NULL, NULL, NULL);
  99. EXPECT_FALSE(true) {
  100. TH_LOG("Unreachable!");
  101. }
  102. }
  103. /* Note! This doesn't test no new privs behavior */
  104. TEST(no_new_privs_support)
  105. {
  106. long ret;
  107. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  108. EXPECT_EQ(0, ret) {
  109. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  110. }
  111. }
  112. /* Tests kernel support by checking for a copy_from_user() fault on * NULL. */
  113. TEST(mode_filter_support)
  114. {
  115. long ret;
  116. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  117. ASSERT_EQ(0, ret) {
  118. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  119. }
  120. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
  121. EXPECT_EQ(-1, ret);
  122. EXPECT_EQ(EFAULT, errno) {
  123. TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
  124. }
  125. }
/*
 * Installing a filter without NO_NEW_PRIVS set requires privilege:
 * unprivileged callers must get EACCES, root is allowed through.
 */
TEST(mode_filter_without_nnp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	/* Confirm NO_NEW_PRIVS is not already set for this process. */
	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
	ASSERT_LE(0, ret) {
		TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
	}
	errno = 0;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	/* Succeeds with CAP_SYS_ADMIN, fails without */
	/* TODO(wad) check caps not euid */
	if (geteuid()) {
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EACCES, errno);
	} else {
		EXPECT_EQ(0, ret);
	}
}
  151. #define MAX_INSNS_PER_PATH 32768
  152. TEST(filter_size_limits)
  153. {
  154. int i;
  155. int count = BPF_MAXINSNS + 1;
  156. struct sock_filter allow[] = {
  157. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  158. };
  159. struct sock_filter *filter;
  160. struct sock_fprog prog = { };
  161. long ret;
  162. filter = calloc(count, sizeof(*filter));
  163. ASSERT_NE(NULL, filter);
  164. for (i = 0; i < count; i++)
  165. filter[i] = allow[0];
  166. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  167. ASSERT_EQ(0, ret);
  168. prog.filter = filter;
  169. prog.len = count;
  170. /* Too many filter instructions in a single filter. */
  171. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  172. ASSERT_NE(0, ret) {
  173. TH_LOG("Installing %d insn filter was allowed", prog.len);
  174. }
  175. /* One less is okay, though. */
  176. prog.len -= 1;
  177. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  178. ASSERT_EQ(0, ret) {
  179. TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
  180. }
  181. }
  182. TEST(filter_chain_limits)
  183. {
  184. int i;
  185. int count = BPF_MAXINSNS;
  186. struct sock_filter allow[] = {
  187. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  188. };
  189. struct sock_filter *filter;
  190. struct sock_fprog prog = { };
  191. long ret;
  192. filter = calloc(count, sizeof(*filter));
  193. ASSERT_NE(NULL, filter);
  194. for (i = 0; i < count; i++)
  195. filter[i] = allow[0];
  196. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  197. ASSERT_EQ(0, ret);
  198. prog.filter = filter;
  199. prog.len = 1;
  200. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  201. ASSERT_EQ(0, ret);
  202. prog.len = count;
  203. /* Too many total filter instructions. */
  204. for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
  205. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  206. if (ret != 0)
  207. break;
  208. }
  209. ASSERT_NE(0, ret) {
  210. TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
  211. i, count, i * (count + 4));
  212. }
  213. }
  214. TEST(mode_filter_cannot_move_to_strict)
  215. {
  216. struct sock_filter filter[] = {
  217. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  218. };
  219. struct sock_fprog prog = {
  220. .len = (unsigned short)ARRAY_SIZE(filter),
  221. .filter = filter,
  222. };
  223. long ret;
  224. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  225. ASSERT_EQ(0, ret);
  226. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  227. ASSERT_EQ(0, ret);
  228. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
  229. EXPECT_EQ(-1, ret);
  230. EXPECT_EQ(EINVAL, errno);
  231. }
  232. TEST(mode_filter_get_seccomp)
  233. {
  234. struct sock_filter filter[] = {
  235. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  236. };
  237. struct sock_fprog prog = {
  238. .len = (unsigned short)ARRAY_SIZE(filter),
  239. .filter = filter,
  240. };
  241. long ret;
  242. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  243. ASSERT_EQ(0, ret);
  244. ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
  245. EXPECT_EQ(0, ret);
  246. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  247. ASSERT_EQ(0, ret);
  248. ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
  249. EXPECT_EQ(2, ret);
  250. }
  251. TEST(ALLOW_all)
  252. {
  253. struct sock_filter filter[] = {
  254. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  255. };
  256. struct sock_fprog prog = {
  257. .len = (unsigned short)ARRAY_SIZE(filter),
  258. .filter = filter,
  259. };
  260. long ret;
  261. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  262. ASSERT_EQ(0, ret);
  263. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  264. ASSERT_EQ(0, ret);
  265. }
  266. TEST(empty_prog)
  267. {
  268. struct sock_filter filter[] = {
  269. };
  270. struct sock_fprog prog = {
  271. .len = (unsigned short)ARRAY_SIZE(filter),
  272. .filter = filter,
  273. };
  274. long ret;
  275. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  276. ASSERT_EQ(0, ret);
  277. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  278. EXPECT_EQ(-1, ret);
  279. EXPECT_EQ(EINVAL, errno);
  280. }
/*
 * A return value inside the defined range but with an unknown action
 * (0x10000000) must be treated as KILL, delivering SIGSYS.
 */
TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
	struct sock_filter filter[] = {
		/* Unknown action between RET_KILL and RET_ALLOW. */
		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}
/* return code >= 0x80000000 is unused. */
/*
 * A return value above SECCOMP_RET_ALLOW (0x90000000) is likewise
 * undefined and must default to KILL, delivering SIGSYS.
 */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}
  318. TEST_SIGNAL(KILL_all, SIGSYS)
  319. {
  320. struct sock_filter filter[] = {
  321. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
  322. };
  323. struct sock_fprog prog = {
  324. .len = (unsigned short)ARRAY_SIZE(filter),
  325. .filter = filter,
  326. };
  327. long ret;
  328. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  329. ASSERT_EQ(0, ret);
  330. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  331. ASSERT_EQ(0, ret);
  332. }
/*
 * Filter: load the syscall nr; KILL if it is getpid(), ALLOW anything
 * else. Verifies exactly one syscall can be made fatal.
 */
TEST_SIGNAL(KILL_one, SIGSYS)
{
	struct sock_filter filter[] = {
		/* A <- seccomp_data.nr (the syscall number). */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		/* nr == getpid: fall through to KILL; otherwise skip to ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	/* Unfiltered syscalls still work. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Filter on syscall nr AND first argument: getpid() is killed only
 * when arg0 == 0x0C0FFEE; all other calls (and values) are allowed.
 */
TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
	struct sock_filter filter[] = {
		/* A <- seccomp_data.nr. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		/* Not getpid: skip straight to ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
		/* arg0 == 0x0C0FFEE: KILL; otherwise ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	pid_t pid = getpid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	/* Other syscalls, and getpid() without the magic arg, still work. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(pid, syscall(__NR_getpid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid, 0x0C0FFEE));
}
/*
 * Same as KILL_one_arg_one but keyed on the sixth syscall argument,
 * exercising the highest valid args[] index (5).
 */
TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
	struct sock_filter filter[] = {
		/* A <- seccomp_data.nr. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		/* Not getpid: skip straight to ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
		/* arg5 == 0x0C0FFEE: KILL; otherwise ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	pid_t pid = getpid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	/* Other syscalls, and getpid() without the magic arg5, still work. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(pid, syscall(__NR_getpid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid, 1, 2, 3, 4, 5, 0x0C0FFEE));
}
  414. /* TODO(wad) add 64-bit versus 32-bit arg tests. */
  415. TEST(arg_out_of_range)
  416. {
  417. struct sock_filter filter[] = {
  418. BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
  419. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  420. };
  421. struct sock_fprog prog = {
  422. .len = (unsigned short)ARRAY_SIZE(filter),
  423. .filter = filter,
  424. };
  425. long ret;
  426. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  427. ASSERT_EQ(0, ret);
  428. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  429. EXPECT_EQ(-1, ret);
  430. EXPECT_EQ(EINVAL, errno);
  431. }
/*
 * SECCOMP_RET_ERRNO with a valid errno value: read() must fail with
 * the filter-supplied E2BIG instead of executing.
 */
TEST(ERRNO_valid)
{
	struct sock_filter filter[] = {
		/* A <- seccomp_data.nr. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		/* read(): return E2BIG; everything else: ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	/* Unfiltered syscalls still work. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(E2BIG, errno);
}
/*
 * SECCOMP_RET_ERRNO with data 0: the syscall is skipped but appears
 * to succeed (returns 0) rather than failing.
 */
TEST(ERRNO_zero)
{
	struct sock_filter filter[] = {
		/* A <- seccomp_data.nr. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		/* read(): return "errno" 0; everything else: ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* "errno" of 0 is ok. */
	EXPECT_EQ(0, read(0, NULL, 0));
}
/*
 * The errno carried in SECCOMP_RET_DATA is capped by the kernel:
 * requesting 4096 must surface as the maximum valid value, 4095.
 */
TEST(ERRNO_capped)
{
	struct sock_filter filter[] = {
		/* A <- seccomp_data.nr. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		/* read(): return errno 4096 (out of range); else ALLOW. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 4096),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	/* The kernel clamps the requested 4096 to 4095. */
	EXPECT_EQ(4095, errno);
}
/* Per-test state for the TRAP fixture: the filter program to install. */
FIXTURE_DATA(TRAP) {
	struct sock_fprog prog;
};
/*
 * Build a filter that TRAPs (SIGSYS) on getpid() and allows everything
 * else, copying it to heap storage so the test body can install it.
 */
FIXTURE_SETUP(TRAP)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	/* Heap copy: the stack array dies when setup returns. */
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
}
  519. FIXTURE_TEARDOWN(TRAP)
  520. {
  521. if (self->prog.filter)
  522. free(self->prog.filter);
  523. }
  524. TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
  525. {
  526. long ret;
  527. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  528. ASSERT_EQ(0, ret);
  529. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
  530. ASSERT_EQ(0, ret);
  531. syscall(__NR_getpid);
  532. }
/* Ensure that SIGSYS overrides SIG_IGN */
TEST_F_SIGNAL(TRAP, ign, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	/* Ignoring SIGSYS must not prevent the TRAP action from killing us. */
	signal(SIGSYS, SIG_IGN);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	/* Trips the filter's TRAP rule. */
	syscall(__NR_getpid);
}
/* State captured by the SIGSYS handler for inspection by TRAP.handler. */
static struct siginfo TRAP_info;
static volatile int TRAP_nr;
/* SIGSYS handler: record the signal number and the full siginfo. */
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{
	memcpy(&TRAP_info, info, sizeof(TRAP_info));
	TRAP_nr = nr;
}
/*
 * Install the TRAP filter with a real SIGSYS handler and verify the
 * delivered siginfo identifies the blocked getpid() call.
 */
TEST_F(TRAP, handler)
{
	int ret, test;
	struct sigaction act;
	sigset_t mask;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);
	sigaddset(&mask, SIGSYS);

	act.sa_sigaction = &TRAP_action;
	act.sa_flags = SA_SIGINFO;
	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigaction failed");
	}
	/* Make sure SIGSYS is deliverable even if previously blocked. */
	ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigprocmask failed");
	}

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	TRAP_nr = 0;
	memset(&TRAP_info, 0, sizeof(TRAP_info));
	/* Expect the registers to be rolled back. (nr = error) may vary
	 * based on arch. */
	ret = syscall(__NR_getpid);
	/* Silence gcc warning about volatile. */
	test = TRAP_nr;
	EXPECT_EQ(SIGSYS, test);
	/*
	 * Overlay of the SIGSYS-specific siginfo fields, used so the test
	 * also works when libc headers predate si_syscall support.
	 */
	struct local_sigsys {
		void *_call_addr; /* calling user insn */
		int _syscall; /* triggering system call number */
		unsigned int _arch; /* AUDIT_ARCH_* of syscall */
	} *sigsys = (struct local_sigsys *)
#ifdef si_syscall
		/* Modern libc: take the address of the sigsys fields. */
		&(TRAP_info.si_call_addr);
#else
		/* Old headers: the sigsys data shares si_pid's offset —
		 * NOTE(review): layout assumption; verify against kernel ABI. */
		&TRAP_info.si_pid;
#endif
	EXPECT_EQ(__NR_getpid, sigsys->_syscall);
	/* Make sure arch is non-zero. */
	EXPECT_NE(0, sigsys->_arch);
	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}
/* One filter program per seccomp action, used to test action precedence. */
FIXTURE_DATA(precedence) {
	struct sock_fprog allow;
	struct sock_fprog trace;
	struct sock_fprog error;
	struct sock_fprog trap;
	struct sock_fprog kill;
};
/*
 * Build five filters that each single out getpid() with a different
 * action (ALLOW / TRACE / ERRNO / TRAP / KILL) and allow all else,
 * copying each into heap storage on the fixture.
 */
FIXTURE_SETUP(precedence)
{
	struct sock_filter allow_insns[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter trace_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
	};
	struct sock_filter error_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
	};
	struct sock_filter trap_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
	};
	struct sock_filter kill_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};

	memset(self, 0, sizeof(*self));
	/* Copy the named stack program into self->_x (heap) and set its len. */
#define FILTER_ALLOC(_x) \
	self->_x.filter = malloc(sizeof(_x##_insns)); \
	ASSERT_NE(NULL, self->_x.filter); \
	memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
	self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
	FILTER_ALLOC(allow);
	FILTER_ALLOC(trace);
	FILTER_ALLOC(error);
	FILTER_ALLOC(trap);
	FILTER_ALLOC(kill);
}
  648. FIXTURE_TEARDOWN(precedence)
  649. {
  650. #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
  651. FILTER_FREE(allow);
  652. FILTER_FREE(trace);
  653. FILTER_FREE(error);
  654. FILTER_FREE(trap);
  655. FILTER_FREE(kill);
  656. }
  657. TEST_F(precedence, allow_ok)
  658. {
  659. pid_t parent, res = 0;
  660. long ret;
  661. parent = getppid();
  662. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  663. ASSERT_EQ(0, ret);
  664. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  665. ASSERT_EQ(0, ret);
  666. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  667. ASSERT_EQ(0, ret);
  668. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  669. ASSERT_EQ(0, ret);
  670. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
  671. ASSERT_EQ(0, ret);
  672. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
  673. ASSERT_EQ(0, ret);
  674. /* Should work just fine. */
  675. res = syscall(__NR_getppid);
  676. EXPECT_EQ(parent, res);
  677. }
/*
 * With every action stacked (kill installed last), KILL must take
 * precedence over TRACE/ERRNO/TRAP for the filtered getpid().
 */
TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
	/* getpid() should never return. */
	res = syscall(__NR_getpid);
	EXPECT_EQ(0, res);
}
/*
 * KILL wins regardless of installation order: here it is installed
 * second rather than last, yet still dominates the other actions.
 */
TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Without the kill filter installed, the trap action is the highest
 * remaining precedence and must decide the outcome: the filtered
 * getpid() raises SIGSYS via SECCOMP_RET_TRAP.
 */
TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Same as trap_is_second, but with the trap filter installed early:
 * install order must not matter, trap still wins.  Dies with SIGSYS.
 */
TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * With only allow, trace, and errno filters installed, the errno action
 * outranks trace.  getpid() returns 0 here — presumably the fixture's
 * errno filter uses SECCOMP_RET_ERRNO with data 0 (filter is defined
 * outside this view; confirm against the fixture setup).
 */
TEST_F(precedence, errno_is_third)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Same as errno_is_third, with install order reversed (errno first):
 * the errno action must still decide the filtered getpid().
 */
TEST_F(precedence, errno_is_third_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * With only allow and trace filters installed, the trace action fires.
 * Since no ptracer is attached, the traced syscall fails and returns -1.
 */
TEST_F(precedence, trace_is_fourth)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}
/*
 * Same as trace_is_fourth, with the trace filter installed first:
 * order must not matter; no ptracer means getpid() returns -1.
 */
TEST_F(precedence, trace_is_fourth_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}
  830. #ifndef PTRACE_O_TRACESECCOMP
  831. #define PTRACE_O_TRACESECCOMP 0x00000080
  832. #endif
  833. /* Catch the Ubuntu 12.04 value error. */
  834. #if PTRACE_EVENT_SECCOMP != 7
  835. #undef PTRACE_EVENT_SECCOMP
  836. #endif
  837. #ifndef PTRACE_EVENT_SECCOMP
  838. #define PTRACE_EVENT_SECCOMP 7
  839. #endif
  840. #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
  841. bool tracer_running;
  842. void tracer_stop(int sig)
  843. {
  844. tracer_running = false;
  845. }
/* Callback invoked by tracer() on each seccomp ptrace-event stop. */
typedef void tracer_func_t(struct __test_metadata *_metadata,
			   pid_t tracee, int status, void *args);
/*
 * Tracer process main loop: attach to @tracee, enable seccomp event
 * tracing, unblock the tracee through @fd, then dispatch each seccomp
 * stop to @tracer_func until shut down via SIGUSR1 or tracee exit.
 * Exits with the harness pass/fail status so the parent can adopt it.
 */
void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
	    tracer_func_t tracer_func, void *args)
{
	int ret = -1;
	struct sigaction action = {
		.sa_handler = tracer_stop,
	};

	/* Allow external shutdown. */
	tracer_running = true;
	ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));

	errno = 0;
	/*
	 * Retry until attach succeeds; the tracee may not have finished
	 * PR_SET_PTRACER yet.  EINVAL aborts the retry loop.
	 */
	while (ret == -1 && errno != EINVAL)
		ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
	ASSERT_EQ(0, ret) {
		kill(tracee, SIGKILL);
	}
	/* Wait for attach stop */
	wait(NULL);

	ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, PTRACE_O_TRACESECCOMP);
	ASSERT_EQ(0, ret) {
		TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
		kill(tracee, SIGKILL);
	}
	ptrace(PTRACE_CONT, tracee, NULL, 0);

	/* Unblock the tracee */
	ASSERT_EQ(1, write(fd, "A", 1));
	ASSERT_EQ(0, close(fd));

	/* Run until we're shut down. Must assert to stop execution. */
	while (tracer_running) {
		int status;

		if (wait(&status) != tracee)
			continue;
		if (WIFSIGNALED(status) || WIFEXITED(status))
			/* Child is dead. Time to go. */
			return;
		/* Make sure this is a seccomp event. */
		ASSERT_EQ(true, IS_SECCOMP_EVENT(status));

		tracer_func(_metadata, tracee, status, args);

		ret = ptrace(PTRACE_CONT, tracee, NULL, NULL);
		ASSERT_EQ(0, ret);
	}
	/* Directly report the status of our test harness results. */
	syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
}
  892. /* Common tracer setup/teardown functions. */
/* No-op SIGALRM handler (installed by setup_trace_fixture below). */
void cont_handler(int num)
{ }
/*
 * Fork a child that becomes a ptrace tracer running @func on each
 * seccomp event.  Blocks on a pipe read until the tracer has attached,
 * so the caller is guaranteed to be traced on return.
 * Returns the tracer's pid (pass to teardown_trace_fixture).
 */
pid_t setup_trace_fixture(struct __test_metadata *_metadata,
			  tracer_func_t func, void *args)
{
	char sync;
	int pipefd[2];
	pid_t tracer_pid;
	pid_t tracee = getpid();

	/* Setup a pipe for clean synchronization. */
	ASSERT_EQ(0, pipe(pipefd));

	/* Fork a child which we'll promote to tracer */
	tracer_pid = fork();
	ASSERT_LE(0, tracer_pid);
	signal(SIGALRM, cont_handler);
	if (tracer_pid == 0) {
		close(pipefd[0]);
		tracer(_metadata, pipefd[1], tracee, func, args);
		syscall(__NR_exit, 0);
	}
	close(pipefd[1]);
	/* Allow the (possibly unprivileged) child to attach to us. */
	prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
	/* Blocks until the tracer writes, i.e. attach is complete. */
	read(pipefd[0], &sync, 1);
	close(pipefd[0]);
	return tracer_pid;
}
/*
 * Signal the tracer to stop, reap it, and fold its exit status into
 * this test's pass/fail state.
 */
void teardown_trace_fixture(struct __test_metadata *_metadata,
			    pid_t tracer)
{
	if (tracer) {
		int status;
		/*
		 * Extract the exit code from the other process and
		 * adopt it for ourselves in case its asserts failed.
		 */
		ASSERT_EQ(0, kill(tracer, SIGUSR1));
		ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
		/*
		 * NOTE(review): WEXITSTATUS is read without a WIFEXITED
		 * check — assumes the tracer always exits normally via
		 * __NR_exit; confirm it cannot die from a signal.
		 */
		if (WEXITSTATUS(status))
			_metadata->passed = 0;
	}
}
  934. /* "poke" tracer arguments and function. */
/* "poke" tracer arguments and function. */
struct tracer_args_poke_t {
	/* Tracee address that tracer_poke() overwrites via PTRACE_POKEDATA. */
	unsigned long poke_addr;
};
/*
 * Tracer callback: verify the seccomp event message is 0x1001 (the
 * marker attached via SECCOMP_RET_TRACE), then write 0x1001 into the
 * tracee at the address supplied in @args.
 */
void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
		 void *args)
{
	int ret;
	unsigned long msg;
	struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;

	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	/* If this fails, don't try to recover. */
	ASSERT_EQ(0x1001, msg) {
		kill(tracee, SIGKILL);
	}
	/*
	 * Poke in the message.
	 * Registers are not touched to try to keep this relatively arch
	 * agnostic.
	 */
	ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
	EXPECT_EQ(0, ret);
}
/* Per-test state for the TRACE_poke fixture. */
FIXTURE_DATA(TRACE_poke) {
	struct sock_fprog prog;			/* filter: TRACE|0x1001 on read(2) */
	pid_t tracer;				/* pid from setup_trace_fixture() */
	long poked;				/* target the tracer pokes to 0x1001 */
	struct tracer_args_poke_t tracer_args;	/* carries &poked to the tracer */
};
/*
 * Build a filter that returns SECCOMP_RET_TRACE|0x1001 for read(2) and
 * allows everything else, then launch the poking tracer.
 */
FIXTURE_SETUP(TRACE_poke)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	self->poked = 0;
	memset(&self->prog, 0, sizeof(self->prog));
	/* Copy the stack-local filter to heap memory that outlives setup. */
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Set up tracer args. */
	self->tracer_args.poke_addr = (unsigned long)&self->poked;

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_poke,
					   &self->tracer_args);
}
  985. FIXTURE_TEARDOWN(TRACE_poke)
  986. {
  987. teardown_trace_fixture(_metadata, self->tracer);
  988. if (self->prog.filter)
  989. free(self->prog.filter);
  990. }
/*
 * A traced read(2) must trigger the tracer's poke: even though the
 * read itself fails (bad fd), self->poked is rewritten to 0x1001.
 */
TEST_F(TRACE_poke, read_has_side_effects)
{
	ssize_t ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	ret = read(-1, NULL, 0);
	EXPECT_EQ(-1, ret);
	/* The tracer poked 0x1001 into us during the read stop. */
	EXPECT_EQ(0x1001, self->poked);
}
/*
 * An unfiltered syscall (getpid) must not generate a trace event:
 * self->poked stays untouched.
 */
TEST_F(TRACE_poke, getpid_runs_normally)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	EXPECT_NE(0, syscall(__NR_getpid));
	EXPECT_EQ(0, self->poked);
}
  1014. #if defined(__x86_64__)
  1015. # define ARCH_REGS struct user_regs_struct
  1016. # define SYSCALL_NUM orig_rax
  1017. # define SYSCALL_RET rax
  1018. #elif defined(__i386__)
  1019. # define ARCH_REGS struct user_regs_struct
  1020. # define SYSCALL_NUM orig_eax
  1021. # define SYSCALL_RET eax
  1022. #elif defined(__arm__)
  1023. # define ARCH_REGS struct pt_regs
  1024. # define SYSCALL_NUM ARM_r7
  1025. # define SYSCALL_RET ARM_r0
  1026. #elif defined(__aarch64__)
  1027. # define ARCH_REGS struct user_pt_regs
  1028. # define SYSCALL_NUM regs[8]
  1029. # define SYSCALL_RET regs[0]
  1030. #elif defined(__powerpc__)
  1031. # define ARCH_REGS struct pt_regs
  1032. # define SYSCALL_NUM gpr[0]
  1033. # define SYSCALL_RET gpr[3]
  1034. #elif defined(__s390__)
  1035. # define ARCH_REGS s390_regs
  1036. # define SYSCALL_NUM gprs[2]
  1037. # define SYSCALL_RET gprs[2]
  1038. #else
  1039. # error "Do not know how to find your architecture's registers and syscalls"
  1040. #endif
  1041. /* Architecture-specific syscall fetching routine. */
/*
 * Architecture-specific syscall fetching routine.
 * Reads the tracee's registers via PTRACE_GETREGSET and returns the
 * in-flight syscall number (SYSCALL_NUM is arch-mapped above), or -1
 * if the register fetch failed.
 */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
	struct iovec iov;
	ARCH_REGS regs;

	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) {
		TH_LOG("PTRACE_GETREGSET failed");
		return -1;
	}

	return regs.SYSCALL_NUM;
}
  1054. /* Architecture-specific syscall changing routine. */
/*
 * Architecture-specific syscall changing routine.
 * Rewrites the tracee's pending syscall number to @syscall; a value of
 * -1 skips the syscall and forces its return value to 1.  Most arches
 * change the number through the register set; arm needs the dedicated
 * PTRACE_SET_SYSCALL request instead.
 */
void change_syscall(struct __test_metadata *_metadata,
		    pid_t tracee, int syscall)
{
	struct iovec iov;
	int ret;
	ARCH_REGS regs;

	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
	EXPECT_EQ(0, ret);

#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
	defined(__powerpc__) || defined(__s390__)
	{
		regs.SYSCALL_NUM = syscall;
	}
#elif defined(__arm__)
# ifndef PTRACE_SET_SYSCALL
# define PTRACE_SET_SYSCALL 23
# endif
	{
		ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall);
		EXPECT_EQ(0, ret);
	}
#else
	ASSERT_EQ(1, 0) {
		TH_LOG("How is the syscall changed on this architecture?");
	}
#endif

	/* If syscall is skipped, change return value. */
	if (syscall == -1)
		regs.SYSCALL_RET = 1;

	/* Write the (possibly modified) registers back. */
	ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
	EXPECT_EQ(0, ret);
}
/*
 * Tracer callback for the TRACE_syscall fixture: dispatch on the
 * SECCOMP_RET_TRACE data word (0x1002-0x1004) attached by the filter
 * and redirect/skip/allow the corresponding syscall.
 */
void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
		    int status, void *args)
{
	int ret;
	unsigned long msg;

	/* Make sure we got the right message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);

	/* Validate and take action on expected syscalls. */
	switch (msg) {
	case 0x1002:
		/* change getpid to getppid. */
		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
		change_syscall(_metadata, tracee, __NR_getppid);
		break;
	case 0x1003:
		/* skip gettid. */
		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
		change_syscall(_metadata, tracee, -1);
		break;
	case 0x1004:
		/* do nothing (allow getppid) */
		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
		break;
	default:
		EXPECT_EQ(0, msg) {
			TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
			kill(tracee, SIGKILL);
		}
	}
}
/* Per-test state for the TRACE_syscall fixture. */
FIXTURE_DATA(TRACE_syscall) {
	struct sock_fprog prog;			/* TRACE filter for getpid/gettid/getppid */
	pid_t tracer, mytid, mypid, parent;	/* tracer pid + reference ids */
};
/*
 * Build a filter that tags getpid (0x1002), gettid (0x1003), and
 * getppid (0x1004) with SECCOMP_RET_TRACE, record reference ids for
 * later comparison, and launch the syscall-rewriting tracer.
 */
FIXTURE_SETUP(TRACE_syscall)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Prepare some testable syscall results. */
	self->mytid = syscall(__NR_gettid);
	ASSERT_GT(self->mytid, 0);
	ASSERT_NE(self->mytid, 1) {
		TH_LOG("Running this test as init is not supported. :)");
	}

	self->mypid = getpid();
	ASSERT_GT(self->mypid, 0);
	/* Single-threaded process: tid == pid. */
	ASSERT_EQ(self->mytid, self->mypid);

	self->parent = getppid();
	ASSERT_GT(self->parent, 0);
	ASSERT_NE(self->parent, self->mypid);

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL);
}
  1157. FIXTURE_TEARDOWN(TRACE_syscall)
  1158. {
  1159. teardown_trace_fixture(_metadata, self->tracer);
  1160. if (self->prog.filter)
  1161. free(self->prog.filter);
  1162. }
/* getppid is traced with marker 0x1004: tracer leaves it untouched. */
TEST_F(TRACE_syscall, syscall_allowed)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* getppid works as expected (no changes). */
	EXPECT_EQ(self->parent, syscall(__NR_getppid));
	EXPECT_NE(self->mypid, syscall(__NR_getppid));
}
/* getpid is traced with marker 0x1002: tracer rewrites it to getppid. */
TEST_F(TRACE_syscall, syscall_redirected)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* getpid has been redirected to getppid as expected. */
	EXPECT_EQ(self->parent, syscall(__NR_getpid));
	EXPECT_NE(self->mypid, syscall(__NR_getpid));
}
/* gettid is traced with marker 0x1003: tracer skips it, forcing return 1. */
TEST_F(TRACE_syscall, syscall_dropped)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* gettid has been skipped and an altered return value stored. */
	EXPECT_EQ(1, syscall(__NR_gettid));
	EXPECT_NE(self->mytid, syscall(__NR_gettid));
}
  1196. #ifndef __NR_seccomp
  1197. # if defined(__i386__)
  1198. # define __NR_seccomp 354
  1199. # elif defined(__x86_64__)
  1200. # define __NR_seccomp 317
  1201. # elif defined(__arm__)
  1202. # define __NR_seccomp 383
  1203. # elif defined(__aarch64__)
  1204. # define __NR_seccomp 277
  1205. # elif defined(__powerpc__)
  1206. # define __NR_seccomp 358
  1207. # elif defined(__s390__)
  1208. # define __NR_seccomp 348
  1209. # else
  1210. # warning "seccomp syscall number unknown for this architecture"
  1211. # define __NR_seccomp 0xffff
  1212. # endif
  1213. #endif
  1214. #ifndef SECCOMP_SET_MODE_STRICT
  1215. #define SECCOMP_SET_MODE_STRICT 0
  1216. #endif
  1217. #ifndef SECCOMP_SET_MODE_FILTER
  1218. #define SECCOMP_SET_MODE_FILTER 1
  1219. #endif
  1220. #ifndef SECCOMP_FLAG_FILTER_TSYNC
  1221. #define SECCOMP_FLAG_FILTER_TSYNC 1
  1222. #endif
#ifndef seccomp
/*
 * Thin wrapper for the seccomp(2) syscall, for libcs without one.
 * Clears errno first so callers can test errno directly after the call.
 */
int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
{
	errno = 0;
	return syscall(__NR_seccomp, op, flags, filter);
}
#endif
/*
 * Exercise seccomp(2) argument validation: bogus ops, flags with
 * MODE_STRICT, and bad filter args must all be rejected, while a
 * valid MODE_FILTER install must succeed.  The wrapper zeroes errno
 * before each call, so the errno checks below are reliable.
 */
TEST(seccomp_syscall)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Reject insane operation. */
	ret = seccomp(-1, 0, &prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Did not reject crazy op value!");
	}

	/* Reject strict with flags or pointer. */
	ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Did not reject mode strict with flags!");
	}
	ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Did not reject mode strict with uargs!");
	}

	/* Reject insane args for filter. */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Did not reject crazy filter flags!");
	}
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
	EXPECT_EQ(EFAULT, errno) {
		TH_LOG("Did not reject NULL filter!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
	EXPECT_EQ(0, errno) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
			strerror(errno));
	}
}
  1276. TEST(seccomp_syscall_mode_lock)
  1277. {
  1278. struct sock_filter filter[] = {
  1279. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1280. };
  1281. struct sock_fprog prog = {
  1282. .len = (unsigned short)ARRAY_SIZE(filter),
  1283. .filter = filter,
  1284. };
  1285. long ret;
  1286. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  1287. ASSERT_EQ(0, ret) {
  1288. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1289. }
  1290. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
  1291. ASSERT_NE(ENOSYS, errno) {
  1292. TH_LOG("Kernel does not support seccomp syscall!");
  1293. }
  1294. EXPECT_EQ(0, ret) {
  1295. TH_LOG("Could not install filter!");
  1296. }
  1297. /* Make sure neither entry point will switch to strict. */
  1298. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
  1299. EXPECT_EQ(EINVAL, errno) {
  1300. TH_LOG("Switched to mode strict!");
  1301. }
  1302. ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
  1303. EXPECT_EQ(EINVAL, errno) {
  1304. TH_LOG("Switched to mode strict!");
  1305. }
  1306. }
  1307. TEST(TSYNC_first)
  1308. {
  1309. struct sock_filter filter[] = {
  1310. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1311. };
  1312. struct sock_fprog prog = {
  1313. .len = (unsigned short)ARRAY_SIZE(filter),
  1314. .filter = filter,
  1315. };
  1316. long ret;
  1317. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  1318. ASSERT_EQ(0, ret) {
  1319. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1320. }
  1321. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1322. &prog);
  1323. ASSERT_NE(ENOSYS, errno) {
  1324. TH_LOG("Kernel does not support seccomp syscall!");
  1325. }
  1326. EXPECT_EQ(0, ret) {
  1327. TH_LOG("Could not install initial filter with TSYNC!");
  1328. }
  1329. }
  1330. #define TSYNC_SIBLINGS 2
/* State handed to each tsync_sibling() thread. */
struct tsync_sibling {
	pthread_t tid;			/* pthread handle (0 = never started) */
	pid_t system_tid;		/* kernel tid, filled in by the thread */
	sem_t *started;			/* posted once the thread is ready */
	pthread_cond_t *cond;		/* wait/wake point shared with the test */
	pthread_mutex_t *mutex;		/* guards cond */
	int diverge;			/* nonzero: install own filter first */
	int num_waits;			/* cond waits before proceeding */
	struct sock_fprog *prog;	/* filter to apply when diverging */
	struct __test_metadata *metadata;	/* harness handle */
};
/* Per-test state for the TSYNC fixture. */
FIXTURE_DATA(TSYNC) {
	struct sock_fprog root_prog, apply_prog;	/* allow-all / kill-read filters */
	struct tsync_sibling sibling[TSYNC_SIBLINGS];	/* worker thread state */
	sem_t started;					/* sibling startup handshake */
	pthread_cond_t cond;				/* sibling wake signal */
	pthread_mutex_t mutex;				/* guards cond */
	int sibling_count;				/* siblings seen started */
};
/*
 * Build the two filters (root: allow everything; apply: kill on read(2))
 * and initialize the shared synchronization objects plus both sibling
 * descriptors.  Siblings are not started here; tests start them.
 */
FIXTURE_SETUP(TSYNC)
{
	struct sock_filter root_filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter apply_filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->root_prog, 0, sizeof(self->root_prog));
	memset(&self->apply_prog, 0, sizeof(self->apply_prog));
	memset(&self->sibling, 0, sizeof(self->sibling));
	/* Filters live on the heap so they survive past this stack frame. */
	self->root_prog.filter = malloc(sizeof(root_filter));
	ASSERT_NE(NULL, self->root_prog.filter);
	memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
	self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter);

	self->apply_prog.filter = malloc(sizeof(apply_filter));
	ASSERT_NE(NULL, self->apply_prog.filter);
	memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
	self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter);

	self->sibling_count = 0;
	pthread_mutex_init(&self->mutex, NULL);
	pthread_cond_init(&self->cond, NULL);
	sem_init(&self->started, 0, 0);
	/* Both siblings share the fixture's cond/mutex/semaphore. */
	self->sibling[0].tid = 0;
	self->sibling[0].cond = &self->cond;
	self->sibling[0].started = &self->started;
	self->sibling[0].mutex = &self->mutex;
	self->sibling[0].diverge = 0;
	self->sibling[0].num_waits = 1;
	self->sibling[0].prog = &self->root_prog;
	self->sibling[0].metadata = _metadata;
	self->sibling[1].tid = 0;
	self->sibling[1].cond = &self->cond;
	self->sibling[1].started = &self->started;
	self->sibling[1].mutex = &self->mutex;
	self->sibling[1].diverge = 0;
	self->sibling[1].prog = &self->root_prog;
	self->sibling[1].num_waits = 1;
	self->sibling[1].metadata = _metadata;
}
  1394. FIXTURE_TEARDOWN(TSYNC)
  1395. {
  1396. int sib = 0;
  1397. if (self->root_prog.filter)
  1398. free(self->root_prog.filter);
  1399. if (self->apply_prog.filter)
  1400. free(self->apply_prog.filter);
  1401. for ( ; sib < self->sibling_count; ++sib) {
  1402. struct tsync_sibling *s = &self->sibling[sib];
  1403. void *status;
  1404. if (!s->tid)
  1405. continue;
  1406. if (pthread_kill(s->tid, 0)) {
  1407. pthread_cancel(s->tid);
  1408. pthread_join(s->tid, &status);
  1409. }
  1410. }
  1411. pthread_mutex_destroy(&self->mutex);
  1412. pthread_cond_destroy(&self->cond);
  1413. sem_destroy(&self->started);
  1414. }
/*
 * Sibling thread body: optionally install its own filter (diverge),
 * announce readiness, wait for the test's broadcast(s), then verify
 * no_new_privs is set and attempt a read(2) — which a TSYNC'd kill
 * filter will turn fatal.  Returns a SIBLING_EXIT_* code (defined
 * earlier in this file) describing how far it got.
 */
void *tsync_sibling(void *data)
{
	long ret = 0;
	struct tsync_sibling *me = data;

	me->system_tid = syscall(__NR_gettid);

	pthread_mutex_lock(me->mutex);
	if (me->diverge) {
		/* Just re-apply the root prog to fork the tree */
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
				me->prog, 0, 0);
	}
	sem_post(me->started);
	/* Return outside of started so parent notices failures. */
	if (ret) {
		pthread_mutex_unlock(me->mutex);
		return (void *)SIBLING_EXIT_FAILURE;
	}
	do {
		pthread_cond_wait(me->cond, me->mutex);
		me->num_waits = me->num_waits - 1;
	} while (me->num_waits);
	pthread_mutex_unlock(me->mutex);

	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
	if (!ret)
		return (void *)SIBLING_EXIT_NEWPRIVS;
	/* Dies here if a TSYNC'd filter kills read(2). */
	read(0, NULL, 0);
	return (void *)SIBLING_EXIT_UNKILLED;
}
/*
 * Launch one sibling thread running tsync_sibling().
 * NOTE(review): pthread_create's return value is ignored — a failed
 * start would surface later as a sem_wait hang; confirm acceptable.
 */
void tsync_start_sibling(struct tsync_sibling *sibling)
{
	pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);
}
/*
 * Install a filter that makes prctl(2) fail with EINVAL, then have
 * sibling 0 diverge (its own prctl filter install must fail) while
 * sibling 1 proceeds untouched.
 */
TEST_F(TSYNC, siblings_fail_prctl)
{
	long ret;
	void *status;
	/* Filter returns EINVAL for prctl(2), allows everything else. */
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Check prctl failure detection by requesting sib 0 diverge. */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("setting filter failed");
	}

	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Signal the threads to clean up. */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure diverging sibling failed to call prctl. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
  1492. TEST_F(TSYNC, two_siblings_with_ancestor)
  1493. {
  1494. long ret;
  1495. void *status;
  1496. ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
  1497. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1498. }
  1499. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
  1500. ASSERT_NE(ENOSYS, errno) {
  1501. TH_LOG("Kernel does not support seccomp syscall!");
  1502. }
  1503. ASSERT_EQ(0, ret) {
  1504. TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
  1505. }
  1506. tsync_start_sibling(&self->sibling[0]);
  1507. tsync_start_sibling(&self->sibling[1]);
  1508. while (self->sibling_count < TSYNC_SIBLINGS) {
  1509. sem_wait(&self->started);
  1510. self->sibling_count++;
  1511. }
  1512. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1513. &self->apply_prog);
  1514. ASSERT_EQ(0, ret) {
  1515. TH_LOG("Could install filter on all threads!");
  1516. }
  1517. /* Tell the siblings to test the policy */
  1518. pthread_mutex_lock(&self->mutex);
  1519. ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
  1520. TH_LOG("cond broadcast non-zero");
  1521. }
  1522. pthread_mutex_unlock(&self->mutex);
  1523. /* Ensure they are both killed and don't exit cleanly. */
  1524. pthread_join(self->sibling[0].tid, &status);
  1525. EXPECT_EQ(0x0, (long)status);
  1526. pthread_join(self->sibling[1].tid, &status);
  1527. EXPECT_EQ(0x0, (long)status);
  1528. }
/*
 * Without PR_SET_NO_NEW_PRIVS anywhere, both siblings must detect the
 * missing nnp bit and bail with SIBLING_EXIT_NEWPRIVS.
 */
TEST_F(TSYNC, two_sibling_want_nnp)
{
	void *status;

	/* start siblings before any prctl() operations */
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Tell the siblings to test no policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both upset about lacking nnp. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
}
  1551. TEST_F(TSYNC, two_siblings_with_no_filter)
  1552. {
  1553. long ret;
  1554. void *status;
  1555. /* start siblings before any prctl() operations */
  1556. tsync_start_sibling(&self->sibling[0]);
  1557. tsync_start_sibling(&self->sibling[1]);
  1558. while (self->sibling_count < TSYNC_SIBLINGS) {
  1559. sem_wait(&self->started);
  1560. self->sibling_count++;
  1561. }
  1562. ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
  1563. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1564. }
  1565. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1566. &self->apply_prog);
  1567. ASSERT_NE(ENOSYS, errno) {
  1568. TH_LOG("Kernel does not support seccomp syscall!");
  1569. }
  1570. ASSERT_EQ(0, ret) {
  1571. TH_LOG("Could install filter on all threads!");
  1572. }
  1573. /* Tell the siblings to test the policy */
  1574. pthread_mutex_lock(&self->mutex);
  1575. ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
  1576. TH_LOG("cond broadcast non-zero");
  1577. }
  1578. pthread_mutex_unlock(&self->mutex);
  1579. /* Ensure they are both killed and don't exit cleanly. */
  1580. pthread_join(self->sibling[0].tid, &status);
  1581. EXPECT_EQ(0x0, (long)status);
  1582. pthread_join(self->sibling[1].tid, &status);
  1583. EXPECT_EQ(0x0, (long)status);
  1584. }
/*
 * If one sibling's filter tree has diverged from the ancestor's, a
 * TSYNC install must fail and return that sibling's kernel tid; no
 * thread receives the new policy, so both siblings survive read(2).
 */
TEST_F(TSYNC, two_siblings_with_one_divergence)
{
	long ret;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	/* TSYNC failure reports the first unsynchronizable thread's tid. */
	ASSERT_EQ(self->sibling[0].system_tid, ret) {
		TH_LOG("Did not fail on diverged sibling.");
	}

	/* Wake the threads */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both unkilled. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
  1623. TEST_F(TSYNC, two_siblings_not_under_filter)
  1624. {
  1625. long ret, sib;
  1626. void *status;
  1627. ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
  1628. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1629. }
  1630. /*
  1631. * Sibling 0 will have its own seccomp policy
  1632. * and Sibling 1 will not be under seccomp at
  1633. * all. Sibling 1 will enter seccomp and 0
  1634. * will cause failure.
  1635. */
  1636. self->sibling[0].diverge = 1;
  1637. tsync_start_sibling(&self->sibling[0]);
  1638. tsync_start_sibling(&self->sibling[1]);
  1639. while (self->sibling_count < TSYNC_SIBLINGS) {
  1640. sem_wait(&self->started);
  1641. self->sibling_count++;
  1642. }
  1643. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
  1644. ASSERT_NE(ENOSYS, errno) {
  1645. TH_LOG("Kernel does not support seccomp syscall!");
  1646. }
  1647. ASSERT_EQ(0, ret) {
  1648. TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
  1649. }
  1650. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1651. &self->apply_prog);
  1652. ASSERT_EQ(ret, self->sibling[0].system_tid) {
  1653. TH_LOG("Did not fail on diverged sibling.");
  1654. }
  1655. sib = 1;
  1656. if (ret == self->sibling[0].system_tid)
  1657. sib = 0;
  1658. pthread_mutex_lock(&self->mutex);
  1659. /* Increment the other siblings num_waits so we can clean up
  1660. * the one we just saw.
  1661. */
  1662. self->sibling[!sib].num_waits += 1;
  1663. /* Signal the thread to clean up*/
  1664. ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
  1665. TH_LOG("cond broadcast non-zero");
  1666. }
  1667. pthread_mutex_unlock(&self->mutex);
  1668. pthread_join(self->sibling[sib].tid, &status);
  1669. EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
  1670. /* Poll for actual task death. pthread_join doesn't guarantee it. */
  1671. while (!kill(self->sibling[sib].system_tid, 0))
  1672. sleep(0.1);
  1673. /* Switch to the remaining sibling */
  1674. sib = !sib;
  1675. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1676. &self->apply_prog);
  1677. ASSERT_EQ(0, ret) {
  1678. TH_LOG("Expected the remaining sibling to sync");
  1679. };
  1680. pthread_mutex_lock(&self->mutex);
  1681. /* If remaining sibling didn't have a chance to wake up during
  1682. * the first broadcast, manually reduce the num_waits now.
  1683. */
  1684. if (self->sibling[sib].num_waits > 1)
  1685. self->sibling[sib].num_waits = 1;
  1686. ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
  1687. TH_LOG("cond broadcast non-zero");
  1688. }
  1689. pthread_mutex_unlock(&self->mutex);
  1690. pthread_join(self->sibling[sib].tid, &status);
  1691. EXPECT_EQ(0, (long)status);
  1692. /* Poll for actual task death. pthread_join doesn't guarantee it. */
  1693. while (!kill(self->sibling[sib].system_tid, 0))
  1694. sleep(0.1);
  1695. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1696. &self->apply_prog);
  1697. ASSERT_EQ(0, ret); /* just us chickens */
  1698. }
/* Make sure restarted syscalls are seen directly as "restart_syscall". */
/*
 * The child installs a filter that TRACEs poll (msg 0x100) and
 * restart_syscall (msg 0x200). The parent attaches as a ptrace
 * tracer, interrupts the child's poll() with SIGSTOP/SIGCONT, and
 * verifies that the restarted syscall shows up at the seccomp trace
 * stop as __NR_restart_syscall (except on ARM — see below).
 */
TEST(syscall_restart)
{
	long ret;
	unsigned long msg;
	pid_t child_pid;
	int pipefd[2];
	int status;
	siginfo_t info = { };
	/*
	 * Jump offsets: read/exit/rt_sigreturn (and sigreturn, where it
	 * exists) land on RET_ALLOW; poll lands on TRACE|0x100;
	 * restart_syscall lands on TRACE|0x200; write falls through to
	 * ALLOW; anything else skips ALLOW and hits RET_KILL.
	 */
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
#ifdef __NR_sigreturn
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0),
#endif
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_poll, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),
		/* Allow __NR_write for easy logging. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), /* poll */
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), /* restart */
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};

	/* Pipe is used both for sync bytes and as the poll() target. */
	ASSERT_EQ(0, pipe(pipefd));

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		/* Child uses EXPECT not ASSERT to deliver status correctly. */
		char buf = ' ';
		struct pollfd fds = {
			.fd = pipefd[0],
			.events = POLLIN,
		};

		/* Attach parent as tracer and stop. */
		EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
		EXPECT_EQ(0, raise(SIGSTOP));

		EXPECT_EQ(0, close(pipefd[1]));

		EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
			TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
		}
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		EXPECT_EQ(0, ret) {
			TH_LOG("Failed to install filter!");
		}

		/* First sync byte: parent has set ptrace options. */
		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed to read() sync from parent");
		}
		EXPECT_EQ('.', buf) {
			TH_LOG("Failed to get sync data from read()");
		}

		/* Start poll to be interrupted. */
		errno = 0;
		EXPECT_EQ(1, poll(&fds, 1, -1)) {
			TH_LOG("Call to poll() failed (errno %d)", errno);
		}

		/* Read final sync from parent. */
		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed final read() from parent");
		}
		EXPECT_EQ('!', buf) {
			TH_LOG("Failed to get final data from read()");
		}

		/* Directly report the status of our test harness results. */
		syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
						     : EXIT_FAILURE);
	}
	EXPECT_EQ(0, close(pipefd[0]));

	/* Attach to child, setup options, and release. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
			    PTRACE_O_TRACESECCOMP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], ".", 1));

	/* Wait for poll() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	/* High bits of the wait status carry the ptrace event. */
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	ASSERT_EQ(0x100, msg);
	EXPECT_EQ(__NR_poll, get_syscall(_metadata, child_pid));

	/* Might as well check siginfo for sanity while we're here. */
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	ASSERT_EQ(SIGTRAP, info.si_signo);
	ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
	EXPECT_EQ(0, info.si_errno);
	EXPECT_EQ(getuid(), info.si_uid);
	/* Verify signal delivery came from child (seccomp-triggered). */
	EXPECT_EQ(child_pid, info.si_pid);

	/* Interrupt poll with SIGSTOP (which we'll need to handle). */
	ASSERT_EQ(0, kill(child_pid, SIGSTOP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
	/* Verify signal delivery came from parent now. */
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	EXPECT_EQ(getpid(), info.si_pid);

	/* Restart poll with SIGCONT, which triggers restart_syscall. */
	ASSERT_EQ(0, kill(child_pid, SIGCONT));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGCONT, WSTOPSIG(status));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));

	/* Wait for restart_syscall() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	/* 0x200 is the TRACE data attached to restart_syscall above. */
	ASSERT_EQ(0x200, msg);
	ret = get_syscall(_metadata, child_pid);
#if defined(__arm__)
	/* FIXME: ARM does not expose true syscall in registers. */
	EXPECT_EQ(__NR_poll, ret);
#else
	EXPECT_EQ(__NR_restart_syscall, ret);
#endif

	/* Write again to end poll. */
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], "!", 1));
	EXPECT_EQ(0, close(pipefd[1]));

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	/* Propagate any child-side failure into this test's result. */
	if (WIFSIGNALED(status) || WEXITSTATUS(status))
		_metadata->passed = 0;
}
  1835. /*
  1836. * TODO:
  1837. * - add microbenchmarks
  1838. * - expand NNP testing
  1839. * - better arch-specific TRACE and TRAP handlers.
  1840. * - endianness checking when appropriate
  1841. * - 64-bit arg prodding
  1842. * - arch value testing (x86 modes especially)
  1843. * - ...
  1844. */
  1845. TEST_HARNESS_MAIN