seccomp_bpf.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109
  1. /*
  2. * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
  3. * Use of this source code is governed by the GPLv2 license.
  4. *
  5. * Test code for seccomp bpf.
  6. */
  7. #include <asm/siginfo.h>
  8. #define __have_siginfo_t 1
  9. #define __have_sigval_t 1
  10. #define __have_sigevent_t 1
  11. #include <errno.h>
  12. #include <linux/filter.h>
  13. #include <sys/prctl.h>
  14. #include <sys/ptrace.h>
  15. #include <sys/user.h>
  16. #include <linux/prctl.h>
  17. #include <linux/ptrace.h>
  18. #include <linux/seccomp.h>
  19. #include <poll.h>
  20. #include <pthread.h>
  21. #include <semaphore.h>
  22. #include <signal.h>
  23. #include <stddef.h>
  24. #include <stdbool.h>
  25. #include <string.h>
  26. #include <linux/elf.h>
  27. #include <sys/uio.h>
  28. #define _GNU_SOURCE
  29. #include <unistd.h>
  30. #include <sys/syscall.h>
  31. #include "test_harness.h"
  32. #ifndef PR_SET_PTRACER
  33. # define PR_SET_PTRACER 0x59616d61
  34. #endif
  35. #ifndef PR_SET_NO_NEW_PRIVS
  36. #define PR_SET_NO_NEW_PRIVS 38
  37. #define PR_GET_NO_NEW_PRIVS 39
  38. #endif
  39. #ifndef PR_SECCOMP_EXT
  40. #define PR_SECCOMP_EXT 43
  41. #endif
  42. #ifndef SECCOMP_EXT_ACT
  43. #define SECCOMP_EXT_ACT 1
  44. #endif
  45. #ifndef SECCOMP_EXT_ACT_TSYNC
  46. #define SECCOMP_EXT_ACT_TSYNC 1
  47. #endif
  48. #ifndef SECCOMP_MODE_STRICT
  49. #define SECCOMP_MODE_STRICT 1
  50. #endif
  51. #ifndef SECCOMP_MODE_FILTER
  52. #define SECCOMP_MODE_FILTER 2
  53. #endif
  54. #ifndef SECCOMP_RET_KILL
  55. #define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */
  56. #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
  57. #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
  58. #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
  59. #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
  60. /* Masks for the return value sections. */
  61. #define SECCOMP_RET_ACTION 0x7fff0000U
  62. #define SECCOMP_RET_DATA 0x0000ffffU
  63. struct seccomp_data {
  64. int nr;
  65. __u32 arch;
  66. __u64 instruction_pointer;
  67. __u64 args[6];
  68. };
  69. #endif
  70. #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
  71. #define SIBLING_EXIT_UNKILLED 0xbadbeef
  72. #define SIBLING_EXIT_FAILURE 0xbadface
  73. #define SIBLING_EXIT_NEWPRIVS 0xbadfeed
/* Verifies the kernel was built with CONFIG_SECCOMP: entering strict mode
 * must succeed. */
TEST(mode_strict_support)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	/* Strict mode only allows read/write/exit/sigreturn, so exit via the
	 * raw syscall rather than returning through the harness. */
	syscall(__NR_exit, 1);
}
/* Once in strict mode, any non-whitelisted syscall (here prctl) must
 * kill the task with SIGKILL — the harness expects that signal. */
TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	/* Use the raw syscall so libc cannot interfere; this must not return. */
	syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
		NULL, NULL, NULL);
	EXPECT_FALSE(true) {
		TH_LOG("Unreachable!");
	}
}
/* Note! This doesn't test no new privs behavior */
/* Only checks that PR_SET_NO_NEW_PRIVS is accepted by the kernel. */
TEST(no_new_privs_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	EXPECT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
}
/* Tests kernel support by checking for a copy_from_user() fault on NULL:
 * a filter-capable kernel returns EFAULT for a NULL prog pointer. */
TEST(mode_filter_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EFAULT, errno) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
	}
}
/* Installing a filter without NO_NEW_PRIVS requires privilege: expected
 * to fail with EACCES for an unprivileged (non-root) caller. */
TEST(mode_filter_without_nnp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
	ASSERT_LE(0, ret) {
		TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
	}
	errno = 0;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	/* Succeeds with CAP_SYS_ADMIN, fails without */
	/* TODO(wad) check caps not euid */
	if (geteuid()) {
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EACCES, errno);
	} else {
		EXPECT_EQ(0, ret);
	}
}
  144. #define MAX_INSNS_PER_PATH 32768
/* A single filter may hold at most BPF_MAXINSNS instructions; one more
 * must be rejected, one fewer must be accepted. */
TEST(filter_size_limits)
{
	int i;
	int count = BPF_MAXINSNS + 1;	/* deliberately one past the limit */
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);
	/* Fill the whole program with harmless "allow" instructions. */
	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = count;
	/* Too many filter instructions in a single filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_NE(0, ret) {
		TH_LOG("Installing %d insn filter was allowed", prog.len);
	}
	/* One less is okay, though. */
	prog.len -= 1;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
	}
}
/* Stacking filters is bounded by a total-instruction budget (each attach
 * also carries a fixed per-filter penalty); repeatedly attaching a
 * maximum-size filter must eventually be refused. */
TEST(filter_chain_limits)
{
	int i;
	int count = BPF_MAXINSNS;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);
	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	/* A tiny first filter must attach fine. */
	prog.len = 1;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	prog.len = count;
	/* Too many total filter instructions. */
	for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		if (ret != 0)
			break;
	}
	ASSERT_NE(0, ret) {
		TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
		       i, count, i * (count + 4));
	}
}
/* Once filter mode is active, downgrading to strict mode must be
 * rejected with EINVAL. */
TEST(mode_filter_cannot_move_to_strict)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}
/* PR_GET_SECCOMP reports 0 before and SECCOMP_MODE_FILTER (2) after a
 * filter is installed. */
TEST(mode_filter_get_seccomp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(2, ret);	/* SECCOMP_MODE_FILTER */
}
/* An unconditional allow-all filter must install successfully.
 * (prctl() is variadic, so the trailing unused args are omitted here.) */
TEST(ALLOW_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}
/* A zero-length filter program must be rejected with EINVAL. */
TEST(empty_prog)
{
	struct sock_filter filter[] = {
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}
/* A return value with an unrecognized action (0x10000000, inside the
 * action range but unassigned) must behave as KILL, delivering SIGSYS. */
TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}
/* return code >= 0x80000000 is unused. */
/* A value above SECCOMP_RET_ALLOW must likewise act as KILL (SIGSYS). */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}
/* A kill-everything filter: the very next syscall after install (the
 * harness's own) must raise SIGSYS. */
TEST_SIGNAL(KILL_all, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}
/* Kill exactly one syscall (getpid); everything else (e.g. getppid)
 * stays allowed. */
TEST_SIGNAL(KILL_one, SIGSYS)
{
	struct sock_filter filter[] = {
		/* Load the syscall number, match getpid -> KILL, else ALLOW. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/* Kill getpid only when its first argument equals the magic value; a
 * plain getpid() remains allowed. */
TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits of the arg for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	pid_t pid = getpid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(pid, syscall(__NR_getpid));
	/* getpid() with the magic argument should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid, 0x0C0FFEE));
}
/* Same as KILL_one_arg_one, but matching on the sixth syscall argument
 * (args[5]) to prove all argument slots are inspected. */
TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits of the arg for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	pid_t pid = getpid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(pid, syscall(__NR_getpid));
	/* getpid() with the magic sixth argument should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid, 1, 2, 3, 4, 5, 0x0C0FFEE));
}
/* TODO(wad) add 64-bit versus 32-bit arg tests. */
/* Loading past args[5] (index 6) is out of bounds for seccomp_data and
 * must cause the filter install itself to fail with EINVAL. */
TEST(arg_out_of_range)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}
/* SECCOMP_RET_ERRNO: a filtered read() must fail with the errno encoded
 * in the return value's data field (E2BIG here). */
TEST(ERRNO_valid)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(E2BIG, errno);
}
/* SECCOMP_RET_ERRNO with data 0 makes the syscall "succeed" returning 0. */
TEST(ERRNO_zero)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* "errno" of 0 is ok. */
	EXPECT_EQ(0, read(0, NULL, 0));
}
/* The errno payload is capped by the kernel at MAX_ERRNO (4095): a
 * requested value of 4096 must come back as 4095. */
TEST(ERRNO_capped)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 4096),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(4095, errno);
}
/* Per-test state for the TRAP fixture: a heap-allocated filter program
 * that traps getpid with SIGSYS (built in FIXTURE_SETUP). */
FIXTURE_DATA(TRAP) {
	struct sock_fprog prog;
};
/* Builds the TRAP fixture's filter on the heap (the stack array would
 * not survive past setup): getpid -> SECCOMP_RET_TRAP, else ALLOW. */
FIXTURE_SETUP(TRAP)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
}
  512. FIXTURE_TEARDOWN(TRAP)
  513. {
  514. if (self->prog.filter)
  515. free(self->prog.filter);
  516. }
/* With the default SIGSYS disposition, a trapped getpid() kills the
 * test process with SIGSYS. */
TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}
/* Ensure that SIGSYS overrides SIG_IGN */
TEST_F_SIGNAL(TRAP, ign, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	/* Ignoring SIGSYS must not prevent the seccomp-generated signal
	 * from terminating the process. */
	signal(SIGSYS, SIG_IGN);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}
/* siginfo captured by TRAP_action() for inspection in TEST_F(TRAP, handler). */
static struct siginfo TRAP_info;
/* Signal number seen by the handler; volatile: written in signal context,
 * read by the main flow. */
static volatile int TRAP_nr;
/* SA_SIGINFO handler for SIGSYS: records the signal number and a copy of
 * the full siginfo for later examination. */
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{
	memcpy(&TRAP_info, info, sizeof(TRAP_info));
	TRAP_nr = nr;
}
/* Installs a SIGSYS handler, trips the TRAP filter with getpid(), and
 * verifies the delivered siginfo carries syscall number, arch, and the
 * faulting instruction address. */
TEST_F(TRAP, handler)
{
	int ret, test;
	struct sigaction act;
	sigset_t mask;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);
	sigaddset(&mask, SIGSYS);

	act.sa_sigaction = &TRAP_action;
	act.sa_flags = SA_SIGINFO;
	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigaction failed");
	}
	/* Make sure SIGSYS is deliverable even if it was blocked. */
	ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigprocmask failed");
	}

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	TRAP_nr = 0;
	memset(&TRAP_info, 0, sizeof(TRAP_info));
	/* Expect the registers to be rolled back. (nr = error) may vary
	 * based on arch. */
	ret = syscall(__NR_getpid);
	/* Silence gcc warning about volatile. */
	test = TRAP_nr;
	EXPECT_EQ(SIGSYS, test);
	/* Mirror of the kernel's _sigsys siginfo member, used to reach
	 * the SIGSYS fields even on libcs whose siginfo lacks them:
	 * when si_syscall isn't defined, the fields live at si_pid's
	 * offset inside the union. */
	struct local_sigsys {
		void *_call_addr;	/* calling user insn */
		int _syscall;		/* triggering system call number */
		unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
	} *sigsys = (struct local_sigsys *)
#ifdef si_syscall
		&(TRAP_info.si_call_addr);
#else
		&TRAP_info.si_pid;
#endif
	EXPECT_EQ(__NR_getpid, sigsys->_syscall);
	/* Make sure arch is non-zero. */
	EXPECT_NE(0, sigsys->_arch);
	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}
/* One filter per seccomp action, used to verify which action wins when
 * multiple stacked filters disagree about a syscall. */
FIXTURE_DATA(precedence) {
	struct sock_fprog allow;
	struct sock_fprog trace;
	struct sock_fprog error;
	struct sock_fprog trap;
	struct sock_fprog kill;
};
/* Builds five heap-allocated filters: "allow" passes everything; each of
 * the others allows everything EXCEPT getpid, which it resolves to its
 * namesake action (TRACE/ERRNO/TRAP/KILL). */
FIXTURE_SETUP(precedence)
{
	struct sock_filter allow_insns[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter trace_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
	};
	struct sock_filter error_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
	};
	struct sock_filter trap_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
	};
	struct sock_filter kill_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};

	memset(self, 0, sizeof(*self));
	/* Copy the named on-stack program into self->_x on the heap. */
#define FILTER_ALLOC(_x) \
	self->_x.filter = malloc(sizeof(_x##_insns)); \
	ASSERT_NE(NULL, self->_x.filter); \
	memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
	self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
	FILTER_ALLOC(allow);
	FILTER_ALLOC(trace);
	FILTER_ALLOC(error);
	FILTER_ALLOC(trap);
	FILTER_ALLOC(kill);
}
  641. FIXTURE_TEARDOWN(precedence)
  642. {
  643. #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
  644. FILTER_FREE(allow);
  645. FILTER_FREE(trace);
  646. FILTER_FREE(error);
  647. FILTER_FREE(trap);
  648. FILTER_FREE(kill);
  649. }
/* With all five filters stacked, a syscall no filter restricts
 * (getppid) must still succeed. */
TEST_F(precedence, allow_ok)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
}
/* With all actions stacked, KILL (lowest numeric action value) takes
 * precedence for getpid, delivering SIGSYS. */
TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
	/* getpid() should never return. */
	res = syscall(__NR_getpid);
	EXPECT_EQ(0, res);
}
/* KILL must win regardless of the order in which the filters were
 * attached (kill installed second here). */
TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/* Without the kill filter, TRAP is the highest-precedence remaining
 * action, so getpid raises SIGSYS via the trap path. */
TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
  737. TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
  738. {
  739. pid_t parent;
  740. long ret;
  741. parent = getppid();
  742. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  743. ASSERT_EQ(0, ret);
  744. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  745. ASSERT_EQ(0, ret);
  746. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
  747. ASSERT_EQ(0, ret);
  748. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  749. ASSERT_EQ(0, ret);
  750. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  751. ASSERT_EQ(0, ret);
  752. /* Should work just fine. */
  753. EXPECT_EQ(parent, syscall(__NR_getppid));
  754. /* getpid() should never return. */
  755. EXPECT_EQ(0, syscall(__NR_getpid));
  756. }
  757. TEST_F(precedence, errno_is_third)
  758. {
  759. pid_t parent;
  760. long ret;
  761. parent = getppid();
  762. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  763. ASSERT_EQ(0, ret);
  764. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  765. ASSERT_EQ(0, ret);
  766. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  767. ASSERT_EQ(0, ret);
  768. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  769. ASSERT_EQ(0, ret);
  770. /* Should work just fine. */
  771. EXPECT_EQ(parent, syscall(__NR_getppid));
  772. EXPECT_EQ(0, syscall(__NR_getpid));
  773. }
  774. TEST_F(precedence, errno_is_third_in_any_order)
  775. {
  776. pid_t parent;
  777. long ret;
  778. parent = getppid();
  779. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  780. ASSERT_EQ(0, ret);
  781. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  782. ASSERT_EQ(0, ret);
  783. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  784. ASSERT_EQ(0, ret);
  785. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  786. ASSERT_EQ(0, ret);
  787. /* Should work just fine. */
  788. EXPECT_EQ(parent, syscall(__NR_getppid));
  789. EXPECT_EQ(0, syscall(__NR_getpid));
  790. }
  791. TEST_F(precedence, trace_is_fourth)
  792. {
  793. pid_t parent;
  794. long ret;
  795. parent = getppid();
  796. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  797. ASSERT_EQ(0, ret);
  798. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  799. ASSERT_EQ(0, ret);
  800. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  801. ASSERT_EQ(0, ret);
  802. /* Should work just fine. */
  803. EXPECT_EQ(parent, syscall(__NR_getppid));
  804. /* No ptracer */
  805. EXPECT_EQ(-1, syscall(__NR_getpid));
  806. }
  807. TEST_F(precedence, trace_is_fourth_in_any_order)
  808. {
  809. pid_t parent;
  810. long ret;
  811. parent = getppid();
  812. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  813. ASSERT_EQ(0, ret);
  814. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  815. ASSERT_EQ(0, ret);
  816. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  817. ASSERT_EQ(0, ret);
  818. /* Should work just fine. */
  819. EXPECT_EQ(parent, syscall(__NR_getppid));
  820. /* No ptracer */
  821. EXPECT_EQ(-1, syscall(__NR_getpid));
  822. }
  823. #ifndef PTRACE_O_TRACESECCOMP
  824. #define PTRACE_O_TRACESECCOMP 0x00000080
  825. #endif
  826. /* Catch the Ubuntu 12.04 value error. */
  827. #if PTRACE_EVENT_SECCOMP != 7
  828. #undef PTRACE_EVENT_SECCOMP
  829. #endif
  830. #ifndef PTRACE_EVENT_SECCOMP
  831. #define PTRACE_EVENT_SECCOMP 7
  832. #endif
  833. #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
  834. bool tracer_running;
  835. void tracer_stop(int sig)
  836. {
  837. tracer_running = false;
  838. }
/* Callback invoked by tracer() once per trapped seccomp event. */
typedef void tracer_func_t(struct __test_metadata *_metadata,
			   pid_t tracee, int status, void *args);
/*
 * Body of the forked tracer process: attach to @tracee with ptrace,
 * enable PTRACE_O_TRACESECCOMP, signal readiness by writing one byte
 * to @fd, then hand every seccomp stop to @tracer_func until SIGUSR1
 * clears tracer_running or the tracee exits/dies.
 */
void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
	    tracer_func_t tracer_func, void *args)
{
	int ret = -1;
	struct sigaction action = {
		.sa_handler = tracer_stop,
	};

	/* Allow external shutdown. */
	tracer_running = true;
	ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));

	errno = 0;
	/* Retry the attach until it succeeds; give up only on EINVAL. */
	while (ret == -1 && errno != EINVAL)
		ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
	ASSERT_EQ(0, ret) {
		kill(tracee, SIGKILL);
	}
	/* Wait for attach stop */
	wait(NULL);

	ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, PTRACE_O_TRACESECCOMP);
	ASSERT_EQ(0, ret) {
		TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
		kill(tracee, SIGKILL);
	}
	ptrace(PTRACE_CONT, tracee, NULL, 0);

	/* Unblock the tracee */
	ASSERT_EQ(1, write(fd, "A", 1));
	ASSERT_EQ(0, close(fd));

	/* Run until we're shut down. Must assert to stop execution. */
	while (tracer_running) {
		int status;

		if (wait(&status) != tracee)
			continue;
		if (WIFSIGNALED(status) || WIFEXITED(status))
			/* Child is dead. Time to go. */
			return;
		/* Make sure this is a seccomp event. */
		ASSERT_EQ(true, IS_SECCOMP_EVENT(status));

		tracer_func(_metadata, tracee, status, args);

		ret = ptrace(PTRACE_CONT, tracee, NULL, NULL);
		ASSERT_EQ(0, ret);
	}
	/* Directly report the status of our test harness results. */
	syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
}
/* Common tracer setup/teardown functions. */

/*
 * No-op SIGALRM handler.  Installed by setup_trace_fixture(); presumably
 * lets an alarm interrupt blocking calls without terminating the
 * process — NOTE(review): intent inferred from the signal() call there,
 * confirm against full file.
 */
void cont_handler(int num)
{ }
/*
 * Fork a child that runs tracer() against the calling process, then
 * block on a pipe read until the tracer has attached and enabled
 * PTRACE_O_TRACESECCOMP.  Returns the tracer's pid, to be passed to
 * teardown_trace_fixture() later.
 */
pid_t setup_trace_fixture(struct __test_metadata *_metadata,
			  tracer_func_t func, void *args)
{
	char sync;
	int pipefd[2];
	pid_t tracer_pid;
	pid_t tracee = getpid();

	/* Setup a pipe for clean synchronization. */
	ASSERT_EQ(0, pipe(pipefd));

	/* Fork a child which we'll promote to tracer */
	tracer_pid = fork();
	ASSERT_LE(0, tracer_pid);
	signal(SIGALRM, cont_handler);
	if (tracer_pid == 0) {
		close(pipefd[0]);
		tracer(_metadata, pipefd[1], tracee, func, args);
		/* tracer() only returns on failure paths; exit regardless. */
		syscall(__NR_exit, 0);
	}
	close(pipefd[1]);
	/* Explicitly allow the child to ptrace us (e.g. under Yama). */
	prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
	/* Block until the tracer writes its "attached" byte. */
	read(pipefd[0], &sync, 1);
	close(pipefd[0]);
	return tracer_pid;
}
/*
 * Stop a tracer started by setup_trace_fixture() and fold its exit
 * status into this test's pass/fail state.
 */
void teardown_trace_fixture(struct __test_metadata *_metadata,
			    pid_t tracer)
{
	if (tracer) {
		int status;
		/*
		 * Extract the exit code from the other process and
		 * adopt it for ourselves in case its asserts failed.
		 */
		ASSERT_EQ(0, kill(tracer, SIGUSR1));
		ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
		/*
		 * NOTE(review): WEXITSTATUS() is read without checking
		 * WIFEXITED() first; a signaled tracer would be misread.
		 * Confirm this is intentional.
		 */
		if (WEXITSTATUS(status))
			_metadata->passed = 0;
	}
}
/* "poke" tracer arguments and function. */
struct tracer_args_poke_t {
	unsigned long poke_addr;	/* tracee address the tracer writes into */
};
/*
 * Tracer callback for TRACE_poke: verifies the filter's
 * SECCOMP_RET_TRACE payload (0x1001) and writes that value into the
 * tracee at the address supplied via struct tracer_args_poke_t.
 */
void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
		 void *args)
{
	int ret;
	unsigned long msg;
	struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;

	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	/* If this fails, don't try to recover. */
	ASSERT_EQ(0x1001, msg) {
		kill(tracee, SIGKILL);
	}
	/*
	 * Poke in the message.
	 * Registers are not touched to try to keep this relatively arch
	 * agnostic.
	 */
	ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
	EXPECT_EQ(0, ret);
}
/* Shared state for the TRACE_poke tests. */
FIXTURE_DATA(TRACE_poke) {
	struct sock_fprog prog;		/* TRACE-on-read filter */
	pid_t tracer;			/* pid of the forked tracer */
	long poked;			/* word the tracer overwrites */
	struct tracer_args_poke_t tracer_args;	/* carries &poked */
};
/*
 * Build a filter returning SECCOMP_RET_TRACE|0x1001 for read(2) and
 * SECCOMP_RET_ALLOW for everything else, then launch the poking
 * tracer aimed at self->poked (address captured before launch).
 */
FIXTURE_SETUP(TRACE_poke)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	self->poked = 0;
	memset(&self->prog, 0, sizeof(self->prog));
	/* Heap copy: the stack array dies when this function returns. */
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Set up tracer args. */
	self->tracer_args.poke_addr = (unsigned long)&self->poked;

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_poke,
					   &self->tracer_args);
}
  978. FIXTURE_TEARDOWN(TRACE_poke)
  979. {
  980. teardown_trace_fixture(_metadata, self->tracer);
  981. if (self->prog.filter)
  982. free(self->prog.filter);
  983. }
  984. TEST_F(TRACE_poke, read_has_side_effects)
  985. {
  986. ssize_t ret;
  987. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  988. ASSERT_EQ(0, ret);
  989. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  990. ASSERT_EQ(0, ret);
  991. EXPECT_EQ(0, self->poked);
  992. ret = read(-1, NULL, 0);
  993. EXPECT_EQ(-1, ret);
  994. EXPECT_EQ(0x1001, self->poked);
  995. }
  996. TEST_F(TRACE_poke, getpid_runs_normally)
  997. {
  998. long ret;
  999. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1000. ASSERT_EQ(0, ret);
  1001. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1002. ASSERT_EQ(0, ret);
  1003. EXPECT_EQ(0, self->poked);
  1004. EXPECT_NE(0, syscall(__NR_getpid));
  1005. EXPECT_EQ(0, self->poked);
  1006. }
  1007. #if defined(__x86_64__)
  1008. # define ARCH_REGS struct user_regs_struct
  1009. # define SYSCALL_NUM orig_rax
  1010. # define SYSCALL_RET rax
  1011. #elif defined(__i386__)
  1012. # define ARCH_REGS struct user_regs_struct
  1013. # define SYSCALL_NUM orig_eax
  1014. # define SYSCALL_RET eax
  1015. #elif defined(__arm__)
  1016. # define ARCH_REGS struct pt_regs
  1017. # define SYSCALL_NUM ARM_r7
  1018. # define SYSCALL_RET ARM_r0
  1019. #elif defined(__aarch64__)
  1020. # define ARCH_REGS struct user_pt_regs
  1021. # define SYSCALL_NUM regs[8]
  1022. # define SYSCALL_RET regs[0]
  1023. #else
  1024. # error "Do not know how to find your architecture's registers and syscalls"
  1025. #endif
/*
 * Architecture-specific syscall fetching routine.
 * Returns the stopped tracee's current syscall number, or -1 if the
 * register set could not be read.
 */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
	struct iovec iov;
	ARCH_REGS regs;

	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) {
		TH_LOG("PTRACE_GETREGSET failed");
		return -1;
	}
	/* SYSCALL_NUM is the per-arch register name defined above. */
	return regs.SYSCALL_NUM;
}
/*
 * Architecture-specific syscall changing routine.
 * Rewrites the stopped tracee's in-flight syscall number; when the
 * syscall is being skipped (@syscall == -1), also forces its return
 * value to 1 so the tracee can observe the skip.
 */
void change_syscall(struct __test_metadata *_metadata,
		    pid_t tracee, int syscall)
{
	struct iovec iov;
	int ret;
	ARCH_REGS regs;

	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
	EXPECT_EQ(0, ret);

#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
	{
		/* These arches expose the syscall number as a register. */
		regs.SYSCALL_NUM = syscall;
	}
#elif defined(__arm__)
# ifndef PTRACE_SET_SYSCALL
# define PTRACE_SET_SYSCALL 23
# endif
	{
		/* arm needs a dedicated ptrace request to change it. */
		ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall);
		EXPECT_EQ(0, ret);
	}
#else
	ASSERT_EQ(1, 0) {
		TH_LOG("How is the syscall changed on this architecture?");
	}
#endif

	/* If syscall is skipped, change return value. */
	if (syscall == -1)
		regs.SYSCALL_RET = 1;

	ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
	EXPECT_EQ(0, ret);
}
/*
 * Tracer callback for TRACE_syscall: dispatches on the filter's
 * SECCOMP_RET_TRACE payload to redirect, skip, or allow the syscall.
 */
void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
		    int status, void *args)
{
	int ret;
	unsigned long msg;

	/* Make sure we got the right message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);

	switch (msg) {
	case 0x1002:
		/* change getpid to getppid. */
		change_syscall(_metadata, tracee, __NR_getppid);
		break;
	case 0x1003:
		/* skip gettid. */
		change_syscall(_metadata, tracee, -1);
		break;
	case 0x1004:
		/* do nothing (allow getppid) */
		break;
	default:
		/* Any other payload means the filter and tracer disagree. */
		EXPECT_EQ(0, msg) {
			TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
			kill(tracee, SIGKILL);
		}
	}
}
/* Shared state for the TRACE_syscall tests. */
FIXTURE_DATA(TRACE_syscall) {
	struct sock_fprog prog;			/* TRACE-tagging filter */
	pid_t tracer, mytid, mypid, parent;	/* tracer pid + baseline ids */
};
/*
 * Build a filter tagging getpid (0x1002), gettid (0x1003), and
 * getppid (0x1004) with SECCOMP_RET_TRACE, record baseline tid/pid/
 * ppid values for later comparison, then launch the syscall-rewriting
 * tracer.
 */
FIXTURE_SETUP(TRACE_syscall)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	/* Heap copy: the stack array dies when this function returns. */
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Prepare some testable syscall results. */
	self->mytid = syscall(__NR_gettid);
	ASSERT_GT(self->mytid, 0);
	ASSERT_NE(self->mytid, 1) {
		TH_LOG("Running this test as init is not supported. :)");
	}
	self->mypid = getpid();
	ASSERT_GT(self->mypid, 0);
	/* Tests assume a single-threaded process: tid == pid. */
	ASSERT_EQ(self->mytid, self->mypid);
	self->parent = getppid();
	ASSERT_GT(self->parent, 0);
	ASSERT_NE(self->parent, self->mypid);

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL);
}
  1137. FIXTURE_TEARDOWN(TRACE_syscall)
  1138. {
  1139. teardown_trace_fixture(_metadata, self->tracer);
  1140. if (self->prog.filter)
  1141. free(self->prog.filter);
  1142. }
  1143. TEST_F(TRACE_syscall, syscall_allowed)
  1144. {
  1145. long ret;
  1146. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1147. ASSERT_EQ(0, ret);
  1148. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1149. ASSERT_EQ(0, ret);
  1150. /* getppid works as expected (no changes). */
  1151. EXPECT_EQ(self->parent, syscall(__NR_getppid));
  1152. EXPECT_NE(self->mypid, syscall(__NR_getppid));
  1153. }
  1154. TEST_F(TRACE_syscall, syscall_redirected)
  1155. {
  1156. long ret;
  1157. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1158. ASSERT_EQ(0, ret);
  1159. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1160. ASSERT_EQ(0, ret);
  1161. /* getpid has been redirected to getppid as expected. */
  1162. EXPECT_EQ(self->parent, syscall(__NR_getpid));
  1163. EXPECT_NE(self->mypid, syscall(__NR_getpid));
  1164. }
  1165. TEST_F(TRACE_syscall, syscall_dropped)
  1166. {
  1167. long ret;
  1168. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1169. ASSERT_EQ(0, ret);
  1170. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1171. ASSERT_EQ(0, ret);
  1172. /* gettid has been skipped and an altered return value stored. */
  1173. EXPECT_EQ(1, syscall(__NR_gettid));
  1174. EXPECT_NE(self->mytid, syscall(__NR_gettid));
  1175. }
  1176. #ifndef __NR_seccomp
  1177. # if defined(__i386__)
  1178. # define __NR_seccomp 354
  1179. # elif defined(__x86_64__)
  1180. # define __NR_seccomp 317
  1181. # elif defined(__arm__)
  1182. # define __NR_seccomp 383
  1183. # elif defined(__aarch64__)
  1184. # define __NR_seccomp 277
  1185. # else
  1186. # warning "seccomp syscall number unknown for this architecture"
  1187. # define __NR_seccomp 0xffff
  1188. # endif
  1189. #endif
  1190. #ifndef SECCOMP_SET_MODE_STRICT
  1191. #define SECCOMP_SET_MODE_STRICT 0
  1192. #endif
  1193. #ifndef SECCOMP_SET_MODE_FILTER
  1194. #define SECCOMP_SET_MODE_FILTER 1
  1195. #endif
  1196. #ifndef SECCOMP_FLAG_FILTER_TSYNC
  1197. #define SECCOMP_FLAG_FILTER_TSYNC 1
  1198. #endif
  1199. #ifndef seccomp
  1200. int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
  1201. {
  1202. errno = 0;
  1203. return syscall(__NR_seccomp, op, flags, filter);
  1204. }
  1205. #endif
  1206. TEST(seccomp_syscall)
  1207. {
  1208. struct sock_filter filter[] = {
  1209. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1210. };
  1211. struct sock_fprog prog = {
  1212. .len = (unsigned short)ARRAY_SIZE(filter),
  1213. .filter = filter,
  1214. };
  1215. long ret;
  1216. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1217. ASSERT_EQ(0, ret) {
  1218. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1219. }
  1220. /* Reject insane operation. */
  1221. ret = seccomp(-1, 0, &prog);
  1222. EXPECT_EQ(EINVAL, errno) {
  1223. TH_LOG("Did not reject crazy op value!");
  1224. }
  1225. /* Reject strict with flags or pointer. */
  1226. ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
  1227. EXPECT_EQ(EINVAL, errno) {
  1228. TH_LOG("Did not reject mode strict with flags!");
  1229. }
  1230. ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
  1231. EXPECT_EQ(EINVAL, errno) {
  1232. TH_LOG("Did not reject mode strict with uargs!");
  1233. }
  1234. /* Reject insane args for filter. */
  1235. ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
  1236. EXPECT_EQ(EINVAL, errno) {
  1237. TH_LOG("Did not reject crazy filter flags!");
  1238. }
  1239. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
  1240. EXPECT_EQ(EFAULT, errno) {
  1241. TH_LOG("Did not reject NULL filter!");
  1242. }
  1243. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
  1244. EXPECT_EQ(0, errno) {
  1245. TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
  1246. strerror(errno));
  1247. }
  1248. }
  1249. TEST(seccomp_syscall_mode_lock)
  1250. {
  1251. struct sock_filter filter[] = {
  1252. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1253. };
  1254. struct sock_fprog prog = {
  1255. .len = (unsigned short)ARRAY_SIZE(filter),
  1256. .filter = filter,
  1257. };
  1258. long ret;
  1259. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  1260. ASSERT_EQ(0, ret) {
  1261. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1262. }
  1263. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
  1264. EXPECT_EQ(0, ret) {
  1265. TH_LOG("Could not install filter!");
  1266. }
  1267. /* Make sure neither entry point will switch to strict. */
  1268. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
  1269. EXPECT_EQ(EINVAL, errno) {
  1270. TH_LOG("Switched to mode strict!");
  1271. }
  1272. ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
  1273. EXPECT_EQ(EINVAL, errno) {
  1274. TH_LOG("Switched to mode strict!");
  1275. }
  1276. }
  1277. TEST(TSYNC_first)
  1278. {
  1279. struct sock_filter filter[] = {
  1280. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1281. };
  1282. struct sock_fprog prog = {
  1283. .len = (unsigned short)ARRAY_SIZE(filter),
  1284. .filter = filter,
  1285. };
  1286. long ret;
  1287. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  1288. ASSERT_EQ(0, ret) {
  1289. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1290. }
  1291. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1292. &prog);
  1293. EXPECT_EQ(0, ret) {
  1294. TH_LOG("Could not install initial filter with TSYNC!");
  1295. }
  1296. }
#define TSYNC_SIBLINGS 2

/* Per-thread bookkeeping for the TSYNC sibling threads. */
struct tsync_sibling {
	pthread_t tid;		/* pthread handle (0 = never started) */
	pid_t system_tid;	/* kernel tid, filled in by the sibling */
	sem_t *started;		/* posted once the sibling has set up */
	pthread_cond_t *cond;	/* sibling waits here until broadcast */
	pthread_mutex_t *mutex;	/* guards cond and num_waits */
	int diverge;		/* if set, install own filter first */
	int num_waits;		/* broadcasts to consume before exiting */
	struct sock_fprog *prog;	/* filter used when diverging */
	struct __test_metadata *metadata;
};
/* Shared state for the TSYNC (thread-sync) tests. */
FIXTURE_DATA(TSYNC) {
	struct sock_fprog root_prog, apply_prog; /* allow-all + kill-on-read */
	struct tsync_sibling sibling[TSYNC_SIBLINGS];
	sem_t started;		/* posted once per started sibling */
	pthread_cond_t cond;	/* releases siblings into the test phase */
	pthread_mutex_t mutex;	/* guards cond and sibling state */
	int sibling_count;	/* siblings observed as started */
};
/*
 * Allocate the two filters (allow-all "root" and kill-on-read
 * "apply"), initialize shared synchronization primitives, and
 * pre-configure both sibling descriptors (threads not yet started).
 */
FIXTURE_SETUP(TSYNC)
{
	struct sock_filter root_filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter apply_filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->root_prog, 0, sizeof(self->root_prog));
	memset(&self->apply_prog, 0, sizeof(self->apply_prog));
	memset(&self->sibling, 0, sizeof(self->sibling));
	/* Heap copies: the stack arrays die when this function returns. */
	self->root_prog.filter = malloc(sizeof(root_filter));
	ASSERT_NE(NULL, self->root_prog.filter);
	memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
	self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter);
	self->apply_prog.filter = malloc(sizeof(apply_filter));
	ASSERT_NE(NULL, self->apply_prog.filter);
	memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
	self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter);

	self->sibling_count = 0;
	pthread_mutex_init(&self->mutex, NULL);
	pthread_cond_init(&self->cond, NULL);
	sem_init(&self->started, 0, 0);

	/* Both siblings share the fixture's cond/mutex/semaphore. */
	self->sibling[0].tid = 0;
	self->sibling[0].cond = &self->cond;
	self->sibling[0].started = &self->started;
	self->sibling[0].mutex = &self->mutex;
	self->sibling[0].diverge = 0;
	self->sibling[0].num_waits = 1;
	self->sibling[0].prog = &self->root_prog;
	self->sibling[0].metadata = _metadata;
	self->sibling[1].tid = 0;
	self->sibling[1].cond = &self->cond;
	self->sibling[1].started = &self->started;
	self->sibling[1].mutex = &self->mutex;
	self->sibling[1].diverge = 0;
	self->sibling[1].prog = &self->root_prog;
	self->sibling[1].num_waits = 1;
	self->sibling[1].metadata = _metadata;
}
/*
 * Free the filter allocations and reap any sibling threads that are
 * still around before destroying the shared synchronization objects.
 */
FIXTURE_TEARDOWN(TSYNC)
{
	int sib = 0;

	if (self->root_prog.filter)
		free(self->root_prog.filter);
	if (self->apply_prog.filter)
		free(self->apply_prog.filter);

	for ( ; sib < self->sibling_count; ++sib) {
		struct tsync_sibling *s = &self->sibling[sib];
		void *status;

		if (!s->tid)
			continue;
		/*
		 * pthread_kill(tid, 0) probes the thread without signaling.
		 * NOTE(review): cancel+join runs when the probe *fails*
		 * (non-zero return) — confirm this matches the intent.
		 */
		if (pthread_kill(s->tid, 0)) {
			pthread_cancel(s->tid);
			pthread_join(s->tid, &status);
		}
	}
	pthread_mutex_destroy(&self->mutex);
	pthread_cond_destroy(&self->cond);
	sem_destroy(&self->started);
}
/*
 * Thread body for the TSYNC tests.  Records its kernel tid, optionally
 * "diverges" by installing its own copy of the root filter, posts the
 * started semaphore, then sleeps on the condvar until released.  The
 * SIBLING_EXIT_* return value tells the parent what state was reached.
 */
void *tsync_sibling(void *data)
{
	long ret = 0;
	struct tsync_sibling *me = data;

	me->system_tid = syscall(__NR_gettid);

	pthread_mutex_lock(me->mutex);
	if (me->diverge) {
		/* Just re-apply the root prog to fork the tree */
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
			    me->prog, 0, 0);
	}
	sem_post(me->started);
	/* Return outside of started so parent notices failures. */
	if (ret) {
		pthread_mutex_unlock(me->mutex);
		return (void *)SIBLING_EXIT_FAILURE;
	}
	/* Consume num_waits broadcasts before proceeding. */
	do {
		pthread_cond_wait(me->cond, me->mutex);
		me->num_waits = me->num_waits - 1;
	} while (me->num_waits);
	pthread_mutex_unlock(me->mutex);

	/* A successful TSYNC must also have synced no-new-privs. */
	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
	if (!ret)
		return (void *)SIBLING_EXIT_NEWPRIVS;
	/* If the kill-on-read filter was synced to us, this never returns. */
	read(0, NULL, 0);
	return (void *)SIBLING_EXIT_UNKILLED;
}
/*
 * Launch one sibling thread running tsync_sibling().
 * NOTE(review): pthread_create's return value is not checked; a failed
 * start would leave tid == 0 (teardown skips those).
 */
void tsync_start_sibling(struct tsync_sibling *sibling)
{
	pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);
}
/*
 * With an ERRNO-on-prctl filter installed, the sibling asked to
 * diverge (re-apply a filter via prctl) must fail and exit with
 * SIBLING_EXIT_FAILURE, while the other sibling survives unkilled.
 */
TEST_F(TSYNC, siblings_fail_prctl)
{
	long ret;
	void *status;
	/* Filter that fails every prctl(2) with EINVAL. */
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Check prctl failure detection by requesting sib 0 diverge. */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
	ASSERT_EQ(0, ret) {
		TH_LOG("setting filter failed");
	}

	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	/* Wait until both siblings have posted the started semaphore. */
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Signal the threads to clean up*/
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure diverging sibling failed to call prctl. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
/*
 * With a shared ancestor filter already installed, TSYNC-applying the
 * kill-on-read filter must reach both siblings: both then die on their
 * read() and join with a zero (not SIBLING_EXIT_*) status.
 */
TEST_F(TSYNC, two_siblings_with_ancestor)
{
	long ret;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	/* Wait until both siblings have posted the started semaphore. */
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(0, ret) {
		TH_LOG("Could install filter on all threads!");
	}
	/* Tell the siblings to test the policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);
	/* Ensure they are both killed and don't exit cleanly. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(0x0, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(0x0, (long)status);
}
/*
 * Without PR_SET_NO_NEW_PRIVS ever being set, released siblings find
 * PR_GET_NO_NEW_PRIVS == 0 and exit with SIBLING_EXIT_NEWPRIVS.
 */
TEST_F(TSYNC, two_sibling_want_nnp)
{
	void *status;

	/* start siblings before any prctl() operations */
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Tell the siblings to test no policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both upset about lacking nnp. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
}
/*
 * TSYNC from a thread with no prior filter must still apply the
 * kill-on-read filter to siblings started beforehand: both die on
 * their read() and join with a zero status.
 */
TEST_F(TSYNC, two_siblings_with_no_filter)
{
	long ret;
	void *status;

	/* start siblings before any prctl() operations */
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(0, ret) {
		TH_LOG("Could install filter on all threads!");
	}

	/* Tell the siblings to test the policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both killed and don't exit cleanly. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(0x0, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(0x0, (long)status);
}
/*
 * A diverged sibling (own filter tree) must make a TSYNC apply fail;
 * the kernel reports the offending thread's tid as the return value
 * and nobody gets the new filter, so both siblings exit unkilled.
 */
TEST_F(TSYNC, two_siblings_with_one_divergence)
{
	long ret;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	/* Wait until both siblings have posted the started semaphore. */
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	/* TSYNC failure returns the tid of the unsynchronizable thread. */
	ASSERT_EQ(self->sibling[0].system_tid, ret) {
		TH_LOG("Did not fail on diverged sibling.");
	}

	/* Wake the threads */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both unkilled. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
  1578. TEST_F(TSYNC, two_siblings_not_under_filter)
  1579. {
  1580. long ret, sib;
  1581. void *status;
  1582. ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
  1583. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1584. }
  1585. /*
  1586. * Sibling 0 will have its own seccomp policy
  1587. * and Sibling 1 will not be under seccomp at
  1588. * all. Sibling 1 will enter seccomp and 0
  1589. * will cause failure.
  1590. */
  1591. self->sibling[0].diverge = 1;
  1592. tsync_start_sibling(&self->sibling[0]);
  1593. tsync_start_sibling(&self->sibling[1]);
  1594. while (self->sibling_count < TSYNC_SIBLINGS) {
  1595. sem_wait(&self->started);
  1596. self->sibling_count++;
  1597. }
  1598. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
  1599. ASSERT_EQ(0, ret) {
  1600. TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
  1601. }
  1602. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1603. &self->apply_prog);
  1604. ASSERT_EQ(ret, self->sibling[0].system_tid) {
  1605. TH_LOG("Did not fail on diverged sibling.");
  1606. }
  1607. sib = 1;
  1608. if (ret == self->sibling[0].system_tid)
  1609. sib = 0;
  1610. pthread_mutex_lock(&self->mutex);
  1611. /* Increment the other siblings num_waits so we can clean up
  1612. * the one we just saw.
  1613. */
  1614. self->sibling[!sib].num_waits += 1;
  1615. /* Signal the thread to clean up*/
  1616. ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
  1617. TH_LOG("cond broadcast non-zero");
  1618. }
  1619. pthread_mutex_unlock(&self->mutex);
  1620. pthread_join(self->sibling[sib].tid, &status);
  1621. EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
  1622. /* Poll for actual task death. pthread_join doesn't guarantee it. */
  1623. while (!kill(self->sibling[sib].system_tid, 0))
  1624. sleep(0.1);
  1625. /* Switch to the remaining sibling */
  1626. sib = !sib;
  1627. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1628. &self->apply_prog);
  1629. ASSERT_EQ(0, ret) {
  1630. TH_LOG("Expected the remaining sibling to sync");
  1631. };
  1632. pthread_mutex_lock(&self->mutex);
  1633. /* If remaining sibling didn't have a chance to wake up during
  1634. * the first broadcast, manually reduce the num_waits now.
  1635. */
  1636. if (self->sibling[sib].num_waits > 1)
  1637. self->sibling[sib].num_waits = 1;
  1638. ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
  1639. TH_LOG("cond broadcast non-zero");
  1640. }
  1641. pthread_mutex_unlock(&self->mutex);
  1642. pthread_join(self->sibling[sib].tid, &status);
  1643. EXPECT_EQ(0, (long)status);
  1644. /* Poll for actual task death. pthread_join doesn't guarantee it. */
  1645. while (!kill(self->sibling[sib].system_tid, 0))
  1646. sleep(0.1);
  1647. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1648. &self->apply_prog);
  1649. ASSERT_EQ(0, ret); /* just us chickens */
  1650. }
/*
 * Make sure restarted syscalls are seen directly as "restart_syscall".
 *
 * A child installs a filter that TRACEs poll (event data 0x100) and
 * restart_syscall (0x200); the tracing parent interrupts the child's
 * blocking poll() with SIGSTOP/SIGCONT and verifies that the resumed
 * syscall shows up as __NR_restart_syscall, not __NR_poll.
 */
TEST(syscall_restart)
{
	long ret;
	unsigned long msg;
	pid_t child_pid;
	int pipefd[2];
	int status;
	siginfo_t info = { };
	struct sock_filter filter[] = {
		/* A = syscall number */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
#ifdef __NR_sigreturn
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0),
#endif
		/* These jump targets all land on the RET_ALLOW below. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0),
		/* poll and restart_syscall jump to the two TRACE returns. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_poll, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),
		/* Allow __NR_write for easy logging. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Anything not matched above is killed. */
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), /* poll */
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), /* restart */
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};

	/* Pipe used both for parent->child sync and as the poll() target. */
	ASSERT_EQ(0, pipe(pipefd));

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		/* Child uses EXPECT not ASSERT to deliver status correctly. */
		char buf = ' ';
		struct pollfd fds = {
			.fd = pipefd[0],
			.events = POLLIN,
		};

		/* Attach parent as tracer and stop. */
		EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
		EXPECT_EQ(0, raise(SIGSTOP));

		EXPECT_EQ(0, close(pipefd[1]));

		EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
			TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
		}
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		EXPECT_EQ(0, ret) {
			TH_LOG("Failed to install filter!");
		}

		/* Wait for the parent's "." go-ahead byte. */
		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed to read() sync from parent");
		}
		EXPECT_EQ('.', buf) {
			TH_LOG("Failed to get sync data from read()");
		}

		/* Start poll to be interrupted. */
		errno = 0;
		EXPECT_EQ(1, poll(&fds, 1, -1)) {
			TH_LOG("Call to poll() failed (errno %d)", errno);
		}

		/* Read final sync from parent. */
		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed final read() from parent");
		}
		EXPECT_EQ('!', buf) {
			TH_LOG("Failed to get final data from read()");
		}

		/* Directly report the status of our test harness results. */
		syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
						     : EXIT_FAILURE);
	}
	EXPECT_EQ(0, close(pipefd[0]));

	/* Attach to child, setup options, and release. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
			    PTRACE_O_TRACESECCOMP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], ".", 1));

	/* Wait for poll() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	/* Event message must carry the filter's RET_TRACE data for poll. */
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	ASSERT_EQ(0x100, msg);
	EXPECT_EQ(__NR_poll, get_syscall(_metadata, child_pid));

	/* Might as well check siginfo for sanity while we're here. */
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	ASSERT_EQ(SIGTRAP, info.si_signo);
	ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
	EXPECT_EQ(0, info.si_errno);
	EXPECT_EQ(getuid(), info.si_uid);
	/* Verify signal delivery came from child (seccomp-triggered). */
	EXPECT_EQ(child_pid, info.si_pid);

	/* Interrupt poll with SIGSTOP (which we'll need to handle). */
	ASSERT_EQ(0, kill(child_pid, SIGSTOP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
	/* Verify signal delivery came from parent now. */
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	EXPECT_EQ(getpid(), info.si_pid);

	/* Restart poll with SIGCONT, which triggers restart_syscall. */
	ASSERT_EQ(0, kill(child_pid, SIGCONT));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGCONT, WSTOPSIG(status));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));

	/* Wait for restart_syscall() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	/* This time the event data must be the restart marker. */
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	ASSERT_EQ(0x200, msg);
	ret = get_syscall(_metadata, child_pid);
#if defined(__arm__)
	/* FIXME: ARM does not expose true syscall in registers. */
	EXPECT_EQ(__NR_poll, ret);
#else
	EXPECT_EQ(__NR_restart_syscall, ret);
#endif

	/* Write again to end poll. */
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], "!", 1));
	EXPECT_EQ(0, close(pipefd[1]));

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	if (WIFSIGNALED(status) || WEXITSTATUS(status))
		_metadata->passed = 0;
}
  1787. /*
  1788. * TODO:
  1789. * - add microbenchmarks
  1790. * - expand NNP testing
  1791. * - better arch-specific TRACE and TRAP handlers.
  1792. * - endianness checking when appropriate
  1793. * - 64-bit arg prodding
  1794. * - arch value testing (x86 modes especially)
  1795. * - ...
  1796. */
/* kselftest harness entry point: expands to main(), running all TEST()s. */
TEST_HARNESS_MAIN