seccomp_bpf.c

  1. /*
  2. * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
  3. * Use of this source code is governed by the GPLv2 license.
  4. *
  5. * Test code for seccomp bpf.
  6. */
  7. #include <asm/siginfo.h>
  8. #define __have_siginfo_t 1
  9. #define __have_sigval_t 1
  10. #define __have_sigevent_t 1
  11. #include <errno.h>
  12. #include <linux/filter.h>
  13. #include <sys/prctl.h>
  14. #include <sys/ptrace.h>
  15. #include <sys/types.h>
  16. #include <sys/user.h>
  17. #include <linux/prctl.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/seccomp.h>
  20. #include <pthread.h>
  21. #include <semaphore.h>
  22. #include <signal.h>
  23. #include <stddef.h>
  24. #include <stdbool.h>
  25. #include <string.h>
  26. #include <time.h>
  27. #include <linux/elf.h>
  28. #include <sys/uio.h>
  29. #include <sys/utsname.h>
  30. #include <sys/fcntl.h>
  31. #include <sys/mman.h>
  32. #include <sys/times.h>
  33. #define _GNU_SOURCE
  34. #include <unistd.h>
  35. #include <sys/syscall.h>
  36. #include "test_harness.h"
  37. #ifndef PR_SET_PTRACER
  38. # define PR_SET_PTRACER 0x59616d61
  39. #endif
  40. #ifndef PR_SET_NO_NEW_PRIVS
  41. #define PR_SET_NO_NEW_PRIVS 38
  42. #define PR_GET_NO_NEW_PRIVS 39
  43. #endif
  44. #ifndef PR_SECCOMP_EXT
  45. #define PR_SECCOMP_EXT 43
  46. #endif
  47. #ifndef SECCOMP_EXT_ACT
  48. #define SECCOMP_EXT_ACT 1
  49. #endif
  50. #ifndef SECCOMP_EXT_ACT_TSYNC
  51. #define SECCOMP_EXT_ACT_TSYNC 1
  52. #endif
  53. #ifndef SECCOMP_MODE_STRICT
  54. #define SECCOMP_MODE_STRICT 1
  55. #endif
  56. #ifndef SECCOMP_MODE_FILTER
  57. #define SECCOMP_MODE_FILTER 2
  58. #endif
  59. #ifndef SECCOMP_RET_KILL
  60. #define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */
  61. #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
  62. #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
  63. #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
  64. #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
  65. /* Masks for the return value sections. */
  66. #define SECCOMP_RET_ACTION 0x7fff0000U
  67. #define SECCOMP_RET_DATA 0x0000ffffU
  68. struct seccomp_data {
  69. int nr;
  70. __u32 arch;
  71. __u64 instruction_pointer;
  72. __u64 args[6];
  73. };
  74. #endif
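/*
 * syscall_arg(n) yields the offset of the low 32 bits of args[n] in
 * struct seccomp_data. Classic BPF loads are 32-bit, so on big-endian
 * machines the offset is bumped past the high word; the filters below
 * only inspect the low half of each argument.
 */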
  75. #if __BYTE_ORDER == __LITTLE_ENDIAN
  76. #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
  77. #elif __BYTE_ORDER == __BIG_ENDIAN
  78. #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
  79. #else
  80. #error "wut? Unknown __BYTE_ORDER?!"
  81. #endif
  82. #define SIBLING_EXIT_UNKILLED 0xbadbeef
  83. #define SIBLING_EXIT_FAILURE 0xbadface
  84. #define SIBLING_EXIT_NEWPRIVS 0xbadfeed
  85. TEST(mode_strict_support)
  86. {
  87. long ret;
  88. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
  89. ASSERT_EQ(0, ret) {
  90. TH_LOG("Kernel does not support CONFIG_SECCOMP");
  91. }
  92. syscall(__NR_exit, 1);
  93. }
  94. TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
  95. {
  96. long ret;
  97. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
  98. ASSERT_EQ(0, ret) {
  99. TH_LOG("Kernel does not support CONFIG_SECCOMP");
  100. }
  101. syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
  102. NULL, NULL, NULL);
  103. EXPECT_FALSE(true) {
  104. TH_LOG("Unreachable!");
  105. }
  106. }
  107. /* Note: this does not test no_new_privs behavior. */
  108. TEST(no_new_privs_support)
  109. {
  110. long ret;
  111. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  112. EXPECT_EQ(0, ret) {
  113. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  114. }
  115. }
  116. /* Tests kernel support by checking for a copy_from_user() fault on NULL. */
  117. TEST(mode_filter_support)
  118. {
  119. long ret;
  120. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  121. ASSERT_EQ(0, ret) {
  122. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  123. }
  124. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
  125. EXPECT_EQ(-1, ret);
  126. EXPECT_EQ(EFAULT, errno) {
  127. TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
  128. }
  129. }
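/*
 * Without no_new_privs, installing a filter requires privilege; the test
 * below expects EACCES for an unprivileged euid and success when run as
 * root (euid 0 is used here as a stand-in for CAP_SYS_ADMIN).
 */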
  130. TEST(mode_filter_without_nnp)
  131. {
  132. struct sock_filter filter[] = {
  133. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  134. };
  135. struct sock_fprog prog = {
  136. .len = (unsigned short)ARRAY_SIZE(filter),
  137. .filter = filter,
  138. };
  139. long ret;
  140. ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
  141. ASSERT_LE(0, ret) {
  142. TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
  143. }
  144. errno = 0;
  145. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  146. /* Succeeds with CAP_SYS_ADMIN, fails without */
  147. /* TODO(wad) check caps not euid */
  148. if (geteuid()) {
  149. EXPECT_EQ(-1, ret);
  150. EXPECT_EQ(EACCES, errno);
  151. } else {
  152. EXPECT_EQ(0, ret);
  153. }
  154. }
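/*
 * Filter size limits: a single program may not exceed BPF_MAXINSNS
 * instructions, and the combined size of all stacked filters (each
 * carrying a small per-filter overhead) is also bounded, which the two
 * tests below probe by installing filters until the kernel refuses.
 */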
  155. #define MAX_INSNS_PER_PATH 32768
  156. TEST(filter_size_limits)
  157. {
  158. int i;
  159. int count = BPF_MAXINSNS + 1;
  160. struct sock_filter allow[] = {
  161. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  162. };
  163. struct sock_filter *filter;
  164. struct sock_fprog prog = { };
  165. long ret;
  166. filter = calloc(count, sizeof(*filter));
  167. ASSERT_NE(NULL, filter);
  168. for (i = 0; i < count; i++)
  169. filter[i] = allow[0];
  170. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  171. ASSERT_EQ(0, ret);
  172. prog.filter = filter;
  173. prog.len = count;
  174. /* Too many filter instructions in a single filter. */
  175. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  176. ASSERT_NE(0, ret) {
  177. TH_LOG("Installing %d insn filter was allowed", prog.len);
  178. }
  179. /* One less is okay, though. */
  180. prog.len -= 1;
  181. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  182. ASSERT_EQ(0, ret) {
  183. TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
  184. }
  185. }
  186. TEST(filter_chain_limits)
  187. {
  188. int i;
  189. int count = BPF_MAXINSNS;
  190. struct sock_filter allow[] = {
  191. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  192. };
  193. struct sock_filter *filter;
  194. struct sock_fprog prog = { };
  195. long ret;
  196. filter = calloc(count, sizeof(*filter));
  197. ASSERT_NE(NULL, filter);
  198. for (i = 0; i < count; i++)
  199. filter[i] = allow[0];
  200. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  201. ASSERT_EQ(0, ret);
  202. prog.filter = filter;
  203. prog.len = 1;
  204. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  205. ASSERT_EQ(0, ret);
  206. prog.len = count;
  207. /* Too many total filter instructions. */
  208. for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
  209. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  210. if (ret != 0)
  211. break;
  212. }
  213. ASSERT_NE(0, ret) {
  214. TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
  215. i, count, i * (count + 4));
  216. }
  217. }
  218. TEST(mode_filter_cannot_move_to_strict)
  219. {
  220. struct sock_filter filter[] = {
  221. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  222. };
  223. struct sock_fprog prog = {
  224. .len = (unsigned short)ARRAY_SIZE(filter),
  225. .filter = filter,
  226. };
  227. long ret;
  228. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  229. ASSERT_EQ(0, ret);
  230. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  231. ASSERT_EQ(0, ret);
  232. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
  233. EXPECT_EQ(-1, ret);
  234. EXPECT_EQ(EINVAL, errno);
  235. }
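/*
 * PR_GET_SECCOMP reports the current mode: 0 before any filter is
 * attached and SECCOMP_MODE_FILTER (2) afterwards.
 */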
  236. TEST(mode_filter_get_seccomp)
  237. {
  238. struct sock_filter filter[] = {
  239. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  240. };
  241. struct sock_fprog prog = {
  242. .len = (unsigned short)ARRAY_SIZE(filter),
  243. .filter = filter,
  244. };
  245. long ret;
  246. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  247. ASSERT_EQ(0, ret);
  248. ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
  249. EXPECT_EQ(0, ret);
  250. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
  251. ASSERT_EQ(0, ret);
  252. ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
  253. EXPECT_EQ(2, ret);
  254. }
  255. TEST(ALLOW_all)
  256. {
  257. struct sock_filter filter[] = {
  258. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  259. };
  260. struct sock_fprog prog = {
  261. .len = (unsigned short)ARRAY_SIZE(filter),
  262. .filter = filter,
  263. };
  264. long ret;
  265. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  266. ASSERT_EQ(0, ret);
  267. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  268. ASSERT_EQ(0, ret);
  269. }
  270. TEST(empty_prog)
  271. {
  272. struct sock_filter filter[] = {
  273. };
  274. struct sock_fprog prog = {
  275. .len = (unsigned short)ARRAY_SIZE(filter),
  276. .filter = filter,
  277. };
  278. long ret;
  279. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  280. ASSERT_EQ(0, ret);
  281. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  282. EXPECT_EQ(-1, ret);
  283. EXPECT_EQ(EINVAL, errno);
  284. }
  285. TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
  286. {
  287. struct sock_filter filter[] = {
  288. BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
  289. };
  290. struct sock_fprog prog = {
  291. .len = (unsigned short)ARRAY_SIZE(filter),
  292. .filter = filter,
  293. };
  294. long ret;
  295. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  296. ASSERT_EQ(0, ret);
  297. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  298. ASSERT_EQ(0, ret);
  299. EXPECT_EQ(0, syscall(__NR_getpid)) {
  300. TH_LOG("getpid() shouldn't ever return");
  301. }
  302. }
  303. /* return code >= 0x80000000 is unused. */
  304. TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
  305. {
  306. struct sock_filter filter[] = {
  307. BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
  308. };
  309. struct sock_fprog prog = {
  310. .len = (unsigned short)ARRAY_SIZE(filter),
  311. .filter = filter,
  312. };
  313. long ret;
  314. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  315. ASSERT_EQ(0, ret);
  316. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  317. ASSERT_EQ(0, ret);
  318. EXPECT_EQ(0, syscall(__NR_getpid)) {
  319. TH_LOG("getpid() shouldn't ever return");
  320. }
  321. }
  322. TEST_SIGNAL(KILL_all, SIGSYS)
  323. {
  324. struct sock_filter filter[] = {
  325. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
  326. };
  327. struct sock_fprog prog = {
  328. .len = (unsigned short)ARRAY_SIZE(filter),
  329. .filter = filter,
  330. };
  331. long ret;
  332. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  333. ASSERT_EQ(0, ret);
  334. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  335. ASSERT_EQ(0, ret);
  336. }
  337. TEST_SIGNAL(KILL_one, SIGSYS)
  338. {
  339. struct sock_filter filter[] = {
  340. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  341. offsetof(struct seccomp_data, nr)),
  342. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
  343. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
  344. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  345. };
  346. struct sock_fprog prog = {
  347. .len = (unsigned short)ARRAY_SIZE(filter),
  348. .filter = filter,
  349. };
  350. long ret;
  351. pid_t parent = getppid();
  352. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  353. ASSERT_EQ(0, ret);
  354. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  355. ASSERT_EQ(0, ret);
  356. EXPECT_EQ(parent, syscall(__NR_getppid));
  357. /* getpid() should never return. */
  358. EXPECT_EQ(0, syscall(__NR_getpid));
  359. }
  360. TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
  361. {
  362. void *fatal_address;
  363. struct sock_filter filter[] = {
  364. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  365. offsetof(struct seccomp_data, nr)),
  366. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
  367. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  368. /* Only bother with the lower 32 bits for now. */
  369. BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
  370. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
  371. (unsigned long)&fatal_address, 0, 1),
  372. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
  373. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  374. };
  375. struct sock_fprog prog = {
  376. .len = (unsigned short)ARRAY_SIZE(filter),
  377. .filter = filter,
  378. };
  379. long ret;
  380. pid_t parent = getppid();
  381. struct tms timebuf;
  382. clock_t clock = times(&timebuf);
  383. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  384. ASSERT_EQ(0, ret);
  385. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  386. ASSERT_EQ(0, ret);
  387. EXPECT_EQ(parent, syscall(__NR_getppid));
  388. EXPECT_LE(clock, syscall(__NR_times, &timebuf));
  389. /* times() should never return. */
  390. EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
  391. }
  392. TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
  393. {
  394. #ifndef __NR_mmap2
  395. int sysno = __NR_mmap;
  396. #else
  397. int sysno = __NR_mmap2;
  398. #endif
  399. struct sock_filter filter[] = {
  400. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  401. offsetof(struct seccomp_data, nr)),
  402. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
  403. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  404. /* Only bother with the lower 32 bits for now. */
  405. BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
  406. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
  407. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
  408. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  409. };
  410. struct sock_fprog prog = {
  411. .len = (unsigned short)ARRAY_SIZE(filter),
  412. .filter = filter,
  413. };
  414. long ret;
  415. pid_t parent = getppid();
  416. int fd;
  417. void *map1, *map2;
  418. int page_size = sysconf(_SC_PAGESIZE);
  419. ASSERT_LT(0, page_size);
  420. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  421. ASSERT_EQ(0, ret);
  422. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  423. ASSERT_EQ(0, ret);
  424. fd = open("/dev/zero", O_RDONLY);
  425. ASSERT_NE(-1, fd);
  426. EXPECT_EQ(parent, syscall(__NR_getppid));
  427. map1 = (void *)syscall(sysno,
  428. NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
  429. EXPECT_NE(MAP_FAILED, map1);
  430. /* mmap2() should never return. */
  431. map2 = (void *)syscall(sysno,
  432. NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
  433. EXPECT_EQ(MAP_FAILED, map2);
  434. /* The test failed, so clean up the resources. */
  435. munmap(map1, page_size);
  436. munmap(map2, page_size);
  437. close(fd);
  438. }
  439. /* TODO(wad) add 64-bit versus 32-bit arg tests. */
  440. TEST(arg_out_of_range)
  441. {
  442. struct sock_filter filter[] = {
  443. BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
  444. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  445. };
  446. struct sock_fprog prog = {
  447. .len = (unsigned short)ARRAY_SIZE(filter),
  448. .filter = filter,
  449. };
  450. long ret;
  451. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  452. ASSERT_EQ(0, ret);
  453. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  454. EXPECT_EQ(-1, ret);
  455. EXPECT_EQ(EINVAL, errno);
  456. }
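/*
 * SECCOMP_RET_ERRNO: the low 16 bits of the return value
 * (SECCOMP_RET_DATA) are handed back to the caller as an errno without
 * running the syscall. A data value of 0 makes the call "succeed", and
 * values are clamped to the largest valid errno (4095), as the three
 * ERRNO_* tests below demonstrate.
 */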
  457. TEST(ERRNO_valid)
  458. {
  459. struct sock_filter filter[] = {
  460. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  461. offsetof(struct seccomp_data, nr)),
  462. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
  463. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG),
  464. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  465. };
  466. struct sock_fprog prog = {
  467. .len = (unsigned short)ARRAY_SIZE(filter),
  468. .filter = filter,
  469. };
  470. long ret;
  471. pid_t parent = getppid();
  472. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  473. ASSERT_EQ(0, ret);
  474. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  475. ASSERT_EQ(0, ret);
  476. EXPECT_EQ(parent, syscall(__NR_getppid));
  477. EXPECT_EQ(-1, read(0, NULL, 0));
  478. EXPECT_EQ(E2BIG, errno);
  479. }
  480. TEST(ERRNO_zero)
  481. {
  482. struct sock_filter filter[] = {
  483. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  484. offsetof(struct seccomp_data, nr)),
  485. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
  486. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0),
  487. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  488. };
  489. struct sock_fprog prog = {
  490. .len = (unsigned short)ARRAY_SIZE(filter),
  491. .filter = filter,
  492. };
  493. long ret;
  494. pid_t parent = getppid();
  495. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  496. ASSERT_EQ(0, ret);
  497. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  498. ASSERT_EQ(0, ret);
  499. EXPECT_EQ(parent, syscall(__NR_getppid));
  500. /* "errno" of 0 is ok. */
  501. EXPECT_EQ(0, read(0, NULL, 0));
  502. }
  503. TEST(ERRNO_capped)
  504. {
  505. struct sock_filter filter[] = {
  506. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  507. offsetof(struct seccomp_data, nr)),
  508. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
  509. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 4096),
  510. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  511. };
  512. struct sock_fprog prog = {
  513. .len = (unsigned short)ARRAY_SIZE(filter),
  514. .filter = filter,
  515. };
  516. long ret;
  517. pid_t parent = getppid();
  518. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  519. ASSERT_EQ(0, ret);
  520. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
  521. ASSERT_EQ(0, ret);
  522. EXPECT_EQ(parent, syscall(__NR_getppid));
  523. EXPECT_EQ(-1, read(0, NULL, 0));
  524. EXPECT_EQ(4095, errno);
  525. }
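/*
 * SECCOMP_RET_TRAP delivers a thread-directed SIGSYS instead of running
 * the syscall. The TRAP fixture installs a filter that traps getpid();
 * the tests check the default disposition, that SIG_IGN cannot mask the
 * signal, and that a SA_SIGINFO handler sees the syscall number, arch,
 * and faulting instruction address in the siginfo.
 */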
  526. FIXTURE_DATA(TRAP) {
  527. struct sock_fprog prog;
  528. };
  529. FIXTURE_SETUP(TRAP)
  530. {
  531. struct sock_filter filter[] = {
  532. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  533. offsetof(struct seccomp_data, nr)),
  534. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
  535. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
  536. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  537. };
  538. memset(&self->prog, 0, sizeof(self->prog));
  539. self->prog.filter = malloc(sizeof(filter));
  540. ASSERT_NE(NULL, self->prog.filter);
  541. memcpy(self->prog.filter, filter, sizeof(filter));
  542. self->prog.len = (unsigned short)ARRAY_SIZE(filter);
  543. }
  544. FIXTURE_TEARDOWN(TRAP)
  545. {
  546. if (self->prog.filter)
  547. free(self->prog.filter);
  548. }
  549. TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
  550. {
  551. long ret;
  552. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  553. ASSERT_EQ(0, ret);
  554. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
  555. ASSERT_EQ(0, ret);
  556. syscall(__NR_getpid);
  557. }
  558. /* Ensure that SIGSYS overrides SIG_IGN */
  559. TEST_F_SIGNAL(TRAP, ign, SIGSYS)
  560. {
  561. long ret;
  562. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  563. ASSERT_EQ(0, ret);
  564. signal(SIGSYS, SIG_IGN);
  565. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
  566. ASSERT_EQ(0, ret);
  567. syscall(__NR_getpid);
  568. }
  569. static struct siginfo TRAP_info;
  570. static volatile int TRAP_nr;
  571. static void TRAP_action(int nr, siginfo_t *info, void *void_context)
  572. {
  573. memcpy(&TRAP_info, info, sizeof(TRAP_info));
  574. TRAP_nr = nr;
  575. }
  576. TEST_F(TRAP, handler)
  577. {
  578. int ret, test;
  579. struct sigaction act;
  580. sigset_t mask;
  581. memset(&act, 0, sizeof(act));
  582. sigemptyset(&mask);
  583. sigaddset(&mask, SIGSYS);
  584. act.sa_sigaction = &TRAP_action;
  585. act.sa_flags = SA_SIGINFO;
  586. ret = sigaction(SIGSYS, &act, NULL);
  587. ASSERT_EQ(0, ret) {
  588. TH_LOG("sigaction failed");
  589. }
  590. ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
  591. ASSERT_EQ(0, ret) {
  592. TH_LOG("sigprocmask failed");
  593. }
  594. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  595. ASSERT_EQ(0, ret);
  596. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
  597. ASSERT_EQ(0, ret);
  598. TRAP_nr = 0;
  599. memset(&TRAP_info, 0, sizeof(TRAP_info));
  600. /* Expect the registers to be rolled back. (nr = error) may vary
  601. * based on arch. */
  602. ret = syscall(__NR_getpid);
  603. /* Silence gcc warning about volatile. */
  604. test = TRAP_nr;
  605. EXPECT_EQ(SIGSYS, test);
  606. struct local_sigsys {
  607. void *_call_addr; /* calling user insn */
  608. int _syscall; /* triggering system call number */
  609. unsigned int _arch; /* AUDIT_ARCH_* of syscall */
  610. } *sigsys = (struct local_sigsys *)
  611. #ifdef si_syscall
  612. &(TRAP_info.si_call_addr);
  613. #else
  614. &TRAP_info.si_pid;
  615. #endif
  616. EXPECT_EQ(__NR_getpid, sigsys->_syscall);
  617. /* Make sure arch is non-zero. */
  618. EXPECT_NE(0, sigsys->_arch);
  619. EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
  620. }
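/*
 * Precedence: when several filters are stacked, the most restrictive
 * action wins, i.e. KILL beats TRAP beats ERRNO beats TRACE beats ALLOW
 * (numerically, the smallest SECCOMP_RET_ACTION value). The precedence
 * fixture builds one single-action filter per return type and the tests
 * install them in varying orders to confirm the ordering.
 */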
  621. FIXTURE_DATA(precedence) {
  622. struct sock_fprog allow;
  623. struct sock_fprog trace;
  624. struct sock_fprog error;
  625. struct sock_fprog trap;
  626. struct sock_fprog kill;
  627. };
  628. FIXTURE_SETUP(precedence)
  629. {
  630. struct sock_filter allow_insns[] = {
  631. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  632. };
  633. struct sock_filter trace_insns[] = {
  634. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  635. offsetof(struct seccomp_data, nr)),
  636. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
  637. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  638. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
  639. };
  640. struct sock_filter error_insns[] = {
  641. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  642. offsetof(struct seccomp_data, nr)),
  643. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
  644. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  645. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
  646. };
  647. struct sock_filter trap_insns[] = {
  648. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  649. offsetof(struct seccomp_data, nr)),
  650. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
  651. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  652. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
  653. };
  654. struct sock_filter kill_insns[] = {
  655. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  656. offsetof(struct seccomp_data, nr)),
  657. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
  658. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  659. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
  660. };
  661. memset(self, 0, sizeof(*self));
  662. #define FILTER_ALLOC(_x) \
  663. self->_x.filter = malloc(sizeof(_x##_insns)); \
  664. ASSERT_NE(NULL, self->_x.filter); \
  665. memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
  666. self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
  667. FILTER_ALLOC(allow);
  668. FILTER_ALLOC(trace);
  669. FILTER_ALLOC(error);
  670. FILTER_ALLOC(trap);
  671. FILTER_ALLOC(kill);
  672. }
  673. FIXTURE_TEARDOWN(precedence)
  674. {
  675. #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
  676. FILTER_FREE(allow);
  677. FILTER_FREE(trace);
  678. FILTER_FREE(error);
  679. FILTER_FREE(trap);
  680. FILTER_FREE(kill);
  681. }
  682. TEST_F(precedence, allow_ok)
  683. {
  684. pid_t parent, res = 0;
  685. long ret;
  686. parent = getppid();
  687. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  688. ASSERT_EQ(0, ret);
  689. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  690. ASSERT_EQ(0, ret);
  691. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  692. ASSERT_EQ(0, ret);
  693. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  694. ASSERT_EQ(0, ret);
  695. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
  696. ASSERT_EQ(0, ret);
  697. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
  698. ASSERT_EQ(0, ret);
  699. /* Should work just fine. */
  700. res = syscall(__NR_getppid);
  701. EXPECT_EQ(parent, res);
  702. }
  703. TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
  704. {
  705. pid_t parent, res = 0;
  706. long ret;
  707. parent = getppid();
  708. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  709. ASSERT_EQ(0, ret);
  710. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  711. ASSERT_EQ(0, ret);
  712. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  713. ASSERT_EQ(0, ret);
  714. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  715. ASSERT_EQ(0, ret);
  716. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
  717. ASSERT_EQ(0, ret);
  718. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
  719. ASSERT_EQ(0, ret);
  720. /* Should work just fine. */
  721. res = syscall(__NR_getppid);
  722. EXPECT_EQ(parent, res);
  723. /* getpid() should never return. */
  724. res = syscall(__NR_getpid);
  725. EXPECT_EQ(0, res);
  726. }
  727. TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
  728. {
  729. pid_t parent;
  730. long ret;
  731. parent = getppid();
  732. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  733. ASSERT_EQ(0, ret);
  734. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  735. ASSERT_EQ(0, ret);
  736. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
  737. ASSERT_EQ(0, ret);
  738. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  739. ASSERT_EQ(0, ret);
  740. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  741. ASSERT_EQ(0, ret);
  742. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
  743. ASSERT_EQ(0, ret);
  744. /* Should work just fine. */
  745. EXPECT_EQ(parent, syscall(__NR_getppid));
  746. /* getpid() should never return. */
  747. EXPECT_EQ(0, syscall(__NR_getpid));
  748. }
  749. TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
  750. {
  751. pid_t parent;
  752. long ret;
  753. parent = getppid();
  754. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  755. ASSERT_EQ(0, ret);
  756. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  757. ASSERT_EQ(0, ret);
  758. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  759. ASSERT_EQ(0, ret);
  760. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  761. ASSERT_EQ(0, ret);
  762. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
  763. ASSERT_EQ(0, ret);
  764. /* Should work just fine. */
  765. EXPECT_EQ(parent, syscall(__NR_getppid));
  766. /* getpid() should never return. */
  767. EXPECT_EQ(0, syscall(__NR_getpid));
  768. }
  769. TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
  770. {
  771. pid_t parent;
  772. long ret;
  773. parent = getppid();
  774. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  775. ASSERT_EQ(0, ret);
  776. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  777. ASSERT_EQ(0, ret);
  778. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
  779. ASSERT_EQ(0, ret);
  780. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  781. ASSERT_EQ(0, ret);
  782. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  783. ASSERT_EQ(0, ret);
  784. /* Should work just fine. */
  785. EXPECT_EQ(parent, syscall(__NR_getppid));
  786. /* getpid() should never return. */
  787. EXPECT_EQ(0, syscall(__NR_getpid));
  788. }
  789. TEST_F(precedence, errno_is_third)
  790. {
  791. pid_t parent;
  792. long ret;
  793. parent = getppid();
  794. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  795. ASSERT_EQ(0, ret);
  796. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  797. ASSERT_EQ(0, ret);
  798. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  799. ASSERT_EQ(0, ret);
  800. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  801. ASSERT_EQ(0, ret);
  802. /* Should work just fine. */
  803. EXPECT_EQ(parent, syscall(__NR_getppid));
  804. EXPECT_EQ(0, syscall(__NR_getpid));
  805. }
  806. TEST_F(precedence, errno_is_third_in_any_order)
  807. {
  808. pid_t parent;
  809. long ret;
  810. parent = getppid();
  811. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  812. ASSERT_EQ(0, ret);
  813. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
  814. ASSERT_EQ(0, ret);
  815. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  816. ASSERT_EQ(0, ret);
  817. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  818. ASSERT_EQ(0, ret);
  819. /* Should work just fine. */
  820. EXPECT_EQ(parent, syscall(__NR_getppid));
  821. EXPECT_EQ(0, syscall(__NR_getpid));
  822. }
  823. TEST_F(precedence, trace_is_fourth)
  824. {
  825. pid_t parent;
  826. long ret;
  827. parent = getppid();
  828. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  829. ASSERT_EQ(0, ret);
  830. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  831. ASSERT_EQ(0, ret);
  832. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  833. ASSERT_EQ(0, ret);
  834. /* Should work just fine. */
  835. EXPECT_EQ(parent, syscall(__NR_getppid));
  836. /* No ptracer */
  837. EXPECT_EQ(-1, syscall(__NR_getpid));
  838. }
  839. TEST_F(precedence, trace_is_fourth_in_any_order)
  840. {
  841. pid_t parent;
  842. long ret;
  843. parent = getppid();
  844. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  845. ASSERT_EQ(0, ret);
  846. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
  847. ASSERT_EQ(0, ret);
  848. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
  849. ASSERT_EQ(0, ret);
  850. /* Should work just fine. */
  851. EXPECT_EQ(parent, syscall(__NR_getppid));
  852. /* No ptracer */
  853. EXPECT_EQ(-1, syscall(__NR_getpid));
  854. }
  855. #ifndef PTRACE_O_TRACESECCOMP
  856. #define PTRACE_O_TRACESECCOMP 0x00000080
  857. #endif
  858. /* Catch the Ubuntu 12.04 value error. */
  859. #if PTRACE_EVENT_SECCOMP != 7
  860. #undef PTRACE_EVENT_SECCOMP
  861. #endif
  862. #ifndef PTRACE_EVENT_SECCOMP
  863. #define PTRACE_EVENT_SECCOMP 7
  864. #endif
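/*
 * For a ptrace event stop, waitpid() encodes the event number in bits
 * 16 and up of the status word, so (status >> 16) identifies a
 * PTRACE_EVENT_SECCOMP stop.
 */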
  865. #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
  866. bool tracer_running;
  867. void tracer_stop(int sig)
  868. {
  869. tracer_running = false;
  870. }
  871. typedef void tracer_func_t(struct __test_metadata *_metadata,
  872. pid_t tracee, int status, void *args);
  873. void tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
  874. tracer_func_t tracer_func, void *args)
  875. {
  876. int ret = -1;
  877. struct sigaction action = {
  878. .sa_handler = tracer_stop,
  879. };
  880. /* Allow external shutdown. */
  881. tracer_running = true;
  882. ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));
  883. errno = 0;
  884. while (ret == -1 && errno != EINVAL)
  885. ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
  886. ASSERT_EQ(0, ret) {
  887. kill(tracee, SIGKILL);
  888. }
  889. /* Wait for attach stop */
  890. wait(NULL);
  891. ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, PTRACE_O_TRACESECCOMP);
  892. ASSERT_EQ(0, ret) {
  893. TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
  894. kill(tracee, SIGKILL);
  895. }
  896. ptrace(PTRACE_CONT, tracee, NULL, 0);
  897. /* Unblock the tracee */
  898. ASSERT_EQ(1, write(fd, "A", 1));
  899. ASSERT_EQ(0, close(fd));
  900. /* Run until we're shut down. Must assert to stop execution. */
  901. while (tracer_running) {
  902. int status;
  903. if (wait(&status) != tracee)
  904. continue;
  905. if (WIFSIGNALED(status) || WIFEXITED(status))
  906. /* Child is dead. Time to go. */
  907. return;
  908. /* Make sure this is a seccomp event. */
  909. ASSERT_EQ(true, IS_SECCOMP_EVENT(status));
  910. tracer_func(_metadata, tracee, status, args);
  911. ret = ptrace(PTRACE_CONT, tracee, NULL, NULL);
  912. ASSERT_EQ(0, ret);
  913. }
  914. /* Directly report the status of our test harness results. */
  915. syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
  916. }
  917. /* Common tracer setup/teardown functions. */
  918. void cont_handler(int num)
  919. { }
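/*
 * setup_trace_fixture() forks a child that attaches back to the caller
 * with PTRACE_ATTACH (permitted via PR_SET_PTRACER), enables
 * PTRACE_O_TRACESECCOMP, and signals readiness over a pipe before the
 * test proceeds. teardown_trace_fixture() stops the tracer with SIGUSR1
 * and folds its exit status into the test result.
 */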
  920. pid_t setup_trace_fixture(struct __test_metadata *_metadata,
  921. tracer_func_t func, void *args)
  922. {
  923. char sync;
  924. int pipefd[2];
  925. pid_t tracer_pid;
  926. pid_t tracee = getpid();
  927. /* Setup a pipe for clean synchronization. */
  928. ASSERT_EQ(0, pipe(pipefd));
  929. /* Fork a child which we'll promote to tracer */
  930. tracer_pid = fork();
  931. ASSERT_LE(0, tracer_pid);
  932. signal(SIGALRM, cont_handler);
  933. if (tracer_pid == 0) {
  934. close(pipefd[0]);
  935. tracer(_metadata, pipefd[1], tracee, func, args);
  936. syscall(__NR_exit, 0);
  937. }
  938. close(pipefd[1]);
  939. prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
  940. read(pipefd[0], &sync, 1);
  941. close(pipefd[0]);
  942. return tracer_pid;
  943. }
  944. void teardown_trace_fixture(struct __test_metadata *_metadata,
  945. pid_t tracer)
  946. {
  947. if (tracer) {
  948. int status;
  949. /*
  950. * Extract the exit code from the other process and
  951. * adopt it for ourselves in case its asserts failed.
  952. */
  953. ASSERT_EQ(0, kill(tracer, SIGUSR1));
  954. ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
  955. if (WEXITSTATUS(status))
  956. _metadata->passed = 0;
  957. }
  958. }
  959. /* "poke" tracer arguments and function. */
  960. struct tracer_args_poke_t {
  961. unsigned long poke_addr;
  962. };
  963. void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
  964. void *args)
  965. {
  966. int ret;
  967. unsigned long msg;
  968. struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;
  969. ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
  970. EXPECT_EQ(0, ret);
  971. /* If this fails, don't try to recover. */
  972. ASSERT_EQ(0x1001, msg) {
  973. kill(tracee, SIGKILL);
  974. }
  975. /*
  976. * Poke in the message.
  977. * Registers are not touched to try to keep this relatively arch
  978. * agnostic.
  979. */
  980. ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
  981. EXPECT_EQ(0, ret);
  982. }
  983. FIXTURE_DATA(TRACE_poke) {
  984. struct sock_fprog prog;
  985. pid_t tracer;
  986. long poked;
  987. struct tracer_args_poke_t tracer_args;
  988. };
  989. FIXTURE_SETUP(TRACE_poke)
  990. {
  991. struct sock_filter filter[] = {
  992. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  993. offsetof(struct seccomp_data, nr)),
  994. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
  995. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
  996. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  997. };
  998. self->poked = 0;
  999. memset(&self->prog, 0, sizeof(self->prog));
  1000. self->prog.filter = malloc(sizeof(filter));
  1001. ASSERT_NE(NULL, self->prog.filter);
  1002. memcpy(self->prog.filter, filter, sizeof(filter));
  1003. self->prog.len = (unsigned short)ARRAY_SIZE(filter);
  1004. /* Set up tracer args. */
  1005. self->tracer_args.poke_addr = (unsigned long)&self->poked;
  1006. /* Launch tracer. */
  1007. self->tracer = setup_trace_fixture(_metadata, tracer_poke,
  1008. &self->tracer_args);
  1009. }
  1010. FIXTURE_TEARDOWN(TRACE_poke)
  1011. {
  1012. teardown_trace_fixture(_metadata, self->tracer);
  1013. if (self->prog.filter)
  1014. free(self->prog.filter);
  1015. }
  1016. TEST_F(TRACE_poke, read_has_side_effects)
  1017. {
  1018. ssize_t ret;
  1019. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1020. ASSERT_EQ(0, ret);
  1021. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1022. ASSERT_EQ(0, ret);
  1023. EXPECT_EQ(0, self->poked);
  1024. ret = read(-1, NULL, 0);
  1025. EXPECT_EQ(-1, ret);
  1026. EXPECT_EQ(0x1001, self->poked);
  1027. }
  1028. TEST_F(TRACE_poke, getpid_runs_normally)
  1029. {
  1030. long ret;
  1031. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1032. ASSERT_EQ(0, ret);
  1033. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1034. ASSERT_EQ(0, ret);
  1035. EXPECT_EQ(0, self->poked);
  1036. EXPECT_NE(0, syscall(__NR_getpid));
  1037. EXPECT_EQ(0, self->poked);
  1038. }
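/*
 * Per-architecture mapping from the ptrace register set to the syscall
 * number and return value registers, used by get_syscall() and
 * change_syscall() below to inspect and rewrite the tracee's syscall.
 */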
  1039. #if defined(__x86_64__)
  1040. # define ARCH_REGS struct user_regs_struct
  1041. # define SYSCALL_NUM orig_rax
  1042. # define SYSCALL_RET rax
  1043. #elif defined(__i386__)
  1044. # define ARCH_REGS struct user_regs_struct
  1045. # define SYSCALL_NUM orig_eax
  1046. # define SYSCALL_RET eax
  1047. #elif defined(__arm__)
  1048. # define ARCH_REGS struct pt_regs
  1049. # define SYSCALL_NUM ARM_r7
  1050. # define SYSCALL_RET ARM_r0
  1051. #elif defined(__aarch64__)
  1052. # define ARCH_REGS struct user_pt_regs
  1053. # define SYSCALL_NUM regs[8]
  1054. # define SYSCALL_RET regs[0]
  1055. #elif defined(__powerpc__)
  1056. # define ARCH_REGS struct pt_regs
  1057. # define SYSCALL_NUM gpr[0]
  1058. # define SYSCALL_RET gpr[3]
  1059. #elif defined(__s390__)
  1060. # define ARCH_REGS s390_regs
  1061. # define SYSCALL_NUM gprs[2]
  1062. # define SYSCALL_RET gprs[2]
  1063. #else
  1064. # error "Do not know how to find your architecture's registers and syscalls"
  1065. #endif
  1066. /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
  1067. * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
  1068. */
  1069. #if defined(__x86_64__) || defined(__i386__)
  1070. #define HAVE_GETREGS
  1071. #endif
  1072. /* Architecture-specific syscall fetching routine. */
  1073. int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
  1074. {
  1075. ARCH_REGS regs;
  1076. #ifdef HAVE_GETREGS
  1077. EXPECT_EQ(0, ptrace(PTRACE_GETREGS, tracee, 0, &regs)) {
  1078. TH_LOG("PTRACE_GETREGS failed");
  1079. return -1;
  1080. }
  1081. #else
  1082. struct iovec iov;
  1083. iov.iov_base = &regs;
  1084. iov.iov_len = sizeof(regs);
  1085. EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) {
  1086. TH_LOG("PTRACE_GETREGSET failed");
  1087. return -1;
  1088. }
  1089. #endif
  1090. return regs.SYSCALL_NUM;
  1091. }
  1092. /* Architecture-specific syscall changing routine. */
  1093. void change_syscall(struct __test_metadata *_metadata,
  1094. pid_t tracee, int syscall)
  1095. {
  1096. int ret;
  1097. ARCH_REGS regs;
  1098. #ifdef HAVE_GETREGS
  1099. ret = ptrace(PTRACE_GETREGS, tracee, 0, &regs);
  1100. #else
  1101. struct iovec iov;
  1102. iov.iov_base = &regs;
  1103. iov.iov_len = sizeof(regs);
  1104. ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
  1105. #endif
  1106. EXPECT_EQ(0, ret);
  1107. #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
  1108. defined(__s390__)
  1109. {
  1110. regs.SYSCALL_NUM = syscall;
  1111. }
  1112. #elif defined(__arm__)
  1113. # ifndef PTRACE_SET_SYSCALL
  1114. # define PTRACE_SET_SYSCALL 23
  1115. # endif
  1116. {
  1117. ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall);
  1118. EXPECT_EQ(0, ret);
  1119. }
  1120. #elif defined(__aarch64__)
  1121. # ifndef NT_ARM_SYSTEM_CALL
  1122. # define NT_ARM_SYSTEM_CALL 0x404
  1123. # endif
  1124. {
  1125. iov.iov_base = &syscall;
  1126. iov.iov_len = sizeof(syscall);
  1127. ret = ptrace(PTRACE_SETREGSET, tracee, NT_ARM_SYSTEM_CALL,
  1128. &iov);
  1129. EXPECT_EQ(0, ret);
  1130. }
  1131. #else
  1132. ASSERT_EQ(1, 0) {
  1133. TH_LOG("How is the syscall changed on this architecture?");
  1134. }
  1135. #endif
  1136. /* If syscall is skipped, change return value. */
  1137. if (syscall == -1)
  1138. regs.SYSCALL_RET = 1;
  1139. #ifdef HAVE_GETREGS
  1140. ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs);
  1141. #else
  1142. iov.iov_base = &regs;
  1143. iov.iov_len = sizeof(regs);
  1144. ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
  1145. #endif
  1146. EXPECT_EQ(0, ret);
  1147. }
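/*
 * tracer_syscall() dispatches on the PTRACE_GETEVENTMSG value, which
 * carries the low 16 bits of the filter's SECCOMP_RET_TRACE return:
 * 0x1002 rewrites getpid() into getppid(), 0x1003 skips gettid() and
 * fakes a return value of 1, and 0x1004 lets getppid() run unchanged.
 */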
  1148. void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
  1149. int status, void *args)
  1150. {
  1151. int ret;
  1152. unsigned long msg;
  1153. /* Make sure we got the right message. */
  1154. ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
  1155. EXPECT_EQ(0, ret);
  1156. /* Validate and take action on expected syscalls. */
  1157. switch (msg) {
  1158. case 0x1002:
  1159. /* change getpid to getppid. */
  1160. EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
  1161. change_syscall(_metadata, tracee, __NR_getppid);
  1162. break;
  1163. case 0x1003:
  1164. /* skip gettid. */
  1165. EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
  1166. change_syscall(_metadata, tracee, -1);
  1167. break;
  1168. case 0x1004:
  1169. /* do nothing (allow getppid) */
  1170. EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
  1171. break;
  1172. default:
  1173. EXPECT_EQ(0, msg) {
  1174. TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
  1175. kill(tracee, SIGKILL);
  1176. }
  1177. }
  1178. }
  1179. FIXTURE_DATA(TRACE_syscall) {
  1180. struct sock_fprog prog;
  1181. pid_t tracer, mytid, mypid, parent;
  1182. };
  1183. FIXTURE_SETUP(TRACE_syscall)
  1184. {
  1185. struct sock_filter filter[] = {
  1186. BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
  1187. offsetof(struct seccomp_data, nr)),
  1188. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
  1189. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
  1190. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
  1191. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
  1192. BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
  1193. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
  1194. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1195. };
  1196. memset(&self->prog, 0, sizeof(self->prog));
  1197. self->prog.filter = malloc(sizeof(filter));
  1198. ASSERT_NE(NULL, self->prog.filter);
  1199. memcpy(self->prog.filter, filter, sizeof(filter));
  1200. self->prog.len = (unsigned short)ARRAY_SIZE(filter);
  1201. /* Prepare some testable syscall results. */
  1202. self->mytid = syscall(__NR_gettid);
  1203. ASSERT_GT(self->mytid, 0);
  1204. ASSERT_NE(self->mytid, 1) {
  1205. TH_LOG("Running this test as init is not supported. :)");
  1206. }
  1207. self->mypid = getpid();
  1208. ASSERT_GT(self->mypid, 0);
  1209. ASSERT_EQ(self->mytid, self->mypid);
  1210. self->parent = getppid();
  1211. ASSERT_GT(self->parent, 0);
  1212. ASSERT_NE(self->parent, self->mypid);
  1213. /* Launch tracer. */
  1214. self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL);
  1215. }
  1216. FIXTURE_TEARDOWN(TRACE_syscall)
  1217. {
  1218. teardown_trace_fixture(_metadata, self->tracer);
  1219. if (self->prog.filter)
  1220. free(self->prog.filter);
  1221. }
  1222. TEST_F(TRACE_syscall, syscall_allowed)
  1223. {
  1224. long ret;
  1225. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1226. ASSERT_EQ(0, ret);
  1227. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1228. ASSERT_EQ(0, ret);
  1229. /* getppid works as expected (no changes). */
  1230. EXPECT_EQ(self->parent, syscall(__NR_getppid));
  1231. EXPECT_NE(self->mypid, syscall(__NR_getppid));
  1232. }
  1233. TEST_F(TRACE_syscall, syscall_redirected)
  1234. {
  1235. long ret;
  1236. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1237. ASSERT_EQ(0, ret);
  1238. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1239. ASSERT_EQ(0, ret);
  1240. /* getpid has been redirected to getppid as expected. */
  1241. EXPECT_EQ(self->parent, syscall(__NR_getpid));
  1242. EXPECT_NE(self->mypid, syscall(__NR_getpid));
  1243. }
  1244. TEST_F(TRACE_syscall, syscall_dropped)
  1245. {
  1246. long ret;
  1247. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1248. ASSERT_EQ(0, ret);
  1249. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
  1250. ASSERT_EQ(0, ret);
  1251. /* gettid has been skipped and an altered return value stored. */
  1252. EXPECT_EQ(1, syscall(__NR_gettid));
  1253. EXPECT_NE(self->mytid, syscall(__NR_gettid));
  1254. }
  1255. #ifndef __NR_seccomp
  1256. # if defined(__i386__)
  1257. # define __NR_seccomp 354
  1258. # elif defined(__x86_64__)
  1259. # define __NR_seccomp 317
  1260. # elif defined(__arm__)
  1261. # define __NR_seccomp 383
  1262. # elif defined(__aarch64__)
  1263. # define __NR_seccomp 277
  1264. # elif defined(__powerpc__)
  1265. # define __NR_seccomp 358
  1266. # elif defined(__s390__)
  1267. # define __NR_seccomp 348
  1268. # else
  1269. # warning "seccomp syscall number unknown for this architecture"
  1270. # define __NR_seccomp 0xffff
  1271. # endif
  1272. #endif
  1273. #ifndef SECCOMP_SET_MODE_STRICT
  1274. #define SECCOMP_SET_MODE_STRICT 0
  1275. #endif
  1276. #ifndef SECCOMP_SET_MODE_FILTER
  1277. #define SECCOMP_SET_MODE_FILTER 1
  1278. #endif
  1279. #ifndef SECCOMP_FLAG_FILTER_TSYNC
  1280. #define SECCOMP_FLAG_FILTER_TSYNC 1
  1281. #endif
  1282. #ifndef seccomp
  1283. int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
  1284. {
  1285. errno = 0;
  1286. return syscall(__NR_seccomp, op, flags, filter);
  1287. }
  1288. #endif
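/*
 * Minimal usage sketch, mirroring the tests below: with no_new_privs
 * set, a filter can be installed through the syscall entry point, e.g.
 *
 *   prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *   seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
 *
 * and passing SECCOMP_FLAG_FILTER_TSYNC as the flags argument asks the
 * kernel to apply the filter to every thread of the process atomically.
 */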
  1289. TEST(seccomp_syscall)
  1290. {
  1291. struct sock_filter filter[] = {
  1292. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1293. };
  1294. struct sock_fprog prog = {
  1295. .len = (unsigned short)ARRAY_SIZE(filter),
  1296. .filter = filter,
  1297. };
  1298. long ret;
  1299. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
  1300. ASSERT_EQ(0, ret) {
  1301. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1302. }
  1303. /* Reject insane operation. */
  1304. ret = seccomp(-1, 0, &prog);
  1305. ASSERT_NE(ENOSYS, errno) {
  1306. TH_LOG("Kernel does not support seccomp syscall!");
  1307. }
  1308. EXPECT_EQ(EINVAL, errno) {
  1309. TH_LOG("Did not reject crazy op value!");
  1310. }
  1311. /* Reject strict with flags or pointer. */
  1312. ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
  1313. EXPECT_EQ(EINVAL, errno) {
  1314. TH_LOG("Did not reject mode strict with flags!");
  1315. }
  1316. ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
  1317. EXPECT_EQ(EINVAL, errno) {
  1318. TH_LOG("Did not reject mode strict with uargs!");
  1319. }
  1320. /* Reject insane args for filter. */
  1321. ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
  1322. EXPECT_EQ(EINVAL, errno) {
  1323. TH_LOG("Did not reject crazy filter flags!");
  1324. }
  1325. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
  1326. EXPECT_EQ(EFAULT, errno) {
  1327. TH_LOG("Did not reject NULL filter!");
  1328. }
  1329. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
  1330. EXPECT_EQ(0, errno) {
  1331. TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
  1332. strerror(errno));
  1333. }
  1334. }
  1335. TEST(seccomp_syscall_mode_lock)
  1336. {
  1337. struct sock_filter filter[] = {
  1338. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1339. };
  1340. struct sock_fprog prog = {
  1341. .len = (unsigned short)ARRAY_SIZE(filter),
  1342. .filter = filter,
  1343. };
  1344. long ret;
  1345. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  1346. ASSERT_EQ(0, ret) {
  1347. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1348. }
  1349. ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
  1350. ASSERT_NE(ENOSYS, errno) {
  1351. TH_LOG("Kernel does not support seccomp syscall!");
  1352. }
  1353. EXPECT_EQ(0, ret) {
  1354. TH_LOG("Could not install filter!");
  1355. }
  1356. /* Make sure neither entry point will switch to strict. */
  1357. ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
  1358. EXPECT_EQ(EINVAL, errno) {
  1359. TH_LOG("Switched to mode strict!");
  1360. }
  1361. ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
  1362. EXPECT_EQ(EINVAL, errno) {
  1363. TH_LOG("Switched to mode strict!");
  1364. }
  1365. }
  1366. TEST(TSYNC_first)
  1367. {
  1368. struct sock_filter filter[] = {
  1369. BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
  1370. };
  1371. struct sock_fprog prog = {
  1372. .len = (unsigned short)ARRAY_SIZE(filter),
  1373. .filter = filter,
  1374. };
  1375. long ret;
  1376. ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
  1377. ASSERT_EQ(0, ret) {
  1378. TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
  1379. }
  1380. ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
  1381. &prog);
  1382. ASSERT_NE(ENOSYS, errno) {
  1383. TH_LOG("Kernel does not support seccomp syscall!");
  1384. }
  1385. EXPECT_EQ(0, ret) {
  1386. TH_LOG("Could not install initial filter with TSYNC!");
  1387. }
  1388. }
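/*
 * The TSYNC fixture spawns sibling threads (tsync_sibling) that install
 * or inherit filters and then block on a condition variable, so the
 * tests can check whether SECCOMP_FLAG_FILTER_TSYNC propagates a filter
 * to threads whose filter trees do or do not diverge.
 */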
  1389. #define TSYNC_SIBLINGS 2
  1390. struct tsync_sibling {
  1391. pthread_t tid;
  1392. pid_t system_tid;
  1393. sem_t *started;
  1394. pthread_cond_t *cond;
  1395. pthread_mutex_t *mutex;
  1396. int diverge;
  1397. int num_waits;
  1398. struct sock_fprog *prog;
  1399. struct __test_metadata *metadata;
  1400. };
  1401. FIXTURE_DATA(TSYNC) {
  1402. struct sock_fprog root_prog, apply_prog;
  1403. struct tsync_sibling sibling[TSYNC_SIBLINGS];
  1404. sem_t started;
  1405. pthread_cond_t cond;
  1406. pthread_mutex_t mutex;
  1407. int sibling_count;
  1408. };
FIXTURE_SETUP(TSYNC)
{
	struct sock_filter root_filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter apply_filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->root_prog, 0, sizeof(self->root_prog));
	memset(&self->apply_prog, 0, sizeof(self->apply_prog));
	memset(&self->sibling, 0, sizeof(self->sibling));
	self->root_prog.filter = malloc(sizeof(root_filter));
	ASSERT_NE(NULL, self->root_prog.filter);
	memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
	self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter);
	self->apply_prog.filter = malloc(sizeof(apply_filter));
	ASSERT_NE(NULL, self->apply_prog.filter);
	memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
	self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter);

	self->sibling_count = 0;
	pthread_mutex_init(&self->mutex, NULL);
	pthread_cond_init(&self->cond, NULL);
	sem_init(&self->started, 0, 0);
	self->sibling[0].tid = 0;
	self->sibling[0].cond = &self->cond;
	self->sibling[0].started = &self->started;
	self->sibling[0].mutex = &self->mutex;
	self->sibling[0].diverge = 0;
	self->sibling[0].num_waits = 1;
	self->sibling[0].prog = &self->root_prog;
	self->sibling[0].metadata = _metadata;
	self->sibling[1].tid = 0;
	self->sibling[1].cond = &self->cond;
	self->sibling[1].started = &self->started;
	self->sibling[1].mutex = &self->mutex;
	self->sibling[1].diverge = 0;
	self->sibling[1].prog = &self->root_prog;
	self->sibling[1].num_waits = 1;
	self->sibling[1].metadata = _metadata;
}
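
/* Free the filter copies and clean up any sibling threads that were started. */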
FIXTURE_TEARDOWN(TSYNC)
{
	int sib = 0;

	if (self->root_prog.filter)
		free(self->root_prog.filter);
	if (self->apply_prog.filter)
		free(self->apply_prog.filter);

	for ( ; sib < self->sibling_count; ++sib) {
		struct tsync_sibling *s = &self->sibling[sib];
		void *status;

		if (!s->tid)
			continue;
		if (pthread_kill(s->tid, 0)) {
			pthread_cancel(s->tid);
			pthread_join(s->tid, &status);
		}
	}
	pthread_mutex_destroy(&self->mutex);
	pthread_cond_destroy(&self->cond);
	sem_destroy(&self->started);
}
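
/*
 * Body of each sibling thread: optionally diverge by installing its own
 * copy of the root filter, signal the parent that it has started, wait for
 * the requested number of condvar wakeups, then report whether
 * NO_NEW_PRIVS is set and whether a synced filter killed its read().
 */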
void *tsync_sibling(void *data)
{
	long ret = 0;
	struct tsync_sibling *me = data;

	me->system_tid = syscall(__NR_gettid);

	pthread_mutex_lock(me->mutex);
	if (me->diverge) {
		/* Just re-apply the root prog to fork the tree */
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
			    me->prog, 0, 0);
	}
	sem_post(me->started);
	/* Return outside of started so parent notices failures. */
	if (ret) {
		pthread_mutex_unlock(me->mutex);
		return (void *)SIBLING_EXIT_FAILURE;
	}
	do {
		pthread_cond_wait(me->cond, me->mutex);
		me->num_waits = me->num_waits - 1;
	} while (me->num_waits);
	pthread_mutex_unlock(me->mutex);

	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
	if (!ret)
		return (void *)SIBLING_EXIT_NEWPRIVS;
	read(0, NULL, 0);
	return (void *)SIBLING_EXIT_UNKILLED;
}
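
/* Spawn a sibling thread running tsync_sibling() above. */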
void tsync_start_sibling(struct tsync_sibling *sibling)
{
	pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);
}
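
/*
 * With a filter in place that fails prctl() with EINVAL, a diverging
 * sibling cannot install its own filter: it must exit with
 * SIBLING_EXIT_FAILURE while the other sibling runs to completion.
 */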
TEST_F(TSYNC, siblings_fail_prctl)
{
	long ret;
	void *status;
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Check prctl failure detection by requesting sib 0 diverge. */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("setting filter failed");
	}

	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Signal the threads to clean up. */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure diverging sibling failed to call prctl. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
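
/*
 * With an ancestor filter already shared by every thread, a TSYNC filter
 * that kills read() must attach to both siblings, so neither should be
 * able to exit cleanly.
 */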
TEST_F(TSYNC, two_siblings_with_ancestor)
{
	long ret;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(0, ret) {
		TH_LOG("Could not install filter on all threads!");
	}

	/* Tell the siblings to test the policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both killed and don't exit cleanly. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(0x0, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(0x0, (long)status);
}
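
/*
 * Without NO_NEW_PRIVS set anywhere, both siblings should notice the
 * missing nnp bit and exit with SIBLING_EXIT_NEWPRIVS.
 */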
TEST_F(TSYNC, two_sibling_want_nnp)
{
	void *status;

	/* start siblings before any prctl() operations */
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Tell the siblings to test no policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both upset about lacking nnp. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
}
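
/*
 * Even when the siblings start with no filter at all, a TSYNC filter
 * installed later (after setting NO_NEW_PRIVS) must attach to them, so
 * both should be killed by their read() calls.
 */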
TEST_F(TSYNC, two_siblings_with_no_filter)
{
	long ret;
	void *status;

	/* start siblings before any prctl() operations */
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Could not install filter on all threads!");
	}

	/* Tell the siblings to test the policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both killed and don't exit cleanly. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(0x0, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(0x0, (long)status);
}
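
/*
 * If one sibling has diverged with its own filter, TSYNC must refuse to
 * synchronize and return that sibling's TID; no thread gets the new
 * policy and both siblings survive their read() calls.
 */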
TEST_F(TSYNC, two_siblings_with_one_divergence)
{
	long ret;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(self->sibling[0].system_tid, ret) {
		TH_LOG("Did not fail on diverged sibling.");
	}

	/* Wake the threads */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both unkilled. */
	pthread_join(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
	pthread_join(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
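
/*
 * One sibling diverges with its own filter while the other never enters
 * seccomp. TSYNC must first fail and report the diverged sibling's TID,
 * then succeed once that sibling has exited, and finally succeed again
 * when the calling thread is the only one left.
 */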
TEST_F(TSYNC, two_siblings_not_under_filter)
{
	long ret, sib;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/*
	 * Sibling 0 will have its own seccomp policy
	 * and Sibling 1 will not be under seccomp at
	 * all. Sibling 1 will enter seccomp and 0
	 * will cause failure.
	 */
	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(ret, self->sibling[0].system_tid) {
		TH_LOG("Did not fail on diverged sibling.");
	}
	sib = 1;
	if (ret == self->sibling[0].system_tid)
		sib = 0;

	pthread_mutex_lock(&self->mutex);

	/* Increment the other sibling's num_waits so we can clean up
	 * the one we just saw.
	 */
	self->sibling[!sib].num_waits += 1;

	/* Signal the thread to clean up. */
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);
	pthread_join(self->sibling[sib].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
	/* Poll for actual task death. pthread_join doesn't guarantee it. */
	while (!kill(self->sibling[sib].system_tid, 0))
		sleep(0.1);
	/* Switch to the remaining sibling */
	sib = !sib;

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(0, ret) {
		TH_LOG("Expected the remaining sibling to sync");
	};

	pthread_mutex_lock(&self->mutex);

	/* If remaining sibling didn't have a chance to wake up during
	 * the first broadcast, manually reduce the num_waits now.
	 */
	if (self->sibling[sib].num_waits > 1)
		self->sibling[sib].num_waits = 1;
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);
	pthread_join(self->sibling[sib].tid, &status);
	EXPECT_EQ(0, (long)status);
	/* Poll for actual task death. pthread_join doesn't guarantee it. */
	while (!kill(self->sibling[sib].system_tid, 0))
		sleep(0.1);

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(0, ret);	/* just us chickens */
}

/* Make sure restarted syscalls are seen directly as "restart_syscall". */
TEST(syscall_restart)
{
	long ret;
	unsigned long msg;
	pid_t child_pid;
	int pipefd[2];
	int status;
	siginfo_t info = { };
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),

#ifdef __NR_sigreturn
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0),
#endif
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),

		/* Allow __NR_write for easy logging. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		/* The nanosleep jump target. */
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100),
		/* The restart_syscall jump target. */
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
#if defined(__arm__)
	struct utsname utsbuf;
#endif

	ASSERT_EQ(0, pipe(pipefd));

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		/* Child uses EXPECT not ASSERT to deliver status correctly. */
		char buf = ' ';
		struct timespec timeout = { };

		/* Attach parent as tracer and stop. */
		EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
		EXPECT_EQ(0, raise(SIGSTOP));

		EXPECT_EQ(0, close(pipefd[1]));

		EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
			TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
		}
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		EXPECT_EQ(0, ret) {
			TH_LOG("Failed to install filter!");
		}

		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed to read() sync from parent");
		}
		EXPECT_EQ('.', buf) {
			TH_LOG("Failed to get sync data from read()");
		}

		/* Start nanosleep to be interrupted. */
		timeout.tv_sec = 1;
		errno = 0;
		EXPECT_EQ(0, nanosleep(&timeout, NULL)) {
			TH_LOG("Call to nanosleep() failed (errno %d)", errno);
		}

		/* Read final sync from parent. */
		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed final read() from parent");
		}
		EXPECT_EQ('!', buf) {
			TH_LOG("Failed to get final data from read()");
		}

		/* Directly report the status of our test harness results. */
		syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
						     : EXIT_FAILURE);
	}
	EXPECT_EQ(0, close(pipefd[0]));

	/* Attach to child, setup options, and release. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
			    PTRACE_O_TRACESECCOMP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], ".", 1));

	/* Wait for nanosleep() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	ASSERT_EQ(0x100, msg);
	EXPECT_EQ(__NR_nanosleep, get_syscall(_metadata, child_pid));

	/* Might as well check siginfo for sanity while we're here. */
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	ASSERT_EQ(SIGTRAP, info.si_signo);
	ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
	EXPECT_EQ(0, info.si_errno);
	EXPECT_EQ(getuid(), info.si_uid);
	/* Verify signal delivery came from child (seccomp-triggered). */
	EXPECT_EQ(child_pid, info.si_pid);

	/* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */
	ASSERT_EQ(0, kill(child_pid, SIGSTOP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
	/* Verify signal delivery came from parent now. */
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	EXPECT_EQ(getpid(), info.si_pid);

	/* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
	ASSERT_EQ(0, kill(child_pid, SIGCONT));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGCONT, WSTOPSIG(status));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));

	/* Wait for restart_syscall() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	ASSERT_EQ(0x200, msg);
	ret = get_syscall(_metadata, child_pid);
#if defined(__arm__)
	/*
	 * FIXME:
	 * - native ARM registers do NOT expose true syscall.
	 * - compat ARM registers on ARM64 DO expose true syscall.
	 */
	ASSERT_EQ(0, uname(&utsbuf));
	if (strncmp(utsbuf.machine, "arm", 3) == 0) {
		EXPECT_EQ(__NR_nanosleep, ret);
	} else
#endif
	{
		EXPECT_EQ(__NR_restart_syscall, ret);
	}

	/* Write again to end test. */
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], "!", 1));
	EXPECT_EQ(0, close(pipefd[1]));

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	if (WIFSIGNALED(status) || WEXITSTATUS(status))
		_metadata->passed = 0;
}

/*
 * TODO:
 * - add microbenchmarks
 * - expand NNP testing
 * - better arch-specific TRACE and TRAP handlers.
 * - endianness checking when appropriate
 * - 64-bit arg prodding
 * - arch value testing (x86 modes especially)
 * - ...
 */

TEST_HARNESS_MAIN