param_test.c

// SPDX-License-Identifier: LGPL-2.1
#define _GNU_SOURCE
#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
#include <unistd.h>
#include <poll.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <stddef.h>

static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}

#define NR_INJECT	9
static int loop_cnt[NR_INJECT + 1];

static int loop_cnt_1 asm("asm_loop_cnt_1") __attribute__((used));
static int loop_cnt_2 asm("asm_loop_cnt_2") __attribute__((used));
static int loop_cnt_3 asm("asm_loop_cnt_3") __attribute__((used));
static int loop_cnt_4 asm("asm_loop_cnt_4") __attribute__((used));
static int loop_cnt_5 asm("asm_loop_cnt_5") __attribute__((used));
static int loop_cnt_6 asm("asm_loop_cnt_6") __attribute__((used));

static int opt_modulo, verbose;

static int opt_yield, opt_signal, opt_sleep,
		opt_disable_rseq, opt_threads = 200,
		opt_disable_mod = 0, opt_test = 's', opt_mb = 0;

#ifndef RSEQ_SKIP_FASTPATH
static long long opt_reps = 5000;
#else
static long long opt_reps = 100;
#endif

static __thread __attribute__((tls_model("initial-exec")))
unsigned int signals_delivered;

#ifndef BENCHMARK

static __thread __attribute__((tls_model("initial-exec"), unused))
unsigned int yield_mod_cnt, nr_abort;

#define printf_verbose(fmt, ...)			\
	do {						\
		if (verbose)				\
			printf(fmt, ## __VA_ARGS__);	\
	} while (0)
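
/*
 * Delay-injection machinery: RSEQ_INJECT_ASM(n) spins for loop_cnt[n]
 * iterations at injection point n inside the rseq critical sections,
 * using a per-architecture scratch register, to widen the window in
 * which preemption, migration or signal delivery can abort the
 * sequence.
 */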
#ifdef __i386__

#define INJECT_ASM_REG	"eax"

#define RSEQ_INJECT_CLOBBER \
	, INJECT_ASM_REG

#define RSEQ_INJECT_ASM(n) \
	"mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
	"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
	"jz 333f\n\t" \
	"222:\n\t" \
	"dec %%" INJECT_ASM_REG "\n\t" \
	"jnz 222b\n\t" \
	"333:\n\t"

#elif defined(__x86_64__)

#define INJECT_ASM_REG_P	"rax"
#define INJECT_ASM_REG		"eax"

#define RSEQ_INJECT_CLOBBER \
	, INJECT_ASM_REG_P \
	, INJECT_ASM_REG

#define RSEQ_INJECT_ASM(n) \
	"lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
	"mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
	"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
	"jz 333f\n\t" \
	"222:\n\t" \
	"dec %%" INJECT_ASM_REG "\n\t" \
	"jnz 222b\n\t" \
	"333:\n\t"

#elif defined(__s390__)

#define RSEQ_INJECT_INPUT \
	, [loop_cnt_1]"m"(loop_cnt[1]) \
	, [loop_cnt_2]"m"(loop_cnt[2]) \
	, [loop_cnt_3]"m"(loop_cnt[3]) \
	, [loop_cnt_4]"m"(loop_cnt[4]) \
	, [loop_cnt_5]"m"(loop_cnt[5]) \
	, [loop_cnt_6]"m"(loop_cnt[6])

#define INJECT_ASM_REG	"r12"

#define RSEQ_INJECT_CLOBBER \
	, INJECT_ASM_REG

#define RSEQ_INJECT_ASM(n) \
	"l %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
	"ltr %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG "\n\t" \
	"je 333f\n\t" \
	"222:\n\t" \
	"ahi %%" INJECT_ASM_REG ", -1\n\t" \
	"jnz 222b\n\t" \
	"333:\n\t"

#elif defined(__ARMEL__)

#define RSEQ_INJECT_INPUT \
	, [loop_cnt_1]"m"(loop_cnt[1]) \
	, [loop_cnt_2]"m"(loop_cnt[2]) \
	, [loop_cnt_3]"m"(loop_cnt[3]) \
	, [loop_cnt_4]"m"(loop_cnt[4]) \
	, [loop_cnt_5]"m"(loop_cnt[5]) \
	, [loop_cnt_6]"m"(loop_cnt[6])

#define INJECT_ASM_REG	"r4"

#define RSEQ_INJECT_CLOBBER \
	, INJECT_ASM_REG

#define RSEQ_INJECT_ASM(n) \
	"ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
	"cmp " INJECT_ASM_REG ", #0\n\t" \
	"beq 333f\n\t" \
	"222:\n\t" \
	"subs " INJECT_ASM_REG ", #1\n\t" \
	"bne 222b\n\t" \
	"333:\n\t"

#elif defined(__AARCH64EL__)

#define RSEQ_INJECT_INPUT \
	, [loop_cnt_1] "Qo" (loop_cnt[1]) \
	, [loop_cnt_2] "Qo" (loop_cnt[2]) \
	, [loop_cnt_3] "Qo" (loop_cnt[3]) \
	, [loop_cnt_4] "Qo" (loop_cnt[4]) \
	, [loop_cnt_5] "Qo" (loop_cnt[5]) \
	, [loop_cnt_6] "Qo" (loop_cnt[6])

#define INJECT_ASM_REG	RSEQ_ASM_TMP_REG32

#define RSEQ_INJECT_ASM(n) \
	" ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n" \
	" cbz " INJECT_ASM_REG ", 333f\n" \
	"222:\n" \
	" sub " INJECT_ASM_REG ", " INJECT_ASM_REG ", #1\n" \
	" cbnz " INJECT_ASM_REG ", 222b\n" \
	"333:\n"

#elif __PPC__

#define RSEQ_INJECT_INPUT \
	, [loop_cnt_1]"m"(loop_cnt[1]) \
	, [loop_cnt_2]"m"(loop_cnt[2]) \
	, [loop_cnt_3]"m"(loop_cnt[3]) \
	, [loop_cnt_4]"m"(loop_cnt[4]) \
	, [loop_cnt_5]"m"(loop_cnt[5]) \
	, [loop_cnt_6]"m"(loop_cnt[6])

#define INJECT_ASM_REG	"r18"

#define RSEQ_INJECT_CLOBBER \
	, INJECT_ASM_REG

#define RSEQ_INJECT_ASM(n) \
	"lwz %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
	"cmpwi %%" INJECT_ASM_REG ", 0\n\t" \
	"beq 333f\n\t" \
	"222:\n\t" \
	"subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
	"bne 222b\n\t" \
	"333:\n\t"

#elif defined(__mips__)

#define RSEQ_INJECT_INPUT \
	, [loop_cnt_1]"m"(loop_cnt[1]) \
	, [loop_cnt_2]"m"(loop_cnt[2]) \
	, [loop_cnt_3]"m"(loop_cnt[3]) \
	, [loop_cnt_4]"m"(loop_cnt[4]) \
	, [loop_cnt_5]"m"(loop_cnt[5]) \
	, [loop_cnt_6]"m"(loop_cnt[6])

#define INJECT_ASM_REG	"$5"

#define RSEQ_INJECT_CLOBBER \
	, INJECT_ASM_REG

#define RSEQ_INJECT_ASM(n) \
	"lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
	"beqz " INJECT_ASM_REG ", 333f\n\t" \
	"222:\n\t" \
	"addiu " INJECT_ASM_REG ", -1\n\t" \
	"bnez " INJECT_ASM_REG ", 222b\n\t" \
	"333:\n\t"

#else
#error unsupported target
#endif
#define RSEQ_INJECT_FAILED \
	nr_abort++;

#define RSEQ_INJECT_C(n) \
{ \
	int loc_i, loc_nr_loops = loop_cnt[n]; \
	\
	for (loc_i = 0; loc_i < loc_nr_loops; loc_i++) { \
		rseq_barrier(); \
	} \
	if (loc_nr_loops == -1 && opt_modulo) { \
		if (yield_mod_cnt == opt_modulo - 1) { \
			if (opt_sleep > 0) \
				poll(NULL, 0, opt_sleep); \
			if (opt_yield) \
				sched_yield(); \
			if (opt_signal) \
				raise(SIGUSR1); \
			yield_mod_cnt = 0; \
		} else { \
			yield_mod_cnt++; \
		} \
	} \
}

#else

#define printf_verbose(fmt, ...)

#endif /* BENCHMARK */

#include "rseq.h"
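
/*
 * Per-cpu data structures used by the tests. Each per-cpu entry is
 * aligned on 128 bytes so that entries for different cpus sit on
 * separate cache lines, avoiding false sharing.
 */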
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
};

struct spinlock_thread_test_data {
	struct spinlock_test_data *data;
	long long reps;
	int reg;
};

struct inc_test_data {
	struct test_data_entry c[CPU_SETSIZE];
};

struct inc_thread_test_data {
	struct inc_test_data *data;
	long long reps;
	int reg;
};

struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};

#define BUFFER_ITEM_PER_CPU	100

struct percpu_buffer_node {
	intptr_t data;
};

struct percpu_buffer_entry {
	intptr_t offset;
	intptr_t buflen;
	struct percpu_buffer_node **array;
} __attribute__((aligned(128)));

struct percpu_buffer {
	struct percpu_buffer_entry c[CPU_SETSIZE];
};

#define MEMCPY_BUFFER_ITEM_PER_CPU	100

struct percpu_memcpy_buffer_node {
	intptr_t data1;
	uint64_t data2;
};

struct percpu_memcpy_buffer_entry {
	intptr_t offset;
	intptr_t buflen;
	struct percpu_memcpy_buffer_node *array;
} __attribute__((aligned(128)));

struct percpu_memcpy_buffer {
	struct percpu_memcpy_buffer_entry c[CPU_SETSIZE];
};

/* A simple percpu spinlock. Grabs lock on current cpu. */
static int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = rseq_cpu_start();
		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
					 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}

static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
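
/*
 * Worker thread for the spinlock test: takes the lock for the current
 * cpu, increments that cpu's counter, then releases the lock, repeated
 * thread_data->reps times.
 */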
void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_thread_test_data *thread_data = arg;
	struct spinlock_test_data *data = thread_data->data;
	long long i, reps;

	if (!opt_disable_rseq && thread_data->reg &&
	    rseq_register_current_thread())
		abort();
	reps = thread_data->reps;
	for (i = 0; i < reps; i++) {
		int cpu = rseq_cpu_start();

		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
#ifndef BENCHMARK
		if (i != 0 && !(i % (reps / 10)))
			printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
#endif
	}
	printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
		       (int) gettid(), nr_abort, signals_delivered);
	if (!opt_disable_rseq && thread_data->reg &&
	    rseq_unregister_current_thread())
		abort();
	return NULL;
}

/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock. Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
void test_percpu_spinlock(void)
{
	const int num_threads = opt_threads;
	int i, ret;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;
	struct spinlock_thread_test_data thread_data[num_threads];

	memset(&data, 0, sizeof(data));
	for (i = 0; i < num_threads; i++) {
		thread_data[i].reps = opt_reps;
		if (opt_disable_mod <= 0 || (i % opt_disable_mod))
			thread_data[i].reg = 1;
		else
			thread_data[i].reg = 0;
		thread_data[i].data = &data;
		ret = pthread_create(&test_threads[i], NULL,
				     test_percpu_spinlock_thread,
				     &thread_data[i]);
		if (ret) {
			errno = ret;
			perror("pthread_create");
			abort();
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_join(test_threads[i], NULL);
		if (ret) {
			errno = ret;
			perror("pthread_join");
			abort();
		}
	}

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	assert(sum == (uint64_t)opt_reps * num_threads);
}
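
/*
 * Worker thread for the counter increment test: performs reps per-cpu
 * increments with rseq_addv(), retrying whenever the rseq critical
 * section aborts.
 */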
void *test_percpu_inc_thread(void *arg)
{
	struct inc_thread_test_data *thread_data = arg;
	struct inc_test_data *data = thread_data->data;
	long long i, reps;

	if (!opt_disable_rseq && thread_data->reg &&
	    rseq_register_current_thread())
		abort();
	reps = thread_data->reps;
	for (i = 0; i < reps; i++) {
		int ret;

		do {
			int cpu;

			cpu = rseq_cpu_start();
			ret = rseq_addv(&data->c[cpu].count, 1, cpu);
		} while (rseq_unlikely(ret));
#ifndef BENCHMARK
		if (i != 0 && !(i % (reps / 10)))
			printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
#endif
	}
	printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
		       (int) gettid(), nr_abort, signals_delivered);
	if (!opt_disable_rseq && thread_data->reg &&
	    rseq_unregister_current_thread())
		abort();
	return NULL;
}

void test_percpu_inc(void)
{
	const int num_threads = opt_threads;
	int i, ret;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct inc_test_data data;
	struct inc_thread_test_data thread_data[num_threads];

	memset(&data, 0, sizeof(data));
	for (i = 0; i < num_threads; i++) {
		thread_data[i].reps = opt_reps;
		if (opt_disable_mod <= 0 || (i % opt_disable_mod))
			thread_data[i].reg = 1;
		else
			thread_data[i].reg = 0;
		thread_data[i].data = &data;
		ret = pthread_create(&test_threads[i], NULL,
				     test_percpu_inc_thread,
				     &thread_data[i]);
		if (ret) {
			errno = ret;
			perror("pthread_create");
			abort();
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_join(test_threads[i], NULL);
		if (ret) {
			errno = ret;
			perror("pthread_join");
			abort();
		}
	}

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	assert(sum == (uint64_t)opt_reps * num_threads);
}
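
/*
 * Push a node onto the list of the current cpu. Retries until the rseq
 * compare-and-store publishing the new head completes without abort.
 */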
void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = rseq_cpu_start();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}

/*
 * Unlike a traditional lock-less linked list, the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
					   int *_cpu)
{
	struct percpu_list_node *node = NULL;
	int cpu;

	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		off_t offset;
		int ret;

		cpu = rseq_cpu_start();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			node = head;
			break;
		}
		if (ret > 0)
			break;
		/* Retry if rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
	return node;
}

/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}
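
/*
 * Worker thread for the list test: repeatedly pops a node from the
 * current cpu's list and pushes it back, optionally yielding in
 * between to encourage migration between cpus.
 */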
void *test_percpu_list_thread(void *arg)
{
	long long i, reps;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (!opt_disable_rseq && rseq_register_current_thread())
		abort();

	reps = opt_reps;
	for (i = 0; i < reps; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		if (opt_yield)
			sched_yield();  /* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
		       (int) gettid(), nr_abort, signals_delivered);
	if (!opt_disable_rseq && rseq_unregister_current_thread())
		abort();

	return NULL;
}

/* Simultaneous modification to a per-cpu linked list from many threads. */
void test_percpu_list(void)
{
	const int num_threads = opt_threads;
	int i, j, ret;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[num_threads];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(&test_threads[i], NULL,
				     test_percpu_list_thread, &list);
		if (ret) {
			errno = ret;
			perror("pthread_create");
			abort();
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_join(test_threads[i], NULL);
		if (ret) {
			errno = ret;
			perror("pthread_join");
			abort();
		}
	}

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (!CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}
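
/*
 * Push a node pointer onto the current cpu's buffer. The speculative
 * store of the pointer and the final store of the updated offset are
 * committed together within one rseq critical section (using a release
 * store when -M is given). Returns false if the buffer is full.
 */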
bool this_cpu_buffer_push(struct percpu_buffer *buffer,
			  struct percpu_buffer_node *node,
			  int *_cpu)
{
	bool result = false;
	int cpu;

	for (;;) {
		intptr_t *targetptr_spec, newval_spec;
		intptr_t *targetptr_final, newval_final;
		intptr_t offset;
		int ret;

		cpu = rseq_cpu_start();
		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
		if (offset == buffer->c[cpu].buflen)
			break;
		newval_spec = (intptr_t)node;
		targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset];
		newval_final = offset + 1;
		targetptr_final = &buffer->c[cpu].offset;
		if (opt_mb)
			ret = rseq_cmpeqv_trystorev_storev_release(
				targetptr_final, offset, targetptr_spec,
				newval_spec, newval_final, cpu);
		else
			ret = rseq_cmpeqv_trystorev_storev(targetptr_final,
				offset, targetptr_spec, newval_spec,
				newval_final, cpu);
		if (rseq_likely(!ret)) {
			result = true;
			break;
		}
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
	return result;
}
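
/*
 * Pop the most recently pushed node from the current cpu's buffer, or
 * return NULL if the buffer is empty. The double compare re-checks both
 * the offset and the array slot before storing the decremented offset.
 */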
struct percpu_buffer_node *this_cpu_buffer_pop(struct percpu_buffer *buffer,
					       int *_cpu)
{
	struct percpu_buffer_node *head;
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval;
		intptr_t offset;
		int ret;

		cpu = rseq_cpu_start();
		/* Load offset with single-copy atomicity. */
		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
		if (offset == 0) {
			head = NULL;
			break;
		}
		head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]);
		newval = offset - 1;
		targetptr = (intptr_t *)&buffer->c[cpu].offset;
		ret = rseq_cmpeqv_cmpeqv_storev(targetptr, offset,
			(intptr_t *)&buffer->c[cpu].array[offset - 1],
			(intptr_t)head, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
	return head;
}

/*
 * __percpu_buffer_pop is not safe against concurrent accesses. Should
 * only be used on buffers that are not concurrently modified.
 */
struct percpu_buffer_node *__percpu_buffer_pop(struct percpu_buffer *buffer,
					       int cpu)
{
	struct percpu_buffer_node *head;
	intptr_t offset;

	offset = buffer->c[cpu].offset;
	if (offset == 0)
		return NULL;
	head = buffer->c[cpu].array[offset - 1];
	buffer->c[cpu].offset = offset - 1;
	return head;
}
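
/*
 * Worker thread for the buffer test: pops a node from the current cpu's
 * buffer and pushes it back. A failed push means the buffer was sized
 * too small, which is treated as a fatal error.
 */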
void *test_percpu_buffer_thread(void *arg)
{
	long long i, reps;
	struct percpu_buffer *buffer = (struct percpu_buffer *)arg;

	if (!opt_disable_rseq && rseq_register_current_thread())
		abort();

	reps = opt_reps;
	for (i = 0; i < reps; i++) {
		struct percpu_buffer_node *node;

		node = this_cpu_buffer_pop(buffer, NULL);
		if (opt_yield)
			sched_yield();  /* encourage shuffling */
		if (node) {
			if (!this_cpu_buffer_push(buffer, node, NULL)) {
				/* Should increase buffer size. */
				abort();
			}
		}
	}

	printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
		       (int) gettid(), nr_abort, signals_delivered);
	if (!opt_disable_rseq && rseq_unregister_current_thread())
		abort();

	return NULL;
}

/* Simultaneous modification to a per-cpu buffer from many threads. */
void test_percpu_buffer(void)
{
	const int num_threads = opt_threads;
	int i, j, ret;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_buffer buffer;
	pthread_t test_threads[num_threads];
	cpu_set_t allowed_cpus;

	memset(&buffer, 0, sizeof(buffer));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, &allowed_cpus))
			continue;
		/* Worst case is every item in same CPU. */
		buffer.c[i].array =
			malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE *
			       BUFFER_ITEM_PER_CPU);
		assert(buffer.c[i].array);
		buffer.c[i].buflen = CPU_SETSIZE * BUFFER_ITEM_PER_CPU;
		for (j = 1; j <= BUFFER_ITEM_PER_CPU; j++) {
			struct percpu_buffer_node *node;

			expected_sum += j;

			/*
			 * We could theoretically put the word-sized
			 * "data" directly in the buffer. However, we
			 * want to model objects that would not fit
			 * within a single word, so allocate an object
			 * for each node.
			 */
			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			buffer.c[i].array[j - 1] = node;
			buffer.c[i].offset++;
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(&test_threads[i], NULL,
				     test_percpu_buffer_thread, &buffer);
		if (ret) {
			errno = ret;
			perror("pthread_create");
			abort();
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_join(test_threads[i], NULL);
		if (ret) {
			errno = ret;
			perror("pthread_join");
			abort();
		}
	}

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_buffer_node *node;

		if (!CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_buffer_pop(&buffer, i))) {
			sum += node->data;
			free(node);
		}
		free(buffer.c[i].array);
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}
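
/*
 * Push an item by value onto the current cpu's memcpy buffer: the item
 * is copied into the array slot and the offset updated within one rseq
 * critical section (using a release store when -M is given). Returns
 * false if the buffer is full.
 */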
bool this_cpu_memcpy_buffer_push(struct percpu_memcpy_buffer *buffer,
				 struct percpu_memcpy_buffer_node item,
				 int *_cpu)
{
	bool result = false;
	int cpu;

	for (;;) {
		intptr_t *targetptr_final, newval_final, offset;
		char *destptr, *srcptr;
		size_t copylen;
		int ret;

		cpu = rseq_cpu_start();
		/* Load offset with single-copy atomicity. */
		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
		if (offset == buffer->c[cpu].buflen)
			break;
		destptr = (char *)&buffer->c[cpu].array[offset];
		srcptr = (char *)&item;
		/* copylen must be <= 4kB. */
		copylen = sizeof(item);
		newval_final = offset + 1;
		targetptr_final = &buffer->c[cpu].offset;
		if (opt_mb)
			ret = rseq_cmpeqv_trymemcpy_storev_release(
				targetptr_final, offset,
				destptr, srcptr, copylen,
				newval_final, cpu);
		else
			ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
				offset, destptr, srcptr, copylen,
				newval_final, cpu);
		if (rseq_likely(!ret)) {
			result = true;
			break;
		}
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
	return result;
}
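
/*
 * Pop the most recently pushed item from the current cpu's memcpy
 * buffer by copying it out into *item. Returns false if the buffer is
 * empty.
 */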
bool this_cpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
				struct percpu_memcpy_buffer_node *item,
				int *_cpu)
{
	bool result = false;
	int cpu;

	for (;;) {
		intptr_t *targetptr_final, newval_final, offset;
		char *destptr, *srcptr;
		size_t copylen;
		int ret;

		cpu = rseq_cpu_start();
		/* Load offset with single-copy atomicity. */
		offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
		if (offset == 0)
			break;
		destptr = (char *)item;
		srcptr = (char *)&buffer->c[cpu].array[offset - 1];
		/* copylen must be <= 4kB. */
		copylen = sizeof(*item);
		newval_final = offset - 1;
		targetptr_final = &buffer->c[cpu].offset;
		ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
			offset, destptr, srcptr, copylen,
			newval_final, cpu);
		if (rseq_likely(!ret)) {
			result = true;
			break;
		}
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
	return result;
}

/*
 * __percpu_memcpy_buffer_pop is not safe against concurrent accesses. Should
 * only be used on buffers that are not concurrently modified.
 */
bool __percpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
				struct percpu_memcpy_buffer_node *item,
				int cpu)
{
	intptr_t offset;

	offset = buffer->c[cpu].offset;
	if (offset == 0)
		return false;
	memcpy(item, &buffer->c[cpu].array[offset - 1], sizeof(*item));
	buffer->c[cpu].offset = offset - 1;
	return true;
}
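
/*
 * Worker thread for the memcpy buffer test: pops an item by value from
 * the current cpu's buffer and pushes it back, optionally yielding in
 * between.
 */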
void *test_percpu_memcpy_buffer_thread(void *arg)
{
	long long i, reps;
	struct percpu_memcpy_buffer *buffer = (struct percpu_memcpy_buffer *)arg;

	if (!opt_disable_rseq && rseq_register_current_thread())
		abort();

	reps = opt_reps;
	for (i = 0; i < reps; i++) {
		struct percpu_memcpy_buffer_node item;
		bool result;

		result = this_cpu_memcpy_buffer_pop(buffer, &item, NULL);
		if (opt_yield)
			sched_yield();  /* encourage shuffling */
		if (result) {
			if (!this_cpu_memcpy_buffer_push(buffer, item, NULL)) {
				/* Should increase buffer size. */
				abort();
			}
		}
	}

	printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
		       (int) gettid(), nr_abort, signals_delivered);
	if (!opt_disable_rseq && rseq_unregister_current_thread())
		abort();

	return NULL;
}

/* Simultaneous modification to a per-cpu buffer from many threads. */
void test_percpu_memcpy_buffer(void)
{
	const int num_threads = opt_threads;
	int i, j, ret;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_memcpy_buffer buffer;
	pthread_t test_threads[num_threads];
	cpu_set_t allowed_cpus;

	memset(&buffer, 0, sizeof(buffer));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, &allowed_cpus))
			continue;
		/* Worst case is every item in same CPU. */
		buffer.c[i].array =
			malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE *
			       MEMCPY_BUFFER_ITEM_PER_CPU);
		assert(buffer.c[i].array);
		buffer.c[i].buflen = CPU_SETSIZE * MEMCPY_BUFFER_ITEM_PER_CPU;
		for (j = 1; j <= MEMCPY_BUFFER_ITEM_PER_CPU; j++) {
			expected_sum += 2 * j + 1;

			/*
			 * We could theoretically put the word-sized
			 * "data" directly in the buffer. However, we
			 * want to model objects that would not fit
			 * within a single word, so allocate an object
			 * for each node.
			 */
			buffer.c[i].array[j - 1].data1 = j;
			buffer.c[i].array[j - 1].data2 = j + 1;
			buffer.c[i].offset++;
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_create(&test_threads[i], NULL,
				     test_percpu_memcpy_buffer_thread,
				     &buffer);
		if (ret) {
			errno = ret;
			perror("pthread_create");
			abort();
		}
	}

	for (i = 0; i < num_threads; i++) {
		ret = pthread_join(test_threads[i], NULL);
		if (ret) {
			errno = ret;
			perror("pthread_join");
			abort();
		}
	}

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_memcpy_buffer_node item;

		if (!CPU_ISSET(i, &allowed_cpus))
			continue;

		while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {
			sum += item.data1;
			sum += item.data2;
		}
		free(buffer.c[i].array);
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}
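
/*
 * SIGUSR1 handler used by the -k option: counts signals delivered to
 * the current thread while the tests are running.
 */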
static void test_signal_interrupt_handler(int signo)
{
	signals_delivered++;
}
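
/* Install the SIGUSR1 handler used for signal delivery injection. */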
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	ret = sigemptyset(&sigset);
	if (ret < 0) {
		perror("sigemptyset");
		return ret;
	}

	sa.sa_handler = test_signal_interrupt_handler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	ret = sigaction(SIGUSR1, &sa, NULL);
	if (ret < 0) {
		perror("sigaction");
		return ret;
	}

	printf_verbose("Signal handler set for SIGUSR1\n");

	return ret;
}

static void show_usage(int argc, char **argv)
{
	printf("Usage : %s <OPTIONS>\n",
	       argv[0]);
	printf("OPTIONS:\n");
	printf("	[-1 loops] Number of loops for delay injection 1\n");
	printf("	[-2 loops] Number of loops for delay injection 2\n");
	printf("	[-3 loops] Number of loops for delay injection 3\n");
	printf("	[-4 loops] Number of loops for delay injection 4\n");
	printf("	[-5 loops] Number of loops for delay injection 5\n");
	printf("	[-6 loops] Number of loops for delay injection 6\n");
	printf("	[-7 loops] Number of loops for delay injection 7 (-1 to enable -m)\n");
	printf("	[-8 loops] Number of loops for delay injection 8 (-1 to enable -m)\n");
	printf("	[-9 loops] Number of loops for delay injection 9 (-1 to enable -m)\n");
	printf("	[-m N] Yield/sleep/kill every modulo N (default 0: disabled) (>= 0)\n");
	printf("	[-y] Yield\n");
	printf("	[-k] Kill thread with signal\n");
	printf("	[-s S] S: =0: disabled (default), >0: sleep time (ms)\n");
	printf("	[-t N] Number of threads (default 200)\n");
	printf("	[-r N] Number of repetitions per thread (default 5000)\n");
	printf("	[-d] Disable rseq system call (no initialization)\n");
	printf("	[-D M] Disable rseq for each M threads\n");
	printf("	[-T test] Choose test: (s)pinlock, (l)ist, (b)uffer, (m)emcpy, (i)ncrement\n");
	printf("	[-M] Push into buffer and memcpy buffer with memory barriers.\n");
	printf("	[-v] Verbose output.\n");
	printf("	[-h] Show this help.\n");
	printf("\n");
}
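
/*
 * Parse command-line options, install the SIGUSR1 handler, register the
 * main thread with rseq (unless -d), and run the selected test.
 */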
int main(int argc, char **argv)
{
	int i;

	for (i = 1; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
		case '1':
		case '2':
		case '3':
		case '4':
		case '5':
		case '6':
		case '7':
		case '8':
		case '9':
			if (argc < i + 2) {
				show_usage(argc, argv);
				goto error;
			}
			loop_cnt[argv[i][1] - '0'] = atol(argv[i + 1]);
			i++;
			break;
		case 'm':
			if (argc < i + 2) {
				show_usage(argc, argv);
				goto error;
			}
			opt_modulo = atol(argv[i + 1]);
			if (opt_modulo < 0) {
				show_usage(argc, argv);
				goto error;
			}
			i++;
			break;
		case 's':
			if (argc < i + 2) {
				show_usage(argc, argv);
				goto error;
			}
			opt_sleep = atol(argv[i + 1]);
			if (opt_sleep < 0) {
				show_usage(argc, argv);
				goto error;
			}
			i++;
			break;
		case 'y':
			opt_yield = 1;
			break;
		case 'k':
			opt_signal = 1;
			break;
		case 'd':
			opt_disable_rseq = 1;
			break;
		case 'D':
			if (argc < i + 2) {
				show_usage(argc, argv);
				goto error;
			}
			opt_disable_mod = atol(argv[i + 1]);
			if (opt_disable_mod < 0) {
				show_usage(argc, argv);
				goto error;
			}
			i++;
			break;
		case 't':
			if (argc < i + 2) {
				show_usage(argc, argv);
				goto error;
			}
			opt_threads = atol(argv[i + 1]);
			if (opt_threads < 0) {
				show_usage(argc, argv);
				goto error;
			}
			i++;
			break;
		case 'r':
			if (argc < i + 2) {
				show_usage(argc, argv);
				goto error;
			}
			opt_reps = atoll(argv[i + 1]);
			if (opt_reps < 0) {
				show_usage(argc, argv);
				goto error;
			}
			i++;
			break;
		case 'h':
			show_usage(argc, argv);
			goto end;
		case 'T':
			if (argc < i + 2) {
				show_usage(argc, argv);
				goto error;
			}
			opt_test = *argv[i + 1];
			switch (opt_test) {
			case 's':
			case 'l':
			case 'i':
			case 'b':
			case 'm':
				break;
			default:
				show_usage(argc, argv);
				goto error;
			}
			i++;
			break;
		case 'v':
			verbose = 1;
			break;
		case 'M':
			opt_mb = 1;
			break;
		default:
			show_usage(argc, argv);
			goto error;
		}
	}
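
	/*
	 * Mirror the parsed loop counts into the asm-visible variables
	 * consumed by the i386/x86-64 delay-injection code.
	 */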
	loop_cnt_1 = loop_cnt[1];
	loop_cnt_2 = loop_cnt[2];
	loop_cnt_3 = loop_cnt[3];
	loop_cnt_4 = loop_cnt[4];
	loop_cnt_5 = loop_cnt[5];
	loop_cnt_6 = loop_cnt[6];

	if (set_signal_handler())
		goto error;

	if (!opt_disable_rseq && rseq_register_current_thread())
		goto error;
	switch (opt_test) {
	case 's':
		printf_verbose("spinlock\n");
		test_percpu_spinlock();
		break;
	case 'l':
		printf_verbose("linked list\n");
		test_percpu_list();
		break;
	case 'b':
		printf_verbose("buffer\n");
		test_percpu_buffer();
		break;
	case 'm':
		printf_verbose("memcpy buffer\n");
		test_percpu_memcpy_buffer();
		break;
	case 'i':
		printf_verbose("counter increment\n");
		test_percpu_inc();
		break;
	}
	if (!opt_disable_rseq && rseq_unregister_current_thread())
		abort();
end:
	return 0;

error:
	return -1;
}