jump_label.c

/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>

#ifdef HAVE_JUMP_LABEL
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it is
 * OK to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_enable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (!count)
		static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (count)
		static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);
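
/*
 * Example (hypothetical caller, not part of this file): because
 * static_key_enable()/static_key_disable() are idempotent on top of the
 * inc/dec refcount, a boolean-style toggle needs no state tracking:
 *
 *	static struct static_key my_feature = STATIC_KEY_INIT_FALSE;
 *
 *	static void set_my_feature(bool on)
 *	{
 *		if (on)
 *			static_key_enable(&my_feature);
 *		else
 *			static_key_disable(&my_feature);
 *	}
 */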
void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
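
/*
 * A worked example of the scheme above (illustrative, not part of the
 * original source): starting from key->enabled == 0, the first
 * incrementer fails the v > 0 fast-path loop, takes jump_label_mutex
 * and drives enabled through 0 -> -1 -> 1 around jump_label_update().
 * A concurrent incrementer that reads -1 also fails the loop, blocks
 * on the mutex, then sees enabled == 1 and simply atomic_inc()s to 2.
 * static_key_count() maps the transient -1 to 1, so the update sees
 * the key as enabled while patching is still in flight.
 */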
static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);

	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
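
/*
 * Example (hypothetical caller, not part of this file): rate limiting
 * batches the expensive disable-side patching. A user toggling a key
 * at high frequency might set up:
 *
 *	static struct static_key_deferred my_key;	// assumed initialized elsewhere
 *
 *	jump_label_rate_limit(&my_key, HZ);		// defer decs by ~1s
 *
 * after which static_key_slow_dec_deferred(&my_key) schedules the real
 * decrement via jump_label_update_timeout() instead of patching inline.
 */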
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
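
/*
 * Worked example (illustrative, assuming JUMP_LABEL_NOP_SIZE == 5): an
 * entry with code == 0x1000 patches bytes [0x1000, 0x1005). A query
 * with start == 0x1003 and end == 0x1010 conflicts, since
 * 0x1000 <= 0x1010 and 0x1000 + 5 > 0x1003; a query starting at 0x1005
 * does not, because the patched bytes end just before it. Note that
 * @end is treated inclusively by the first comparison.
 */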
static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction, so we use an
 * access function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
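
/*
 * Illustration (not from the original source) of the encoding the
 * setter above preserves, assuming JUMP_TYPE_TRUE == 1UL and
 * JUMP_TYPE_LINKED == 2UL make up JUMP_TYPE_MASK:
 *
 *	key->type: [ entries or static_key_mod pointer | L | T ]
 *	           bit 1 (L): linked-list form is in use
 *	           bit 0 (T): initial branch direction is true
 *
 * The pointed-to structures are word aligned, which is what frees the
 * two low bits for use as flags.
 */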
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
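
/*
 * Truth table for the XOR above (illustrative; the authoritative
 * explanation is in linux/jump_label.h). A JUMP_LABEL_JMP results
 * whenever the desired state differs from the compiled-in branch:
 *
 *	enabled	branch	result
 *	0	0	JUMP_LABEL_NOP
 *	0	1	JUMP_LABEL_JMP
 *	1	0	JUMP_LABEL_JMP
 *	1	1	JUMP_LABEL_NOP
 */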
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
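
/*
 * Illustration (not from the original source): once a key is used by
 * modules other than its owner, key->next points at a chain of these
 * nodes, newest user first, with the owner's original entries at the
 * tail (mod == NULL there when the key lives in vmlinux):
 *
 *	key (JUMP_TYPE_LINKED set)
 *	  -> { mod = modB,          entries = modB's table }
 *	  -> { mod = modA,          entries = modA's table }
 *	  -> { mod = owner or NULL, entries = original table }
 */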
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}
/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}
static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text addresses between @start and @end
 * overlap with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
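
/*
 * Example (hypothetical caller, not part of this file): a text patcher
 * should refuse to rewrite a range that a jump entry may also patch.
 * With jump_label_mutex held via jump_label_lock():
 *
 *	if (jump_label_text_reserved(addr, addr + len))
 *		return -EBUSY;		// range owned by a jump label site
 *	patch_my_text(addr, len);	// patch_my_text() is hypothetical
 */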
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */