i915_sw_fence.c 14 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
  28. static int __i915_sw_fence_call
  29. fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
  30. {
  31. switch (state) {
  32. case FENCE_COMPLETE:
  33. break;
  34. case FENCE_FREE:
  35. /* Leave the fence for the caller to free it after testing */
  36. break;
  37. }
  38. return NOTIFY_DONE;
  39. }
  40. static struct i915_sw_fence *alloc_fence(void)
  41. {
  42. struct i915_sw_fence *fence;
  43. fence = kmalloc(sizeof(*fence), GFP_KERNEL);
  44. if (!fence)
  45. return NULL;
  46. i915_sw_fence_init(fence, fence_notify);
  47. return fence;
  48. }
/* Tear down a fence created by alloc_fence() and release its memory. */
static void free_fence(struct i915_sw_fence *fence)
{
	i915_sw_fence_fini(fence);
	kfree(fence);
}
  54. static int __test_self(struct i915_sw_fence *fence)
  55. {
  56. if (i915_sw_fence_done(fence))
  57. return -EINVAL;
  58. i915_sw_fence_commit(fence);
  59. if (!i915_sw_fence_done(fence))
  60. return -EINVAL;
  61. i915_sw_fence_wait(fence);
  62. if (!i915_sw_fence_done(fence))
  63. return -EINVAL;
  64. return 0;
  65. }
  66. static int test_self(void *arg)
  67. {
  68. struct i915_sw_fence *fence;
  69. int ret;
  70. /* Test i915_sw_fence signaling and completion testing */
  71. fence = alloc_fence();
  72. if (!fence)
  73. return -ENOMEM;
  74. ret = __test_self(fence);
  75. free_fence(fence);
  76. return ret;
  77. }
/*
 * Test detection of cycles within the i915_sw_fence graphs: every await
 * that would close a loop must be rejected with -EINVAL, while acyclic
 * awaits must still succeed, and afterwards the whole DAG must signal
 * once its root is committed.
 */
static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test detection of cycles within the i915_sw_fence graphs */
	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	/* A fence may never wait upon itself. */
	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	/* A -> B is valid, but the reverse edge would form a cycle. */
	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	/* Extending the chain to A -> B -> C must not be flagged... */
	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	/* ...but C -> B closes a one-deep loop... */
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	/* ...and C -> A closes a two-deep loop. */
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	/* A second acyclic edge, A -> C, must still be accepted. */
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

	/* Commit everything; the entire graph should now be signaled. */
	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
/*
 * Test i915_sw_fence (A) waiting on an event source (B): A must remain
 * pending after its own commit and only signal once B is committed.
 */
static int test_AB(void *arg)
{
	struct i915_sw_fence *A, *B;
	int ret;

	/* Test i915_sw_fence (A) waiting on an event source (B) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	/* A positive return indicates a wait was installed on a pending B. */
	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_B;
	if (ret == 0) {
		pr_err("Incorrectly reported fence A was complete before await\n");
		ret = -EINVAL;
		goto err_B;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A))	/* A is still gated by B */
		goto err_B;

	i915_sw_fence_commit(B);
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B is not done\n");
		goto err_B;
	}
	/* Completing B should have released A as well. */
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A is not done\n");
		goto err_B;
	}

	ret = 0;
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
/*
 * Test a chain of fences, A waits on B who waits on C: completion must
 * ripple from the tail (C) back up through B to A, and nothing may
 * signal before C is committed.
 */
static int test_ABC(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test a chain of fences, A waits on B who waits on C */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}
	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	/* A positive return indicates a wait was installed on a pending fence. */
	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await\n");
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_C;
	}

	i915_sw_fence_commit(B);
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		goto err_C;
	}

	/* B is committed but still gated by C, so A must remain pending. */
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early (after signaling B)\n");
		goto err_C;
	}

	/* Completing C releases B, which in turn releases A. */
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
/*
 * Test multiple fences (A, B) waiting on a single event (C): neither
 * waiter may signal before C, and both must signal once C is committed.
 */
static int test_AB_C(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test multiple fences (AB) waiting on a single event (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}
	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	/* A positive return indicates a wait was installed on a pending C. */
	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	/* Both waiters committed; both remain gated on C. */
	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	ret = 0;
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		ret = -EINVAL;
	}

	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		ret = -EINVAL;
	}

	/* Completing C should release both A and B. */
	i915_sw_fence_commit(C);
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
/*
 * Test multiple event sources (A, B) for a single fence (C): C must
 * not signal until both A and B have been committed.
 */
static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test multiple event sources (A,B) for a single fence (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}
	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	/* A positive return indicates a wait was installed on a pending fence. */
	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = 0;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C))	/* still waiting on both A and B */
		ret = -EINVAL;

	/* Completing both sources should finally release C. */
	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
/*
 * Test a long chain of fences, where fences[i] waits on fences[i - 1]:
 * nothing may signal until the head (fences[0]) is committed, after
 * which completion must propagate down the whole chain.
 */
static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	/* Test a long chain of fences */
	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			nfences = i;	/* only free what was allocated */
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				nfences = i + 1;	/* fences[i] was allocated */
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

	/*
	 * Every committed fence is still gated by its predecessor; the
	 * loop counts down to 1, skipping index 0 (the as yet uncommitted
	 * head of the chain).
	 */
	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}

	/* Signal the head; completion should ripple down the entire chain. */
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}
/*
 * Shared state for the interprocess-signaling test: a worker waits on
 * @in, posts @value, then commits @out.
 */
struct task_ipc {
	struct work_struct work;
	struct completion started;	/* fired once the worker is running */
	struct i915_sw_fence *in, *out;
	int value;			/* set to 1 by the worker after @in signals */
};
/* Worker side of test_ipc(): wait for @in, post the value, signal @out. */
static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	complete(&ipc->started);

	i915_sw_fence_wait(ipc->in);
	/* The store must be visible before ipc->out is signaled. */
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}
/* Test use of i915_sw_fence as an interprocess signaling mechanism */
static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	int ret = 0;

	ipc.in = alloc_fence();
	if (!ipc.in)
		return -ENOMEM;
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* use a completion to avoid chicken-and-egg testing */
	init_completion(&ipc.started);

	ipc.value = 0;
	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	schedule_work(&ipc.work);

	wait_for_completion(&ipc.started);

	/*
	 * Give the worker a chance to run: it must still be blocked on
	 * ipc.in and so must not have posted the value yet. (Heuristic:
	 * a starved worker could make this check pass vacuously.)
	 */
	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	/* Release the worker, then wait for it to signal back via ipc.out. */
	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
	return ret;
}
/*
 * Check the timed_fence: an already-expired target must be signaled on
 * creation, and future targets must not fire before their deadline.
 */
static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

	/* A target of "now" must be signaled immediately. */
	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	timed_fence_fini(&tf);

	/* Sweep a range of future expirations (bounded by the selftest timeout). */
	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}

		i915_sw_fence_wait(&tf.fence);
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		/* The fence must not have fired ahead of its deadline. */
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}

		timed_fence_fini(&tf);
	}

	return 0;

err:
	timed_fence_fini(&tf);
	return -EINVAL;
}
  511. static const char *mock_name(struct dma_fence *fence)
  512. {
  513. return "mock";
  514. }
/* Minimal dma_fence_ops: only the mandatory name callbacks are provided. */
static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

/* Single lock shared by every mock dma_fence created below. */
static DEFINE_SPINLOCK(mock_fence_lock);
  520. static struct dma_fence *alloc_dma_fence(void)
  521. {
  522. struct dma_fence *dma;
  523. dma = kmalloc(sizeof(*dma), GFP_KERNEL);
  524. if (dma)
  525. dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);
  526. return dma;
  527. }
/*
 * Wrap @dma inside a new, committed i915_sw_fence. @delay is forwarded
 * as the timeout to i915_sw_fence_await_dma_fence(); a delay of 0
 * appears to disable the timeout (see its use in test_dma_fence()).
 * Returns the fence or an ERR_PTR on failure.
 */
static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

	/*
	 * NOTE(review): the fence is committed even when the await failed,
	 * before being freed — presumably required for a clean teardown;
	 * confirm against i915_sw_fence_fini() expectations.
	 */
	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}
/*
 * Exercise i915_sw_fence_await_dma_fence() with two wrappers around one
 * dma_fence: @timeout (with a timeout, must eventually expire on its
 * own) and @not (no timeout, must only signal when the dma_fence does).
 */
static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);	/* expires after @delay */
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);	/* no timeout; tracks dma only */
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* We round the timeout for the fence up to the next second */
	end = round_jiffies_up(jiffies + delay);

	/* Sleep for part of the timeout; neither fence may fire yet. */
	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		/* Overslept past the deadline: the "too early" checks
		 * are meaningless now, so skip straight to signaling.
		 */
		pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

	/* Only the timeout fence should expire of its own accord... */
	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	/* ...while the no-timeout fence stays pending. */
	if (i915_sw_fence_done(not)) {
		pr_err("No timeout fence signaled!\n");
		goto err;
	}

skip:
	/* Once the dma_fence fires, both wrappers must be signaled. */
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences unsignaled\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);
	return 0;

err:
	/* Signal the dma_fence so the wrappers can be torn down safely. */
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}
/*
 * Entry point for the mock (no hardware required) i915_sw_fence
 * selftests; runs each subtest in turn via the selftest framework.
 */
int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};

	return i915_subtests(tests, NULL);
}