/*
 * fs/nfs/nfs4session.c
 *
 * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "nfs4session.h"
#include "callback.h"

#define NFSDBG_FACILITY		NFSDBG_STATE

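/*
 * nfs4_init_slot_table - initialise a slot table's lock, wait queues and
 * drain completion. Slots themselves are allocated separately, on demand.
 */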
static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
{
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
	init_waitqueue_head(&tbl->slot_waitq);
	init_completion(&tbl->complete);
}

/*
 * nfs4_shrink_slot_table - free retired slots from the slot table
 */
static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
{
	struct nfs4_slot **p;

	if (newsize >= tbl->max_slots)
		return;

	p = &tbl->slots;
	while (newsize--)
		p = &(*p)->next;

	while (*p) {
		struct nfs4_slot *slot = *p;

		*p = slot->next;
		kfree(slot);
		tbl->max_slots--;
	}
}

/**
 * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete
 * @tbl: controlling slot table
 *
 */
void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
{
	if (nfs4_slot_tbl_draining(tbl))
		complete(&tbl->complete);
}

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		u32 new_max = find_last_bit(tbl->used_slots, slotid);
		if (new_max < slotid)
			tbl->highest_used_slotid = new_max;
		else {
			tbl->highest_used_slotid = NFS4_NO_SLOT;
			nfs4_slot_tbl_drain_complete(tbl);
		}
	}
	dprintk("%s: slotid %u highest_used_slotid %u\n", __func__,
		slotid, tbl->highest_used_slotid);
}

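/*
 * nfs4_new_slot - allocate and initialise a single slot, recording its owning
 * table, slot number and initial sequence id.
 */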
static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot *slot;

	slot = kzalloc(sizeof(*slot), gfp_mask);
	if (slot) {
		slot->table = tbl;
		slot->slot_nr = slotid;
		slot->seq_nr = seq_init;
	}
	return slot;
}

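/*
 * nfs4_find_or_create_slot - walk the slot list looking for @slotid,
 * appending new slots to the list as needed. Returns the slot on success
 * or ERR_PTR(-ENOMEM) if a new slot could not be allocated.
 */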
static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot **p, *slot;

	p = &tbl->slots;
	for (;;) {
		if (*p == NULL) {
			*p = nfs4_new_slot(tbl, tbl->max_slots,
					seq_init, gfp_mask);
			if (*p == NULL)
				break;
			tbl->max_slots++;
		}
		slot = *p;
		if (slot->slot_nr == slotid)
			return slot;
		p = &slot->next;
	}
	return ERR_PTR(-ENOMEM);
}

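/*
 * nfs4_lock_slot - mark a slot as in use, update highest_used_slotid and
 * stamp the slot with the current table generation.
 * Caller must hold tbl->slot_tbl_lock.
 */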
static void nfs4_lock_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
	    tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	slot->generation = tbl->generation;
}

/*
 * nfs4_try_to_lock_slot - Given a slot try to allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	if (nfs4_test_locked_slot(tbl, slot->slot_nr))
		return false;
	nfs4_lock_slot(tbl, slot);
	return true;
}

/*
 * nfs4_lookup_slot - Find a slot but don't allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	if (slotid <= tbl->max_slotid)
		return nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
	return ERR_PTR(-E2BIG);
}

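/*
 * nfs4_slot_get_seqid - look up the current sequence id for @slotid.
 * Returns 0 and fills in @seq_nr on success, otherwise a negative errno.
 */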
static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
		u32 *seq_nr)
	__must_hold(&tbl->slot_tbl_lock)
{
	struct nfs4_slot *slot;

	slot = nfs4_lookup_slot(tbl, slotid);
	if (IS_ERR(slot))
		return PTR_ERR(slot);
	*seq_nr = slot->seq_nr;
	return 0;
}

/*
 * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
 *
 * Given a slot table, slot id and sequence number, determine if the
 * RPC call in question is still in flight. This function is mainly
 * intended for use by the callback channel.
 */
static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr)
{
	u32 cur_seq;
	bool ret = false;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
	    cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
		ret = true;
	spin_unlock(&tbl->slot_tbl_lock);
	return ret;
}

/*
 * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
 *
 * Given a slot table, slot id and sequence number, wait until the
 * corresponding RPC call completes. This function is mainly
 * intended for use by the callback channel.
 */
int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr,
		unsigned long timeout)
{
	if (wait_event_timeout(tbl->slot_waitq,
			!nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
			timeout) == 0)
		return -ETIMEDOUT;
	return 0;
}

/*
 * nfs4_alloc_slot - efficiently look for a free slot
 *
 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and set up the sequence operation args accordingly.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *ret = ERR_PTR(-EBUSY);
	u32 slotid;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slotid + 1);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
	if (slotid <= tbl->max_slotid) {
		ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
		if (!IS_ERR(ret))
			nfs4_lock_slot(tbl, ret);
	}
	dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		!IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT);
	return ret;
}

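/*
 * nfs4_grow_slot_table - extend the table so it holds at least @max_reqs
 * slots, initialising the sequence id of any newly created slot to @ivalue.
 */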
static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	if (max_reqs <= tbl->max_slots)
		return 0;
	if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)))
		return 0;
	return -ENOMEM;
}

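/*
 * nfs4_reset_slot_table - trim the table down to the server's highest slotid
 * and reset every remaining slot's sequence id to @ivalue.
 * Caller must hold tbl->slot_tbl_lock.
 */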
static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
		u32 server_highest_slotid,
		u32 ivalue)
{
	struct nfs4_slot **p;

	nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
	p = &tbl->slots;
	while (*p) {
		(*p)->seq_nr = ivalue;
		(*p)->interrupted = 0;
		p = &(*p)->next;
	}
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	tbl->target_highest_slotid = server_highest_slotid;
	tbl->server_highest_slotid = server_highest_slotid;
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	tbl->max_slotid = server_highest_slotid;
}

/*
 * (re)Initialise a slot table
 */
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	int ret;

	dprintk("--> %s: max_reqs=%u, tbl->max_slots %u\n", __func__,
		max_reqs, tbl->max_slots);

	if (max_reqs > NFS4_MAX_SLOT_TABLE)
		max_reqs = NFS4_MAX_SLOT_TABLE;

	ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
	if (ret)
		goto out;

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
	spin_unlock(&tbl->slot_tbl_lock);

	dprintk("%s: tbl=%p slots=%p max_slots=%u\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}

/*
 * nfs4_release_slot_table - release all slot table entries
 */
static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_shrink_slot_table(tbl, 0);
}

/**
 * nfs4_shutdown_slot_table - release resources attached to a slot table
 * @tbl: slot table to shut down
 *
 */
void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_release_slot_table(tbl);
	rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}

/**
 * nfs4_setup_slot_table - prepare a stand-alone slot table for use
 * @tbl: slot table to set up
 * @max_reqs: maximum number of requests allowed
 * @queue: name to give RPC wait queue
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs,
		const char *queue)
{
	nfs4_init_slot_table(tbl, queue);
	return nfs4_realloc_slot_table(tbl, max_reqs, 0);
}

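/*
 * nfs41_assign_slot - rpc_wake_up_first() callback that hands @pslot to a
 * waiting task by filling in its sequence args and result. Refuses the
 * assignment while the table is draining unless the request is privileged.
 */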
static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
{
	struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
	struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
	struct nfs4_slot *slot = pslot;
	struct nfs4_slot_table *tbl = slot->table;

	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		return false;
	slot->generation = tbl->generation;
	args->sa_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_slot = slot;
	res->sr_status_flags = 0;
	res->sr_status = 1;
	return true;
}

static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
		return true;
	return false;
}

bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (slot->slot_nr > tbl->max_slotid)
		return false;
	return __nfs41_wake_and_assign_slot(tbl, slot);
}

static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot = nfs4_alloc_slot(tbl);
	if (!IS_ERR(slot)) {
		bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
		if (ret)
			return ret;
		nfs4_free_slot(tbl, slot);
	}
	return false;
}

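/*
 * nfs41_wake_slot_table - keep allocating free slots and handing them to
 * waiting tasks until the table is exhausted or no waiter accepts one.
 */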
void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
{
	for (;;) {
		if (!nfs41_try_wake_next_slot_table_entry(tbl))
			break;
	}
}

#if defined(CONFIG_NFS_V4_1)

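/*
 * Clamp max_slotid to the server and target limits, then wake any tasks
 * that can now be given a slot. Caller holds tbl->slot_tbl_lock.
 */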
static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	u32 max_slotid;

	max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, target_highest_slotid);
	if (max_slotid > tbl->server_highest_slotid)
		max_slotid = tbl->server_highest_slotid;
	if (max_slotid > tbl->target_highest_slotid)
		max_slotid = tbl->target_highest_slotid;
	tbl->max_slotid = max_slotid;
	nfs41_wake_slot_table(tbl);
}

/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	if (tbl->target_highest_slotid == target_highest_slotid)
		return;
	tbl->target_highest_slotid = target_highest_slotid;
	tbl->generation++;
}

void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	spin_lock(&tbl->slot_tbl_lock);
	nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
		u32 highest_slotid)
{
	if (tbl->server_highest_slotid == highest_slotid)
		return;
	if (tbl->highest_used_slotid > highest_slotid)
		return;
	/* Deallocate slots */
	nfs4_shrink_slot_table(tbl, highest_slotid + 1);
	tbl->server_highest_slotid = highest_slotid;
}

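/*
 * Helpers for the outlier filter below: compute the half-difference between
 * two slotid samples (rounded away from zero), and compare signs.
 */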
static s32 nfs41_derivative_target_slotid(s32 s1, s32 s2)
{
	s1 -= s2;
	if (s1 == 0)
		return 0;
	if (s1 < 0)
		return (s1 - 1) >> 1;
	return (s1 + 1) >> 1;
}

static int nfs41_sign_s32(s32 s1)
{
	if (s1 > 0)
		return 1;
	if (s1 < 0)
		return -1;
	return 0;
}

static bool nfs41_same_sign_or_zero_s32(s32 s1, s32 s2)
{
	if (!s1 || !s2)
		return true;
	return nfs41_sign_s32(s1) == nfs41_sign_s32(s2);
}

/* Try to eliminate outliers by checking for sharp changes in the
 * derivatives and second derivatives
 */
static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl,
		u32 new_target)
{
	s32 d_target, d2_target;
	bool ret = true;

	d_target = nfs41_derivative_target_slotid(new_target,
			tbl->target_highest_slotid);
	d2_target = nfs41_derivative_target_slotid(d_target,
			tbl->d_target_highest_slotid);
	/* Is first derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid))
		ret = false;
	/* Is second derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid))
		ret = false;
	tbl->d_target_highest_slotid = d_target;
	tbl->d2_target_highest_slotid = d2_target;
	return ret;
}

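/*
 * nfs41_update_target_slotid - fold the server's latest highest_slotid and
 * target_highest_slotid hints from a SEQUENCE reply into the slot table.
 */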
void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot,
		struct nfs4_sequence_res *res)
{
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_is_outlier_target_slotid(tbl, res->sr_target_highest_slotid))
		nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid);
	if (tbl->generation == slot->generation)
		nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid);
	nfs41_set_max_slotid_locked(tbl, res->sr_target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
	nfs4_release_slot_table(&session->fc_slot_table);
	nfs4_release_slot_table(&session->bc_slot_table);
}

/*
 * Initialize or reset the forechannel and backchannel tables
 */
int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel */
	tbl = &ses->fc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
	if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
		return status;
	/* Back channel */
	tbl = &ses->bc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	if (status && tbl->slots == NULL)
		/* Fore and back channel share a connection so get
		 * both slot tables or neither */
		nfs4_release_session_slot_tables(ses);
	return status;
}

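/*
 * nfs4_alloc_session - allocate a session and initialise (but do not
 * populate) its fore and back channel slot tables.
 */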
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	nfs4_init_slot_table(&session->fc_slot_table, "ForeChannel Slot table");
	nfs4_init_slot_table(&session->bc_slot_table, "BackChannel Slot table");
	session->session_state = 1 << NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}

static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
{
	nfs4_shutdown_slot_table(&session->fc_slot_table);
	nfs4_shutdown_slot_table(&session->bc_slot_table);
}

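/*
 * nfs4_destroy_session - send DESTROY_SESSION, tear down the backchannel and
 * both slot tables, then free the session.
 */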
void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;
	struct rpc_cred *cred;

	cred = nfs4_get_clid_cred(session->clp);
	nfs4_proc_destroy_session(session, cred);
	if (cred)
		put_rpccred(cred);

	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_session_slot_tables(session);
	kfree(session);
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_client *clp)
{
	if (!nfs4_has_session(clp))
		return 0;

	clear_bit(NFS4_SESSION_INITING, &clp->cl_session->session_state);
	return nfs41_check_session_ready(clp);
}

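/*
 * nfs4_init_ds_session - data server variant of session initialisation:
 * inherit the MDS lease time rather than probing it, then check that the
 * session is ready and that the server actually has the DS role.
 */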
int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
		 * DS lease to be equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);

#endif	/* defined(CONFIG_NFS_V4_1) */