/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  Implementation of the NFSv4 state model.  For the time being,
 *  this is minimal, but will be made much more complex in a
 *  subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);

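/*
 * init_nfsv4_state(): clear the per-superblock NFSv4 state pointers.
 * destroy_nfsv4_state(): release the mount path and drop the superblock's
 * reference to its nfs4_client.
 */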
void
init_nfsv4_state(struct nfs_server *server)
{
        server->nfs4_state = NULL;
        INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
        if (server->mnt_path) {
                kfree(server->mnt_path);
                server->mnt_path = NULL;
        }
        if (server->nfs4_state) {
                nfs4_put_client(server->nfs4_state);
                server->nfs4_state = NULL;
        }
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        if (nfs_callback_up() < 0)
                return NULL;
        if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
                nfs_callback_down();
                return NULL;
        }
        memset(clp, 0, sizeof(*clp));
        memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
        init_rwsem(&clp->cl_sem);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_state_owners);
        INIT_LIST_HEAD(&clp->cl_unused);
        spin_lock_init(&clp->cl_lock);
        atomic_set(&clp->cl_count, 1);
        INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
        INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
        INIT_LIST_HEAD(&clp->cl_superblocks);
        init_waitqueue_head(&clp->cl_waitq);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
        clp->cl_rpcclient = ERR_PTR(-EINVAL);
        clp->cl_boot_time = CURRENT_TIME;
        clp->cl_state = 1 << NFS4CLNT_OK;
        return clp;
}

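/*
 * Free a client structure: drain the pool of unused state owners,
 * release the credential, idmap, RPC client and callback channel.
 */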
static void
nfs4_free_client(struct nfs4_client *clp)
{
        struct nfs4_state_owner *sp;

        while (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next,
                                struct nfs4_state_owner,
                                so_list);
                list_del(&sp->so_list);
                kfree(sp);
        }
        BUG_ON(!list_empty(&clp->cl_state_owners));
        if (clp->cl_cred)
                put_rpccred(clp->cl_cred);
        nfs_idmap_delete(clp);
        if (!IS_ERR(clp->cl_rpcclient))
                rpc_shutdown_client(clp->cl_rpcclient);
        kfree(clp);
        nfs_callback_down();
}

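/*
 * Look up an nfs4_client by server IP address.  __nfs4_find_client()
 * expects state_spinlock to be held and takes a reference on any match;
 * nfs4_find_client() is the locked wrapper.
 */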
static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
                if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
                        atomic_inc(&clp->cl_count);
                        return clp;
                }
        }
        return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        spin_lock(&state_spinlock);
        clp = __nfs4_find_client(addr);
        spin_unlock(&state_spinlock);
        return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
        struct nfs4_client *clp, *new = NULL;

        spin_lock(&state_spinlock);
        for (;;) {
                clp = __nfs4_find_client(addr);
                if (clp != NULL)
                        break;
                clp = new;
                if (clp != NULL) {
                        list_add(&clp->cl_servers, &nfs4_clientid_list);
                        new = NULL;
                        break;
                }
                spin_unlock(&state_spinlock);
                new = nfs4_alloc_client(addr);
                spin_lock(&state_spinlock);
                if (new == NULL)
                        break;
        }
        spin_unlock(&state_spinlock);
        if (new)
                nfs4_free_client(new);
        return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
        if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
                return;
        list_del(&clp->cl_servers);
        spin_unlock(&state_spinlock);
        BUG_ON(!list_empty(&clp->cl_superblocks));
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        nfs4_kill_renewd(clp);
        nfs4_free_client(clp);
}

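/*
 * Establish the clientid on the server: send SETCLIENTID and
 * SETCLIENTID_CONFIRM, then start the lease renewal timer.
 */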
static int __nfs4_init_client(struct nfs4_client *clp)
{
        int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
        return nfs4_map_errors(__nfs4_init_client(clp));
}

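/*
 * Hand out the next owner id for this client.
 * Callers hold clp->cl_lock.
 */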
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
        return clp->cl_lockowner_id++;
}

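/*
 * Try to recycle a state owner from the client's pool of unused ones.
 * The caller must be holding clp->cl_lock.
 */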
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp = NULL;

        if (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
                atomic_inc(&sp->so_count);
                sp->so_cred = cred;
                list_move(&sp->so_list, &clp->cl_state_owners);
                clp->cl_nunused--;
        }
        return sp;
}

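/*
 * Find an existing state owner for the given credential and take a
 * reference to it.  The caller must be holding clp->cl_lock.
 */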
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp, *res = NULL;

        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                if (sp->so_cred != cred)
                        continue;
                atomic_inc(&sp->so_count);
                /* Move to the head of the list */
                list_move(&sp->so_list, &clp->cl_state_owners);
                res = sp;
                break;
        }
        return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        init_MUTEX(&sp->so_sema);
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

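/*
 * Unhash a state owner so that it can no longer be found or reused;
 * used when the server reports that the owner's seqid is bad.
 */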
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;

        spin_lock(&clp->cl_lock);
        list_del_init(&sp->so_list);
        spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs4_client *clp = server->nfs4_state;
        struct nfs4_state_owner *sp, *new;

        get_rpccred(cred);
        new = nfs4_alloc_state_owner();
        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(clp, cred);
        if (sp == NULL)
                sp = nfs4_client_grab_unused(clp, cred);
        if (sp == NULL && new != NULL) {
                list_add(&new->so_list, &clp->cl_state_owners);
                new->so_client = clp;
                new->so_id = nfs4_alloc_lockowner_id(clp);
                new->so_cred = cred;
                sp = new;
                new = NULL;
        }
        spin_unlock(&clp->cl_lock);
        if (new)
                kfree(new);
        if (sp != NULL)
                return sp;
        put_rpccred(cred);
        return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
                goto out_free;
        if (list_empty(&sp->so_list))
                goto out_free;
        list_move(&sp->so_list, &clp->cl_unused);
        clp->cl_nunused++;
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        cred = NULL;
        return;
out_free:
        list_del(&sp->so_list);
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        kfree(sp);
}

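/*
 * Allocate a new open state and initialize it with the zero stateid.
 */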
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kmalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        state->state = 0;
        state->nreaders = 0;
        state->nwriters = 0;
        state->flags = 0;
        memset(state->stateid.data, 0, sizeof(state->stateid.data));
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        init_MUTEX(&state->lock_sema);
        spin_lock_init(&state->state_lock);
        return state;
}

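/*
 * Search the inode's open states for one matching the given credential
 * and open mode, and take a reference to it.  Called with
 * inode->i_lock held.
 */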
static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        mode &= (FMODE_READ|FMODE_WRITE);
        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner->so_cred != cred)
                        continue;
                if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
                        continue;
                if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
                        continue;
                if ((state->state & mode) != mode)
                        continue;
                atomic_inc(&state->count);
                if (mode & FMODE_READ)
                        state->nreaders++;
                if (mode & FMODE_WRITE)
                        state->nwriters++;
                return state;
        }
        return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                /* Is this in the process of being freed? */
                if (state->nreaders == 0 && state->nwriters == 0)
                        continue;
                if (state->owner == owner) {
                        atomic_inc(&state->count);
                        return state;
                }
        }
        return NULL;
}

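/*
 * Locked wrapper around __nfs4_find_state().
 */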
struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs4_state *state;

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state(inode, cred, mode);
        spin_unlock(&inode->i_lock);
        return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

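/*
 * Return the open state for this (inode, owner) pair, allocating and
 * hashing a new one if none exists yet.
 */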
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                /* Caller *must* be holding owner->so_sema */
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem and owner->so_sema!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
                return;
        if (!list_empty(&state->inode_states))
                list_del(&state->inode_states);
        spin_unlock(&inode->i_lock);
        list_del(&state->open_states);
        iput(inode);
        BUG_ON(state->state != 0);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Beware! Caller must be holding no references to clp->cl_sem
 * or owner->so_sema!
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
        struct nfs4_client *clp = owner->so_client;
        int newstate;

        atomic_inc(&owner->so_count);
        down_read(&clp->cl_sem);
        down(&owner->so_sema);
        /* Protect against nfs4_find_state() */
        spin_lock(&inode->i_lock);
        if (mode & FMODE_READ)
                state->nreaders--;
        if (mode & FMODE_WRITE)
                state->nwriters--;
        if (state->nwriters == 0) {
                if (state->nreaders == 0)
                        list_del_init(&state->inode_states);
                /* See reclaim code */
                list_move_tail(&state->open_states, &owner->so_states);
        }
        spin_unlock(&inode->i_lock);
        newstate = 0;
        if (state->state != 0) {
                if (state->nreaders)
                        newstate |= FMODE_READ;
                if (state->nwriters)
                        newstate |= FMODE_WRITE;
                if (state->state == newstate)
                        goto out;
                if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
                        return;
        }
out:
        nfs4_put_open_state(state);
        up(&owner->so_sema);
        nfs4_put_state_owner(owner);
        up_read(&clp->cl_sem);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;

        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Allocate and initialize a new lock_state structure for the given
 * lock owner.
 *
 * The caller must be holding state->lock_sema
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs4_client *clp = state->owner->so_client;

        lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        lsp->ls_seqid.sequence = &state->owner->so_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        spin_lock(&clp->cl_lock);
        lsp->ls_id = nfs4_alloc_lockowner_id(clp);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema and clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        new->ls_state = state;
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        kfree(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

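/*
 * Attach an nfs4_lock_state to the file_lock so that the lock stateid
 * follows the lock when it is copied or released by the VFS.
 */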
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        lsp = nfs4_get_lock_state(state, fl->fl_owner);
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;

        memcpy(dst, &state->stateid, sizeof(*dst));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;
        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

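/*
 * Allocate a seqid and queue it on the counter's sequence list; the
 * list ordering is what serializes operations that share a seqid.
 */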
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
        struct rpc_sequence *sequence = counter->sequence;
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (new != NULL) {
                new->sequence = counter;
                new->task = NULL;
                spin_lock(&sequence->lock);
                list_add_tail(&new->list, &sequence->list);
                spin_unlock(&sequence->lock);
        }
        return new;
}

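/*
 * Remove the seqid from the sequence list and wake up the next waiter,
 * if any.
 */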
void nfs_free_seqid(struct nfs_seqid *seqid)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        struct rpc_task *next = NULL;

        spin_lock(&sequence->lock);
        list_del(&seqid->list);
        if (!list_empty(&sequence->list)) {
                next = list_entry(sequence->list.next, struct nfs_seqid, list)->task;
                if (next)
                        rpc_wake_up_task(next);
        }
        spin_unlock(&sequence->lock);
        kfree(seqid);
}

/*
 * Called with sp->so_sema and clp->cl_sem held.
 *
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        switch (status) {
        case 0:
                break;
        case -NFS4ERR_BAD_SEQID:
        case -NFS4ERR_STALE_CLIENTID:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
        case -NFS4ERR_BADXDR:
        case -NFS4ERR_RESOURCE:
        case -NFS4ERR_NOFILEHANDLE:
                /* Non-seqid mutating errors */
                return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

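/*
 * As nfs_increment_seqid(), but a bad OPEN seqid also forces the
 * state owner to be dropped and re-created.
 */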
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        if (status == -NFS4ERR_BAD_SEQID) {
                struct nfs4_state_owner *sp = container_of(seqid->sequence,
                                struct nfs4_state_owner, so_seqid);
                nfs4_drop_state_owner(sp);
        }
        return nfs_increment_seqid(status, seqid);
}

/*
 * Called with ls->lock_sema and clp->cl_sem held.
 *
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        return nfs_increment_seqid(status, seqid);
}

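/*
 * If this seqid is not at the head of the sequence list, put the task
 * to sleep until it is; returns -EAGAIN so the caller can back off.
 */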
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        spin_lock(&sequence->lock);
        if (sequence->list.next != &seqid->list) {
                seqid->task = task;
                rpc_sleep_on(&sequence->wait, task, NULL, NULL);
                status = -EAGAIN;
        }
        spin_unlock(&sequence->lock);
        return status;
}

static int reclaimer(void *);

struct reclaimer_args {
        struct nfs4_client *clp;
        struct completion complete;
};

/*
 * State recovery routine
 */
static void
nfs4_recover_state(void *data)
{
        struct nfs4_client *clp = (struct nfs4_client *)data;
        struct reclaimer_args args = {
                .clp = clp,
        };

        might_sleep();
        init_completion(&args.complete);
        if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
                goto out_failed_clear;
        wait_for_completion(&args.complete);
        return;
out_failed_clear:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
        if (!clp)
                return;
        if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
                schedule_work(&clp->cl_recoverd);
}

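/*
 * Re-establish all POSIX locks held on this open state after the
 * server has lost or discarded its state.
 */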
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & FL_POSIX))
                        continue;
                if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
                        continue;
                status = ops->recover_lock(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __FUNCTION__, status);
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        /* kill_proc(fl->fl_owner, SIGLOST, 1); */
                        break;
                case -NFS4ERR_STALE_CLIENTID:
                        goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(ops, state);
                        if (status < 0)
                                goto out_err;
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                        printk("%s: Lock reclaim failed!\n",
                                                        __FUNCTION__);
                        }
                        continue;
                }
                switch (status) {
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __FUNCTION__, status);
                case -ENOENT:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        /*
                         * Open state on this file cannot be recovered
                         * All we can do is revert to using the zero stateid.
                         */
                        memset(state->stateid.data, 0,
                                sizeof(state->stateid.data));
                        /* Mark the file as being 'closed' */
                        state->state = 0;
                        break;
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                case -NFS4ERR_STALE_CLIENTID:
                        goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
        struct nfs4_state_owner *sp;
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;

        /* Reset all sequence ids to zero */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                sp->so_seqid.counter = 0;
                sp->so_seqid.flags = 0;
                list_for_each_entry(state, &sp->so_states, open_states) {
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                lock->ls_seqid.counter = 0;
                                lock->ls_seqid.flags = 0;
                                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                        }
                }
        }
}

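/*
 * The state recovery thread: renew the lease to find out what went
 * wrong, re-establish the clientid, then reclaim all open and lock
 * state held by this client.
 */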
static int reclaimer(void *ptr)
{
        struct reclaimer_args *args = (struct reclaimer_args *)ptr;
        struct nfs4_client *clp = args->clp;
        struct nfs4_state_owner *sp;
        struct nfs4_state_recovery_ops *ops;
        int status = 0;

        daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
        allow_signal(SIGKILL);

        atomic_inc(&clp->cl_count);
        complete(&args->complete);

        /* Ensure exclusive access to NFSv4 state */
        lock_kernel();
        down_write(&clp->cl_sem);
        /* Are there any NFS mounts out there? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
restart_loop:
        status = nfs4_proc_renew(clp);
        switch (status) {
        case 0:
        case -NFS4ERR_CB_PATH_DOWN:
                goto out;
        case -NFS4ERR_STALE_CLIENTID:
        case -NFS4ERR_LEASE_MOVED:
                ops = &nfs4_reboot_recovery_ops;
                break;
        default:
                ops = &nfs4_network_partition_recovery_ops;
        }
        nfs4_state_mark_reclaim(clp);
        status = __nfs4_init_client(clp);
        if (status)
                goto out_error;
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        /* Note: list is protected by exclusive lock on cl->cl_sem */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                status = nfs4_reclaim_open_state(ops, sp);
                if (status < 0) {
                        if (status == -NFS4ERR_NO_GRACE) {
                                ops = &nfs4_network_partition_recovery_ops;
                                status = nfs4_reclaim_open_state(ops, sp);
                        }
                        if (status == -NFS4ERR_STALE_CLIENTID)
                                goto restart_loop;
                        if (status == -NFS4ERR_EXPIRED)
                                goto restart_loop;
                }
        }
        nfs_delegation_reap_unclaimed(clp);
out:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        up_write(&clp->cl_sem);
        unlock_kernel();
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        if (status == -NFS4ERR_CB_PATH_DOWN)
                nfs_handle_cb_pathdown(clp);
        nfs4_put_client(clp);
        return 0;
out_error:
        printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
                        NIPQUAD(clp->cl_addr.s_addr), -status);
        goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */