
/*
 * fs/nfsd/nfs4layouts.c
 *
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PNFS
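
/*
 * A single layout segment (offset/length/iomode) granted to a client,
 * linked into the owning layout stateid's ->ls_layouts list.
 */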
struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	[LAYOUT_SCSI]		= &scsi_layout_ops,
#endif
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}
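
/*
 * Allocate a device ID mapping for the export that @fhp refers to, or
 * reuse an existing mapping with the same fsid.  Mappings are hashed by
 * a monotonically increasing index and are only freed at module unload,
 * so a device ID handed out to a client stays resolvable.
 */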
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;
out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}
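
/*
 * Look up a device ID mapping by index.  Readers walk the hash chain
 * under rcu_read_lock(), which pairs with list_add_tail_rcu() above.
 */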
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}
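
/*
 * Compose the on-the-wire device ID from the export's fsid map index
 * and the layout-driver-supplied generation number, allocating the
 * fsid mapping on first use.
 */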
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}
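
/*
 * Pick the layout type advertised for an export, based on what the
 * underlying file system and block device support.
 */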
void nfsd4_setup_layout_type(struct svc_export *exp)
{
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

	/*
	 * Check if the file system supports exporting a block-like layout.
	 * If the block device supports reservations prefer the SCSI layout,
	 * otherwise advertise the block layout.
	 */
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	/* overwrite block layout selection if needed */
	if (sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks &&
	    sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops)
		exp->ex_layout_type = LAYOUT_SCSI;
#endif
}
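
/*
 * Release a layout stateid: unhash it from the client and file lists,
 * drop the lease and the file reference, and free the memory.  Called
 * via ->sc_free once the stateid's refcount drops to zero.
 */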
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
	fput(ls->ls_file);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}
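
/*
 * Install an FL_LAYOUT lease on the file so that conflicting local
 * access breaks the lease, invoking ->lm_break and thus a layout
 * recall.
 */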
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lock *fl;
	int status;

	fl = locks_alloc_lock();
	if (!fl)
		return -ENOMEM;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->fl_flags = FL_LAYOUT;
	fl->fl_type = F_RDLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = ls;
	fl->fl_pid = current->tgid;
	fl->fl_file = ls->ls_file;

	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
	if (status) {
		locks_free_lock(fl);
		return status;
	}
	/* On success vfs_setlease() consumed the lock and cleared the pointer. */
	BUG_ON(fl != NULL);
	return 0;
}
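
/*
 * Allocate and hash a new layout stateid for @parent's file.  The new
 * stateid pins the nfs4_file, holds a reference to an open file (the
 * delegated file if the parent is a delegation), and has the FL_LAYOUT
 * lease installed before it is made visible on the per-client and
 * per-file lists.
 */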
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
	if (!stp)
		return NULL;
	stp->sc_free = nfsd4_free_layout_stateid;
	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == NFS4_DELEG_STID)
		ls->ls_file = get_file(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		fput(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = NFS4_LAYOUT_STID;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}
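
/*
 * Look up the layout stateid named by @stateid, or, when @create is set
 * (LAYOUTGET), build one from an open, lock or delegation stateid.  On
 * success *lsp is returned referenced and with ls_mutex held; the
 * caller must unlock and put it.
 */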
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned char typemask = NFS4_LAYOUT_STID;
	__be32 status;

	if (create)
		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != NFS4_LAYOUT_STID) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}
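
/*
 * Kick off a CB_LAYOUTRECALL for this stateid unless one is already in
 * flight.  The callback owns an extra stateid reference that is dropped
 * in nfsd4_cb_layout_release().
 */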
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	trace_layout_recall(&ls->ls_stid.sc_stateid);

	atomic_inc(&ls->ls_stid.sc_count);
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}
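
/*
 * End offset of a layout segment, clamped to NFS4_MAX_UINT64 when
 * offset + length would overflow (a length of NFS4_MAX_UINT64 means
 * "to end of file").
 */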
static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}
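
/*
 * Extend @lo to cover @new if the two segments have the same iomode and
 * overlap or touch; returns true if the merge happened.
 */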
static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}
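
/*
 * Recall every other layout stateid on this file.  A layout held by a
 * different stateid conflicts with handing out a new one, so the caller
 * returns NFS4ERR_RECALLCONFLICT until those layouts are gone.
 */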
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}
	return nfserr;
}
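
/*
 * Record a freshly granted layout segment on its stateid, merging with
 * an existing segment when possible.  Because allocating a new
 * nfs4_layout may sleep, the locks are dropped for the allocation and
 * the conflict and merge checks are then repeated from scratch.
 */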
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	atomic_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}
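
/*
 * Free a list of layouts collected for destruction, dropping the
 * stateid reference that each layout holds.
 */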
static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}
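
/*
 * Apply a LAYOUTRETURN range to one layout segment: drop it entirely,
 * trim it from either end, or leave it untouched when the return range
 * would split it (splits are not supported).
 */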
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}
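
/*
 * Handle LAYOUTRETURN of type FILE: trim or drop every segment that
 * overlaps the returned range, bump the stateid if anything matched,
 * and unhash the stateid once no layouts remain on it.
 */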
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = 1;
	} else {
		trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		nfs4_unhash_stid(&ls->ls_stid);
		lrp->lrs_present = 0;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}
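
/*
 * Handle LAYOUTRETURN of type FSID or ALL: reap every matching layout
 * held by the client, optionally restricted to the current fsid.
 */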
__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}
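
/*
 * Move all layouts of a stateid onto @reaplist for later freeing.
 */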
static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}
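
/*
 * Fence a client that did not respond to a layout recall by running
 * the /sbin/nfsd-recall-failed helper with the client address and the
 * super block ID as arguments.
 */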
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"Fencing..\n", addr_str);

	argv[0] = "/sbin/nfsd-recall-failed";
	argv[1] = addr_str;
	argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}
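
/*
 * Callback prepare: bump the stateid so the recall carries a fresh
 * sequence id, serialized against other users by ls_mutex.
 */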
static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}
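
/*
 * Callback completion: on success or NFS4ERR_DELAY keep polling until
 * the client has returned all layouts or two lease periods have
 * elapsed, then fence; NFS4ERR_NOMATCHING_LAYOUT counts as done.
 */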
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	const struct nfsd4_layout_ops *ops;
	LIST_HEAD(reaplist);

	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
				nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		/* Fallthrough */
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		trace_layout_recall_fail(&ls->ls_stid.sc_stateid);

		ops = nfsd4_layout_ops[ls->ls_layout_type];
		if (ops->fence_client)
			ops->fence_client(ls);
		else
			nfsd4_cb_layout_fail(ls);
		return -1;
	}
}
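
/*
 * Callback release: reap whatever is still outstanding and drop the
 * reference the recall took on the stateid.
 */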
static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
};

static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->fl_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};
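
/*
 * Module init/exit: set up the device ID hash and the two slab caches.
 */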
int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
			sizeof(struct nfs4_layout), 0, 0, NULL);
	if (!nfs4_layout_cache)
		return -ENOMEM;
	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}