/* fs/nfs/blocklayout/dev.c */
  1. /*
  2. * Copyright (c) 2014 Christoph Hellwig.
  3. */
  4. #include <linux/sunrpc/svc.h>
  5. #include <linux/blkdev.h>
  6. #include <linux/nfs4.h>
  7. #include <linux/nfs_fs.h>
  8. #include <linux/nfs_xdr.h>
  9. #include "blocklayout.h"
  10. #define NFSDBG_FACILITY NFSDBG_PNFS_LD
  11. static void
  12. bl_free_device(struct pnfs_block_dev *dev)
  13. {
  14. if (dev->nr_children) {
  15. int i;
  16. for (i = 0; i < dev->nr_children; i++)
  17. bl_free_device(&dev->children[i]);
  18. kfree(dev->children);
  19. } else {
  20. if (dev->bdev)
  21. blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE);
  22. }
  23. }
  24. void
  25. bl_free_deviceid_node(struct nfs4_deviceid_node *d)
  26. {
  27. struct pnfs_block_dev *dev =
  28. container_of(d, struct pnfs_block_dev, node);
  29. bl_free_device(dev);
  30. kfree_rcu(dev, node.rcu);
  31. }
  32. static int
  33. nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
  34. {
  35. __be32 *p;
  36. int i;
  37. p = xdr_inline_decode(xdr, 4);
  38. if (!p)
  39. return -EIO;
  40. b->type = be32_to_cpup(p++);
  41. switch (b->type) {
  42. case PNFS_BLOCK_VOLUME_SIMPLE:
  43. p = xdr_inline_decode(xdr, 4);
  44. if (!p)
  45. return -EIO;
  46. b->simple.nr_sigs = be32_to_cpup(p++);
  47. if (!b->simple.nr_sigs) {
  48. dprintk("no signature\n");
  49. return -EIO;
  50. }
  51. b->simple.len = 4 + 4;
  52. for (i = 0; i < b->simple.nr_sigs; i++) {
  53. p = xdr_inline_decode(xdr, 8 + 4);
  54. if (!p)
  55. return -EIO;
  56. p = xdr_decode_hyper(p, &b->simple.sigs[i].offset);
  57. b->simple.sigs[i].sig_len = be32_to_cpup(p++);
  58. if (b->simple.sigs[i].sig_len > PNFS_BLOCK_UUID_LEN) {
  59. pr_info("signature too long: %d\n",
  60. b->simple.sigs[i].sig_len);
  61. return -EIO;
  62. }
  63. p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len);
  64. if (!p)
  65. return -EIO;
  66. memcpy(&b->simple.sigs[i].sig, p,
  67. b->simple.sigs[i].sig_len);
  68. b->simple.len += 8 + 4 + b->simple.sigs[i].sig_len;
  69. }
  70. break;
  71. case PNFS_BLOCK_VOLUME_SLICE:
  72. p = xdr_inline_decode(xdr, 8 + 8 + 4);
  73. if (!p)
  74. return -EIO;
  75. p = xdr_decode_hyper(p, &b->slice.start);
  76. p = xdr_decode_hyper(p, &b->slice.len);
  77. b->slice.volume = be32_to_cpup(p++);
  78. break;
  79. case PNFS_BLOCK_VOLUME_CONCAT:
  80. p = xdr_inline_decode(xdr, 4);
  81. if (!p)
  82. return -EIO;
  83. b->concat.volumes_count = be32_to_cpup(p++);
  84. p = xdr_inline_decode(xdr, b->concat.volumes_count * 4);
  85. if (!p)
  86. return -EIO;
  87. for (i = 0; i < b->concat.volumes_count; i++)
  88. b->concat.volumes[i] = be32_to_cpup(p++);
  89. break;
  90. case PNFS_BLOCK_VOLUME_STRIPE:
  91. p = xdr_inline_decode(xdr, 8 + 4);
  92. if (!p)
  93. return -EIO;
  94. p = xdr_decode_hyper(p, &b->stripe.chunk_size);
  95. b->stripe.volumes_count = be32_to_cpup(p++);
  96. p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);
  97. if (!p)
  98. return -EIO;
  99. for (i = 0; i < b->stripe.volumes_count; i++)
  100. b->stripe.volumes[i] = be32_to_cpup(p++);
  101. break;
  102. default:
  103. dprintk("unknown volume type!\n");
  104. return -EIO;
  105. }
  106. return 0;
  107. }
  108. static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
  109. struct pnfs_block_dev_map *map)
  110. {
  111. map->start = dev->start;
  112. map->len = dev->len;
  113. map->disk_offset = dev->disk_offset;
  114. map->bdev = dev->bdev;
  115. return true;
  116. }
  117. static bool bl_map_concat(struct pnfs_block_dev *dev, u64 offset,
  118. struct pnfs_block_dev_map *map)
  119. {
  120. int i;
  121. for (i = 0; i < dev->nr_children; i++) {
  122. struct pnfs_block_dev *child = &dev->children[i];
  123. if (child->start > offset ||
  124. child->start + child->len <= offset)
  125. continue;
  126. child->map(child, offset - child->start, map);
  127. return true;
  128. }
  129. dprintk("%s: ran off loop!\n", __func__);
  130. return false;
  131. }
  132. static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
  133. struct pnfs_block_dev_map *map)
  134. {
  135. struct pnfs_block_dev *child;
  136. u64 chunk;
  137. u32 chunk_idx;
  138. u64 disk_offset;
  139. chunk = div_u64(offset, dev->chunk_size);
  140. div_u64_rem(chunk, dev->nr_children, &chunk_idx);
  141. if (chunk_idx > dev->nr_children) {
  142. dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
  143. __func__, chunk_idx, offset, dev->chunk_size);
  144. /* error, should not happen */
  145. return false;
  146. }
  147. /* truncate offset to the beginning of the stripe */
  148. offset = chunk * dev->chunk_size;
  149. /* disk offset of the stripe */
  150. disk_offset = div_u64(offset, dev->nr_children);
  151. child = &dev->children[chunk_idx];
  152. child->map(child, disk_offset, map);
  153. map->start += offset;
  154. map->disk_offset += disk_offset;
  155. map->len = dev->chunk_size;
  156. return true;
  157. }
  158. static int
  159. bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
  160. struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
  161. static int
  162. bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
  163. struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  164. {
  165. struct pnfs_block_volume *v = &volumes[idx];
  166. dev_t dev;
  167. dev = bl_resolve_deviceid(server, v, gfp_mask);
  168. if (!dev)
  169. return -EIO;
  170. d->bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
  171. if (IS_ERR(d->bdev)) {
  172. printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
  173. MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev));
  174. return PTR_ERR(d->bdev);
  175. }
  176. d->len = i_size_read(d->bdev->bd_inode);
  177. d->map = bl_map_simple;
  178. printk(KERN_INFO "pNFS: using block device %s\n",
  179. d->bdev->bd_disk->disk_name);
  180. return 0;
  181. }
  182. static int
  183. bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d,
  184. struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  185. {
  186. struct pnfs_block_volume *v = &volumes[idx];
  187. int ret;
  188. ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
  189. if (ret)
  190. return ret;
  191. d->disk_offset = v->slice.start;
  192. d->len = v->slice.len;
  193. return 0;
  194. }
  195. static int
  196. bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
  197. struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  198. {
  199. struct pnfs_block_volume *v = &volumes[idx];
  200. u64 len = 0;
  201. int ret, i;
  202. d->children = kcalloc(v->concat.volumes_count,
  203. sizeof(struct pnfs_block_dev), GFP_KERNEL);
  204. if (!d->children)
  205. return -ENOMEM;
  206. for (i = 0; i < v->concat.volumes_count; i++) {
  207. ret = bl_parse_deviceid(server, &d->children[i],
  208. volumes, v->concat.volumes[i], gfp_mask);
  209. if (ret)
  210. return ret;
  211. d->nr_children++;
  212. d->children[i].start += len;
  213. len += d->children[i].len;
  214. }
  215. d->len = len;
  216. d->map = bl_map_concat;
  217. return 0;
  218. }
  219. static int
  220. bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
  221. struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  222. {
  223. struct pnfs_block_volume *v = &volumes[idx];
  224. u64 len = 0;
  225. int ret, i;
  226. d->children = kcalloc(v->stripe.volumes_count,
  227. sizeof(struct pnfs_block_dev), GFP_KERNEL);
  228. if (!d->children)
  229. return -ENOMEM;
  230. for (i = 0; i < v->stripe.volumes_count; i++) {
  231. ret = bl_parse_deviceid(server, &d->children[i],
  232. volumes, v->stripe.volumes[i], gfp_mask);
  233. if (ret)
  234. return ret;
  235. d->nr_children++;
  236. len += d->children[i].len;
  237. }
  238. d->len = len;
  239. d->chunk_size = v->stripe.chunk_size;
  240. d->map = bl_map_stripe;
  241. return 0;
  242. }
  243. static int
  244. bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
  245. struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  246. {
  247. switch (volumes[idx].type) {
  248. case PNFS_BLOCK_VOLUME_SIMPLE:
  249. return bl_parse_simple(server, d, volumes, idx, gfp_mask);
  250. case PNFS_BLOCK_VOLUME_SLICE:
  251. return bl_parse_slice(server, d, volumes, idx, gfp_mask);
  252. case PNFS_BLOCK_VOLUME_CONCAT:
  253. return bl_parse_concat(server, d, volumes, idx, gfp_mask);
  254. case PNFS_BLOCK_VOLUME_STRIPE:
  255. return bl_parse_stripe(server, d, volumes, idx, gfp_mask);
  256. default:
  257. dprintk("unsupported volume type: %d\n", volumes[idx].type);
  258. return -EIO;
  259. }
  260. }
  261. struct nfs4_deviceid_node *
  262. bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
  263. gfp_t gfp_mask)
  264. {
  265. struct nfs4_deviceid_node *node = NULL;
  266. struct pnfs_block_volume *volumes;
  267. struct pnfs_block_dev *top;
  268. struct xdr_stream xdr;
  269. struct xdr_buf buf;
  270. struct page *scratch;
  271. int nr_volumes, ret, i;
  272. __be32 *p;
  273. scratch = alloc_page(gfp_mask);
  274. if (!scratch)
  275. goto out;
  276. xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen);
  277. xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);
  278. p = xdr_inline_decode(&xdr, sizeof(__be32));
  279. if (!p)
  280. goto out_free_scratch;
  281. nr_volumes = be32_to_cpup(p++);
  282. volumes = kcalloc(nr_volumes, sizeof(struct pnfs_block_volume),
  283. gfp_mask);
  284. if (!volumes)
  285. goto out_free_scratch;
  286. for (i = 0; i < nr_volumes; i++) {
  287. ret = nfs4_block_decode_volume(&xdr, &volumes[i]);
  288. if (ret < 0)
  289. goto out_free_volumes;
  290. }
  291. top = kzalloc(sizeof(*top), gfp_mask);
  292. if (!top)
  293. goto out_free_volumes;
  294. ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);
  295. if (ret) {
  296. bl_free_device(top);
  297. kfree(top);
  298. goto out_free_volumes;
  299. }
  300. node = &top->node;
  301. nfs4_init_deviceid_node(node, server, &pdev->dev_id);
  302. out_free_volumes:
  303. kfree(volumes);
  304. out_free_scratch:
  305. __free_page(scratch);
  306. out:
  307. return node;
  308. }