mdsmap.c

#include <linux/ceph/ceph_debug.h>

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/ceph/mdsmap.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>

#include "super.h"

/*
 * choose a random mds that is "up" (i.e. has a state > 0), or -1.
 */
int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
{
        int n = 0;
        int i, j;

        /* special case for one mds */
        if (m->m_max_mds == 1 && m->m_info[0].state > 0)
                return 0;

        /* count the ranks that are up */
        for (i = 0; i < m->m_max_mds; i++)
                if (m->m_info[i].state > 0)
                        n++;
        if (n == 0)
                return -1;

        /* pick: walk the ranks until we have passed n "up" entries */
        n = prandom_u32() % n;
        for (j = 0, i = 0; i < m->m_max_mds; i++) {
                if (m->m_info[i].state > 0)
                        j++;
                if (j > n)
                        break;
        }

        return i;
}
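
/*
 * Worked example for the pick loop above: with three ranks whose states
 * are {down, up, up}, n is counted as 2, so the random pick is 0 or 1.
 * A pick of 0 stops the walk at rank 1 (the first "up" rank), a pick of
 * 1 stops it at rank 2; a down rank is never returned.
 */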

#define __decode_and_drop_type(p, end, type, bad) \
        do { \
                if (*p + sizeof(type) > end) \
                        goto bad; \
                *p += sizeof(type); \
        } while (0)

#define __decode_and_drop_set(p, end, type, bad) \
        do { \
                u32 n; \
                size_t need; \
                ceph_decode_32_safe(p, end, n, bad); \
                need = sizeof(type) * n; \
                ceph_decode_need(p, end, need, bad); \
                *p += need; \
        } while (0)

#define __decode_and_drop_map(p, end, ktype, vtype, bad) \
        do { \
                u32 n; \
                size_t need; \
                ceph_decode_32_safe(p, end, n, bad); \
                need = (sizeof(ktype) + sizeof(vtype)) * n; \
                ceph_decode_need(p, end, need, bad); \
                *p += need; \
        } while (0)
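
/*
 * The __decode_and_drop_* helpers above skip encoded fields this client
 * does not keep: a fixed-size value, a set (a 32-bit count followed by
 * that many fixed-size elements), or a map (a 32-bit count followed by
 * that many fixed-size key/value pairs).  For example,
 * __decode_and_drop_map(p, end, u32, u64, bad) consumes a 4-byte count n
 * and then n * (4 + 8) bytes of payload, bailing out to the 'bad' label
 * if the buffer is too short.
 */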

static int __decode_and_drop_compat_set(void **p, void *end)
{
        int i;

        /* compat, ro_compat, incompat */
        for (i = 0; i < 3; i++) {
                u32 n;
                ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
                /* mask */
                *p += sizeof(u64);
                /* names (map<u64, string>) */
                n = ceph_decode_32(p);
                while (n-- > 0) {
                        u32 len;
                        ceph_decode_need(p, end, sizeof(u64) + sizeof(u32),
                                         bad);
                        *p += sizeof(u64);
                        len = ceph_decode_32(p);
                        ceph_decode_need(p, end, len, bad);
                        *p += len;
                }
        }
        return 0;
bad:
        return -1;
}
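
/*
 * Each of the three feature sets skipped above is encoded as a 64-bit
 * feature mask followed by a map of feature id (u64) to feature name
 * (length-prefixed string); the helper only validates the lengths and
 * advances *p past all of it.
 */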

/*
 * Decode an MDS map
 *
 * Ignore any fields we don't care about (there are quite a few of
 * them).
 */
struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
{
        struct ceph_mdsmap *m;
        const void *start = *p;
        int i, j, n;
        int err = -EINVAL;
        u8 mdsmap_v, mdsmap_cv;
        u16 mdsmap_ev;

        m = kzalloc(sizeof(*m), GFP_NOFS);
        if (m == NULL)
                return ERR_PTR(-ENOMEM);

        ceph_decode_need(p, end, 1 + 1, bad);
        mdsmap_v = ceph_decode_8(p);
        mdsmap_cv = ceph_decode_8(p);
        if (mdsmap_v >= 4) {
                u32 mdsmap_len;

                /* v4+ carries an explicit length; clamp 'end' to it */
                ceph_decode_32_safe(p, end, mdsmap_len, bad);
                if (end < *p + mdsmap_len)
                        goto bad;
                end = *p + mdsmap_len;
        }

        ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
        m->m_epoch = ceph_decode_32(p);
        m->m_client_epoch = ceph_decode_32(p);
        m->m_last_failure = ceph_decode_32(p);
        m->m_root = ceph_decode_32(p);
        m->m_session_timeout = ceph_decode_32(p);
        m->m_session_autoclose = ceph_decode_32(p);
        m->m_max_file_size = ceph_decode_64(p);
        m->m_max_mds = ceph_decode_32(p);

        m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
        if (m->m_info == NULL)
                goto nomem;

        /* pick out active nodes from mds_info (state > 0) */
        n = ceph_decode_32(p);
        for (i = 0; i < n; i++) {
                u64 global_id;
                u32 namelen;
                s32 mds, inc, state;
                u64 state_seq;
                u8 info_v;
                void *info_end = NULL;
                struct ceph_entity_addr addr;
                u32 num_export_targets;
                void *pexport_targets = NULL;
                struct ceph_timespec laggy_since;
                struct ceph_mds_info *info;

                ceph_decode_need(p, end, sizeof(u64) + 1, bad);
                global_id = ceph_decode_64(p);
                info_v = ceph_decode_8(p);
                if (info_v >= 4) {
                        u32 info_len;
                        u8 info_cv;

                        ceph_decode_need(p, end, 1 + sizeof(u32), bad);
                        info_cv = ceph_decode_8(p);
                        info_len = ceph_decode_32(p);
                        info_end = *p + info_len;
                        if (info_end > end)
                                goto bad;
                }

                ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
                *p += sizeof(u64);
                namelen = ceph_decode_32(p); /* skip mds name */
                *p += namelen;

                ceph_decode_need(p, end,
                                 4*sizeof(u32) + sizeof(u64) +
                                 sizeof(addr) + sizeof(struct ceph_timespec),
                                 bad);
                mds = ceph_decode_32(p);
                inc = ceph_decode_32(p);
                state = ceph_decode_32(p);
                state_seq = ceph_decode_64(p);
                ceph_decode_copy(p, &addr, sizeof(addr));
                ceph_decode_addr(&addr);
                ceph_decode_copy(p, &laggy_since, sizeof(laggy_since));
                *p += sizeof(u32);
                ceph_decode_32_safe(p, end, namelen, bad);
                *p += namelen;
                if (info_v >= 2) {
                        ceph_decode_32_safe(p, end, num_export_targets, bad);
                        pexport_targets = *p;
                        *p += num_export_targets * sizeof(u32);
                } else {
                        num_export_targets = 0;
                }

                if (info_end && *p != info_end) {
                        if (*p > info_end)
                                goto bad;
                        *p = info_end;
                }

                dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
                     i+1, n, global_id, mds, inc,
                     ceph_pr_addr(&addr.in_addr),
                     ceph_mds_state_name(state));

                if (mds < 0 || mds >= m->m_max_mds || state <= 0)
                        continue;

                info = &m->m_info[mds];
                info->global_id = global_id;
                info->state = state;
                info->addr = addr;
                info->laggy = (laggy_since.tv_sec != 0 ||
                               laggy_since.tv_nsec != 0);
                info->num_export_targets = num_export_targets;
                if (num_export_targets) {
                        info->export_targets = kcalloc(num_export_targets,
                                                       sizeof(u32), GFP_NOFS);
                        if (info->export_targets == NULL)
                                goto nomem;
                        for (j = 0; j < num_export_targets; j++)
                                info->export_targets[j] =
                                        ceph_decode_32(&pexport_targets);
                } else {
                        info->export_targets = NULL;
                }
        }

        /* pg_pools */
        ceph_decode_32_safe(p, end, n, bad);
        m->m_num_data_pg_pools = n;
        m->m_data_pg_pools = kcalloc(n, sizeof(u64), GFP_NOFS);
        if (!m->m_data_pg_pools)
                goto nomem;
        ceph_decode_need(p, end, sizeof(u64)*(n+1), bad);
        for (i = 0; i < n; i++)
                m->m_data_pg_pools[i] = ceph_decode_64(p);
        m->m_cas_pg_pool = ceph_decode_64(p);
        m->m_enabled = m->m_epoch > 1;

        mdsmap_ev = 1;
        if (mdsmap_v >= 2) {
                ceph_decode_16_safe(p, end, mdsmap_ev, bad_ext);
        }
        if (mdsmap_ev >= 3) {
                if (__decode_and_drop_compat_set(p, end) < 0)
                        goto bad_ext;
        }
        /* metadata_pool */
        if (mdsmap_ev < 5) {
                __decode_and_drop_type(p, end, u32, bad_ext);
        } else {
                __decode_and_drop_type(p, end, u64, bad_ext);
        }

        /* created + modified + tableserver */
        __decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
        __decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
        __decode_and_drop_type(p, end, u32, bad_ext);

        /* in */
        {
                int num_laggy = 0;

                ceph_decode_32_safe(p, end, n, bad_ext);
                ceph_decode_need(p, end, sizeof(u32) * n, bad_ext);

                for (i = 0; i < n; i++) {
                        s32 mds = ceph_decode_32(p);
                        if (mds >= 0 && mds < m->m_max_mds) {
                                if (m->m_info[mds].laggy)
                                        num_laggy++;
                        }
                }
                m->m_num_laggy = num_laggy;
        }

        /* inc */
        __decode_and_drop_map(p, end, u32, u32, bad_ext);
        /* up */
        __decode_and_drop_map(p, end, u32, u64, bad_ext);
        /* failed */
        __decode_and_drop_set(p, end, u32, bad_ext);
        /* stopped */
        __decode_and_drop_set(p, end, u32, bad_ext);

        if (mdsmap_ev >= 4) {
                /* last_failure_osd_epoch */
                __decode_and_drop_type(p, end, u32, bad_ext);
        }
        if (mdsmap_ev >= 6) {
                /* ever_allowed_snaps */
                __decode_and_drop_type(p, end, u8, bad_ext);
                /* explicitly_allowed_snaps */
                __decode_and_drop_type(p, end, u8, bad_ext);
        }
        if (mdsmap_ev >= 7) {
                /* inline_data_enabled */
                __decode_and_drop_type(p, end, u8, bad_ext);
        }
        if (mdsmap_ev >= 8) {
                u32 name_len;

                /* enabled */
                ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
                /* fs_name */
                ceph_decode_32_safe(p, end, name_len, bad_ext);
                ceph_decode_need(p, end, name_len, bad_ext);
                *p += name_len;
        }
        /* damaged */
        if (mdsmap_ev >= 9) {
                size_t need;

                ceph_decode_32_safe(p, end, n, bad_ext);
                need = sizeof(u32) * n;
                ceph_decode_need(p, end, need, bad_ext);
                *p += need;
                m->m_damaged = n > 0;
        } else {
                m->m_damaged = false;
        }

bad_ext:
        *p = end;
        dout("mdsmap_decode success epoch %u\n", m->m_epoch);
        return m;
nomem:
        err = -ENOMEM;
        goto out_err;
bad:
        pr_err("corrupt mdsmap\n");
        print_hex_dump(KERN_DEBUG, "mdsmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);
out_err:
        ceph_mdsmap_destroy(m);
        return ERR_PTR(err);
}
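
/*
 * A minimal usage sketch for ceph_mdsmap_decode() and
 * ceph_mdsmap_destroy() (illustration only; it assumes the map payload
 * arrives in the front segment of a ceph_msg, as the mds client does):
 *
 *        void *p = msg->front.iov_base;
 *        void *end = p + msg->front.iov_len;
 *        struct ceph_mdsmap *newmap = ceph_mdsmap_decode(&p, end);
 *
 *        if (IS_ERR(newmap))
 *                return;                 (corrupt or truncated map)
 *        ...
 *        ceph_mdsmap_destroy(newmap);    (frees per-mds state as well)
 */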

void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
{
        int i;

        /* m_info may be NULL when called from the decode error path */
        if (m->m_info) {
                for (i = 0; i < m->m_max_mds; i++)
                        kfree(m->m_info[i].export_targets);
                kfree(m->m_info);
        }
        kfree(m->m_data_pg_pools);
        kfree(m);
}
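
/*
 * Report whether the cluster looks usable from the client's side: the
 * filesystem must be enabled, not marked damaged, have no laggy ranks,
 * and have at least one rank in the active state.
 */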
bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m)
{
        int i, nr_active = 0;

        if (!m->m_enabled)
                return false;
        if (m->m_damaged)
                return false;
        if (m->m_num_laggy > 0)
                return false;
        for (i = 0; i < m->m_max_mds; i++) {
                if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE)
                        nr_active++;
        }
        return nr_active > 0;
}