
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}
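
/*
 * Illustrative note (not from the original source): index sequence
 * numbers occupy two bits and cycle 1 -> 2 -> 3 -> 1, with 0 reserved
 * as "invalid".  So best_seq(1, 2) == 2, since 2 is the successor of 1,
 * while best_seq(1, 3) == 1, since 1 is the successor of 3 and is
 * therefore the more recently written index.
 */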
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}
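
/*
 * Worked example (illustrative, assuming a 128KB label area, 256-byte
 * labels, NSINDEX_ALIGN == 256, and a 72-byte fixed index header):
 * tmp_nslot = 131072 / 256 = 512, so the free-list bitmap needs
 * 512 / 8 = 64 bytes and each index block rounds up to
 * ALIGN(72 + 64, 256) = 256 bytes.  That leaves
 * (131072 - 2 * 256) / 256 = 510 usable label slots.
 */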
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels.  The minimum index
	 * block size is 256 bytes.  The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
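
/*
 * Clarifying note (illustrative, not from the original source): in the
 * on-media index a *set* bit in the free list means the slot is free
 * (see nd_label_alloc_slot() clearing a bit on allocation below), so
 * iterating the *clear* bits with for_each_clear_bit_le() visits
 * exactly the slots that may hold active labels.
 */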
/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
					slot, sum);
			return false;
		}
	}

	return true;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
				ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory.  To do that we figure out how much unused space will be
	 * left in the last read, divide that by the total number of reads it
	 * is going to take given our maximum transfer size, and then reduce
	 * our maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}
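
	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): with config_size = 130000 and max_xfer = 4096, the last
	 * of DIV_ROUND_UP(130000, 4096) = 32 reads would waste
	 * 4095 - (129999 % 4096) = 1072 bytes.  Spreading that waste over
	 * the 32 reads trims max_xfer by 1072 / 32 = 33 down to 4063, so
	 * the same 32 reads now cover 130016 bytes and only over-read by
	 * 16 bytes instead of 1072.
	 */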
	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
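
/*
 * Illustrative note (not from the original source): after a successful
 * non-INIT write the roles swap.  For example, with ns_current == 0 and
 * ns_next == 1, the index is written to block 1 with an incremented
 * sequence number, then ns_current becomes 1 and ns_next becomes 0, so
 * the following update overwrites the now-stale block 0.
 */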
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *victim = NULL;
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) != 0)
			continue;
		victim = label_ent;
		list_move_tail(&victim->list, &nd_mapping->labels);
		break;
	}
	if (victim) {
		/* resolve the victim's slot before logging the free */
		slot = to_slot(ndd, victim->label);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
		victim->label = NULL;
	}
	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
				GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
					old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		kfree(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

out:
	kfree(old_res_list);
	kfree(victim_map);
	return rc;

abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}
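
/*
 * Illustrative note (not from the original source): the seed indexes
 * above are written with seq = 3 for block 0 and seq = 2 for block 1,
 * so best_seq(3, 2) picks block 0 as "current" on the next validation
 * pass, matching the ns_current = 0 / ns_next = 1 assignment.
 */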
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int rc, count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
		if (rc)
			return rc;
	}

	return 0;
}

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}