/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_r_rq_cache,
				*pblk_w_rq_cache, *pblk_line_meta_cache;
static DECLARE_RWSEM(pblk_lock);

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

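/* Entry point for all bios submitted to the pblk block device. Discards are
 * handled inline and completed immediately unless the bio also carries a
 * preflush, in which case it falls through to the write path so the flush is
 * ordered against buffered data. The NVM_IO_* return codes from pblk_rw_io()
 * are mapped to bio completions here.
 */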
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

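/* The logical-to-physical (L2P) table maps every exposed logical sector to
 * its current physical address, one entry per sector: 4 bytes are enough
 * when the device address format fits in 32 bits, otherwise 8 bytes are
 * used. All entries start out empty and are filled by writes or recovery.
 */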
static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

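/* pblk packs physical addresses into the smallest number of bits the device
 * geometry allows, laid out as sec | pln | ch | lun | pg | blk from least to
 * most significant. For example, on a hypothetical geometry with 4 sectors
 * per page (2 bits), 2 planes (1 bit), 8 channels (3 bits), 4 LUNs per
 * channel (2 bits), 512 pages (9 bits) and 4096 blocks (12 bits), the packed
 * address needs 29 bits, so the L2P table can use 4-byte entries.
 */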
static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

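/* The slab caches below are declared at module scope; their creation is
 * serialized by pblk_lock. The line metadata cache is the exception: its
 * object size depends on this instance's line geometry, so it is named
 * after the target disk.
 */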
static int pblk_init_global_caches(struct pblk *pblk)
{
	char cache_name[PBLK_CACHE_NAME_LEN];

	down_write(&pblk_lock);
	pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_blk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_r_rq_cache = kmem_cache_create("pblk_r_rq", pblk_r_rq_size,
				0, 0, NULL);
	if (!pblk_r_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_r_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
							pblk->disk->disk_name);
	pblk_line_meta_cache = kmem_cache_create(cache_name,
				pblk->lm.sec_bitmap_len, 0, 0, NULL);
	if (!pblk_line_meta_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_r_rq_cache);
		kmem_cache_destroy(pblk_w_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

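/* Core init derives the write granularity from the device geometry:
 * min_write_pgs is one flash page across all planes, expressed in host
 * pages, and max_write_pgs stripes that across every LUN, capped by the
 * maximum number of physical sectors the device accepts per command. For
 * example, with 4 sectors per page, 4 planes and a sector size equal to the
 * host page size, min_write_pgs is 16. The rest of core init allocates the
 * mempools, auxiliary workqueue and write buffer used on the I/O path.
 */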
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int max_write_ppas;
	int mod;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!pblk->page_pool)
		return -ENOMEM;

	pblk->line_ws_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_blk_ws_cache);
	if (!pblk->line_ws_pool)
		goto free_page_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_blk_ws_pool;

	pblk->r_rq_pool = mempool_create_slab_pool(64, pblk_r_rq_cache);
	if (!pblk->r_rq_pool)
		goto free_rec_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(64, pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_r_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(16, pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->kw_wq = alloc_workqueue("pblk-aux-wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!pblk->kw_wq)
		goto free_line_meta_pool;

	if (pblk_set_ppaf(pblk))
		goto free_kw_wq;

	if (pblk_rwb_init(pblk))
		goto free_kw_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_kw_wq:
	destroy_workqueue(pblk->kw_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_r_rq_pool:
	mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_blk_ws_pool:
	mempool_destroy(pblk->line_ws_pool);
free_page_pool:
	mempool_destroy(pblk->page_pool);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->kw_wq)
		destroy_workqueue(pblk->kw_wq);

	mempool_destroy(pblk->page_pool);
	mempool_destroy(pblk->line_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->r_rq_pool);
	mempool_destroy(pblk->w_rq_pool);
	mempool_destroy(pblk->line_meta_pool);

	kmem_cache_destroy(pblk_blk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_r_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		kfree(line->blk_bitmap);
		kfree(line->erase_bitmap);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
	}

	kfree(pblk->lines);
}

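/* Retrieve the bad block table for a single LUN and fold the per-plane
 * entries reported by the device into one entry per physical block. The
 * resulting list is kept on the LUN only until the lines are initialized.
 */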
static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	for (i = 0; i < lm->blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(i, line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

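/* The target's LUN array is built by striping across channels: dev->luns is
 * expected to be laid out channel by channel, while pblk->luns interleaves
 * channels so that consecutive entries hit different channels. For example,
 * with 2 channels of 4 LUNs each, pblk->luns[0..3] map to (ch0,lun0),
 * (ch1,lun0), (ch0,lun1), (ch1,lun1).
 */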
static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk, struct pblk_line_meta *lm)
{
	return (sizeof(struct line_emeta) +
			((lm->sec_per_line - lm->emeta_sec) * sizeof(u64)) +
			(pblk->l_mg.nr_lines * sizeof(u32)) +
			lm->blk_bitmap_len);
}

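/* pblk reserves a fixed share of the media as over-provisioning for garbage
 * collection; only the remainder is exposed as user capacity. With the
 * default 20%, e.g. 1000 free blocks give a provisioned capacity of 800
 * blocks, i.e. 800 * sec_per_blk user sectors, while the rate limiter still
 * accounts for all 1000 blocks internally.
 */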
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

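/* A line stripes one block from every LUN and is pblk's unit of allocation
 * and garbage collection. Line init sizes the start-of-line (smeta) and
 * end-of-line (emeta) metadata in whole flash pages, allocates the metadata
 * buffers, and walks the per-LUN bad block tables to mark unusable blocks in
 * each line; lines with too few good blocks are set aside as bad.
 */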
static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_meta_blks, nr_free_blks;
	int bb_distance;
	int i;
	int ret;

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	lm->smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;

	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) +
				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec = i * geo->sec_per_pl;
	lm->emeta_len = lm->emeta_sec * geo->sec_size;

	emeta_len = calc_emeta_len(pblk, lm);
	if (emeta_len > lm->emeta_len) {
		i++;
		goto add_emeta_page;
	}
	lm->emeta_bb = geo->nr_luns - i;

	nr_meta_blks = (lm->smeta_sec + lm->emeta_sec +
				(geo->sec_per_blk / 2)) / geo->sec_per_blk;
	lm->min_blk_line = nr_meta_blks + 1;

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	l_mg->smeta_alloc_type = PBLK_KMALLOC_META;
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i].meta = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i].meta) {
			ret = -ENOMEM;
			while (--i >= 0)
				kfree(l_mg->sline_meta[i].meta);
			goto fail;
		}
	}

	if (lm->emeta_len > KMALLOC_MAX_CACHE_SIZE) {
		l_mg->emeta_alloc_type = PBLK_VMALLOC_META;
		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta = vmalloc(lm->emeta_len);
			if (!l_mg->eline_meta[i].meta) {
				ret = -ENOMEM;
				while (--i >= 0)
					vfree(l_mg->eline_meta[i].meta);
				goto fail;
			}
		}
	} else {
		l_mg->emeta_alloc_type = PBLK_KMALLOC_META;
		for (i = 0; i < PBLK_DATA_LINES; i++) {
			l_mg->eline_meta[i].meta =
					kmalloc(lm->emeta_len, GFP_KERNEL);
			if (!l_mg->eline_meta[i].meta) {
				ret = -ENOMEM;
				while (--i >= 0)
					kfree(l_mg->eline_meta[i].meta);
				goto fail;
			}
		}
	}

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		spin_lock_init(&line->lock);

		nr_bad_blks = pblk_bb_line(pblk, line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	sema_init(&pblk->erase_sem, 1);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	kfree(pblk->lines);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
	}
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

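/* The write thread drains the write buffer to the device. A periodic timer
 * (every 100 ms here) kicks it even when no flush forces it, so buffered
 * data does not linger. The thread is created now but only woken at the end
 * of pblk_init(), once the target is fully set up.
 */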
static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_flush_writer(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_recov_pad(pblk);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);
	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

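/* Target creation entry point (nvm_tgt_type->init). Initialization order
 * matters: LUNs and lines must exist before the core structures and L2P
 * table, recovery (or first-line selection) runs once the L2P table is in
 * place, and the writer thread and GC are started last. The block queue
 * limits are then inherited from the underlying device queue.
 */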
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: device-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->compl_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);
	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,
	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
};

static int __init pblk_module_init(void)
{
	return nvm_register_tgt_type(&tt_pblk);
}

static void pblk_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");