/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run only GC if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)

#define PBLK_WS_POOL_SIZE (128)
#define PBLK_META_POOL_SIZE (128)
#define PBLK_READ_REQ_POOL_SIZE (1024)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

#define pblk_for_each_lun(pblk, rlun, i) \
	for ((i) = 0, rlun = &(pblk)->luns[0]; \
			(i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
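
/*
 * Usage sketch (hypothetical caller, not a call site in this file):
 * take every LUN's write semaphore in turn; error handling omitted.
 *
 *	struct pblk_lun *rlun;
 *	int i;
 *
 *	pblk_for_each_lun(pblk, rlun, i)
 *		down(&rlun->wr_sem);
 */
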
#define ERASE 2 /* READ = 0, WRITE = 1 */

enum {
	/* IO Types */
	PBLK_IOTYPE_USER = 1 << 0,
	PBLK_IOTYPE_GC = 1 << 1,

	/* Write buffer flags */
	PBLK_FLUSH_ENTRY = 1 << 2,
	PBLK_WRITTEN_DATA = 1 << 3,
	PBLK_SUBMITTED_ENTRY = 1 << 4,
	PBLK_WRITABLE_ENTRY = 1 << 5,
};

enum {
	PBLK_BLK_ST_OPEN = 0x1,
	PBLK_BLK_ST_CLOSED = 0x2,
};

struct pblk_sec_meta {
	u64 reserved;
	__le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 3

enum {
	PBLK_RL_HIGH = 1,
	PBLK_RL_MID = 2,
	PBLK_RL_LOW = 3,
};

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)

/* write buffer completion context */
struct pblk_c_ctx {
	struct list_head list;		/* Head for out-of-order completion */
	unsigned long *lun_bitmap;	/* Luns used on current request */
	unsigned int sentry;
	unsigned int nr_valid;
	unsigned int nr_padded;
};

/* generic context */
struct pblk_g_ctx {
	void *private;
};

/* Pad context */
struct pblk_pad_rq {
	struct pblk *pblk;
	struct completion wait;
	struct kref ref;
};

/* Recovery context */
struct pblk_rec_ctx {
	struct pblk *pblk;
	struct nvm_rq *rqd;
	struct list_head failed;
	struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
	struct bio_list bios;		/* Original bios - used for completion
					 * in REQ_FUA, REQ_FLUSH case
					 */
	u64 lba;			/* Logical addr. associated with entry */
	struct ppa_addr ppa;		/* Physical addr. associated with entry */
	int flags;			/* Write context flags */
};

struct pblk_rb_entry {
	struct ppa_addr cacheline;	/* Cacheline for this entry */
	void *data;			/* Pointer to data on this entry */
	struct pblk_w_ctx w_ctx;	/* Context for this entry */
	struct list_head index;		/* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
	struct page *pages;
	int order;
	struct list_head list;
};

struct pblk_rb {
	struct pblk_rb_entry *entries;	/* Ring buffer entries */
	unsigned int mem;		/* Write offset - points to next
					 * writable entry in memory
					 */
	unsigned int subm;		/* Read offset - points to last entry
					 * that has been submitted to the media
					 * to be persisted
					 */
	unsigned int sync;		/* Synced - backpointer that signals
					 * the last submitted entry that has
					 * been successfully persisted to media
					 */
	unsigned int sync_point;	/* Sync point - last entry that must be
					 * flushed to the media. Used with
					 * REQ_FLUSH and REQ_FUA
					 */
	unsigned int l2p_update;	/* l2p update point - next entry for
					 * which the l2p mapping will be updated
					 * to contain a device ppa address
					 * (instead of a cacheline)
					 */
	unsigned int nr_entries;	/* Number of entries in write buffer -
					 * must be a power of two
					 */
	unsigned int seg_size;		/* Size of the data segments being
					 * stored on each entry. Typically this
					 * will be 4KB
					 */

	struct list_head pages;		/* List of data pages */

	spinlock_t w_lock;		/* Write lock */
	spinlock_t s_lock;		/* Sync lock */

#ifdef CONFIG_NVM_DEBUG
	atomic_t inflight_sync_point;	/* Not served REQ_FLUSH | REQ_FUA */
#endif
};
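
/*
 * The counters above only ever advance, in the order
 * l2p_update <= sync <= subm <= mem (modulo nr_entries). Because
 * nr_entries is a power of two, wrap-around distances reduce to a mask
 * instead of a modulo. A minimal sketch of the free-space calculation
 * these counters rely on (names illustrative; the real logic lives in
 * pblk-rb.c):
 *
 *	static unsigned int rb_space(unsigned int mem, unsigned int sync,
 *				     unsigned int nr_entries)
 *	{
 *		return (nr_entries - 1) - ((mem - sync) & (nr_entries - 1));
 *	}
 */
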
#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
	struct ppa_addr bppa;

	u8 *bb_list;			/* Bad block list for LUN. Only used on
					 * bring up. Bad blocks are managed
					 * within lines on run-time.
					 */

	struct semaphore wr_sem;
};

struct pblk_gc_rq {
	struct pblk_line *line;
	void *data;
	u64 lba_list[PBLK_MAX_REQ_ADDRS];
	int nr_secs;
	int secs_to_gc;
	struct list_head list;
};

struct pblk_gc {
	/* These states are not protected by a lock since (i) they are in the
	 * fast path, and (ii) they are not critical.
	 */
	int gc_active;
	int gc_enabled;
	int gc_forced;

	struct task_struct *gc_ts;
	struct task_struct *gc_writer_ts;
	struct task_struct *gc_reader_ts;

	struct workqueue_struct *gc_line_reader_wq;
	struct workqueue_struct *gc_reader_wq;

	struct timer_list gc_timer;

	struct semaphore gc_sem;
	atomic_t inflight_gc;
	int w_entries;

	struct list_head w_list;
	struct list_head r_list;

	spinlock_t lock;
	spinlock_t w_lock;
	spinlock_t r_lock;
};

struct pblk_rl {
	unsigned int high;	/* Upper threshold for rate limiter (free run -
				 * user I/O rate limiter)
				 */
	unsigned int low;	/* Lower threshold for rate limiter (user I/O
				 * rate limiter - stall)
				 */
	unsigned int high_pw;	/* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8	/* Begin write limit at 1/8 (12.5%) available blks */
#define PBLK_USER_LOW_THRS 10	/* Aggressive GC at 10% available blocks */

	int rb_windows_pw;	/* Number of rate windows in the write buffer
				 * given as a power-of-2. This guarantees that
				 * when user I/O is being rate limited, enough
				 * space will be reserved for the GC to place
				 * its payload. A window is of
				 * pblk->max_write_pgs size, which in NVMe is
				 * 64, i.e., 256 KB.
				 */
	int rb_budget;		/* Total number of entries available for I/O */
	int rb_user_max;	/* Max buffer entries available for user I/O */
	int rb_gc_max;		/* Max buffer entries available for GC I/O */
	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
	int rb_state;		/* Rate-limiter current state */

	atomic_t rb_user_cnt;	/* User I/O buffer counter */
	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */
	atomic_t rb_space;	/* Space limit in case of reaching capacity */

	int rsv_blocks;		/* Reserved blocks for GC */

	int rb_user_active;
	int rb_gc_active;

	struct timer_list u_timer;

	unsigned long long nr_secs;
	unsigned long total_blocks;
	atomic_t free_blocks;
};

#define PBLK_LINE_EMPTY (~0U)

enum {
	/* Line Types */
	PBLK_LINETYPE_FREE = 0,
	PBLK_LINETYPE_LOG = 1,
	PBLK_LINETYPE_DATA = 2,

	/* Line state */
	PBLK_LINESTATE_FREE = 10,
	PBLK_LINESTATE_OPEN = 11,
	PBLK_LINESTATE_CLOSED = 12,
	PBLK_LINESTATE_GC = 13,
	PBLK_LINESTATE_BAD = 14,
	PBLK_LINESTATE_CORRUPT = 15,

	/* GC group */
	PBLK_LINEGC_NONE = 20,
	PBLK_LINEGC_EMPTY = 21,
	PBLK_LINEGC_LOW = 22,
	PBLK_LINEGC_MID = 23,
	PBLK_LINEGC_HIGH = 24,
	PBLK_LINEGC_FULL = 25,
};

#define PBLK_MAGIC 0x70626c6b /* pblk */

struct line_header {
	__le32 crc;
	__le32 identifier;	/* pblk identifier */
	__u8 uuid[16];		/* instance uuid */
	__le16 type;		/* line type */
	__le16 version;		/* type version */
	__le32 id;		/* line id for current line */
};

struct line_smeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for previous line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	__le32 rsvd[2];

	__le64 lun_bitmap[];
};

/*
 * Metadata layout in media:
 *	First sector:
 *		1. struct line_emeta
 *		2. bad block bitmap (u64 * window_wr_lun)
 *	Mid sectors (start at lbas_sector):
 *		3. nr_lbas (u64) forming lba list
 *	Last sectors (start at vsc_sector):
 *		4. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
	struct line_header header;

	__le32 crc;		/* Full structure including struct crc */

	/* Previous line metadata */
	__le32 prev_id;		/* Line id for prev line */

	/* Current line metadata */
	__le64 seq_nr;		/* Sequence number for current line */

	/* Active writers */
	__le32 window_wr_lun;	/* Number of parallel LUNs to write */

	/* Bookkeeping for recovery */
	__le32 next_id;		/* Line id for next line */
	__le64 nr_lbas;		/* Number of lbas mapped in line */
	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
	__le64 bb_bitmap[];	/* Updated bad block bitmap for line */
};

struct pblk_emeta {
	struct line_emeta *buf;		/* emeta buffer in media format */
	int mem;			/* Write offset - points to next
					 * writable entry in memory
					 */
	atomic_t sync;			/* Synced - backpointer that signals the
					 * last entry that has been successfully
					 * persisted to media
					 */
	unsigned int nr_entries;	/* Number of emeta entries */
};

struct pblk_smeta {
	struct line_smeta *buf;		/* smeta buffer in persistent format */
};

struct pblk_line {
	struct pblk *pblk;
	unsigned int id;		/* Line number corresponds to the
					 * block line
					 */
	unsigned int seq_nr;		/* Unique line sequence number */
	int state;			/* PBLK_LINESTATE_X */
	int type;			/* PBLK_LINETYPE_X */
	int gc_group;			/* PBLK_LINEGC_X */
	struct list_head list;		/* Free, GC lists */

	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */

	struct pblk_smeta *smeta;	/* Start metadata */
	struct pblk_emeta *emeta;	/* End metadata */

	int meta_line;			/* Metadata line id */
	int meta_distance;		/* Distance between data and metadata */

	u64 smeta_ssec;			/* Sector where smeta starts */
	u64 emeta_ssec;			/* Sector where emeta starts */

	unsigned int sec_in_line;	/* Number of usable secs in line */

	atomic_t blk_in_line;		/* Number of good blocks in line */
	unsigned long *blk_bitmap;	/* Bitmap for valid/invalid blocks */
	unsigned long *erase_bitmap;	/* Bitmap for erased blocks */

	unsigned long *map_bitmap;	/* Bitmap for mapped sectors in line */
	unsigned long *invalid_bitmap;	/* Bitmap for invalid sectors in line */

	atomic_t left_eblks;		/* Blocks left for erasing */
	atomic_t left_seblks;		/* Blocks left for sync erasing */

	int left_msecs;			/* Sectors left for mapping */
	unsigned int cur_sec;		/* Sector map pointer */
	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */

	__le32 *vsc;			/* Valid sector count in line */

	struct kref ref;		/* Write buffer L2P references */

	spinlock_t lock;		/* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
	PBLK_KMALLOC_META = 1,
	PBLK_VMALLOC_META = 2,
};

enum {
	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
	int nr_lines;			/* Total number of full lines */
	int nr_free_lines;		/* Number of full lines in free list */

	/* Free lists - use free_lock */
	struct list_head free_list;	/* Full lines ready to use */
	struct list_head corrupt_list;	/* Full lines corrupted */
	struct list_head bad_list;	/* Full lines bad */

	/* GC lists - use gc_lock */
	struct list_head *gc_lists[PBLK_GC_NR_LISTS];
	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */
	struct list_head gc_full_list;	/* Full lines ready to GC, no valid */
	struct list_head gc_empty_list;	/* Full lines close, all valid */

	struct pblk_line *log_line;	/* Current FTL log line */
	struct pblk_line *data_line;	/* Current data line */
	struct pblk_line *log_next;	/* Next FTL log line */
	struct pblk_line *data_next;	/* Next data line */

	struct list_head emeta_list;	/* Lines queued to schedule emeta */

	__le32 *vsc_list;		/* Valid sector counts for all lines */

	/* Metadata allocation type: VMALLOC | KMALLOC */
	int emeta_alloc_type;

	/* Pre-allocated metadata for data lines */
	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
	unsigned long meta_bitmap;

	/* Helpers for fast bitmap calculations */
	unsigned long *bb_template;
	unsigned long *bb_aux;

	unsigned long d_seq_nr;		/* Data line unique sequence number */
	unsigned long l_seq_nr;		/* Log line unique sequence number */

	spinlock_t free_lock;
	spinlock_t close_lock;
	spinlock_t gc_lock;
};

struct pblk_line_meta {
	unsigned int smeta_len;		/* Total length for smeta */
	unsigned int smeta_sec;		/* Sectors needed for smeta */

	unsigned int emeta_len[4];	/* Lengths for emeta:
					 *  [0]: Total length
					 *  [1]: struct line_emeta length
					 *  [2]: L2P portion length
					 *  [3]: vsc list length
					 */
	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
					 * as emeta_len
					 */

	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */

	unsigned int vsc_list_len;	/* Length for vsc list */
	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */

	unsigned int blk_per_line;	/* Number of blocks in a full line */
	unsigned int sec_per_line;	/* Number of sectors in a line */
	unsigned int dsec_per_line;	/* Number of data sectors in a line */
	unsigned int min_blk_line;	/* Min. number of good blocks in line */

	unsigned int mid_thrs;		/* Threshold for GC mid list */
	unsigned int high_thrs;		/* Threshold for GC high list */

	unsigned int meta_distance;	/* Distance between data and metadata */
};

struct pblk_addr_format {
	u64 ch_mask;
	u64 lun_mask;
	u64 pln_mask;
	u64 blk_mask;
	u64 pg_mask;
	u64 sec_mask;
	u8 ch_offset;
	u8 lun_offset;
	u8 pln_offset;
	u8 blk_offset;
	u8 pg_offset;
	u8 sec_offset;
};

enum {
	PBLK_STATE_RUNNING = 0,
	PBLK_STATE_STOPPING = 1,
	PBLK_STATE_RECOVERING = 2,
	PBLK_STATE_STOPPED = 3,
};

struct pblk {
	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	struct kobject kobj;

	struct pblk_lun *luns;

	struct pblk_line *lines;	/* Line array */
	struct pblk_line_mgmt l_mg;	/* Line management */
	struct pblk_line_meta lm;	/* Line metadata */

	int ppaf_bitsize;
	struct pblk_addr_format ppaf;

	struct pblk_rb rwb;

	int state;		/* pblk instance state (PBLK_STATE_X) */

	int min_write_pgs;	/* Minimum amount of pages required by controller */
	int max_write_pgs;	/* Maximum amount of pages supported by controller */
	int pgs_in_buffer;	/* Number of pages that need to be held in buffer to
				 * guarantee successful reads.
				 */

	sector_t capacity;	/* Device capacity when bad blocks are subtracted */
	int over_pct;		/* Percentage of device used for over-provisioning */

	/* pblk provisioning values. Used by rate limiter */
	struct pblk_rl rl;

	int sec_per_write;

	unsigned char instance_uuid[16];
#ifdef CONFIG_NVM_DEBUG
	/* All debug counters apply to 4 KB sector I/Os */
	atomic_long_t inflight_writes;	/* Inflight writes (user and gc) */
	atomic_long_t padded_writes;	/* Sectors padded due to flush/fua */
	atomic_long_t padded_wb;	/* Sectors padded in write buffer */
	atomic_long_t nr_flush;		/* Number of flush/fua I/O */
	atomic_long_t req_writes;	/* Sectors stored on write buffer */
	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
	atomic_long_t sync_writes;	/* Sectors synced to media */
	atomic_long_t inflight_reads;	/* Inflight sector read requests */
	atomic_long_t cache_reads;	/* Read requests that hit the cache */
	atomic_long_t sync_reads;	/* Completed sector read requests */
	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
	atomic_long_t recov_gc_reads;	/* Sectors submitted from read GC */
#endif

	spinlock_t lock;

	atomic_long_t read_failed;
	atomic_long_t read_empty;
	atomic_long_t read_high_ecc;
	atomic_long_t read_failed_gc;
	atomic_long_t write_failed;
	atomic_long_t erase_failed;

	atomic_t inflight_io;		/* General inflight I/O counter */

	struct task_struct *writer_ts;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	unsigned char *trans_map;
	spinlock_t trans_lock;

	struct list_head compl_list;

	mempool_t *page_pool;
	mempool_t *line_ws_pool;
	mempool_t *rec_pool;
	mempool_t *g_rq_pool;
	mempool_t *w_rq_pool;
	mempool_t *line_meta_pool;

	struct workqueue_struct *close_wq;
	struct workqueue_struct *bb_wq;

	struct timer_list wtimer;

	struct pblk_gc gc;
};

struct pblk_line_ws {
	struct pblk *pblk;
	struct pblk_line *line;
	void *priv;
	struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
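
/*
 * These sizes back the g_rq/w_rq mempools: each allocation holds a
 * struct nvm_rq with its pblk context laid out immediately behind it,
 * so the context is reachable via nvm_rq_to_pdu() and the layout is
 * inverted by nvm_rq_from_c_ctx() below. A sketch (illustrative, not
 * the exact call sites):
 *
 *	struct nvm_rq *rqd = mempool_alloc(pblk->w_rq_pool, GFP_KERNEL);
 *	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 */
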
/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
		 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
			    unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 struct bio *bio, unsigned int pos,
				 unsigned int nr_entries, unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
				      struct list_head *list,
				      unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
					      struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
void pblk_wait_for_meta(struct pblk *pblk);
struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
void pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta_sync(struct pblk *pblk);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void pblk_line_mark_bb(struct work_struct *work);
void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *),
		      struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush);
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap);
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap);
void pblk_end_bio_sync(struct bio *bio);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
			   struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
			   unsigned int nr_entries, unsigned int nr_rec_entries,
			   struct pblk_line *gc_line, unsigned long flags);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(unsigned long data);
void pblk_write_should_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
			unsigned int nr_secs, unsigned int *secs_to_gc,
			struct pblk_line *line);

/*
 * pblk recovery
 */
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8	/* Max number of outstanding GC reader jobs */
#define PBLK_GC_W_QD 128	/* Queue depth for inflight GC write I/Os */
#define PBLK_GC_L_QD 4		/* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1	/* Reserved lines for GC */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_kick(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
int pblk_rl_low_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
int pblk_rl_is_limit(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
	if (type == PBLK_KMALLOC_META)
		return kmalloc(size, flags);
	return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
	if (type == PBLK_KMALLOC_META)
		kfree(ptr);
	else
		vfree(ptr);
}
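
/*
 * The allocation type must be carried alongside the pointer so that
 * kmalloc pairs with kfree and vmalloc with vfree. Sketch of the
 * intended pairing (illustrative; real callers pass the line manager's
 * emeta_alloc_type):
 *
 *	buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
 *			  GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	pblk_mfree(buf, l_mg->emeta_alloc_type);
 */
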
static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
	return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
	return emeta->bb_bitmap;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
	return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}
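
/*
 * Together these helpers walk the on-media emeta layout documented
 * above struct line_emeta: header region, then the lba list, then the
 * vsc list. Sketch of a consumer:
 *
 *	struct line_emeta *emeta = line->emeta->buf;
 *	__le64 *lba_list = emeta_to_lbas(pblk, emeta);
 *	__le32 *vsc_list = emeta_to_vsc(pblk, emeta);
 */
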
static inline int pblk_line_vsc(struct pblk_line *line)
{
	int vsc;

	spin_lock(&line->lock);
	vsc = le32_to_cpu(*line->vsc);
	spin_unlock(&line->lock);

	return vsc;
}

#define NVM_MEM_PAGE_WRITE (8)

static inline int pblk_pad_distance(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
}

static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

/* A block within a line corresponds to the lun */
static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;
}

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
							pblk->ppaf.blk_offset;
		ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
							pblk->ppaf.pg_offset;
		ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
							pblk->ppaf.lun_offset;
		ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
							pblk->ppaf.ch_offset;
		ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
							pblk->ppaf.pln_offset;
		ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
							pblk->ppaf.sec_offset;
	}

	return ppa64;
}

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
						 sector_t lba)
{
	struct ppa_addr ppa;

	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
	} else {
		struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

		ppa = map[lba];
	}

	return ppa;
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
		ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
		ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
		ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
		ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
		ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
	}

	return ppa32;
}
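
/*
 * When the device address fits in 32 bits (ppaf_bitsize < 32), the two
 * conversions above are exact inverses of each other, which is what
 * lets the L2P table store compact u32 entries. Round-trip sketch:
 *
 *	u32 packed = pblk_ppa64_to_ppa32(pblk, ppa);
 *	struct ppa_addr unpacked = pblk_ppa32_to_ppa64(pblk, packed);
 *	WARN_ON(unpacked.ppa != ppa.ppa);
 */
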
static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
				      struct ppa_addr ppa)
{
	if (pblk->ppaf_bitsize < 32) {
		u32 *map = (u32 *)pblk->trans_map;

		map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
	} else {
		u64 *map = (u64 *)pblk->trans_map;

		map[lba] = ppa.ppa;
	}
}
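
/*
 * Neither accessor takes a lock itself; callers serialize on
 * pblk->trans_lock. A typical (sketched) update sequence:
 *
 *	spin_lock(&pblk->trans_lock);
 *	old_ppa = pblk_trans_map_get(pblk, lba);
 *	pblk_trans_map_set(pblk, lba, new_ppa);
 *	spin_unlock(&pblk->trans_lock);
 */
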
static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
					    struct ppa_addr p)
{
	u64 paddr;

	paddr = 0;
	paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
	paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
	paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
	paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
	paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;

	return paddr;
}

static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
	return (lppa.ppa == rppa.ppa);
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
	return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
	struct ppa_addr p;

	p.c.line = addr;
	p.c.is_cached = 1;

	return p;
}

static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
					      u64 line_id)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;
	ppa.g.blk = line_id;
	ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
	ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
	ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
	ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
	ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;

	return ppa;
}
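
/*
 * addr_to_gen_ppa() is the inverse of pblk_dev_ppa_to_line_addr(),
 * plus the owning line id. Sketch: turn a just-allocated line-local
 * sector into a device address for line->id:
 *
 *	u64 paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
 *	struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line->id);
 */
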
static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
					       u64 line_id)
{
	struct ppa_addr ppa;

	ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return ppa;
}

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
					    struct line_header *header)
{
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
				      struct line_smeta *smeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)smeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->smeta_len -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
				      struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	u32 crc = ~(u32)0;

	crc = crc32_le(crc, (unsigned char *)emeta +
				sizeof(struct line_header) + sizeof(crc),
				lm->emeta_len[0] -
				sizeof(struct line_header) - sizeof(crc));

	return crc;
}
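
/*
 * In each case the crc field itself is skipped, so a stored checksum
 * can be compared directly against a recomputed one. Verification
 * sketch for emeta (smeta and the header are analogous):
 *
 *	if (le32_to_cpu(emeta->crc) != pblk_calc_emeta_crc(pblk, emeta))
 *		return -EINVAL;
 */
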
static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = geo->plane_mode >> 1;

	if (type == WRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

enum {
	PBLK_READ_RANDOM = 0,
	PBLK_READ_SEQUENTIAL = 1,
};

static inline int pblk_set_read_mode(struct pblk *pblk, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int flags;

	flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
	if (type == PBLK_READ_SEQUENTIAL)
		flags |= geo->plane_mode >> 1;

	return flags;
}
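
/*
 * The returned flag words are meant for nvm_rq->flags before
 * submission, e.g. (sketch of a sequential read setup):
 *
 *	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
 *	ret = pblk_submit_io(pblk, rqd);
 */
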
static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
	return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_DEBUG
static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
{
	if (p->c.is_cached) {
		pr_err("ppa: (%s: %x) cache line: %llu\n",
				msg, error, (u64)p->c.line);
	} else {
		pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
			msg, error,
			p->g.ch, p->g.lun, p->g.blk,
			p->g.pg, p->g.pl, p->g.sec);
	}
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
					 int error)
{
	int bit = -1;

	if (rqd->nr_ppas == 1) {
		print_ppa(&rqd->ppa_addr, "rqd", error);
		return;
	}

	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
						bit + 1)) < rqd->nr_ppas) {
		print_ppa(&rqd->ppa_list[bit], "rqd", error);
	}

	pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
#endif

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
					   struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr *ppa;
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa = &ppas[i];

		if (!ppa->c.is_cached &&
				ppa->g.ch < geo->nr_chnls &&
				ppa->g.lun < geo->luns_per_chnl &&
				ppa->g.pl < geo->nr_planes &&
				ppa->g.blk < geo->blks_per_lun &&
				ppa->g.pg < geo->pgs_per_blk &&
				ppa->g.sec < geo->sec_per_pg)
			continue;

#ifdef CONFIG_NVM_DEBUG
		print_ppa(ppa, "boundary", i);
#endif
		return 1;
	}
	return 0;
}

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;

	if (paddr > lm->sec_per_line)
		return 1;

	return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
	return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
	return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}

static inline sector_t pblk_get_sector(sector_t lba)
{
	return lba * NR_PHY_IN_LOG;
}
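
/*
 * Host sectors are 512 B while pblk exposes 4 KB pages, so
 * NR_PHY_IN_LOG is 8 and the helpers above scale between the units.
 * Worked example: a bio starting at bi_sector 4096 with bi_size 32768
 * covers lba 512 (4096 / 8) and 8 exposed pages (32768 / 4096);
 * pblk_get_sector(512) maps back to sector 4096.
 */
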
static inline void pblk_setup_uuid(struct pblk *pblk)
{
	uuid_le uuid;

	uuid_le_gen(&uuid);
	memcpy(pblk->instance_uuid, uuid.b, 16);
}
#endif /* PBLK_H_ */