/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run only GC if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)

#define PBLK_DEFAULT_OP (11)

enum {
        PBLK_READ               = READ,
        PBLK_WRITE              = WRITE,        /* Write from write buffer */
        PBLK_WRITE_INT,                         /* Internal write - no write buffer */
        PBLK_READ_RECOV,                        /* Recovery read - errors allowed */
        PBLK_ERASE,
};

enum {
        /* IO Types */
        PBLK_IOTYPE_USER        = 1 << 0,
        PBLK_IOTYPE_GC          = 1 << 1,

        /* Write buffer flags */
        PBLK_FLUSH_ENTRY        = 1 << 2,
        PBLK_WRITTEN_DATA       = 1 << 3,
        PBLK_SUBMITTED_ENTRY    = 1 << 4,
        PBLK_WRITABLE_ENTRY     = 1 << 5,
};

enum {
        PBLK_BLK_ST_OPEN =      0x1,
        PBLK_BLK_ST_CLOSED =    0x2,
};

struct pblk_sec_meta {
        u64 reserved;
        __le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 3

enum {
        PBLK_RL_HIGH = 1,
        PBLK_RL_MID = 2,
        PBLK_RL_LOW = 3,
};

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)

/* write buffer completion context */
struct pblk_c_ctx {
        struct list_head list;          /* Head for out-of-order completion */
        unsigned long *lun_bitmap;      /* Luns used on current request */
        unsigned int sentry;
        unsigned int nr_valid;
        unsigned int nr_padded;
};

/* read context */
struct pblk_g_ctx {
        void *private;
        unsigned long start_time;
        u64 lba;
};

/* Pad context */
struct pblk_pad_rq {
        struct pblk *pblk;
        struct completion wait;
        struct kref ref;
};

/* Recovery context */
struct pblk_rec_ctx {
        struct pblk *pblk;
        struct nvm_rq *rqd;
        struct list_head failed;
        struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
        struct bio_list bios;           /* Original bios - used for completion
                                         * in REQ_FUA, REQ_FLUSH case
                                         */
        u64 lba;                        /* Logic addr. associated with entry */
        struct ppa_addr ppa;            /* Physic addr. associated with entry */
        int flags;                      /* Write context flags */
};

struct pblk_rb_entry {
        struct ppa_addr cacheline;      /* Cacheline for this entry */
        void *data;                     /* Pointer to data on this entry */
        struct pblk_w_ctx w_ctx;        /* Context for this entry */
        struct list_head index;         /* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
        struct page *pages;
        int order;
        struct list_head list;
};

struct pblk_rb {
        struct pblk_rb_entry *entries;  /* Ring buffer entries */
        unsigned int mem;               /* Write offset - points to next
                                         * writable entry in memory
                                         */
        unsigned int subm;              /* Read offset - points to last entry
                                         * that has been submitted to the media
                                         * to be persisted
                                         */
        unsigned int sync;              /* Synced - backpointer that signals
                                         * the last submitted entry that has
                                         * been successfully persisted to media
                                         */
        unsigned int flush_point;       /* Sync point - last entry that must be
                                         * flushed to the media. Used with
                                         * REQ_FLUSH and REQ_FUA
                                         */
        unsigned int l2p_update;        /* l2p update point - next entry for
                                         * which l2p mapping will be updated to
                                         * contain a device ppa address (instead
                                         * of a cacheline)
                                         */
        unsigned int nr_entries;        /* Number of entries in write buffer -
                                         * must be a power of two
                                         */
        unsigned int seg_size;          /* Size of the data segments being
                                         * stored on each entry. Typically this
                                         * will be 4KB
                                         */

        struct list_head pages;         /* List of data pages */

        spinlock_t w_lock;              /* Write lock */
        spinlock_t s_lock;              /* Sync lock */

#ifdef CONFIG_NVM_DEBUG
        atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
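
/*
 * An illustrative summary of the pointer discipline above (no new API, just
 * restating the field comments): entries move through the buffer as
 *
 *      l2p_update <= sync <= subm <= mem       (modulo nr_entries)
 *
 * i.e. an entry is first written into memory (mem), then submitted to the
 * device (subm), then acknowledged as persisted (sync), and only then is its
 * L2P mapping switched from a cacheline to a device address (l2p_update).
 * flush_point marks the newest entry that must reach the media before
 * outstanding REQ_FLUSH/REQ_FUA requests can complete.
 */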

#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
        struct ppa_addr bppa;
        struct semaphore wr_sem;
};

struct pblk_gc_rq {
        struct pblk_line *line;
        void *data;
        u64 paddr_list[PBLK_MAX_REQ_ADDRS];
        u64 lba_list[PBLK_MAX_REQ_ADDRS];
        int nr_secs;
        int secs_to_gc;
        struct list_head list;
};

struct pblk_gc {
        /* These states are not protected by a lock since (i) they are in the
         * fast path, and (ii) they are not critical.
         */
        int gc_active;
        int gc_enabled;
        int gc_forced;

        struct task_struct *gc_ts;
        struct task_struct *gc_writer_ts;
        struct task_struct *gc_reader_ts;

        struct workqueue_struct *gc_line_reader_wq;
        struct workqueue_struct *gc_reader_wq;

        struct timer_list gc_timer;

        struct semaphore gc_sem;
        atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
        atomic_t pipeline_gc;      /* Number of lines in the GC pipeline -
                                    * started reads to finished writes
                                    */
        int w_entries;

        struct list_head w_list;
        struct list_head r_list;

        spinlock_t lock;
        spinlock_t w_lock;
        spinlock_t r_lock;
};

struct pblk_rl {
        unsigned int high;      /* Upper threshold for rate limiter (free run -
                                 * user I/O rate limiter)
                                 */
        unsigned int high_pw;   /* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8   /* Begin write limit at 12% available blks */
#define PBLK_USER_LOW_THRS 10   /* Aggressive GC at 10% available blocks */

        int rb_windows_pw;      /* Number of rate windows in the write buffer
                                 * given as a power-of-2. This guarantees that
                                 * when user I/O is being rate limited, there
                                 * will be reserved enough space for the GC to
                                 * place its payload. A window is of
                                 * pblk->max_write_pgs size, which in NVMe is
                                 * 64, i.e., 256kb.
                                 */
        int rb_budget;          /* Total number of entries available for I/O */
        int rb_user_max;        /* Max buffer entries available for user I/O */
        int rb_gc_max;          /* Max buffer entries available for GC I/O */
        int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
        int rb_state;           /* Rate-limiter current state */
        int rb_max_io;          /* Maximum size for an I/O given the config */

        atomic_t rb_user_cnt;   /* User I/O buffer counter */
        atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
        atomic_t rb_space;      /* Space limit in case of reaching capacity */

        int rsv_blocks;         /* Reserved blocks for GC */

        int rb_user_active;
        int rb_gc_active;

        struct timer_list u_timer;

        unsigned long long nr_secs;
        unsigned long total_blocks;

        atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
        atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
};
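
/*
 * Note on the thresholds above: the rate-limiter divides rb_budget between
 * user and GC I/O according to rb_state. Roughly, while more than
 * 1/PBLK_USER_HIGH_THRS of the blocks are free, user I/O may take the whole
 * budget; as free blocks approach 1/PBLK_USER_LOW_THRS, rb_user_max shrinks
 * and rb_gc_max grows so GC is guaranteed room to place its payload.
 */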

#define PBLK_LINE_EMPTY (~0U)

enum {
        /* Line Types */
        PBLK_LINETYPE_FREE = 0,
        PBLK_LINETYPE_LOG = 1,
        PBLK_LINETYPE_DATA = 2,

        /* Line state */
        PBLK_LINESTATE_NEW = 9,
        PBLK_LINESTATE_FREE = 10,
        PBLK_LINESTATE_OPEN = 11,
        PBLK_LINESTATE_CLOSED = 12,
        PBLK_LINESTATE_GC = 13,
        PBLK_LINESTATE_BAD = 14,
        PBLK_LINESTATE_CORRUPT = 15,

        /* GC group */
        PBLK_LINEGC_NONE = 20,
        PBLK_LINEGC_EMPTY = 21,
        PBLK_LINEGC_LOW = 22,
        PBLK_LINEGC_MID = 23,
        PBLK_LINEGC_HIGH = 24,
        PBLK_LINEGC_FULL = 25,
};

#define PBLK_MAGIC 0x70626c6b /* pblk */

/* emeta/smeta persistent storage format versions:
 * Changes in major version require offline migration.
 * Changes in minor version are handled automatically during
 * recovery.
 */

#define SMETA_VERSION_MAJOR (0)
#define SMETA_VERSION_MINOR (1)

#define EMETA_VERSION_MAJOR (0)
#define EMETA_VERSION_MINOR (2)

struct line_header {
        __le32 crc;
        __le32 identifier;      /* pblk identifier */
        __u8 uuid[16];          /* instance uuid */
        __le16 type;            /* line type */
        __u8 version_major;     /* version major */
        __u8 version_minor;     /* version minor */
        __le32 id;              /* line id for current line */
};

struct line_smeta {
        struct line_header header;

        __le32 crc;             /* Full structure including struct crc */

        /* Previous line metadata */
        __le32 prev_id;         /* Line id for previous line */

        /* Current line metadata */
        __le64 seq_nr;          /* Sequence number for current line */

        /* Active writers */
        __le32 window_wr_lun;   /* Number of parallel LUNs to write */

        __le32 rsvd[2];

        __le64 lun_bitmap[];
};

/*
 * Metadata layout in media:
 *      First sector:
 *              1. struct line_emeta
 *              2. bad block bitmap (u64 * window_wr_lun)
 *              3. write amplification counters
 *      Mid sectors (start at lbas_sector):
 *              4. nr_lbas (u64) forming lba list
 *      Last sectors (start at vsc_sector):
 *              5. u32 valid sector count (vsc) for all lines (~0U: free line)
 */

struct line_emeta {
        struct line_header header;

        __le32 crc;             /* Full structure including struct crc */

        /* Previous line metadata */
        __le32 prev_id;         /* Line id for prev line */

        /* Current line metadata */
        __le64 seq_nr;          /* Sequence number for current line */

        /* Active writers */
        __le32 window_wr_lun;   /* Number of parallel LUNs to write */

        /* Bookkeeping for recovery */
        __le32 next_id;         /* Line id for next line */
        __le64 nr_lbas;         /* Number of lbas mapped in line */
        __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
        __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
};

/* Write amplification counters stored on media */
struct wa_counters {
        __le64 user;            /* Number of user written sectors */
        __le64 gc;              /* Number of sectors written by GC */
        __le64 pad;             /* Number of padded sectors */
};

struct pblk_emeta {
        struct line_emeta *buf;         /* emeta buffer in media format */
        int mem;                        /* Write offset - points to next
                                         * writable entry in memory
                                         */
        atomic_t sync;                  /* Synced - backpointer that signals the
                                         * last entry that has been successfully
                                         * persisted to media
                                         */
        unsigned int nr_entries;        /* Number of emeta entries */
};

struct pblk_smeta {
        struct line_smeta *buf;         /* smeta buffer in persistent format */
};

struct pblk_line {
        struct pblk *pblk;
        unsigned int id;                /* Line number corresponds to the
                                         * block line
                                         */
        unsigned int seq_nr;            /* Unique line sequence number */
        int state;                      /* PBLK_LINESTATE_X */
        int type;                       /* PBLK_LINETYPE_X */
        int gc_group;                   /* PBLK_LINEGC_X */
        struct list_head list;          /* Free, GC lists */

        unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */

        struct nvm_chk_meta *chks;      /* Chunks forming line */

        struct pblk_smeta *smeta;       /* Start metadata */
        struct pblk_emeta *emeta;       /* End metadata */

        int meta_line;                  /* Metadata line id */
        int meta_distance;              /* Distance between data and metadata */

        u64 smeta_ssec;                 /* Sector where smeta starts */
        u64 emeta_ssec;                 /* Sector where emeta starts */

        unsigned int sec_in_line;       /* Number of usable secs in line */

        atomic_t blk_in_line;           /* Number of good blocks in line */
        unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
        unsigned long *erase_bitmap;    /* Bitmap for erased blocks */

        unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
        unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */

        atomic_t left_eblks;            /* Blocks left for erasing */
        atomic_t left_seblks;           /* Blocks left for sync erasing */

        int left_msecs;                 /* Sectors left for mapping */
        unsigned int cur_sec;           /* Sector map pointer */
        unsigned int nr_valid_lbas;     /* Number of valid lbas in line */

        __le32 *vsc;                    /* Valid sector count in line */

        struct kref ref;                /* Write buffer L2P references */

        spinlock_t lock;                /* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
        PBLK_KMALLOC_META = 1,
        PBLK_VMALLOC_META = 2,
};

enum {
        PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
        PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
        PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
        int nr_lines;                   /* Total number of full lines */
        int nr_free_lines;              /* Number of full lines in free list */

        /* Free lists - use free_lock */
        struct list_head free_list;     /* Full lines ready to use */
        struct list_head corrupt_list;  /* Full lines corrupted */
        struct list_head bad_list;      /* Full lines bad */

        /* GC lists - use gc_lock */
        struct list_head *gc_lists[PBLK_GC_NR_LISTS];
        struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
        struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
        struct list_head gc_low_list;   /* Full lines ready to GC, low isc */
        struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
        struct list_head gc_empty_list; /* Full lines close, all valid */

        struct pblk_line *log_line;     /* Current FTL log line */
        struct pblk_line *data_line;    /* Current data line */
        struct pblk_line *log_next;     /* Next FTL log line */
        struct pblk_line *data_next;    /* Next data line */

        struct list_head emeta_list;    /* Lines queued to schedule emeta */

        __le32 *vsc_list;               /* Valid sector counts for all lines */

        /* Metadata allocation type: VMALLOC | KMALLOC */
        int emeta_alloc_type;

        /* Pre-allocated metadata for data lines */
        struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
        struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
        unsigned long meta_bitmap;

        /* Helpers for fast bitmap calculations */
        unsigned long *bb_template;
        unsigned long *bb_aux;

        unsigned long d_seq_nr;         /* Data line unique sequence number */
        unsigned long l_seq_nr;         /* Log line unique sequence number */

        spinlock_t free_lock;
        spinlock_t close_lock;
        spinlock_t gc_lock;
};

struct pblk_line_meta {
        unsigned int smeta_len;         /* Total length for smeta */
        unsigned int smeta_sec;         /* Sectors needed for smeta */

        unsigned int emeta_len[4];      /* Lengths for emeta:
                                         *  [0]: Total
                                         *  [1]: struct line_emeta +
                                         *       bb_bitmap + struct wa_counters
                                         *  [2]: L2P portion
                                         *  [3]: vsc
                                         */
        unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
                                         * as emeta_len
                                         */
        unsigned int emeta_bb;          /* Boundary for bb that affects emeta */
        unsigned int vsc_list_len;      /* Length for vsc list */
        unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
        unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
        unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */
        unsigned int blk_per_line;      /* Number of blocks in a full line */
        unsigned int sec_per_line;      /* Number of sectors in a line */
        unsigned int dsec_per_line;     /* Number of data sectors in a line */
        unsigned int min_blk_line;      /* Min. number of good blocks in line */

        unsigned int mid_thrs;          /* Threshold for GC mid list */
        unsigned int high_thrs;         /* Threshold for GC high list */

        unsigned int meta_distance;     /* Distance between data and metadata */
};

enum {
        PBLK_STATE_RUNNING = 0,
        PBLK_STATE_STOPPING = 1,
        PBLK_STATE_RECOVERING = 2,
        PBLK_STATE_STOPPED = 3,
};

/* Internal format to support not power-of-2 device formats */
struct pblk_addrf {
        /* gen to dev */
        int sec_stripe;
        int ch_stripe;
        int lun_stripe;

        /* dev to gen */
        int sec_lun_stripe;
        int sec_ws_stripe;
};

struct pblk {
        struct nvm_tgt_dev *dev;
        struct gendisk *disk;

        struct kobject kobj;

        struct pblk_lun *luns;

        struct pblk_line *lines;        /* Line array */
        struct pblk_line_mgmt l_mg;     /* Line management */
        struct pblk_line_meta lm;       /* Line metadata */

        struct nvm_addrf addrf;         /* Aligned address format */
        struct pblk_addrf uaddrf;       /* Unaligned address format */
        int addrf_len;

        struct pblk_rb rwb;

        int state;                      /* pblk line state */

        int min_write_pgs;      /* Minimum amount of pages required by controller */
        int max_write_pgs;      /* Maximum amount of pages supported by controller */
        int pgs_in_buffer;      /* Number of pages that need to be held in buffer to
                                 * guarantee successful reads.
                                 */

        sector_t capacity;      /* Device capacity when bad blocks are subtracted */

        int op;                 /* Percentage of device used for over-provisioning */
        int op_blks;            /* Number of blocks used for over-provisioning */

        /* pblk provisioning values. Used by rate limiter */
        struct pblk_rl rl;

        int sec_per_write;

        unsigned char instance_uuid[16];

        /* Persistent write amplification counters, 4kb sector I/Os */
        atomic64_t user_wa;             /* Sectors written by user */
        atomic64_t gc_wa;               /* Sectors written by GC */
        atomic64_t pad_wa;              /* Padded sectors written */

        /* Reset values for delta write amplification measurements */
        u64 user_rst_wa;
        u64 gc_rst_wa;
        u64 pad_rst_wa;

        /* Counters used for calculating padding distribution */
        atomic64_t *pad_dist;           /* Padding distribution buckets */
        u64 nr_flush_rst;               /* Flushes reset value for pad dist. */
        atomic64_t nr_flush;            /* Number of flush/fua I/O */

#ifdef CONFIG_NVM_DEBUG
        /* Non-persistent debug counters, 4kb sector I/Os */
        atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
        atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
        atomic_long_t padded_wb;        /* Sectors padded in write buffer */
        atomic_long_t req_writes;       /* Sectors stored on write buffer */
        atomic_long_t sub_writes;       /* Sectors submitted from buffer */
        atomic_long_t sync_writes;      /* Sectors synced to media */
        atomic_long_t inflight_reads;   /* Inflight sector read requests */
        atomic_long_t cache_reads;      /* Read requests that hit the cache */
        atomic_long_t sync_reads;       /* Completed sector read requests */
        atomic_long_t recov_writes;     /* Sectors submitted from recovery */
        atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
        atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
#endif

        spinlock_t lock;

        atomic_long_t read_failed;
        atomic_long_t read_empty;
        atomic_long_t read_high_ecc;
        atomic_long_t read_failed_gc;
        atomic_long_t write_failed;
        atomic_long_t erase_failed;

        atomic_t inflight_io;           /* General inflight I/O counter */

        struct task_struct *writer_ts;

        /* Simple translation map of logical addresses to physical addresses.
         * The logical addresses are known by the host system, while the
         * physical addresses are used when writing to the disk block device.
         */
        unsigned char *trans_map;
        spinlock_t trans_lock;

        struct list_head compl_list;

        mempool_t *page_bio_pool;
        mempool_t *gen_ws_pool;
        mempool_t *rec_pool;
        mempool_t *r_rq_pool;
        mempool_t *w_rq_pool;
        mempool_t *e_rq_pool;

        struct workqueue_struct *close_wq;
        struct workqueue_struct *bb_wq;
        struct workqueue_struct *r_end_wq;

        struct timer_list wtimer;

        struct pblk_gc gc;
};

struct pblk_line_ws {
        struct pblk *pblk;
        struct pblk_line *line;
        void *priv;
        struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz);
unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
void *pblk_rb_entries_ref(struct pblk_rb *rb);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
                            u64 paddr, unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 unsigned int pos, unsigned int nr_entries,
                                 unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
                                      struct list_head *list,
                                      unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_data_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
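
/*
 * Illustrative buffered-write flow through the API above, assuming a single
 * 4KB entry (an editorial sketch, not verbatim pblk code; error handling,
 * locking and multi-entry loops elided):
 *
 *      unsigned int pos;
 *
 *      if (pblk_rb_may_write_user(&pblk->rwb, bio, 1, &pos) == NVM_IO_OK) {
 *              struct pblk_w_ctx w_ctx = { .lba = lba, .flags = flags };
 *
 *              pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);
 *      }
 *
 * The write thread later drains committed entries with pblk_rb_read_to_bio()
 * and, on I/O completion, advances the sync pointer under
 * pblk_rb_sync_init()/pblk_rb_sync_advance()/pblk_rb_sync_end().
 */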

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                        struct pblk_c_ctx *c_ctx);
void pblk_discard(struct pblk *pblk, struct bio *bio);
struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk);
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
                                        struct nvm_chk_meta *lp,
                                        struct ppa_addr ppa);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                     void (*work)(struct work_struct *), gfp_t gfp_mask,
                     struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
void pblk_line_put_wq(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush);
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                  unsigned long *lun_bitmap);
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
                unsigned long *lun_bitmap);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
                           struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
                         struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
                        unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
                       unsigned int sentry, unsigned long *lun_bitmap,
                       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
                 unsigned long *lun_bitmap, unsigned int valid_secs,
                 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk recovery
 */
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
                        struct pblk_rec_ctx *recovery, u64 *comp_bits,
                        unsigned int comp);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8   /* Max number of outstanding GC reader jobs */
#define PBLK_GC_RQ_QD 128       /* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4          /* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1      /* Reserved lines for GC */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
                            bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
        if (type == PBLK_KMALLOC_META)
                return kmalloc(size, flags);
        return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
        if (type == PBLK_KMALLOC_META)
                kfree(ptr);
        else
                vfree(ptr);
}

static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
        return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
        return emeta->bb_bitmap;
}

static inline void *emeta_to_wa(struct pblk_line_meta *lm,
                                struct line_emeta *emeta)
{
        return emeta->bb_bitmap + lm->blk_bitmap_len;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
        return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
        return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}
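
/*
 * The accessors above mirror the emeta media layout documented at
 * struct line_emeta: the bad block bitmap starts at bb_bitmap, the write
 * amplification counters follow it, the lba list starts emeta_len[1] bytes
 * into the buffer (the header portion), and the vsc list follows the
 * emeta_len[2] bytes of the L2P portion.
 */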

static inline int pblk_line_vsc(struct pblk_line *line)
{
        return le32_to_cpu(*line->vsc);
}

static inline int pblk_pad_distance(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        return geo->mw_cunits * geo->all_luns * geo->ws_opt;
}

static inline int pblk_ppa_to_line(struct ppa_addr p)
{
        return p.a.blk;
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
        return p.a.lun * geo->num_ch + p.a.ch;
}

static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
                                              u64 line_id)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

                ppa.ppa = 0;
                ppa.g.blk = line_id;
                ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
                ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
                ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
                ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
                ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
        } else {
                struct pblk_addrf *uaddrf = &pblk->uaddrf;
                int secs, chnls, luns;

                ppa.ppa = 0;
                ppa.m.chk = line_id;

                paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
                ppa.m.sec = secs;

                paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
                ppa.m.grp = chnls;

                paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
                ppa.m.pu = luns;

                ppa.m.sec += uaddrf->sec_stripe * paddr;
        }

        return ppa;
}
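
/*
 * Worked example for the striped (2.0) branch above, with illustrative
 * geometry sec_stripe = 4, ch_stripe = 2, lun_stripe = 2 and paddr = 13:
 * 13 -> sec 1 (remainder), 3 -> grp 1, 1 -> pu 1, 0 left over, giving
 * ppa.m.sec = 1 + 4 * 0 = 1. Consecutive paddrs thus stripe first across
 * sectors, then channels, then LUNs, before wrapping to the next stripe.
 */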

static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
                                            struct ppa_addr p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        u64 paddr;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

                paddr = (u64)p.g.ch << ppaf->ch_offset;
                paddr |= (u64)p.g.lun << ppaf->lun_offset;
                paddr |= (u64)p.g.pg << ppaf->pg_offset;
                paddr |= (u64)p.g.pl << ppaf->pln_offset;
                paddr |= (u64)p.g.sec << ppaf->sec_offset;
        } else {
                struct pblk_addrf *uaddrf = &pblk->uaddrf;
                u64 secs = p.m.sec;
                int sec_stripe;

                paddr = (u64)p.m.grp * uaddrf->sec_stripe;
                paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;

                secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
                paddr += secs * uaddrf->sec_ws_stripe;
                paddr += sec_stripe;
        }

        return paddr;
}

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
        struct ppa_addr ppa64;

        ppa64.ppa = 0;

        if (ppa32 == -1) {
                ppa64.ppa = ADDR_EMPTY;
        } else if (ppa32 & (1U << 31)) {
                ppa64.c.line = ppa32 & ((~0U) >> 1);
                ppa64.c.is_cached = 1;
        } else {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf =
                                        (struct nvm_addrf_12 *)&pblk->addrf;

                        ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
                                                        ppaf->ch_offset;
                        ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
                                                        ppaf->lun_offset;
                        ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
                                                        ppaf->blk_offset;
                        ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
                                                        ppaf->pg_offset;
                        ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
                                                        ppaf->pln_offset;
                        ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
                                                        ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = &pblk->addrf;

                        ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
                                                        lbaf->ch_offset;
                        ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
                                                        lbaf->lun_offset;
                        ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
                                                        lbaf->chk_offset;
                        ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
                                                        lbaf->sec_offset;
                }
        }

        return ppa64;
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
        u32 ppa32 = 0;

        if (ppa64.ppa == ADDR_EMPTY) {
                ppa32 = ~0U;
        } else if (ppa64.c.is_cached) {
                ppa32 |= ppa64.c.line;
                ppa32 |= 1U << 31;
        } else {
                struct nvm_tgt_dev *dev = pblk->dev;
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf =
                                        (struct nvm_addrf_12 *)&pblk->addrf;

                        ppa32 |= ppa64.g.ch << ppaf->ch_offset;
                        ppa32 |= ppa64.g.lun << ppaf->lun_offset;
                        ppa32 |= ppa64.g.blk << ppaf->blk_offset;
                        ppa32 |= ppa64.g.pg << ppaf->pg_offset;
                        ppa32 |= ppa64.g.pl << ppaf->pln_offset;
                        ppa32 |= ppa64.g.sec << ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = &pblk->addrf;

                        ppa32 |= ppa64.m.grp << lbaf->ch_offset;
                        ppa32 |= ppa64.m.pu << lbaf->lun_offset;
                        ppa32 |= ppa64.m.chk << lbaf->chk_offset;
                        ppa32 |= ppa64.m.sec << lbaf->sec_offset;
                }
        }

        return ppa32;
}
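
/*
 * 32-bit L2P entry encoding shared by the two converters above: ~0U marks an
 * empty entry, a set bit 31 marks a cache address whose low 31 bits hold the
 * cacheline, and any other value is a device address packed with the
 * target's address format.
 */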

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
                                                 sector_t lba)
{
        struct ppa_addr ppa;

        if (pblk->addrf_len < 32) {
                u32 *map = (u32 *)pblk->trans_map;

                ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
        } else {
                struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

                ppa = map[lba];
        }

        return ppa;
}

static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
                                      struct ppa_addr ppa)
{
        if (pblk->addrf_len < 32) {
                u32 *map = (u32 *)pblk->trans_map;

                map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
        } else {
                u64 *map = (u64 *)pblk->trans_map;

                map[lba] = ppa.ppa;
        }
}
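
/*
 * When the packed device address fits in 32 bits (addrf_len < 32), the
 * translation map stores u32 entries and converts on access, halving the
 * host memory needed for the L2P table; otherwise full 64-bit entries are
 * stored directly.
 */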

static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
        return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
        ppa_addr->ppa = ADDR_EMPTY;
}

static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
        return (lppa.ppa == rppa.ppa);
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
        return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
        return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
        struct ppa_addr p;

        p.c.line = addr;
        p.c.is_cached = 1;

        return p;
}

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
                                            struct line_header *header)
{
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
                                      struct line_smeta *smeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)smeta +
                                sizeof(struct line_header) + sizeof(crc),
                                lm->smeta_len -
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
                                      struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)emeta +
                                sizeof(struct line_header) + sizeof(crc),
                                lm->emeta_len[0] -
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}
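
/*
 * All three CRC helpers above seed with ~0 and skip the crc field being
 * computed: the header CRC covers struct line_header minus its leading crc,
 * while the smeta/emeta CRCs cover everything after the embedded header and
 * crc word, up to smeta_len and emeta_len[0] bytes respectively.
 */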

static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int flags;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;

        flags = geo->pln_mode >> 1;

        if (type == PBLK_WRITE)
                flags |= NVM_IO_SCRAMBLE_ENABLE;

        return flags;
}

enum {
        PBLK_READ_RANDOM        = 0,
        PBLK_READ_SEQUENTIAL    = 1,
};

static inline int pblk_set_read_mode(struct pblk *pblk, int type)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int flags;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;

        flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
        if (type == PBLK_READ_SEQUENTIAL)
                flags |= geo->pln_mode >> 1;

        return flags;
}

static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
        return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_DEBUG
static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p,
                             char *msg, int error)
{
        if (p->c.is_cached) {
                pr_err("ppa: (%s: %x) cache line: %llu\n",
                                msg, error, (u64)p->c.line);
        } else if (geo->version == NVM_OCSSD_SPEC_12) {
                pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
                                msg, error,
                                p->g.ch, p->g.lun, p->g.blk,
                                p->g.pg, p->g.pl, p->g.sec);
        } else {
                pr_err("ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
                                msg, error,
                                p->m.grp, p->m.pu, p->m.chk, p->m.sec);
        }
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
                                         int error)
{
        int bit = -1;

        if (rqd->nr_ppas == 1) {
                print_ppa(&pblk->dev->geo, &rqd->ppa_addr, "rqd", error);
                return;
        }

        while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
                                                bit + 1)) < rqd->nr_ppas) {
                print_ppa(&pblk->dev->geo, &rqd->ppa_list[bit], "rqd", error);
        }

        pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
                                           struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_geo *geo = &tgt_dev->geo;
        struct ppa_addr *ppa;
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa = &ppas[i];

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        if (!ppa->c.is_cached &&
                                        ppa->g.ch < geo->num_ch &&
                                        ppa->g.lun < geo->num_lun &&
                                        ppa->g.pl < geo->num_pln &&
                                        ppa->g.blk < geo->num_chk &&
                                        ppa->g.pg < geo->num_pg &&
                                        ppa->g.sec < geo->ws_min)
                                continue;
                } else {
                        if (!ppa->c.is_cached &&
                                        ppa->m.grp < geo->num_ch &&
                                        ppa->m.pu < geo->num_lun &&
                                        ppa->m.chk < geo->num_chk &&
                                        ppa->m.sec < geo->clba)
                                continue;
                }

                print_ppa(geo, ppa, "boundary", i);

                return 1;
        }

        return 0;
}

static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa_list;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                struct ppa_addr ppa;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        ppa = ppa_list[i];
                        line = &pblk->lines[pblk_ppa_to_line(ppa)];

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }

        return 0;
}
#endif

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
        struct pblk_line_meta *lm = &pblk->lm;

        if (paddr > lm->sec_per_line)
                return 1;

        return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
        return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
        return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
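
/*
 * Unit conventions in the bio helpers above: bi_sector counts 512-byte
 * block layer sectors and bi_size counts bytes, while pblk exposes 4KB
 * sectors; hence the division by NR_PHY_IN_LOG (4096 / 512 = 8) and by
 * PBLK_EXPOSED_PAGE_SIZE.
 */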

static inline void pblk_setup_uuid(struct pblk *pblk)
{
        uuid_le uuid;

        uuid_le_gen(&uuid);
        memcpy(pblk->instance_uuid, uuid.b, 16);
}

#endif /* PBLK_H_ */