  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright 2017 - Free Electrons
  4. *
  5. * Authors:
  6. * Boris Brezillon <boris.brezillon@free-electrons.com>
  7. * Peter Pan <peterpandong@micron.com>
  8. */
  9. #ifndef __LINUX_MTD_NAND_H
  10. #define __LINUX_MTD_NAND_H
  11. #include <linux/mtd/mtd.h>
  12. /**
  13. * struct nand_memory_organization - Memory organization structure
  14. * @bits_per_cell: number of bits per NAND cell
  15. * @pagesize: page size
  16. * @oobsize: OOB area size
  17. * @pages_per_eraseblock: number of pages per eraseblock
  18. * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
  19. * @planes_per_lun: number of planes per LUN
  20. * @luns_per_target: number of LUN per target (target is a synonym for die)
  21. * @ntargets: total number of targets exposed by the NAND device
  22. */
  23. struct nand_memory_organization {
  24. unsigned int bits_per_cell;
  25. unsigned int pagesize;
  26. unsigned int oobsize;
  27. unsigned int pages_per_eraseblock;
  28. unsigned int eraseblocks_per_lun;
  29. unsigned int planes_per_lun;
  30. unsigned int luns_per_target;
  31. unsigned int ntargets;
  32. };
  33. #define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
  34. { \
  35. .bits_per_cell = (bpc), \
  36. .pagesize = (ps), \
  37. .oobsize = (os), \
  38. .pages_per_eraseblock = (ppe), \
  39. .eraseblocks_per_lun = (epl), \
  40. .planes_per_lun = (ppl), \
  41. .luns_per_target = (lpt), \
  42. .ntargets = (nt), \
  43. }
  44. /**
  45. * struct nand_row_converter - Information needed to convert an absolute offset
  46. * into a row address
  47. * @lun_addr_shift: position of the LUN identifier in the row address
  48. * @eraseblock_addr_shift: position of the eraseblock identifier in the row
  49. * address
  50. */
  51. struct nand_row_converter {
  52. unsigned int lun_addr_shift;
  53. unsigned int eraseblock_addr_shift;
  54. };
  55. /**
  56. * struct nand_pos - NAND position object
  57. * @target: the NAND target/die
  58. * @lun: the LUN identifier
  59. * @plane: the plane within the LUN
  60. * @eraseblock: the eraseblock within the LUN
  61. * @page: the page within the LUN
  62. *
  63. * These information are usually used by specific sub-layers to select the
  64. * appropriate target/die and generate a row address to pass to the device.
  65. */
  66. struct nand_pos {
  67. unsigned int target;
  68. unsigned int lun;
  69. unsigned int plane;
  70. unsigned int eraseblock;
  71. unsigned int page;
  72. };
  73. /**
  74. * struct nand_page_io_req - NAND I/O request object
  75. * @pos: the position this I/O request is targeting
  76. * @dataoffs: the offset within the page
  77. * @datalen: number of data bytes to read from/write to this page
  78. * @databuf: buffer to store data in or get data from
  79. * @ooboffs: the OOB offset within the page
  80. * @ooblen: the number of OOB bytes to read from/write to this page
  81. * @oobbuf: buffer to store OOB data in or get OOB data from
  82. *
  83. * This object is used to pass per-page I/O requests to NAND sub-layers. This
  84. * way all useful information are already formatted in a useful way and
  85. * specific NAND layers can focus on translating these information into
  86. * specific commands/operations.
  87. */
  88. struct nand_page_io_req {
  89. struct nand_pos pos;
  90. unsigned int dataoffs;
  91. unsigned int datalen;
  92. union {
  93. const void *out;
  94. void *in;
  95. } databuf;
  96. unsigned int ooboffs;
  97. unsigned int ooblen;
  98. union {
  99. const void *out;
  100. void *in;
  101. } oobbuf;
  102. };
  103. /**
  104. * struct nand_ecc_req - NAND ECC requirements
  105. * @strength: ECC strength
  106. * @step_size: ECC step/block size
  107. */
  108. struct nand_ecc_req {
  109. unsigned int strength;
  110. unsigned int step_size;
  111. };
  112. #define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
  113. /**
  114. * struct nand_bbt - bad block table object
  115. * @cache: in memory BBT cache
  116. */
  117. struct nand_bbt {
  118. unsigned long *cache;
  119. };
  120. struct nand_device;
  121. /**
  122. * struct nand_ops - NAND operations
  123. * @erase: erase a specific block. No need to check if the block is bad before
  124. * erasing, this has been taken care of by the generic NAND layer
  125. * @markbad: mark a specific block bad. No need to check if the block is
  126. * already marked bad, this has been taken care of by the generic
  127. * NAND layer. This method should just write the BBM (Bad Block
  128. * Marker) so that future call to struct_nand_ops->isbad() return
  129. * true
  130. * @isbad: check whether a block is bad or not. This method should just read
  131. * the BBM and return whether the block is bad or not based on what it
  132. * reads
  133. *
  134. * These are all low level operations that should be implemented by specialized
  135. * NAND layers (SPI NAND, raw NAND, ...).
  136. */
  137. struct nand_ops {
  138. int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
  139. int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
  140. bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
  141. };
  142. /**
  143. * struct nand_device - NAND device
  144. * @mtd: MTD instance attached to the NAND device
  145. * @memorg: memory layout
  146. * @eccreq: ECC requirements
  147. * @rowconv: position to row address converter
  148. * @bbt: bad block table info
  149. * @ops: NAND operations attached to the NAND device
  150. *
  151. * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
  152. * should declare their own NAND object embedding a nand_device struct (that's
  153. * how inheritance is done).
  154. * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
  155. * at device detection time to reflect the NAND device
  156. * capabilities/requirements. Once this is done nanddev_init() can be called.
  157. * It will take care of converting NAND information into MTD ones, which means
  158. * the specialized NAND layers should never manually tweak
  159. * struct_nand_device->mtd except for the ->_read/write() hooks.
  160. */
  161. struct nand_device {
  162. struct mtd_info mtd;
  163. struct nand_memory_organization memorg;
  164. struct nand_ecc_req eccreq;
  165. struct nand_row_converter rowconv;
  166. struct nand_bbt bbt;
  167. const struct nand_ops *ops;
  168. };
  169. /**
  170. * struct nand_io_iter - NAND I/O iterator
  171. * @req: current I/O request
  172. * @oobbytes_per_page: maximum number of OOB bytes per page
  173. * @dataleft: remaining number of data bytes to read/write
  174. * @oobleft: remaining number of OOB bytes to read/write
  175. *
  176. * Can be used by specialized NAND layers to iterate over all pages covered
  177. * by an MTD I/O request, which should greatly simplifies the boiler-plate
  178. * code needed to read/write data from/to a NAND device.
  179. */
  180. struct nand_io_iter {
  181. struct nand_page_io_req req;
  182. unsigned int oobbytes_per_page;
  183. unsigned int dataleft;
  184. unsigned int oobleft;
  185. };
  186. /**
  187. * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
  188. * @mtd: MTD instance
  189. *
  190. * Return: the NAND device embedding @mtd.
  191. */
  192. static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
  193. {
  194. return container_of(mtd, struct nand_device, mtd);
  195. }
  196. /**
  197. * nanddev_to_mtd() - Get the MTD device attached to a NAND device
  198. * @nand: NAND device
  199. *
  200. * Return: the MTD device embedded in @nand.
  201. */
  202. static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
  203. {
  204. return &nand->mtd;
  205. }
  206. /*
  207. * nanddev_bits_per_cell() - Get the number of bits per cell
  208. * @nand: NAND device
  209. *
  210. * Return: the number of bits per cell.
  211. */
  212. static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
  213. {
  214. return nand->memorg.bits_per_cell;
  215. }
  216. /**
  217. * nanddev_page_size() - Get NAND page size
  218. * @nand: NAND device
  219. *
  220. * Return: the page size.
  221. */
  222. static inline size_t nanddev_page_size(const struct nand_device *nand)
  223. {
  224. return nand->memorg.pagesize;
  225. }
  226. /**
  227. * nanddev_per_page_oobsize() - Get NAND OOB size
  228. * @nand: NAND device
  229. *
  230. * Return: the OOB size.
  231. */
  232. static inline unsigned int
  233. nanddev_per_page_oobsize(const struct nand_device *nand)
  234. {
  235. return nand->memorg.oobsize;
  236. }
  237. /**
  238. * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
  239. * @nand: NAND device
  240. *
  241. * Return: the number of pages per eraseblock.
  242. */
  243. static inline unsigned int
  244. nanddev_pages_per_eraseblock(const struct nand_device *nand)
  245. {
  246. return nand->memorg.pages_per_eraseblock;
  247. }
  248. /**
  249. * nanddev_per_page_oobsize() - Get NAND erase block size
  250. * @nand: NAND device
  251. *
  252. * Return: the eraseblock size.
  253. */
  254. static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
  255. {
  256. return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
  257. }
  258. /**
  259. * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
  260. * @nand: NAND device
  261. *
  262. * Return: the number of eraseblocks per LUN.
  263. */
  264. static inline unsigned int
  265. nanddev_eraseblocks_per_lun(const struct nand_device *nand)
  266. {
  267. return nand->memorg.eraseblocks_per_lun;
  268. }
  269. /**
  270. * nanddev_target_size() - Get the total size provided by a single target/die
  271. * @nand: NAND device
  272. *
  273. * Return: the total size exposed by a single target/die in bytes.
  274. */
  275. static inline u64 nanddev_target_size(const struct nand_device *nand)
  276. {
  277. return (u64)nand->memorg.luns_per_target *
  278. nand->memorg.eraseblocks_per_lun *
  279. nand->memorg.pages_per_eraseblock *
  280. nand->memorg.pagesize;
  281. }
  282. /**
  283. * nanddev_ntarget() - Get the total of targets
  284. * @nand: NAND device
  285. *
  286. * Return: the number of targets/dies exposed by @nand.
  287. */
  288. static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
  289. {
  290. return nand->memorg.ntargets;
  291. }
  292. /**
  293. * nanddev_neraseblocks() - Get the total number of erasablocks
  294. * @nand: NAND device
  295. *
  296. * Return: the total number of eraseblocks exposed by @nand.
  297. */
  298. static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
  299. {
  300. return (u64)nand->memorg.luns_per_target *
  301. nand->memorg.eraseblocks_per_lun *
  302. nand->memorg.pages_per_eraseblock;
  303. }
  304. /**
  305. * nanddev_size() - Get NAND size
  306. * @nand: NAND device
  307. *
  308. * Return: the total size (in bytes) exposed by @nand.
  309. */
  310. static inline u64 nanddev_size(const struct nand_device *nand)
  311. {
  312. return nanddev_target_size(nand) * nanddev_ntargets(nand);
  313. }
  314. /**
  315. * nanddev_get_memorg() - Extract memory organization info from a NAND device
  316. * @nand: NAND device
  317. *
  318. * This can be used by the upper layer to fill the memorg info before calling
  319. * nanddev_init().
  320. *
  321. * Return: the memorg object embedded in the NAND device.
  322. */
  323. static inline struct nand_memory_organization *
  324. nanddev_get_memorg(struct nand_device *nand)
  325. {
  326. return &nand->memorg;
  327. }
  328. int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
  329. struct module *owner);
  330. void nanddev_cleanup(struct nand_device *nand);
  331. /**
  332. * nanddev_register() - Register a NAND device
  333. * @nand: NAND device
  334. *
  335. * Register a NAND device.
  336. * This function is just a wrapper around mtd_device_register()
  337. * registering the MTD device embedded in @nand.
  338. *
  339. * Return: 0 in case of success, a negative error code otherwise.
  340. */
  341. static inline int nanddev_register(struct nand_device *nand)
  342. {
  343. return mtd_device_register(&nand->mtd, NULL, 0);
  344. }
  345. /**
  346. * nanddev_unregister() - Unregister a NAND device
  347. * @nand: NAND device
  348. *
  349. * Unregister a NAND device.
  350. * This function is just a wrapper around mtd_device_unregister()
  351. * unregistering the MTD device embedded in @nand.
  352. *
  353. * Return: 0 in case of success, a negative error code otherwise.
  354. */
  355. static inline int nanddev_unregister(struct nand_device *nand)
  356. {
  357. return mtd_device_unregister(&nand->mtd);
  358. }
  359. /**
  360. * nanddev_set_of_node() - Attach a DT node to a NAND device
  361. * @nand: NAND device
  362. * @np: DT node
  363. *
  364. * Attach a DT node to a NAND device.
  365. */
  366. static inline void nanddev_set_of_node(struct nand_device *nand,
  367. struct device_node *np)
  368. {
  369. mtd_set_of_node(&nand->mtd, np);
  370. }
  371. /**
  372. * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
  373. * @nand: NAND device
  374. *
  375. * Return: the DT node attached to @nand.
  376. */
  377. static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
  378. {
  379. return mtd_get_of_node(&nand->mtd);
  380. }
  381. /**
  382. * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
  383. * @nand: NAND device
  384. * @offs: absolute NAND offset (usually passed by the MTD layer)
  385. * @pos: a NAND position object to fill in
  386. *
  387. * Converts @offs into a nand_pos representation.
  388. *
  389. * Return: the offset within the NAND page pointed by @pos.
  390. */
  391. static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
  392. loff_t offs,
  393. struct nand_pos *pos)
  394. {
  395. unsigned int pageoffs;
  396. u64 tmp = offs;
  397. pageoffs = do_div(tmp, nand->memorg.pagesize);
  398. pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
  399. pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
  400. pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
  401. pos->lun = do_div(tmp, nand->memorg.luns_per_target);
  402. pos->target = tmp;
  403. return pageoffs;
  404. }
  405. /**
  406. * nanddev_pos_cmp() - Compare two NAND positions
  407. * @a: First NAND position
  408. * @b: Second NAND position
  409. *
  410. * Compares two NAND positions.
  411. *
  412. * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
  413. */
  414. static inline int nanddev_pos_cmp(const struct nand_pos *a,
  415. const struct nand_pos *b)
  416. {
  417. if (a->target != b->target)
  418. return a->target < b->target ? -1 : 1;
  419. if (a->lun != b->lun)
  420. return a->lun < b->lun ? -1 : 1;
  421. if (a->eraseblock != b->eraseblock)
  422. return a->eraseblock < b->eraseblock ? -1 : 1;
  423. if (a->page != b->page)
  424. return a->page < b->page ? -1 : 1;
  425. return 0;
  426. }
  427. /**
  428. * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
  429. * @nand: NAND device
  430. * @pos: the NAND position to convert
  431. *
  432. * Converts @pos NAND position into an absolute offset.
  433. *
  434. * Return: the absolute offset. Note that @pos points to the beginning of a
  435. * page, if one wants to point to a specific offset within this page
  436. * the returned offset has to be adjusted manually.
  437. */
  438. static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
  439. const struct nand_pos *pos)
  440. {
  441. unsigned int npages;
  442. npages = pos->page +
  443. ((pos->eraseblock +
  444. (pos->lun +
  445. (pos->target * nand->memorg.luns_per_target)) *
  446. nand->memorg.eraseblocks_per_lun) *
  447. nand->memorg.pages_per_eraseblock);
  448. return (loff_t)npages * nand->memorg.pagesize;
  449. }
  450. /**
  451. * nanddev_pos_to_row() - Extract a row address from a NAND position
  452. * @nand: NAND device
  453. * @pos: the position to convert
  454. *
  455. * Converts a NAND position into a row address that can then be passed to the
  456. * device.
  457. *
  458. * Return: the row address extracted from @pos.
  459. */
  460. static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
  461. const struct nand_pos *pos)
  462. {
  463. return (pos->lun << nand->rowconv.lun_addr_shift) |
  464. (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
  465. pos->page;
  466. }
  467. /**
  468. * nanddev_pos_next_target() - Move a position to the next target/die
  469. * @nand: NAND device
  470. * @pos: the position to update
  471. *
  472. * Updates @pos to point to the start of the next target/die. Useful when you
  473. * want to iterate over all targets/dies of a NAND device.
  474. */
  475. static inline void nanddev_pos_next_target(struct nand_device *nand,
  476. struct nand_pos *pos)
  477. {
  478. pos->page = 0;
  479. pos->plane = 0;
  480. pos->eraseblock = 0;
  481. pos->lun = 0;
  482. pos->target++;
  483. }
  484. /**
  485. * nanddev_pos_next_lun() - Move a position to the next LUN
  486. * @nand: NAND device
  487. * @pos: the position to update
  488. *
  489. * Updates @pos to point to the start of the next LUN. Useful when you want to
  490. * iterate over all LUNs of a NAND device.
  491. */
  492. static inline void nanddev_pos_next_lun(struct nand_device *nand,
  493. struct nand_pos *pos)
  494. {
  495. if (pos->lun >= nand->memorg.luns_per_target - 1)
  496. return nanddev_pos_next_target(nand, pos);
  497. pos->lun++;
  498. pos->page = 0;
  499. pos->plane = 0;
  500. pos->eraseblock = 0;
  501. }
  502. /**
  503. * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
  504. * @nand: NAND device
  505. * @pos: the position to update
  506. *
  507. * Updates @pos to point to the start of the next eraseblock. Useful when you
  508. * want to iterate over all eraseblocks of a NAND device.
  509. */
  510. static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
  511. struct nand_pos *pos)
  512. {
  513. if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
  514. return nanddev_pos_next_lun(nand, pos);
  515. pos->eraseblock++;
  516. pos->page = 0;
  517. pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
  518. }
  519. /**
  520. * nanddev_pos_next_eraseblock() - Move a position to the next page
  521. * @nand: NAND device
  522. * @pos: the position to update
  523. *
  524. * Updates @pos to point to the start of the next page. Useful when you want to
  525. * iterate over all pages of a NAND device.
  526. */
  527. static inline void nanddev_pos_next_page(struct nand_device *nand,
  528. struct nand_pos *pos)
  529. {
  530. if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
  531. return nanddev_pos_next_eraseblock(nand, pos);
  532. pos->page++;
  533. }
  534. /**
  535. * nand_io_iter_init - Initialize a NAND I/O iterator
  536. * @nand: NAND device
  537. * @offs: absolute offset
  538. * @req: MTD request
  539. * @iter: NAND I/O iterator
  540. *
  541. * Initializes a NAND iterator based on the information passed by the MTD
  542. * layer.
  543. */
  544. static inline void nanddev_io_iter_init(struct nand_device *nand,
  545. loff_t offs, struct mtd_oob_ops *req,
  546. struct nand_io_iter *iter)
  547. {
  548. struct mtd_info *mtd = nanddev_to_mtd(nand);
  549. iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
  550. iter->req.ooboffs = req->ooboffs;
  551. iter->oobbytes_per_page = mtd_oobavail(mtd, req);
  552. iter->dataleft = req->len;
  553. iter->oobleft = req->ooblen;
  554. iter->req.databuf.in = req->datbuf;
  555. iter->req.datalen = min_t(unsigned int,
  556. nand->memorg.pagesize - iter->req.dataoffs,
  557. iter->dataleft);
  558. iter->req.oobbuf.in = req->oobbuf;
  559. iter->req.ooblen = min_t(unsigned int,
  560. iter->oobbytes_per_page - iter->req.ooboffs,
  561. iter->oobleft);
  562. }
  563. /**
  564. * nand_io_iter_next_page - Move to the next page
  565. * @nand: NAND device
  566. * @iter: NAND I/O iterator
  567. *
  568. * Updates the @iter to point to the next page.
  569. */
  570. static inline void nanddev_io_iter_next_page(struct nand_device *nand,
  571. struct nand_io_iter *iter)
  572. {
  573. nanddev_pos_next_page(nand, &iter->req.pos);
  574. iter->dataleft -= iter->req.datalen;
  575. iter->req.databuf.in += iter->req.datalen;
  576. iter->oobleft -= iter->req.ooblen;
  577. iter->req.oobbuf.in += iter->req.ooblen;
  578. iter->req.dataoffs = 0;
  579. iter->req.ooboffs = 0;
  580. iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
  581. iter->dataleft);
  582. iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
  583. iter->oobleft);
  584. }
  585. /**
  586. * nand_io_iter_end - Should end iteration or not
  587. * @nand: NAND device
  588. * @iter: NAND I/O iterator
  589. *
  590. * Check whether @iter has reached the end of the NAND portion it was asked to
  591. * iterate on or not.
  592. *
  593. * Return: true if @iter has reached the end of the iteration request, false
  594. * otherwise.
  595. */
  596. static inline bool nanddev_io_iter_end(struct nand_device *nand,
  597. const struct nand_io_iter *iter)
  598. {
  599. if (iter->dataleft || iter->oobleft)
  600. return false;
  601. return true;
  602. }
  603. /**
  604. * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O
  605. * request
  606. * @nand: NAND device
  607. * @start: start address to read/write from
  608. * @req: MTD I/O request
  609. * @iter: NAND I/O iterator
  610. *
  611. * Should be used for iterate over pages that are contained in an MTD request.
  612. */
  613. #define nanddev_io_for_each_page(nand, start, req, iter) \
  614. for (nanddev_io_iter_init(nand, start, req, iter); \
  615. !nanddev_io_iter_end(nand, iter); \
  616. nanddev_io_iter_next_page(nand, iter))
  617. bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
  618. bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
  619. int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
  620. int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
  621. /* BBT related functions */
  622. enum nand_bbt_block_status {
  623. NAND_BBT_BLOCK_STATUS_UNKNOWN,
  624. NAND_BBT_BLOCK_GOOD,
  625. NAND_BBT_BLOCK_WORN,
  626. NAND_BBT_BLOCK_RESERVED,
  627. NAND_BBT_BLOCK_FACTORY_BAD,
  628. NAND_BBT_BLOCK_NUM_STATUS,
  629. };
  630. int nanddev_bbt_init(struct nand_device *nand);
  631. void nanddev_bbt_cleanup(struct nand_device *nand);
  632. int nanddev_bbt_update(struct nand_device *nand);
  633. int nanddev_bbt_get_block_status(const struct nand_device *nand,
  634. unsigned int entry);
  635. int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
  636. enum nand_bbt_block_status status);
  637. int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
  638. /**
  639. * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
  640. * @nand: NAND device
  641. * @pos: the NAND position we want to get BBT entry for
  642. *
  643. * Return the BBT entry used to store information about the eraseblock pointed
  644. * by @pos.
  645. *
  646. * Return: the BBT entry storing information about eraseblock pointed by @pos.
  647. */
  648. static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
  649. const struct nand_pos *pos)
  650. {
  651. return pos->eraseblock +
  652. ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
  653. nand->memorg.eraseblocks_per_lun);
  654. }
  655. /**
  656. * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
  657. * @nand: NAND device
  658. *
  659. * Return: true if the BBT has been initialized, false otherwise.
  660. */
  661. static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
  662. {
  663. return !!nand->bbt.cache;
  664. }
  665. /* MTD -> NAND helper functions. */
  666. int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
  667. #endif /* __LINUX_MTD_NAND_H */