spi-nor.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558
  1. /*
  2. * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
  3. * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
  4. *
  5. * Copyright (C) 2005, Intec Automation Inc.
  6. * Copyright (C) 2014, Freescale Semiconductor, Inc.
  7. *
  8. * This code is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/err.h>
  13. #include <linux/errno.h>
  14. #include <linux/module.h>
  15. #include <linux/device.h>
  16. #include <linux/mutex.h>
  17. #include <linux/math64.h>
  18. #include <linux/sizes.h>
  19. #include <linux/mtd/mtd.h>
  20. #include <linux/of_platform.h>
  21. #include <linux/spi/flash.h>
  22. #include <linux/mtd/spi-nor.h>
/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */
#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)

/* Longest ID we read/match: 3 JEDEC bytes plus up to 3 extended ID bytes */
#define SPI_NOR_MAX_ID_LEN	6
/* Largest opcode address width used here, in bytes (4-byte addressing) */
#define SPI_NOR_MAX_ADDR_WIDTH	4
/*
 * Per-chip description used to match a detected flash and configure the
 * spi_nor core: ID bytes, geometry, and capability flags.
 */
struct flash_info {
	char		*name;

	/*
	 * This array stores the ID bytes.
	 * The first three bytes are the JEDEC ID.
	 * JEDEC ID zero means "no ID" (mostly older chips).
	 */
	u8		id[SPI_NOR_MAX_ID_LEN];
	u8		id_len;		/* number of valid bytes in id[] */

	/* The size listed here is what works with SPINOR_OP_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned	sector_size;
	u16		n_sectors;

	u16		page_size;	/* program page size, in bytes */
	u16		addr_width;	/* address width, in bytes */

	u16		flags;		/* OR of the capability bits below */
#define SECT_4K			BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE	BIT(1)	/* No erase command needed */
#define SST_WRITE		BIT(2)	/* use SST byte programming */
#define SPI_NOR_NO_FR		BIT(3)	/* Can't do fastread */
#define SECT_4K_PMC		BIT(4)	/* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ	BIT(5)	/* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ	BIT(6)	/* Flash supports Quad Read */
#define USE_FSR			BIT(7)	/* use flag status register */
#define SPI_NOR_HAS_LOCK	BIT(8)	/* Flash supports lock/unlock via SR */
#define SPI_NOR_HAS_TB		BIT(9)	/*
					 * Flash SR has Top/Bottom (TB) protect
					 * bit. Must be used with
					 * SPI_NOR_HAS_LOCK.
					 */
};

/* First JEDEC ID byte is the manufacturer code */
#define JEDEC_MFR(info)	((info)->id[0])

static const struct flash_info *spi_nor_match_id(const char *name);
  70. /*
  71. * Read the status register, returning its value in the location
  72. * Return the status register value.
  73. * Returns negative if error occurred.
  74. */
  75. static int read_sr(struct spi_nor *nor)
  76. {
  77. int ret;
  78. u8 val;
  79. ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
  80. if (ret < 0) {
  81. pr_err("error %d reading SR\n", (int) ret);
  82. return ret;
  83. }
  84. return val;
  85. }
  86. /*
  87. * Read the flag status register, returning its value in the location
  88. * Return the status register value.
  89. * Returns negative if error occurred.
  90. */
  91. static int read_fsr(struct spi_nor *nor)
  92. {
  93. int ret;
  94. u8 val;
  95. ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
  96. if (ret < 0) {
  97. pr_err("error %d reading FSR\n", ret);
  98. return ret;
  99. }
  100. return val;
  101. }
  102. /*
  103. * Read configuration register, returning its value in the
  104. * location. Return the configuration register value.
  105. * Returns negative if error occured.
  106. */
  107. static int read_cr(struct spi_nor *nor)
  108. {
  109. int ret;
  110. u8 val;
  111. ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
  112. if (ret < 0) {
  113. dev_err(nor->dev, "error %d reading CR\n", ret);
  114. return ret;
  115. }
  116. return val;
  117. }
  118. /*
  119. * Dummy Cycle calculation for different type of read.
  120. * It can be used to support more commands with
  121. * different dummy cycle requirements.
  122. */
  123. static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
  124. {
  125. switch (nor->flash_read) {
  126. case SPI_NOR_FAST:
  127. case SPI_NOR_DUAL:
  128. case SPI_NOR_QUAD:
  129. return 8;
  130. case SPI_NOR_NORMAL:
  131. return 0;
  132. }
  133. return 0;
  134. }
  135. /*
  136. * Write status register 1 byte
  137. * Returns negative if error occurred.
  138. */
  139. static inline int write_sr(struct spi_nor *nor, u8 val)
  140. {
  141. nor->cmd_buf[0] = val;
  142. return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
  143. }
  144. /*
  145. * Set write enable latch with Write Enable command.
  146. * Returns negative if error occurred.
  147. */
  148. static inline int write_enable(struct spi_nor *nor)
  149. {
  150. return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
  151. }
  152. /*
  153. * Send write disble instruction to the chip.
  154. */
  155. static inline int write_disable(struct spi_nor *nor)
  156. {
  157. return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
  158. }
  159. static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
  160. {
  161. return mtd->priv;
  162. }
/* Enable/disable 4-byte addressing mode. */
static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
			    int enable)
{
	int status;
	bool need_wren = false;
	u8 cmd;

	switch (JEDEC_MFR(info)) {
	case SNOR_MFR_MICRON:
		/* Some Micron need WREN command; all will accept it */
		need_wren = true;
		/* fall through -- Micron shares the EN4B/EX4B path below */
	case SNOR_MFR_MACRONIX:
	case SNOR_MFR_WINBOND:
		if (need_wren)
			write_enable(nor);

		cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
		status = nor->write_reg(nor, cmd, NULL, 0);
		if (need_wren)
			write_disable(nor);

		return status;
	default:
		/* Spansion style: write EXTADD (bit 7) of the Bank register */
		nor->cmd_buf[0] = enable << 7;
		return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
	}
}
  189. static inline int spi_nor_sr_ready(struct spi_nor *nor)
  190. {
  191. int sr = read_sr(nor);
  192. if (sr < 0)
  193. return sr;
  194. else
  195. return !(sr & SR_WIP);
  196. }
  197. static inline int spi_nor_fsr_ready(struct spi_nor *nor)
  198. {
  199. int fsr = read_fsr(nor);
  200. if (fsr < 0)
  201. return fsr;
  202. else
  203. return fsr & FSR_READY;
  204. }
  205. static int spi_nor_ready(struct spi_nor *nor)
  206. {
  207. int sr, fsr;
  208. sr = spi_nor_sr_ready(nor);
  209. if (sr < 0)
  210. return sr;
  211. fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
  212. if (fsr < 0)
  213. return fsr;
  214. return sr && fsr;
  215. }
  216. /*
  217. * Service routine to read status register until ready, or timeout occurs.
  218. * Returns non-zero if error.
  219. */
  220. static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
  221. unsigned long timeout_jiffies)
  222. {
  223. unsigned long deadline;
  224. int timeout = 0, ret;
  225. deadline = jiffies + timeout_jiffies;
  226. while (!timeout) {
  227. if (time_after_eq(jiffies, deadline))
  228. timeout = 1;
  229. ret = spi_nor_ready(nor);
  230. if (ret < 0)
  231. return ret;
  232. if (ret)
  233. return 0;
  234. cond_resched();
  235. }
  236. dev_err(nor->dev, "flash operation timed out\n");
  237. return -ETIMEDOUT;
  238. }
  239. static int spi_nor_wait_till_ready(struct spi_nor *nor)
  240. {
  241. return spi_nor_wait_till_ready_with_timeout(nor,
  242. DEFAULT_READY_WAIT_JIFFIES);
  243. }
  244. /*
  245. * Erase the whole flash memory
  246. *
  247. * Returns 0 if successful, non-zero otherwise.
  248. */
  249. static int erase_chip(struct spi_nor *nor)
  250. {
  251. dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
  252. return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
  253. }
  254. static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
  255. {
  256. int ret = 0;
  257. mutex_lock(&nor->lock);
  258. if (nor->prepare) {
  259. ret = nor->prepare(nor, ops);
  260. if (ret) {
  261. dev_err(nor->dev, "failed in the preparation.\n");
  262. mutex_unlock(&nor->lock);
  263. return ret;
  264. }
  265. }
  266. return ret;
  267. }
  268. static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
  269. {
  270. if (nor->unprepare)
  271. nor->unprepare(nor, ops);
  272. mutex_unlock(&nor->lock);
  273. }
  274. /*
  275. * Initiate the erasure of a single sector
  276. */
  277. static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
  278. {
  279. u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
  280. int i;
  281. if (nor->erase)
  282. return nor->erase(nor, addr);
  283. /*
  284. * Default implementation, if driver doesn't have a specialized HW
  285. * control
  286. */
  287. for (i = nor->addr_width - 1; i >= 0; i--) {
  288. buf[i] = addr & 0xff;
  289. addr >>= 8;
  290. }
  291. return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
  292. }
/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Reject requests that are not a whole number of erase blocks */
	div_u64_rem(instr->len, mtd->erasesize, &rem);
	if (rem)
		return -EINVAL;

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size) {
		unsigned long timeout;

		write_enable(nor);

		if (erase_chip(nor)) {
			ret = -EIO;
			goto erase_err;
		}

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else {
		while (len) {
			write_enable(nor);

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			/* Each sector must finish before the next command */
			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}
	}

	/* NOTE(review): write_disable() result is ignored; a failure to
	 * clear WEL is not reported as an erase error -- confirm intended. */
	write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

	/* Report completion state to MTD and invoke its callback */
	instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return ret;
}
/*
 * Decode the BP{0,1,2} (and, when supported, TB) bits of status register
 * @sr into the byte range [*ofs, *ofs + *len) they currently protect.
 */
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
				 uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	int shift = ffs(mask) - 1;
	int pow;

	if (!(sr & mask)) {
		/* No protection */
		*ofs = 0;
		*len = 0;
	} else {
		/* BP field encodes size/2^pow; invert so max BP => pow 0 (all) */
		pow = ((sr & mask) ^ mask) >> shift;
		*len = mtd->size >> pow;
		/* TB set (and supported) means the bottom of the chip is locked */
		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
			*ofs = 0;
		else
			*ofs = mtd->size - *len;
	}
}
  378. /*
  379. * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
  380. * @locked is false); 0 otherwise
  381. */
  382. static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
  383. u8 sr, bool locked)
  384. {
  385. loff_t lock_offs;
  386. uint64_t lock_len;
  387. if (!len)
  388. return 1;
  389. stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
  390. if (locked)
  391. /* Requested range is a sub-range of locked range */
  392. return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
  393. else
  394. /* Requested range does not overlap with locked range */
  395. return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
  396. }
  397. static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
  398. u8 sr)
  399. {
  400. return stm_check_lock_status_sr(nor, ofs, len, sr, true);
  401. }
  402. static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
  403. u8 sr)
  404. {
  405. return stm_check_lock_status_sr(nor, ofs, len, sr, false);
  406. }
/*
 * Lock a region of the flash. Compatible with ST Micro and similar flash.
 * Supports the block protection bits BP{0,1,2} in the status register
 * (SR). Does not support these features found in newer SR bitfields:
 *   - SEC: sector/block protect - only handle SEC=0 (block protect)
 *   - CMP: complement protect - only support CMP=0 (range is not complemented)
 *
 * Support for the following is provided conditionally for some flash:
 *   - TB: top/bottom protect
 *
 * Sample table portion for 8MB flash (Winbond w25q64fw):
 *
 *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
 *  --------------------------------------------------------------------------
 *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
 *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
 *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
 *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
 *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
 *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
 *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
 *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
 *  ------|-------|-------|-------|-------|---------------|-------------------
 *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
 *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
 *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
 *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
 *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
 *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
 *
 * @ofs/@len: region that must end up (at least) locked.
 * Returns negative on errors, 0 on success.
 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;
	int ret;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (stm_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
			      status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	/*
	 * Need smallest pow such that:
	 *
	 *   1 / (2^pow) <= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
	 */
	pow = ilog2(mtd->size) - ilog2(lock_len);
	val = mask - (pow << shift);
	/* Requested size not representable in the BP field */
	if (val & ~mask)
		return -EINVAL;
	/* Don't "lock" with no region! */
	if (!(val & mask))
		return -EINVAL;

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	write_enable(nor);
	ret = write_sr(nor, status_new);
	if (ret)
		return ret;
	return spi_nor_wait_till_ready(nor);
}
/*
 * Unlock a region of the flash. See stm_lock() for more info
 *
 * @ofs/@len: region that must end up unlocked.
 * Returns negative on errors, 0 on success.
 */
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int status_old, status_new;
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	u8 shift = ffs(mask) - 1, pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;
	int ret;

	status_old = read_sr(nor);
	if (status_old < 0)
		return status_old;

	/* If nothing in our range is locked, we don't need to do anything */
	if (stm_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection */
	if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection */
	if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	/*
	 * Need largest pow such that:
	 *
	 *   1 / (2^pow) >= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
	 */
	pow = ilog2(mtd->size) - order_base_2(lock_len);
	if (lock_len == 0) {
		val = 0; /* fully unlocked */
	} else {
		val = mask - (pow << shift);
		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~SR_TB) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= SR_TB;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	write_enable(nor);
	ret = write_sr(nor, status_new);
	if (ret)
		return ret;
	return spi_nor_wait_till_ready(nor);
}
  577. /*
  578. * Check if a region of the flash is (completely) locked. See stm_lock() for
  579. * more info.
  580. *
  581. * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
  582. * negative on errors.
  583. */
  584. static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
  585. {
  586. int status;
  587. status = read_sr(nor);
  588. if (status < 0)
  589. return status;
  590. return stm_is_locked_sr(nor, ofs, len, status);
  591. }
/* mtd->_lock handler: lock @len bytes at @ofs via the flash_lock hook. */
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
	if (ret)
		return ret;

	ret = nor->flash_lock(nor, ofs, len);

	/*
	 * NOTE(review): unprep is called with SPI_NOR_OPS_UNLOCK although
	 * prep used SPI_NOR_OPS_LOCK -- presumably drivers ignore the ops
	 * argument in unprepare(); confirm before relying on it.
	 */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
	return ret;
}
/* mtd->_unlock handler: unlock @len bytes at @ofs via the flash_unlock hook. */
static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_unlock(nor, ofs, len);

	/*
	 * NOTE(review): prep used SPI_NOR_OPS_UNLOCK but unprep passes
	 * SPI_NOR_OPS_LOCK -- same mismatch as spi_nor_lock(); confirm
	 * drivers ignore the ops argument.
	 */
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}
/* mtd->_is_locked handler: query lock state via the flash_is_locked hook. */
static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	/* NOTE(review): a read-only query, yet prep/unprep are passed the
	 * UNLOCK/LOCK ops -- presumably drivers ignore the ops argument. */
	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_is_locked(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
	.id = {								\
		((_jedec_id) >> 16) & 0xff,				\
		((_jedec_id) >> 8) & 0xff,				\
		(_jedec_id) & 0xff,					\
		((_ext_id) >> 8) & 0xff,				\
		(_ext_id) & 0xff,					\
		},							\
	.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),	\
	.sector_size = (_sector_size),					\
	.n_sectors = (_n_sectors),					\
	.page_size = 256,						\
	.flags = (_flags),

/* Used when the "_ext_id" is three bytes (6-byte ID total) */
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
	.id = {								\
		((_jedec_id) >> 16) & 0xff,				\
		((_jedec_id) >> 8) & 0xff,				\
		(_jedec_id) & 0xff,					\
		((_ext_id) >> 16) & 0xff,				\
		((_ext_id) >> 8) & 0xff,				\
		(_ext_id) & 0xff,					\
		},							\
	.id_len = 6,							\
	.sector_size = (_sector_size),					\
	.n_sectors = (_n_sectors),					\
	.page_size = 256,						\
	.flags = (_flags),

/* For CAT25-style parts with no JEDEC ID and explicit geometry */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
	.sector_size = (_sector_size),					\
	.n_sectors = (_n_sectors),					\
	.page_size = (_page_size),					\
	.addr_width = (_addr_width),					\
	.flags = (_flags),
/* NOTE: double check command sets and memory organization when you add
 * more nor chips. This current list focuses on newer chips, which
 * have been converging on command sets that include a JEDEC ID.
 *
 * All newly added entries should describe *hardware* and should use SECT_4K
 * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
 * scenarios excluding small sectors there is a config option that can be
 * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
 * For historical (and compatibility) reasons (before we got the above config)
 * some old entries may be missing the 4K flag.
 */
  670. static const struct flash_info spi_nor_ids[] = {
  671. /* Atmel -- some are (confusingly) marketed as "DataFlash" */
  672. { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
  673. { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
  674. { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
  675. { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
  676. { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
  677. { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
  678. { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
  679. { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
  680. { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
  681. { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
  682. { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
  683. /* EON -- en25xxx */
  684. { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
  685. { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
  686. { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
  687. { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
  688. { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
  689. { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
  690. { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
  691. { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
  692. /* ESMT */
  693. { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
  694. /* Everspin */
  695. { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  696. { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  697. { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  698. /* Fujitsu */
  699. { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
  700. /* GigaDevice */
  701. {
  702. "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
  703. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  704. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  705. },
  706. {
  707. "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
  708. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  709. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  710. },
  711. {
  712. "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
  713. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  714. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  715. },
  716. {
  717. "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
  718. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  719. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  720. },
  721. /* Intel/Numonyx -- xxxs33b */
  722. { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
  723. { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
  724. { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
  725. /* ISSI */
  726. { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
  727. /* Macronix */
  728. { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
  729. { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
  730. { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
  731. { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
  732. { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
  733. { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
  734. { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
  735. { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
  736. { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
  737. { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
  738. { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
  739. { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
  740. { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) },
  741. { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
  742. { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
  743. { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
  744. /* Micron */
  745. { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
  746. { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
  747. { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
  748. { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
  749. { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
  750. { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
  751. { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
  752. { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
  753. { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  754. { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  755. { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  756. { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  757. /* PMC */
  758. { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
  759. { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
  760. { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
  761. /* Spansion -- single (large) sector size only, at least
  762. * for the chips listed here (without boot sectors).
  763. */
  764. { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  765. { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  766. { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
  767. { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  768. { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  769. { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
  770. { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
  771. { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
  772. { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  773. { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  774. { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  775. { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
  776. { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
  777. { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
  778. { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
  779. { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
  780. { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  781. { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  782. { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  783. { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
  784. { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  785. { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
  786. { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
  787. { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
  788. { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
  789. /* SST -- large erase sizes are "overlays", "sectors" are 4K */
  790. { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
  791. { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
  792. { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
  793. { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
  794. { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
  795. { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
  796. { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
  797. { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
  798. { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
  799. { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
  800. { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
  801. { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
  802. /* ST Microelectronics -- newer production may have feature updates */
  803. { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
  804. { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
  805. { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
  806. { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
  807. { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
  808. { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
  809. { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
  810. { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
  811. { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
  812. { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
  813. { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
  814. { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
  815. { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
  816. { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
  817. { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
  818. { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
  819. { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
  820. { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
  821. { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
  822. { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
  823. { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
  824. { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
  825. { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
  826. { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
  827. { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
  828. { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
  829. { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
  830. { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
  831. { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
  832. { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
  833. /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
  834. { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
  835. { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
  836. { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
  837. { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
  838. { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
  839. { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
  840. { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
  841. { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
  842. {
  843. "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
  844. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  845. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  846. },
  847. { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
  848. { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
  849. {
  850. "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
  851. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  852. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  853. },
  854. {
  855. "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
  856. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  857. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  858. },
  859. { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
  860. { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
  861. { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
  862. { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
  863. /* Catalyst / On Semiconductor -- non-JEDEC */
  864. { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  865. { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  866. { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  867. { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  868. { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  869. { },
  870. };
  871. static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
  872. {
  873. int tmp;
  874. u8 id[SPI_NOR_MAX_ID_LEN];
  875. const struct flash_info *info;
  876. tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
  877. if (tmp < 0) {
  878. dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
  879. return ERR_PTR(tmp);
  880. }
  881. for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
  882. info = &spi_nor_ids[tmp];
  883. if (info->id_len) {
  884. if (!memcmp(info->id, id, info->id_len))
  885. return &spi_nor_ids[tmp];
  886. }
  887. }
  888. dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
  889. id[0], id[1], id[2]);
  890. return ERR_PTR(-ENODEV);
  891. }
  892. static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
  893. size_t *retlen, u_char *buf)
  894. {
  895. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  896. int ret;
  897. dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
  898. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
  899. if (ret)
  900. return ret;
  901. while (len) {
  902. ret = nor->read(nor, from, len, buf);
  903. if (ret == 0) {
  904. /* We shouldn't see 0-length reads */
  905. ret = -EIO;
  906. goto read_err;
  907. }
  908. if (ret < 0)
  909. goto read_err;
  910. WARN_ON(ret > len);
  911. *retlen += ret;
  912. buf += ret;
  913. from += ret;
  914. len -= ret;
  915. }
  916. ret = 0;
  917. read_err:
  918. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
  919. return ret;
  920. }
  921. static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
  922. size_t *retlen, const u_char *buf)
  923. {
  924. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  925. size_t actual;
  926. int ret;
  927. dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
  928. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
  929. if (ret)
  930. return ret;
  931. write_enable(nor);
  932. nor->sst_write_second = false;
  933. actual = to % 2;
  934. /* Start write from odd address. */
  935. if (actual) {
  936. nor->program_opcode = SPINOR_OP_BP;
  937. /* write one byte. */
  938. ret = nor->write(nor, to, 1, buf);
  939. if (ret < 0)
  940. goto sst_write_err;
  941. WARN(ret != 1, "While writing 1 byte written %i bytes\n",
  942. (int)ret);
  943. ret = spi_nor_wait_till_ready(nor);
  944. if (ret)
  945. goto sst_write_err;
  946. }
  947. to += actual;
  948. /* Write out most of the data here. */
  949. for (; actual < len - 1; actual += 2) {
  950. nor->program_opcode = SPINOR_OP_AAI_WP;
  951. /* write two bytes. */
  952. ret = nor->write(nor, to, 2, buf + actual);
  953. if (ret < 0)
  954. goto sst_write_err;
  955. WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
  956. (int)ret);
  957. ret = spi_nor_wait_till_ready(nor);
  958. if (ret)
  959. goto sst_write_err;
  960. to += 2;
  961. nor->sst_write_second = true;
  962. }
  963. nor->sst_write_second = false;
  964. write_disable(nor);
  965. ret = spi_nor_wait_till_ready(nor);
  966. if (ret)
  967. goto sst_write_err;
  968. /* Write out trailing byte if it exists. */
  969. if (actual != len) {
  970. write_enable(nor);
  971. nor->program_opcode = SPINOR_OP_BP;
  972. ret = nor->write(nor, to, 1, buf + actual);
  973. if (ret < 0)
  974. goto sst_write_err;
  975. WARN(ret != 1, "While writing 1 byte written %i bytes\n",
  976. (int)ret);
  977. ret = spi_nor_wait_till_ready(nor);
  978. if (ret)
  979. goto sst_write_err;
  980. write_disable(nor);
  981. actual += 1;
  982. }
  983. sst_write_err:
  984. *retlen += actual;
  985. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
  986. return ret;
  987. }
/*
 * Write an address range to the nor chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;

		/*
		 * Offset of this chunk within its flash page; the mask
		 * assumes page_size is a power of two -- TODO confirm
		 * against the flash_info table entries.
		 */
		page_offset = (to + i) & (nor->page_size - 1);
		WARN_ONCE(page_offset,
			  "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.",
			  page_offset);
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		write_enable(nor);
		/* nor->write() may program fewer bytes than requested. */
		ret = nor->write(nor, to + i, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		/* Account the chunk before checking it was complete. */
		*retlen += written;
		i += written;
		/* A short program of a whole chunk is treated as I/O error. */
		if (written != page_remain) {
			dev_err(nor->dev,
				"While writing %zu bytes written %zd bytes\n",
				page_remain, written);
			ret = -EIO;
			goto write_err;
		}
	}

	/* On normal loop exit ret is 0 from the last ready poll. */
write_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
  1034. static int macronix_quad_enable(struct spi_nor *nor)
  1035. {
  1036. int ret, val;
  1037. val = read_sr(nor);
  1038. if (val < 0)
  1039. return val;
  1040. write_enable(nor);
  1041. write_sr(nor, val | SR_QUAD_EN_MX);
  1042. if (spi_nor_wait_till_ready(nor))
  1043. return 1;
  1044. ret = read_sr(nor);
  1045. if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
  1046. dev_err(nor->dev, "Macronix Quad bit not set\n");
  1047. return -EINVAL;
  1048. }
  1049. return 0;
  1050. }
  1051. /*
  1052. * Write status Register and configuration register with 2 bytes
  1053. * The first byte will be written to the status register, while the
  1054. * second byte will be written to the configuration register.
  1055. * Return negative if error occured.
  1056. */
  1057. static int write_sr_cr(struct spi_nor *nor, u16 val)
  1058. {
  1059. nor->cmd_buf[0] = val & 0xff;
  1060. nor->cmd_buf[1] = (val >> 8);
  1061. return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
  1062. }
  1063. static int spansion_quad_enable(struct spi_nor *nor)
  1064. {
  1065. int ret;
  1066. int quad_en = CR_QUAD_EN_SPAN << 8;
  1067. write_enable(nor);
  1068. ret = write_sr_cr(nor, quad_en);
  1069. if (ret < 0) {
  1070. dev_err(nor->dev,
  1071. "error while writing configuration register\n");
  1072. return -EINVAL;
  1073. }
  1074. ret = spi_nor_wait_till_ready(nor);
  1075. if (ret) {
  1076. dev_err(nor->dev,
  1077. "timeout while writing configuration register\n");
  1078. return ret;
  1079. }
  1080. /* read back and check it */
  1081. ret = read_cr(nor);
  1082. if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
  1083. dev_err(nor->dev, "Spansion Quad bit not set\n");
  1084. return -EINVAL;
  1085. }
  1086. return 0;
  1087. }
  1088. static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
  1089. {
  1090. int status;
  1091. switch (JEDEC_MFR(info)) {
  1092. case SNOR_MFR_MACRONIX:
  1093. status = macronix_quad_enable(nor);
  1094. if (status) {
  1095. dev_err(nor->dev, "Macronix quad-read not enabled\n");
  1096. return -EINVAL;
  1097. }
  1098. return status;
  1099. case SNOR_MFR_MICRON:
  1100. return 0;
  1101. default:
  1102. status = spansion_quad_enable(nor);
  1103. if (status) {
  1104. dev_err(nor->dev, "Spansion quad-read not enabled\n");
  1105. return -EINVAL;
  1106. }
  1107. return status;
  1108. }
  1109. }
  1110. static int spi_nor_check(struct spi_nor *nor)
  1111. {
  1112. if (!nor->dev || !nor->read || !nor->write ||
  1113. !nor->read_reg || !nor->write_reg) {
  1114. pr_err("spi-nor: please fill all the necessary fields!\n");
  1115. return -EINVAL;
  1116. }
  1117. return 0;
  1118. }
/**
 * spi_nor_scan() - identify the flash and initialize the spi_nor/mtd state
 * @nor:	spi_nor structure; dev, read, write, read_reg and write_reg
 *		must already be filled in by the hardware driver
 * @name:	optional flash model name to match against the spi_nor_ids
 *		table; NULL means rely purely on JEDEC auto-detection
 * @mode:	fastest read mode the controller supports; only honoured
 *		when the flash advertises the matching capability flag
 *
 * Detects the chip, clears power-up write protection where needed, and
 * fills in mtd_info callbacks, erase/read/program opcodes and address
 * width.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
	const struct flash_info *info = NULL;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	if (name)
		info = spi_nor_match_id(name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return -ENOENT;

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return PTR_ERR(jinfo);
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	mutex_init(&nor->lock);

	/*
	 * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
	 * with the software protection bits set
	 */
	if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(info) == SNOR_MFR_SST ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		write_enable(nor);
		/* Clear all block-protect bits; errors here are ignored. */
		write_sr(nor, 0);
		spi_nor_wait_till_ready(nor);
	}

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = info->sector_size * info->n_sectors;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;

	/* NOR protection support for STmicro/Micron chips and similar */
	if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		nor->flash_lock = stm_lock;
		nor->flash_unlock = stm_unlock;
		nor->flash_is_locked = stm_is_locked;
	}

	/* Only expose mtd locking ops if all three handlers exist. */
	if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* sst nor chips use AAI word program */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;

#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;
		mtd->erasesize = 4096;
	} else if (info->flags & SECT_4K_PMC) {
		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
		mtd->erasesize = 4096;
	} else
#endif
	{
		nor->erase_opcode = SPINOR_OP_SE;
		mtd->erasesize = info->sector_size;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = info->page_size;
	mtd->writebufsize = nor->page_size;

	if (np) {
		/* If we were instantiated by DT, use it */
		if (of_property_read_bool(np, "m25p,fast-read"))
			nor->flash_read = SPI_NOR_FAST;
		else
			nor->flash_read = SPI_NOR_NORMAL;
	} else {
		/* If we weren't instantiated by DT, default to fast-read */
		nor->flash_read = SPI_NOR_FAST;
	}

	/* Some devices cannot do fast-read, no matter what DT tells us */
	if (info->flags & SPI_NOR_NO_FR)
		nor->flash_read = SPI_NOR_NORMAL;

	/* Quad/Dual-read mode takes precedence over fast/normal */
	if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
		ret = set_quad_mode(nor, info);
		if (ret) {
			dev_err(dev, "quad mode not supported\n");
			return ret;
		}
		nor->flash_read = SPI_NOR_QUAD;
	} else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
		nor->flash_read = SPI_NOR_DUAL;
	}

	/* Default commands */
	switch (nor->flash_read) {
	case SPI_NOR_QUAD:
		nor->read_opcode = SPINOR_OP_READ_1_1_4;
		break;
	case SPI_NOR_DUAL:
		nor->read_opcode = SPINOR_OP_READ_1_1_2;
		break;
	case SPI_NOR_FAST:
		nor->read_opcode = SPINOR_OP_READ_FAST;
		break;
	case SPI_NOR_NORMAL:
		nor->read_opcode = SPINOR_OP_READ;
		break;
	default:
		dev_err(dev, "No Read opcode defined\n");
		return -EINVAL;
	}

	nor->program_opcode = SPINOR_OP_PP;

	if (info->addr_width)
		nor->addr_width = info->addr_width;
	else if (mtd->size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_width = 4;
		if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
			/* Dedicated 4-byte command set */
			switch (nor->flash_read) {
			case SPI_NOR_QUAD:
				nor->read_opcode = SPINOR_OP_READ4_1_1_4;
				break;
			case SPI_NOR_DUAL:
				nor->read_opcode = SPINOR_OP_READ4_1_1_2;
				break;
			case SPI_NOR_FAST:
				nor->read_opcode = SPINOR_OP_READ4_FAST;
				break;
			case SPI_NOR_NORMAL:
				nor->read_opcode = SPINOR_OP_READ4;
				break;
			}
			nor->program_opcode = SPINOR_OP_PP_4B;
			/* No small sector erase for 4-byte command set */
			nor->erase_opcode = SPINOR_OP_SE_4B;
			mtd->erasesize = info->sector_size;
		} else
			set_4byte(nor, info, 1);
	} else {
		nor->addr_width = 3;
	}

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_err(dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	nor->read_dummy = spi_nor_read_dummy_cycles(nor);

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
  1320. static const struct flash_info *spi_nor_match_id(const char *name)
  1321. {
  1322. const struct flash_info *id = spi_nor_ids;
  1323. while (id->name) {
  1324. if (!strcmp(name, id->name))
  1325. return id;
  1326. id++;
  1327. }
  1328. return NULL;
  1329. }
  1330. MODULE_LICENSE("GPL");
  1331. MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
  1332. MODULE_AUTHOR("Mike Lavender");
  1333. MODULE_DESCRIPTION("framework for SPI NOR");