spi-nor.c

  1. /*
  2. * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
  3. * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
  4. *
  5. * Copyright (C) 2005, Intec Automation Inc.
  6. * Copyright (C) 2014, Freescale Semiconductor, Inc.
  7. *
  8. * This code is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/err.h>
  13. #include <linux/errno.h>
  14. #include <linux/module.h>
  15. #include <linux/device.h>
  16. #include <linux/mutex.h>
  17. #include <linux/math64.h>
  18. #include <linux/sizes.h>
  19. #include <linux/mtd/mtd.h>
  20. #include <linux/of_platform.h>
  21. #include <linux/spi/flash.h>
  22. #include <linux/mtd/spi-nor.h>
  23. /* Define max times to check status register before we give up. */
  24. /*
  25. * For everything but full-chip erase; probably could be much smaller, but kept
  26. * around for safety for now
  27. */
  28. #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
  29. /*
  30. * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
  31. * for larger flash
  32. */
  33. #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
  34. #define SPI_NOR_MAX_ID_LEN 6
  35. #define SPI_NOR_MAX_ADDR_WIDTH 4
  36. struct flash_info {
  37. char *name;
  38. /*
  39. * This array stores the ID bytes.
   40. * The first three bytes are the JEDEC ID.
  41. * JEDEC ID zero means "no ID" (mostly older chips).
  42. */
  43. u8 id[SPI_NOR_MAX_ID_LEN];
  44. u8 id_len;
  45. /* The size listed here is what works with SPINOR_OP_SE, which isn't
  46. * necessarily called a "sector" by the vendor.
  47. */
  48. unsigned sector_size;
  49. u16 n_sectors;
  50. u16 page_size;
  51. u16 addr_width;
  52. u16 flags;
  53. #define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
  54. #define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
  55. #define SST_WRITE BIT(2) /* use SST byte programming */
  56. #define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
  57. #define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
  58. #define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
  59. #define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
  60. #define USE_FSR BIT(7) /* use flag status register */
  61. #define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
  62. #define SPI_NOR_HAS_TB BIT(9) /*
  63. * Flash SR has Top/Bottom (TB) protect
  64. * bit. Must be used with
  65. * SPI_NOR_HAS_LOCK.
  66. */
  67. #define SPI_S3AN BIT(10) /*
  68. * Xilinx Spartan 3AN In-System Flash
  69. * (MFR cannot be used for probing
  70. * because it has the same value as
  71. * ATMEL flashes)
  72. */
  73. #define SPI_NOR_4B_OPCODES BIT(11) /*
  74. * Use dedicated 4byte address op codes
  75. * to support memory size above 128Mib.
  76. */
  77. #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
  78. };
  79. #define JEDEC_MFR(info) ((info)->id[0])
  80. static const struct flash_info *spi_nor_match_id(const char *name);
  81. /*
   82. * Read the status register.
   83. * Return the status register value,
   84. * or a negative errno if an error occurred.
  85. */
  86. static int read_sr(struct spi_nor *nor)
  87. {
  88. int ret;
  89. u8 val;
  90. ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
  91. if (ret < 0) {
  92. pr_err("error %d reading SR\n", (int) ret);
  93. return ret;
  94. }
  95. return val;
  96. }
  97. /*
   98. * Read the flag status register.
   99. * Return the flag status register value,
   100. * or a negative errno if an error occurred.
  101. */
  102. static int read_fsr(struct spi_nor *nor)
  103. {
  104. int ret;
  105. u8 val;
  106. ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
  107. if (ret < 0) {
  108. pr_err("error %d reading FSR\n", ret);
  109. return ret;
  110. }
  111. return val;
  112. }
  113. /*
   114. * Read the configuration register.
   115. * Return the configuration register value,
   116. * or a negative errno if an error occurred.
  117. */
  118. static int read_cr(struct spi_nor *nor)
  119. {
  120. int ret;
  121. u8 val;
  122. ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
  123. if (ret < 0) {
  124. dev_err(nor->dev, "error %d reading CR\n", ret);
  125. return ret;
  126. }
  127. return val;
  128. }
  129. /*
   130. * Dummy cycle calculation for different types of read.
  131. * It can be used to support more commands with
  132. * different dummy cycle requirements.
  133. */
  134. static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
  135. {
  136. switch (nor->flash_read) {
  137. case SPI_NOR_FAST:
  138. case SPI_NOR_DUAL:
  139. case SPI_NOR_QUAD:
  140. return 8;
  141. case SPI_NOR_NORMAL:
  142. return 0;
  143. }
  144. return 0;
  145. }
  146. /*
   147. * Write the status register with 1 byte.
   148. * Returns negative if an error occurred.
  149. */
  150. static inline int write_sr(struct spi_nor *nor, u8 val)
  151. {
  152. nor->cmd_buf[0] = val;
  153. return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
  154. }
  155. /*
  156. * Set write enable latch with Write Enable command.
  157. * Returns negative if error occurred.
  158. */
  159. static inline int write_enable(struct spi_nor *nor)
  160. {
  161. return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
  162. }
  163. /*
  164. * Send write disable instruction to the chip.
  165. */
  166. static inline int write_disable(struct spi_nor *nor)
  167. {
  168. return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
  169. }
  170. static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
  171. {
  172. return mtd->priv;
  173. }
  174. static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
  175. {
  176. size_t i;
  177. for (i = 0; i < size; i++)
  178. if (table[i][0] == opcode)
  179. return table[i][1];
  180. /* No conversion found, keep input op code. */
  181. return opcode;
  182. }
  183. static inline u8 spi_nor_convert_3to4_read(u8 opcode)
  184. {
  185. static const u8 spi_nor_3to4_read[][2] = {
  186. { SPINOR_OP_READ, SPINOR_OP_READ_4B },
  187. { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
  188. { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
  189. { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
  190. { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
  191. { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
  192. };
  193. return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
  194. ARRAY_SIZE(spi_nor_3to4_read));
  195. }
  196. static inline u8 spi_nor_convert_3to4_program(u8 opcode)
  197. {
  198. static const u8 spi_nor_3to4_program[][2] = {
  199. { SPINOR_OP_PP, SPINOR_OP_PP_4B },
  200. { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
  201. { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
  202. };
  203. return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
  204. ARRAY_SIZE(spi_nor_3to4_program));
  205. }
  206. static inline u8 spi_nor_convert_3to4_erase(u8 opcode)
  207. {
  208. static const u8 spi_nor_3to4_erase[][2] = {
  209. { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
  210. { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
  211. { SPINOR_OP_SE, SPINOR_OP_SE_4B },
  212. };
  213. return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
  214. ARRAY_SIZE(spi_nor_3to4_erase));
  215. }
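/*
 * Illustrative example (opcode values as defined in <linux/mtd/spi-nor.h>):
 * spi_nor_convert_3to4_read() maps SPINOR_OP_READ_FAST (0x0b) to
 * SPINOR_OP_READ_FAST_4B (0x0c); an opcode without a table entry is returned
 * unchanged by spi_nor_convert_opcode().
 */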
  216. static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
  217. const struct flash_info *info)
  218. {
  219. /* Do some manufacturer fixups first */
  220. switch (JEDEC_MFR(info)) {
  221. case SNOR_MFR_SPANSION:
  222. /* No small sector erase for 4-byte command set */
  223. nor->erase_opcode = SPINOR_OP_SE;
  224. nor->mtd.erasesize = info->sector_size;
  225. break;
  226. default:
  227. break;
  228. }
  229. nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
  230. nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
  231. nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
  232. }
  233. /* Enable/disable 4-byte addressing mode. */
  234. static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
  235. int enable)
  236. {
  237. int status;
  238. bool need_wren = false;
  239. u8 cmd;
  240. switch (JEDEC_MFR(info)) {
  241. case SNOR_MFR_MICRON:
   242. /* Some Micron flashes need the WREN command; all will accept it */
  243. need_wren = true;
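/* fall through - Macronix and Winbond use the same EN4B/EX4B opcodes */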
  244. case SNOR_MFR_MACRONIX:
  245. case SNOR_MFR_WINBOND:
  246. if (need_wren)
  247. write_enable(nor);
  248. cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
  249. status = nor->write_reg(nor, cmd, NULL, 0);
  250. if (need_wren)
  251. write_disable(nor);
  252. return status;
  253. default:
  254. /* Spansion style */
  255. nor->cmd_buf[0] = enable << 7;
  256. return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
  257. }
  258. }
  259. static int s3an_sr_ready(struct spi_nor *nor)
  260. {
  261. int ret;
  262. u8 val;
  263. ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
  264. if (ret < 0) {
  265. dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
  266. return ret;
  267. }
  268. return !!(val & XSR_RDY);
  269. }
  270. static inline int spi_nor_sr_ready(struct spi_nor *nor)
  271. {
  272. int sr = read_sr(nor);
  273. if (sr < 0)
  274. return sr;
  275. else
  276. return !(sr & SR_WIP);
  277. }
  278. static inline int spi_nor_fsr_ready(struct spi_nor *nor)
  279. {
  280. int fsr = read_fsr(nor);
  281. if (fsr < 0)
  282. return fsr;
  283. else
  284. return fsr & FSR_READY;
  285. }
  286. static int spi_nor_ready(struct spi_nor *nor)
  287. {
  288. int sr, fsr;
  289. if (nor->flags & SNOR_F_READY_XSR_RDY)
  290. sr = s3an_sr_ready(nor);
  291. else
  292. sr = spi_nor_sr_ready(nor);
  293. if (sr < 0)
  294. return sr;
  295. fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
  296. if (fsr < 0)
  297. return fsr;
  298. return sr && fsr;
  299. }
  300. /*
  301. * Service routine to read status register until ready, or timeout occurs.
  302. * Returns non-zero if error.
  303. */
  304. static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
  305. unsigned long timeout_jiffies)
  306. {
  307. unsigned long deadline;
  308. int timeout = 0, ret;
  309. deadline = jiffies + timeout_jiffies;
  310. while (!timeout) {
  311. if (time_after_eq(jiffies, deadline))
  312. timeout = 1;
  313. ret = spi_nor_ready(nor);
  314. if (ret < 0)
  315. return ret;
  316. if (ret)
  317. return 0;
  318. cond_resched();
  319. }
  320. dev_err(nor->dev, "flash operation timed out\n");
  321. return -ETIMEDOUT;
  322. }
  323. static int spi_nor_wait_till_ready(struct spi_nor *nor)
  324. {
  325. return spi_nor_wait_till_ready_with_timeout(nor,
  326. DEFAULT_READY_WAIT_JIFFIES);
  327. }
  328. /*
  329. * Erase the whole flash memory
  330. *
  331. * Returns 0 if successful, non-zero otherwise.
  332. */
  333. static int erase_chip(struct spi_nor *nor)
  334. {
  335. dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
  336. return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
  337. }
  338. static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
  339. {
  340. int ret = 0;
  341. mutex_lock(&nor->lock);
  342. if (nor->prepare) {
  343. ret = nor->prepare(nor, ops);
  344. if (ret) {
  345. dev_err(nor->dev, "failed in the preparation.\n");
  346. mutex_unlock(&nor->lock);
  347. return ret;
  348. }
  349. }
  350. return ret;
  351. }
  352. static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
  353. {
  354. if (nor->unprepare)
  355. nor->unprepare(nor, ops);
  356. mutex_unlock(&nor->lock);
  357. }
  358. /*
   359. * This code converts an address to the Default Address Mode, which has
   360. * non-power-of-two page sizes. We must support this mode because it is the
   361. * default mode supported by Xilinx tools, it can access the whole flash area,
   362. * and changing over to the Power-of-two mode is irreversible and corrupts the
   363. * original data.
   364. * Addr can safely be unsigned int: the biggest S3AN device is smaller than
   365. * 4 MiB.
  366. */
  367. static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
  368. {
  369. unsigned int offset;
  370. unsigned int page;
  371. offset = addr % nor->page_size;
  372. page = addr / nor->page_size;
  373. page <<= (nor->page_size > 512) ? 10 : 9;
  374. return page | offset;
  375. }
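/*
 * Worked example (illustrative numbers): with a 264-byte page, linear address
 * 1000 falls in page 3 at offset 208 (1000 = 3 * 264 + 208), so the converted
 * address is (3 << 9) | 208 = 0x6d0. Parts with 528-byte pages shift the page
 * number by 10 bits instead of 9.
 */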
  376. /*
  377. * Initiate the erasure of a single sector
  378. */
  379. static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
  380. {
  381. u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
  382. int i;
  383. if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
  384. addr = spi_nor_s3an_addr_convert(nor, addr);
  385. if (nor->erase)
  386. return nor->erase(nor, addr);
  387. /*
  388. * Default implementation, if driver doesn't have a specialized HW
  389. * control
  390. */
  391. for (i = nor->addr_width - 1; i >= 0; i--) {
  392. buf[i] = addr & 0xff;
  393. addr >>= 8;
  394. }
  395. return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
  396. }
  397. /*
   398. * Erase an address range on the nor chip. The address range may span
   399. * one or more erase sectors. Return an error if there is a problem erasing.
  400. */
  401. static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
  402. {
  403. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  404. u32 addr, len;
  405. uint32_t rem;
  406. int ret;
  407. dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
  408. (long long)instr->len);
  409. div_u64_rem(instr->len, mtd->erasesize, &rem);
  410. if (rem)
  411. return -EINVAL;
  412. addr = instr->addr;
  413. len = instr->len;
  414. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
  415. if (ret)
  416. return ret;
  417. /* whole-chip erase? */
  418. if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
  419. unsigned long timeout;
  420. write_enable(nor);
  421. if (erase_chip(nor)) {
  422. ret = -EIO;
  423. goto erase_err;
  424. }
  425. /*
  426. * Scale the timeout linearly with the size of the flash, with
  427. * a minimum calibrated to an old 2MB flash. We could try to
  428. * pull these from CFI/SFDP, but these values should be good
  429. * enough for now.
  430. */
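/*
 * For example (illustrative size): a 16 MiB flash gives mtd->size / SZ_2M = 8,
 * so the wait below allows 8 * CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
 * i.e. 8 * 40 s = 320 s.
 */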
  431. timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
  432. CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
  433. (unsigned long)(mtd->size / SZ_2M));
  434. ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
  435. if (ret)
  436. goto erase_err;
  437. /* REVISIT in some cases we could speed up erasing large regions
  438. * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
  439. * to use "small sector erase", but that's not always optimal.
  440. */
  441. /* "sector"-at-a-time erase */
  442. } else {
  443. while (len) {
  444. write_enable(nor);
  445. ret = spi_nor_erase_sector(nor, addr);
  446. if (ret)
  447. goto erase_err;
  448. addr += mtd->erasesize;
  449. len -= mtd->erasesize;
  450. ret = spi_nor_wait_till_ready(nor);
  451. if (ret)
  452. goto erase_err;
  453. }
  454. }
  455. write_disable(nor);
  456. erase_err:
  457. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
  458. instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
  459. mtd_erase_callback(instr);
  460. return ret;
  461. }
  462. static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
  463. uint64_t *len)
  464. {
  465. struct mtd_info *mtd = &nor->mtd;
  466. u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
  467. int shift = ffs(mask) - 1;
  468. int pow;
  469. if (!(sr & mask)) {
  470. /* No protection */
  471. *ofs = 0;
  472. *len = 0;
  473. } else {
  474. pow = ((sr & mask) ^ mask) >> shift;
  475. *len = mtd->size >> pow;
  476. if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
  477. *ofs = 0;
  478. else
  479. *ofs = mtd->size - *len;
  480. }
  481. }
  482. /*
  483. * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
  484. * @locked is false); 0 otherwise
  485. */
  486. static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
  487. u8 sr, bool locked)
  488. {
  489. loff_t lock_offs;
  490. uint64_t lock_len;
  491. if (!len)
  492. return 1;
  493. stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
  494. if (locked)
  495. /* Requested range is a sub-range of locked range */
  496. return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
  497. else
  498. /* Requested range does not overlap with locked range */
  499. return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
  500. }
  501. static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
  502. u8 sr)
  503. {
  504. return stm_check_lock_status_sr(nor, ofs, len, sr, true);
  505. }
  506. static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
  507. u8 sr)
  508. {
  509. return stm_check_lock_status_sr(nor, ofs, len, sr, false);
  510. }
  511. /*
  512. * Lock a region of the flash. Compatible with ST Micro and similar flash.
  513. * Supports the block protection bits BP{0,1,2} in the status register
  514. * (SR). Does not support these features found in newer SR bitfields:
  515. * - SEC: sector/block protect - only handle SEC=0 (block protect)
  516. * - CMP: complement protect - only support CMP=0 (range is not complemented)
  517. *
  518. * Support for the following is provided conditionally for some flash:
  519. * - TB: top/bottom protect
  520. *
  521. * Sample table portion for 8MB flash (Winbond w25q64fw):
  522. *
  523. * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
  524. * --------------------------------------------------------------------------
  525. * X | X | 0 | 0 | 0 | NONE | NONE
  526. * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
  527. * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
  528. * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
  529. * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
  530. * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
  531. * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
  532. * X | X | 1 | 1 | 1 | 8 MB | ALL
  533. * ------|-------|-------|-------|-------|---------------|-------------------
  534. * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
  535. * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
  536. * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
  537. * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
  538. * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
  539. * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
  540. *
  541. * Returns negative on errors, 0 on success.
  542. */
  543. static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
  544. {
  545. struct mtd_info *mtd = &nor->mtd;
  546. int status_old, status_new;
  547. u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
  548. u8 shift = ffs(mask) - 1, pow, val;
  549. loff_t lock_len;
  550. bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
  551. bool use_top;
  552. int ret;
  553. status_old = read_sr(nor);
  554. if (status_old < 0)
  555. return status_old;
  556. /* If nothing in our range is unlocked, we don't need to do anything */
  557. if (stm_is_locked_sr(nor, ofs, len, status_old))
  558. return 0;
  559. /* If anything below us is unlocked, we can't use 'bottom' protection */
  560. if (!stm_is_locked_sr(nor, 0, ofs, status_old))
  561. can_be_bottom = false;
  562. /* If anything above us is unlocked, we can't use 'top' protection */
  563. if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
  564. status_old))
  565. can_be_top = false;
  566. if (!can_be_bottom && !can_be_top)
  567. return -EINVAL;
  568. /* Prefer top, if both are valid */
  569. use_top = can_be_top;
  570. /* lock_len: length of region that should end up locked */
  571. if (use_top)
  572. lock_len = mtd->size - ofs;
  573. else
  574. lock_len = ofs + len;
  575. /*
  576. * Need smallest pow such that:
  577. *
  578. * 1 / (2^pow) <= (len / size)
  579. *
  580. * so (assuming power-of-2 size) we do:
  581. *
  582. * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
  583. */
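/*
 * Worked example (8 MB part from the table above, SR_BP0..SR_BP2 at bits 2..4
 * as defined in <linux/mtd/spi-nor.h>): locking the top 1 MB gives
 * pow = ilog2(8 MB) - ilog2(1 MB) = 23 - 20 = 3 and
 * val = 0x1c - (3 << 2) = 0x10, i.e. BP2=1, BP1=0, BP0=0: the "Upper 1/8" row.
 */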
  584. pow = ilog2(mtd->size) - ilog2(lock_len);
  585. val = mask - (pow << shift);
  586. if (val & ~mask)
  587. return -EINVAL;
  588. /* Don't "lock" with no region! */
  589. if (!(val & mask))
  590. return -EINVAL;
  591. status_new = (status_old & ~mask & ~SR_TB) | val;
  592. /* Disallow further writes if WP pin is asserted */
  593. status_new |= SR_SRWD;
  594. if (!use_top)
  595. status_new |= SR_TB;
  596. /* Don't bother if they're the same */
  597. if (status_new == status_old)
  598. return 0;
  599. /* Only modify protection if it will not unlock other areas */
  600. if ((status_new & mask) < (status_old & mask))
  601. return -EINVAL;
  602. write_enable(nor);
  603. ret = write_sr(nor, status_new);
  604. if (ret)
  605. return ret;
  606. return spi_nor_wait_till_ready(nor);
  607. }
  608. /*
  609. * Unlock a region of the flash. See stm_lock() for more info
  610. *
  611. * Returns negative on errors, 0 on success.
  612. */
  613. static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
  614. {
  615. struct mtd_info *mtd = &nor->mtd;
  616. int status_old, status_new;
  617. u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
  618. u8 shift = ffs(mask) - 1, pow, val;
  619. loff_t lock_len;
  620. bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
  621. bool use_top;
  622. int ret;
  623. status_old = read_sr(nor);
  624. if (status_old < 0)
  625. return status_old;
  626. /* If nothing in our range is locked, we don't need to do anything */
  627. if (stm_is_unlocked_sr(nor, ofs, len, status_old))
  628. return 0;
  629. /* If anything below us is locked, we can't use 'top' protection */
  630. if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
  631. can_be_top = false;
  632. /* If anything above us is locked, we can't use 'bottom' protection */
  633. if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
  634. status_old))
  635. can_be_bottom = false;
  636. if (!can_be_bottom && !can_be_top)
  637. return -EINVAL;
  638. /* Prefer top, if both are valid */
  639. use_top = can_be_top;
  640. /* lock_len: length of region that should remain locked */
  641. if (use_top)
  642. lock_len = mtd->size - (ofs + len);
  643. else
  644. lock_len = ofs;
  645. /*
  646. * Need largest pow such that:
  647. *
  648. * 1 / (2^pow) >= (len / size)
  649. *
  650. * so (assuming power-of-2 size) we do:
  651. *
  652. * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
  653. */
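/*
 * Worked example (8 MB part, top protection): unlocking so that only the top
 * 2 MB stays protected gives lock_len = 2 MB,
 * pow = ilog2(8 MB) - order_base_2(2 MB) = 23 - 21 = 2 and
 * val = 0x1c - (2 << 2) = 0x14, i.e. BP2=1, BP0=1: the "Upper 1/4" row of the
 * table in stm_lock().
 */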
  654. pow = ilog2(mtd->size) - order_base_2(lock_len);
  655. if (lock_len == 0) {
  656. val = 0; /* fully unlocked */
  657. } else {
  658. val = mask - (pow << shift);
  659. /* Some power-of-two sizes are not supported */
  660. if (val & ~mask)
  661. return -EINVAL;
  662. }
  663. status_new = (status_old & ~mask & ~SR_TB) | val;
  664. /* Don't protect status register if we're fully unlocked */
  665. if (lock_len == 0)
  666. status_new &= ~SR_SRWD;
  667. if (!use_top)
  668. status_new |= SR_TB;
  669. /* Don't bother if they're the same */
  670. if (status_new == status_old)
  671. return 0;
  672. /* Only modify protection if it will not lock other areas */
  673. if ((status_new & mask) > (status_old & mask))
  674. return -EINVAL;
  675. write_enable(nor);
  676. ret = write_sr(nor, status_new);
  677. if (ret)
  678. return ret;
  679. return spi_nor_wait_till_ready(nor);
  680. }
  681. /*
  682. * Check if a region of the flash is (completely) locked. See stm_lock() for
  683. * more info.
  684. *
  685. * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
  686. * negative on errors.
  687. */
  688. static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
  689. {
  690. int status;
  691. status = read_sr(nor);
  692. if (status < 0)
  693. return status;
  694. return stm_is_locked_sr(nor, ofs, len, status);
  695. }
  696. static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  697. {
  698. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  699. int ret;
  700. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
  701. if (ret)
  702. return ret;
  703. ret = nor->flash_lock(nor, ofs, len);
  704. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
  705. return ret;
  706. }
  707. static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  708. {
  709. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  710. int ret;
  711. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
  712. if (ret)
  713. return ret;
  714. ret = nor->flash_unlock(nor, ofs, len);
  715. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
  716. return ret;
  717. }
  718. static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  719. {
  720. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  721. int ret;
  722. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
  723. if (ret)
  724. return ret;
  725. ret = nor->flash_is_locked(nor, ofs, len);
  726. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
  727. return ret;
  728. }
  729. /* Used when the "_ext_id" is two bytes at most */
  730. #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
  731. .id = { \
  732. ((_jedec_id) >> 16) & 0xff, \
  733. ((_jedec_id) >> 8) & 0xff, \
  734. (_jedec_id) & 0xff, \
  735. ((_ext_id) >> 8) & 0xff, \
  736. (_ext_id) & 0xff, \
  737. }, \
  738. .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
  739. .sector_size = (_sector_size), \
  740. .n_sectors = (_n_sectors), \
  741. .page_size = 256, \
  742. .flags = (_flags),
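/*
 * Example expansion (entry taken from the table below): "w25q128" uses
 * INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K), which sets the JEDEC ID bytes
 * to ef 40 18 with .id_len = 3, .sector_size = 65536, .n_sectors = 256 and
 * .page_size = 256, i.e. a 16 MiB part.
 */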
  743. #define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
  744. .id = { \
  745. ((_jedec_id) >> 16) & 0xff, \
  746. ((_jedec_id) >> 8) & 0xff, \
  747. (_jedec_id) & 0xff, \
  748. ((_ext_id) >> 16) & 0xff, \
  749. ((_ext_id) >> 8) & 0xff, \
  750. (_ext_id) & 0xff, \
  751. }, \
  752. .id_len = 6, \
  753. .sector_size = (_sector_size), \
  754. .n_sectors = (_n_sectors), \
  755. .page_size = 256, \
  756. .flags = (_flags),
  757. #define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
  758. .sector_size = (_sector_size), \
  759. .n_sectors = (_n_sectors), \
  760. .page_size = (_page_size), \
  761. .addr_width = (_addr_width), \
  762. .flags = (_flags),
  763. #define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
  764. .id = { \
  765. ((_jedec_id) >> 16) & 0xff, \
  766. ((_jedec_id) >> 8) & 0xff, \
  767. (_jedec_id) & 0xff \
  768. }, \
  769. .id_len = 3, \
  770. .sector_size = (8*_page_size), \
  771. .n_sectors = (_n_sectors), \
  772. .page_size = _page_size, \
  773. .addr_width = 3, \
  774. .flags = SPI_NOR_NO_FR | SPI_S3AN,
  775. /* NOTE: double check command sets and memory organization when you add
   776. * more nor chips. This current list focuses on newer chips, which
   777. * have been converging on command sets which include JEDEC ID.
   778. *
   779. * All newly added entries should describe *hardware* and should use SECT_4K
   780. * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
   781. * scenarios excluding small sectors there is a config option that can be
   782. * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
   783. * For historical (and compatibility) reasons (before we got the above config)
   784. * some old entries may be missing the 4K flag.
  785. */
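/*
 * A hypothetical new entry following the rules above (name, ID and geometry
 * made up purely for illustration) would look like:
 *
 *	{ "examplenor", INFO(0x123456, 0, 64 * 1024, 256,
 *			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 */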
  786. static const struct flash_info spi_nor_ids[] = {
  787. /* Atmel -- some are (confusingly) marketed as "DataFlash" */
  788. { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
  789. { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
  790. { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
  791. { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
  792. { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
  793. { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
  794. { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
  795. { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
  796. { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
  797. { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
  798. { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
  799. /* EON -- en25xxx */
  800. { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
  801. { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
  802. { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
  803. { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
  804. { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
  805. { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
  806. { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
  807. { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
  808. /* ESMT */
  809. { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
  810. { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
  811. { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
  812. /* Everspin */
  813. { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  814. { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  815. { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  816. /* Fujitsu */
  817. { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
  818. /* GigaDevice */
  819. {
  820. "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
  821. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  822. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  823. },
  824. {
  825. "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
  826. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  827. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  828. },
  829. {
  830. "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
  831. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  832. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  833. },
  834. {
  835. "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
  836. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  837. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  838. },
  839. {
  840. "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
  841. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  842. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  843. },
  844. /* Intel/Numonyx -- xxxs33b */
  845. { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
  846. { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
  847. { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
  848. /* ISSI */
  849. { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
  850. /* Macronix */
  851. { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
  852. { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
  853. { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
  854. { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
  855. { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
  856. { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
  857. { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
  858. { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
  859. { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
  860. { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
  861. { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
  862. { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
  863. { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
  864. { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
  865. { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
  866. { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
  867. { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
  868. { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
  869. { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
  870. /* Micron */
  871. { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
  872. { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
  873. { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
  874. { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
  875. { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
  876. { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
  877. { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
  878. { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
  879. { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
  880. { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  881. { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
  882. { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
  883. { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
  884. /* PMC */
  885. { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
  886. { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
  887. { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
  888. /* Spansion -- single (large) sector size only, at least
  889. * for the chips listed here (without boot sectors).
  890. */
  891. { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  892. { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  893. { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
  894. { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  895. { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  896. { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
  897. { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
  898. { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
  899. { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  900. { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  901. { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  902. { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
  903. { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
  904. { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
  905. { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
  906. { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
  907. { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  908. { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  909. { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  910. { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
  911. { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  912. { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
  913. { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
  914. { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
  915. { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
  916. /* SST -- large erase sizes are "overlays", "sectors" are 4K */
  917. { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
  918. { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
  919. { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
  920. { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
  921. { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
  922. { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
  923. { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
  924. { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
  925. { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
  926. { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
  927. { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
  928. { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
  929. /* ST Microelectronics -- newer production may have feature updates */
  930. { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
  931. { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
  932. { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
  933. { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
  934. { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
  935. { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
  936. { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
  937. { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
  938. { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
  939. { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
  940. { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
  941. { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
  942. { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
  943. { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
  944. { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
  945. { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
  946. { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
  947. { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
  948. { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
  949. { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
  950. { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
  951. { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
  952. { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
  953. { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
  954. { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
  955. { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
  956. { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
  957. { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
  958. { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
  959. { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
  960. /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
  961. { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
  962. { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
  963. { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
  964. { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
  965. { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
  966. { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
  967. { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
  968. { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
  969. { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
  970. { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
  971. { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
  972. {
  973. "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
  974. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  975. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  976. },
  977. { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
  978. { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
  979. {
  980. "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
  981. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  982. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  983. },
  984. {
  985. "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
  986. SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
  987. SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
  988. },
  989. { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
  990. { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
  991. { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
  992. { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
  993. /* Catalyst / On Semiconductor -- non-JEDEC */
  994. { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  995. { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  996. { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  997. { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  998. { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
  999. /* Xilinx S3AN Internal Flash */
  1000. { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
  1001. { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
  1002. { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
  1003. { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
  1004. { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
  1005. { },
  1006. };
  1007. static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
  1008. {
  1009. int tmp;
  1010. u8 id[SPI_NOR_MAX_ID_LEN];
  1011. const struct flash_info *info;
  1012. tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
  1013. if (tmp < 0) {
  1014. dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
  1015. return ERR_PTR(tmp);
  1016. }
  1017. for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
  1018. info = &spi_nor_ids[tmp];
  1019. if (info->id_len) {
  1020. if (!memcmp(info->id, id, info->id_len))
  1021. return &spi_nor_ids[tmp];
  1022. }
  1023. }
  1024. dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
  1025. id[0], id[1], id[2]);
  1026. return ERR_PTR(-ENODEV);
  1027. }
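/*
 * For example, an RDID response starting with 20 ba 18 matches the
 * "n25q128a13" entry above (id_len 3); ID bytes beyond an entry's id_len are
 * ignored by the memcmp().
 */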
  1028. static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
  1029. size_t *retlen, u_char *buf)
  1030. {
  1031. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  1032. int ret;
  1033. dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
  1034. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
  1035. if (ret)
  1036. return ret;
  1037. while (len) {
  1038. loff_t addr = from;
  1039. if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
  1040. addr = spi_nor_s3an_addr_convert(nor, addr);
  1041. ret = nor->read(nor, addr, len, buf);
  1042. if (ret == 0) {
  1043. /* We shouldn't see 0-length reads */
  1044. ret = -EIO;
  1045. goto read_err;
  1046. }
  1047. if (ret < 0)
  1048. goto read_err;
  1049. WARN_ON(ret > len);
  1050. *retlen += ret;
  1051. buf += ret;
  1052. from += ret;
  1053. len -= ret;
  1054. }
  1055. ret = 0;
  1056. read_err:
  1057. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
  1058. return ret;
  1059. }
  1060. static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
  1061. size_t *retlen, const u_char *buf)
  1062. {
  1063. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  1064. size_t actual;
  1065. int ret;
  1066. dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
  1067. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
  1068. if (ret)
  1069. return ret;
  1070. write_enable(nor);
  1071. nor->sst_write_second = false;
  1072. actual = to % 2;
  1073. /* Start write from odd address. */
  1074. if (actual) {
  1075. nor->program_opcode = SPINOR_OP_BP;
  1076. /* write one byte. */
  1077. ret = nor->write(nor, to, 1, buf);
  1078. if (ret < 0)
  1079. goto sst_write_err;
  1080. WARN(ret != 1, "While writing 1 byte written %i bytes\n",
  1081. (int)ret);
  1082. ret = spi_nor_wait_till_ready(nor);
  1083. if (ret)
  1084. goto sst_write_err;
  1085. }
  1086. to += actual;
  1087. /* Write out most of the data here. */
  1088. for (; actual < len - 1; actual += 2) {
  1089. nor->program_opcode = SPINOR_OP_AAI_WP;
  1090. /* write two bytes. */
  1091. ret = nor->write(nor, to, 2, buf + actual);
  1092. if (ret < 0)
  1093. goto sst_write_err;
  1094. WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
  1095. (int)ret);
  1096. ret = spi_nor_wait_till_ready(nor);
  1097. if (ret)
  1098. goto sst_write_err;
  1099. to += 2;
  1100. nor->sst_write_second = true;
  1101. }
  1102. nor->sst_write_second = false;
  1103. write_disable(nor);
  1104. ret = spi_nor_wait_till_ready(nor);
  1105. if (ret)
  1106. goto sst_write_err;
  1107. /* Write out trailing byte if it exists. */
  1108. if (actual != len) {
  1109. write_enable(nor);
  1110. nor->program_opcode = SPINOR_OP_BP;
  1111. ret = nor->write(nor, to, 1, buf + actual);
  1112. if (ret < 0)
  1113. goto sst_write_err;
  1114. WARN(ret != 1, "While writing 1 byte written %i bytes\n",
  1115. (int)ret);
  1116. ret = spi_nor_wait_till_ready(nor);
  1117. if (ret)
  1118. goto sst_write_err;
  1119. write_disable(nor);
  1120. actual += 1;
  1121. }
  1122. sst_write_err:
  1123. *retlen += actual;
  1124. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
  1125. return ret;
  1126. }
  1127. /*
  1128. * Write an address range to the nor chip. Data must be written in
  1129. * FLASH_PAGESIZE chunks. The address range may be any size provided
  1130. * it is within the physical boundaries.
  1131. */
  1132. static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
  1133. size_t *retlen, const u_char *buf)
  1134. {
  1135. struct spi_nor *nor = mtd_to_spi_nor(mtd);
  1136. size_t page_offset, page_remain, i;
  1137. ssize_t ret;
  1138. dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
  1139. ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
  1140. if (ret)
  1141. return ret;
  1142. for (i = 0; i < len; ) {
  1143. ssize_t written;
  1144. loff_t addr = to + i;
  1145. /*
  1146. * If page_size is a power of two, the offset can be quickly
   1147. * calculated with an AND operation. In the other cases we
   1148. * need to do a modulus operation (more expensive).
   1149. * Power-of-two numbers have only one bit set, so we can use
   1150. * hweight32() to detect whether we need to do a
   1151. * modulus (do_div()) or not.
  1152. */
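/*
 * E.g. a 256-byte page has hweight32(256) == 1, so page_offset is simply
 * addr & 255; an S3AN 264-byte page has hweight32(264) == 2, so the slower
 * do_div() path is taken instead.
 */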
  1153. if (hweight32(nor->page_size) == 1) {
  1154. page_offset = addr & (nor->page_size - 1);
  1155. } else {
  1156. uint64_t aux = addr;
  1157. page_offset = do_div(aux, nor->page_size);
  1158. }
  1159. /* the size of data remaining on the first page */
  1160. page_remain = min_t(size_t,
  1161. nor->page_size - page_offset, len - i);
  1162. if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
  1163. addr = spi_nor_s3an_addr_convert(nor, addr);
  1164. write_enable(nor);
  1165. ret = nor->write(nor, addr, page_remain, buf + i);
  1166. if (ret < 0)
  1167. goto write_err;
  1168. written = ret;
  1169. ret = spi_nor_wait_till_ready(nor);
  1170. if (ret)
  1171. goto write_err;
  1172. *retlen += written;
  1173. i += written;
  1174. if (written != page_remain) {
  1175. dev_err(nor->dev,
  1176. "While writing %zu bytes written %zd bytes\n",
  1177. page_remain, written);
  1178. ret = -EIO;
  1179. goto write_err;
  1180. }
  1181. }
  1182. write_err:
  1183. spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
  1184. return ret;
  1185. }
  1186. static int macronix_quad_enable(struct spi_nor *nor)
  1187. {
  1188. int ret, val;
  1189. val = read_sr(nor);
  1190. if (val < 0)
  1191. return val;
  1192. if (val & SR_QUAD_EN_MX)
  1193. return 0;
  1194. write_enable(nor);
  1195. write_sr(nor, val | SR_QUAD_EN_MX);
   1196. ret = spi_nor_wait_till_ready(nor);
   1197. if (ret) return ret;
  1198. ret = read_sr(nor);
  1199. if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
  1200. dev_err(nor->dev, "Macronix Quad bit not set\n");
  1201. return -EINVAL;
  1202. }
  1203. return 0;
  1204. }
  1205. /*
   1206. * Write the status register and configuration register with 2 bytes.
   1207. * The first byte will be written to the status register, while the
   1208. * second byte will be written to the configuration register.
   1209. * Returns negative if an error occurred.
  1210. */
  1211. static int write_sr_cr(struct spi_nor *nor, u16 val)
  1212. {
  1213. nor->cmd_buf[0] = val & 0xff;
  1214. nor->cmd_buf[1] = (val >> 8);
  1215. return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
  1216. }
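/*
 * For example, spansion_quad_enable() below calls
 * write_sr_cr(nor, CR_QUAD_EN_SPAN << 8), which writes 0x00 to the status
 * register and 0x02 (the QE bit) to the configuration register.
 */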
  1217. static int spansion_quad_enable(struct spi_nor *nor)
  1218. {
  1219. int ret;
  1220. int quad_en = CR_QUAD_EN_SPAN << 8;
  1221. write_enable(nor);
  1222. ret = write_sr_cr(nor, quad_en);
  1223. if (ret < 0) {
  1224. dev_err(nor->dev,
  1225. "error while writing configuration register\n");
  1226. return -EINVAL;
  1227. }
  1228. ret = spi_nor_wait_till_ready(nor);
  1229. if (ret) {
  1230. dev_err(nor->dev,
  1231. "timeout while writing configuration register\n");
  1232. return ret;
  1233. }
  1234. /* read back and check it */
  1235. ret = read_cr(nor);
  1236. if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
  1237. dev_err(nor->dev, "Spansion Quad bit not set\n");
  1238. return -EINVAL;
  1239. }
  1240. return 0;
  1241. }
  1242. static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
  1243. {
  1244. int status;
  1245. switch (JEDEC_MFR(info)) {
  1246. case SNOR_MFR_MACRONIX:
  1247. status = macronix_quad_enable(nor);
  1248. if (status) {
  1249. dev_err(nor->dev, "Macronix quad-read not enabled\n");
  1250. return -EINVAL;
  1251. }
  1252. return status;
  1253. case SNOR_MFR_MICRON:
  1254. return 0;
  1255. default:
  1256. status = spansion_quad_enable(nor);
  1257. if (status) {
  1258. dev_err(nor->dev, "Spansion quad-read not enabled\n");
  1259. return -EINVAL;
  1260. }
  1261. return status;
  1262. }
  1263. }
  1264. static int spi_nor_check(struct spi_nor *nor)
  1265. {
  1266. if (!nor->dev || !nor->read || !nor->write ||
  1267. !nor->read_reg || !nor->write_reg) {
  1268. pr_err("spi-nor: please fill all the necessary fields!\n");
  1269. return -EINVAL;
  1270. }
  1271. return 0;
  1272. }
  1273. static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
  1274. {
  1275. int ret;
  1276. u8 val;
  1277. ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
  1278. if (ret < 0) {
  1279. dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
  1280. return ret;
  1281. }
  1282. nor->erase_opcode = SPINOR_OP_XSE;
  1283. nor->program_opcode = SPINOR_OP_XPP;
  1284. nor->read_opcode = SPINOR_OP_READ;
  1285. nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
  1286. /*
   1287. * These flashes have a page size of 264 or 528 bytes (known as
   1288. * Default addressing mode). It can be changed to a more standard
   1289. * Power-of-two mode where the page size is 256 or 512 bytes. This comes
   1290. * at a price: there is 3% less space, the data is corrupted,
   1291. * and the page size cannot be changed back to Default addressing
   1292. * mode.
   1293. *
   1294. * The current addressing mode can be read from the XRDSR register
   1295. * and should not be changed, because doing so is a destructive operation.
  1296. */
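
	/*
	 * In Power of 2 mode an erase sector spans 8 pages, hence the mtd
	 * geometry below: erasesize = 8 * page_size and
	 * size = 8 * page_size * n_sectors.
	 */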
	if (val & XSR_PAGESIZE) {
		/* Flash in Power of 2 mode */
		nor->page_size = (nor->page_size == 264) ? 256 : 512;
		nor->mtd.writebufsize = nor->page_size;
		nor->mtd.size = 8 * nor->page_size * info->n_sectors;
		nor->mtd.erasesize = 8 * nor->page_size;
	} else {
		/* Flash in Default addressing mode */
		nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
	}

	return 0;
}
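
/**
 * spi_nor_scan() - identify the flash and set up the spi_nor and mtd structs
 * @nor:	the spi_nor structure; the controller driver must already have
 *		filled in the hooks checked by spi_nor_check()
 * @name:	chip name from the platform/device data, or NULL to rely on
 *		JEDEC ID auto-detection alone
 * @mode:	fastest read mode supported by the controller; the flash may
 *		still end up in a slower mode if it lacks the capability
 *
 * Return: 0 on success, a negative errno otherwise.
 */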
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
	const struct flash_info *info = NULL;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	if (name)
		info = spi_nor_match_id(name);
	/* Try to auto-detect if the chip name wasn't specified or wasn't found */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return -ENOENT;

	/*
	 * If the caller has specified the name of a flash model that can
	 * normally be detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return PTR_ERR(jinfo);
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite the platform ID.
			 * We can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	mutex_init(&nor->lock);

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(): Xilinx S3AN flashes share their JEDEC
	 * manufacturer ID with Atmel SPI NOR flashes.
	 */
	if (info->flags & SPI_S3AN)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	/*
	 * Atmel, SST, Intel/Numonyx and other serial NOR chips tend to power
	 * up with the software protection bits set.
	 */
	if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(info) == SNOR_MFR_SST ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		write_enable(nor);
		write_sr(nor, 0);
		spi_nor_wait_till_ready(nor);
	}

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = info->sector_size * info->n_sectors;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;

	/* NOR protection support for STmicro/Micron chips and similar */
	if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		nor->flash_lock = stm_lock;
		nor->flash_unlock = stm_unlock;
		nor->flash_is_locked = stm_is_locked;
	}

	if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* SST NOR chips use AAI word program */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;
	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;

#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;
		mtd->erasesize = 4096;
	} else if (info->flags & SECT_4K_PMC) {
		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
		mtd->erasesize = 4096;
	} else
#endif
	{
		nor->erase_opcode = SPINOR_OP_SE;
		mtd->erasesize = info->sector_size;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = info->page_size;
	mtd->writebufsize = nor->page_size;

	if (np) {
		/* If we were instantiated by DT, use it */
		if (of_property_read_bool(np, "m25p,fast-read"))
			nor->flash_read = SPI_NOR_FAST;
		else
			nor->flash_read = SPI_NOR_NORMAL;
	} else {
		/* If we weren't instantiated by DT, default to fast-read */
		nor->flash_read = SPI_NOR_FAST;
	}

	/* Some devices cannot do fast-read, no matter what DT tells us */
	if (info->flags & SPI_NOR_NO_FR)
		nor->flash_read = SPI_NOR_NORMAL;

	/* Quad/Dual-read mode takes precedence over fast/normal */
	if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
		ret = set_quad_mode(nor, info);
		if (ret) {
			dev_err(dev, "quad mode not supported\n");
			return ret;
		}
		nor->flash_read = SPI_NOR_QUAD;
	} else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
		nor->flash_read = SPI_NOR_DUAL;
	}
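
	/*
	 * The read opcodes chosen below follow the usual SPINOR_OP_READ_x_y_z
	 * naming, where x, y and z are the number of I/O lines used for the
	 * command, address and data phases: e.g. SPINOR_OP_READ_1_1_4 sends
	 * the command and address on one line and reads data on four lines.
	 */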

	/* Default commands */
	switch (nor->flash_read) {
	case SPI_NOR_QUAD:
		nor->read_opcode = SPINOR_OP_READ_1_1_4;
		break;
	case SPI_NOR_DUAL:
		nor->read_opcode = SPINOR_OP_READ_1_1_2;
		break;
	case SPI_NOR_FAST:
		nor->read_opcode = SPINOR_OP_READ_FAST;
		break;
	case SPI_NOR_NORMAL:
		nor->read_opcode = SPINOR_OP_READ;
		break;
	default:
		dev_err(dev, "No Read opcode defined\n");
		return -EINVAL;
	}

	nor->program_opcode = SPINOR_OP_PP;

	if (info->addr_width)
		nor->addr_width = info->addr_width;
	else if (mtd->size > 0x1000000) {
		/*
		 * Enable 4-byte addressing if the device exceeds 16 MiB,
		 * the largest size reachable with a 3-byte address.
		 */
		nor->addr_width = 4;
		if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
		    info->flags & SPI_NOR_4B_OPCODES)
			spi_nor_set_4byte_opcodes(nor, info);
		else
			set_4byte(nor, info, 1);
	} else {
		nor->addr_width = 3;
	}

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_err(dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	nor->read_dummy = spi_nor_read_dummy_cycles(nor);

	if (info->flags & SPI_S3AN) {
		ret = s3an_nor_scan(info, nor);
		if (ret)
			return ret;
	}

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
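
/*
 * Typical use from a SPI flash controller driver, sketched here purely for
 * illustration: the surrounding probe function, the "priv" structure and
 * the my_hw_*() hooks are hypothetical and belong to no particular driver.
 *
 *	nor->dev = &pdev->dev;
 *	spi_nor_set_flash_node(nor, pdev->dev.of_node);
 *	nor->priv = priv;
 *
 *	nor->read_reg  = my_hw_read_reg;
 *	nor->write_reg = my_hw_write_reg;
 *	nor->read      = my_hw_read;
 *	nor->write     = my_hw_write;
 *
 *	ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
 *	if (ret)
 *		return ret;
 *
 *	return mtd_device_register(&nor->mtd, NULL, 0);
 */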

static const struct flash_info *spi_nor_match_id(const char *name)
{
	const struct flash_info *id = spi_nor_ids;

	while (id->name) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");