
Merge tag 'mtd/for-4.18' of git://git.infradead.org/linux-mtd

Pull MTD updates from Boris Brezillon:
 "Core changes:
   - Add a sysfs attribute to expose available OOB size

  Driver changes:
   - Remove HAS_DMA dependency on various drivers
   - Use dev_get_drvdata() instead of platform_get_drvdata() in docg3
   - Replace msleep() with usleep_range() in the dataflash driver
   - Avoid VLA usage in nftl layers
   - Remove useless .owner assignment in pismo
   - Fix various issues in the CFI driver
   - Improve TRX partition handling and expose a DT compatible for this
     partition parser
   - Clarify OFFSET_CONTINUOUS meaning

  NAND core changes:
   - Add Miquel as a NAND maintainer
   - Add access mode to the nand_page_io_req struct
   - Fix kernel-doc in rawnand.h
   - Support bit-wise majority to recover from corrupted ONFI parameter
     pages
   - Stop checking FAIL bit after a SET_FEATURES, as documented in the
     ONFI spec

  Raw NAND Driver changes:
   - Fix and cleanup the error path of many NAND controller drivers
   - GPMI:
      + Cleanup/simplification of a few aspects in the driver
      + Take ECC setup specified in the DT into account
   - sunxi: remove support for GPIO-based R/B polling
   - MTK:
      + Use of_device_get_match_data() instead of of_match_device()
      + Add an entry in MAINTAINERS for this driver
      + Fix nand-ecc-step-size and nand-ecc-strength description in the
        DT bindings doc
   - fsl_ifc: fix ->cmdfunc() to read more than one ONFI parameter page

  OneNAND driver changes:
   - samsung: use dev_get_drvdata() instead of platform_get_drvdata()

  SPI NOR core changes:
   - Add support for a bunch of SPI NOR chips
   - Clear EAR reg when switching to 3-byte addressing mode on Winbond
     chips

  SPI NOR controller driver changes:
   - cadence: Add DMA support for direct mode reads
   - hisi: Prefix a few functions with hisi_
   - intel:
      + Mark the driver as "dangerous" in Kconfig
      + Fix atomic sequence handling
      + Pass a 40us delay (instead of 0us) to readl_poll_timeout()
   - fsl:
      + fix a typo in a function name
      + add support for IP variants embedded in the ls2080a and ls1080a
        SoCs
   - stm32: request exclusive control of the reset line"

* tag 'mtd/for-4.18' of git://git.infradead.org/linux-mtd: (66 commits)
  mtd: nand: Pass mode information to nand_page_io_req
  mtd: cfi_cmdset_0002: Change erase one block to enable XIP once
  mtd: cfi_cmdset_0002: Change erase functions to check chip good only
  mtd: cfi_cmdset_0002: Change erase functions to retry for error
  mtd: cfi_cmdset_0002: Change definition naming to retry write operation
  mtd: cfi_cmdset_0002: Change write buffer to check correct value
  mtd: cmdlinepart: Update comment for introduction of OFFSET_CONTINUOUS
  mtd: bcm47xxpart: add of_match_table with a new DT binding
  dt-bindings: mtd: document Broadcom's BCM47xx partitions
  mtd: spi-nor: Add support for EN25QH32
  mtd: spi-nor: Add support for is25wp series chips
  mtd: spi-nor: Add Winbond w25q32jv support
  mtd: spi-nor: fsl-quadspi: add support for ls2080a/ls1080a
  mtd: spi-nor: stm32-quadspi: explicitly request exclusive reset control
  mtd: spi-nor: intel: provide a range for poll_timout
  mtd: spi-nor: fsl-quadspi: fix api naming typo _init_ahb_read
  mtd: spi-nor: intel-spi: Explicitly mark the driver as dangerous in Kconfig
  mtd: spi-nor: intel-spi: Fix atomic sequence handling
  mtd: rawnand: Do not check FAIL bit when executing a SET_FEATURES op
  mtd: rawnand: use bit-wise majority to recover the ONFI param page
  ...
Linus Torvalds
commit f4e70c2e5f
46 changed files with 773 additions and 536 deletions
  1. Documentation/ABI/testing/sysfs-class-mtd (+8 -0)
  2. Documentation/devicetree/bindings/mtd/gpmi-nand.txt (+5 -0)
  3. Documentation/devicetree/bindings/mtd/mtk-nand.txt (+19 -5)
  4. Documentation/devicetree/bindings/mtd/partition.txt (+1 -1)
  5. Documentation/devicetree/bindings/mtd/partitions/brcm,bcm947xx-cfe-partitions.txt (+42 -0)
  6. Documentation/devicetree/bindings/mtd/sunxi-nand.txt (+0 -2)
  7. MAINTAINERS (+8 -0)
  8. drivers/mtd/bcm47xxpart.c (+25 -4)
  9. drivers/mtd/chips/cfi_cmdset_0002.c (+36 -26)
 10. drivers/mtd/chips/cfi_probe.c (+42 -0)
 11. drivers/mtd/cmdlinepart.c (+4 -1)
 12. drivers/mtd/devices/docg3.c (+1 -2)
 13. drivers/mtd/devices/mtd_dataflash.c (+1 -1)
 14. drivers/mtd/inftlmount.c (+16 -7)
 15. drivers/mtd/maps/pismo.c (+0 -1)
 16. drivers/mtd/mtdcore.c (+14 -10)
 17. drivers/mtd/mtdcore.h (+0 -1)
 18. drivers/mtd/mtdpart.c (+16 -28)
 19. drivers/mtd/nand/onenand/samsung.c (+2 -4)
 20. drivers/mtd/nand/raw/Kconfig (+2 -6)
 21. drivers/mtd/nand/raw/davinci_nand.c (+1 -24)
 22. drivers/mtd/nand/raw/diskonchip.c (+2 -2)
 23. drivers/mtd/nand/raw/fsl_elbc_nand.c (+9 -4)
 24. drivers/mtd/nand/raw/fsl_ifc_nand.c (+19 -10)
 25. drivers/mtd/nand/raw/fsmc_nand.c (+15 -12)
 26. drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c (+31 -23)
 27. drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c (+59 -129)
 28. drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h (+4 -21)
 29. drivers/mtd/nand/raw/hisi504_nand.c (+12 -23)
 30. drivers/mtd/nand/raw/lpc32xx_mlc.c (+20 -18)
 31. drivers/mtd/nand/raw/lpc32xx_slc.c (+14 -12)
 32. drivers/mtd/nand/raw/mtk_ecc.c (+1 -6)
 33. drivers/mtd/nand/raw/mtk_nand.c (+1 -9)
 34. drivers/mtd/nand/raw/nand_base.c (+62 -27)
 35. drivers/mtd/nand/raw/sunxi_nand.c (+15 -76)
 36. drivers/mtd/nftlmount.c (+16 -7)
 37. drivers/mtd/spi-nor/Kconfig (+3 -3)
 38. drivers/mtd/spi-nor/cadence-quadspi.c (+94 -2)
 39. drivers/mtd/spi-nor/fsl-quadspi.c (+13 -2)
 40. drivers/mtd/spi-nor/hisi-sfc.c (+6 -6)
 41. drivers/mtd/spi-nor/intel-spi.c (+69 -11)
 42. drivers/mtd/spi-nor/spi-nor.c (+33 -0)
 43. drivers/mtd/spi-nor/stm32-quadspi.c (+1 -1)
 44. include/linux/mtd/nand.h (+3 -0)
 45. include/linux/mtd/rawnand.h (+26 -9)
 46. include/linux/mtd/spi-nor.h (+2 -0)

+ 8 - 0
Documentation/ABI/testing/sysfs-class-mtd

@@ -232,3 +232,11 @@ Description:
 		of the parent (another partition or a flash device) in bytes.
 		This attribute is absent on flash devices, so it can be used
 		to distinguish them from partitions.
+
+What:		/sys/class/mtd/mtdX/oobavail
+Date:		April 2018
+KernelVersion:	4.16
+Contact:	linux-mtd@lists.infradead.org
+Description:
+		Number of bytes available for a client to place data into
+		the out of band area.
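
A minimal user-space sketch of how the new attribute might be read (the mtd0
path and error handling are illustrative assumptions, not part of the patch):

/* Illustrative only: print the OOB bytes available to MTD clients. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/mtd/mtd0/oobavail", "r");
	unsigned int oobavail;

	if (!f) {
		perror("oobavail");
		return 1;
	}
	if (fscanf(f, "%u", &oobavail) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("mtd0: %u bytes of OOB available\n", oobavail);
	return 0;
}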

+ 5 - 0
Documentation/devicetree/bindings/mtd/gpmi-nand.txt

@@ -47,6 +47,11 @@ Optional properties:
                        partitions written from Linux with this feature
                        turned on may not be accessible by the BootROM
                        code.
+  - nand-ecc-strength: integer representing the number of bits to correct
+                       per ECC step. Needs to be a multiple of 2.
+  - nand-ecc-step-size: integer representing the number of data bytes
+                       that are covered by a single ECC step. The driver
+                       supports 512 and 1024.
 
 
 The device tree may optionally contain sub-nodes describing partitions of the
 address space. See partition.txt for more detail.

+ 19 - 5
Documentation/devicetree/bindings/mtd/mtk-nand.txt

@@ -48,14 +48,19 @@ Optional:
 - nand-on-flash-bbt:	Store BBT on NAND Flash.
 - nand-ecc-mode:	the NAND ecc mode (check driver for supported modes)
 - nand-ecc-step-size:	Number of data bytes covered by a single ECC step.
-			valid values: 512 and 1024.
+			valid values:
+			512 and 1024 on mt2701 and mt2712.
+			512 only on mt7622.
 			1024 is recommended for large page NANDs.
 - nand-ecc-strength:	Number of bits to correct per ECC step.
-			The valid values that the controller supports are: 4, 6,
-			8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44,
-			48, 52, 56, 60.
+			The valid values that each controller supports:
+			mt2701: 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28,
+				32, 36, 40, 44, 48, 52, 56, 60.
+			mt2712: 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28,
+				32, 36, 40, 44, 48, 52, 56, 60, 68, 72, 80.
+			mt7622: 4, 6, 8, 10, 12, 14, 16.
 			The strength should be calculated as follows:
-			E = (S - F) * 8 / 14
+			E = (S - F) * 8 / B
 			S = O / (P / Q)
 				E :	nand-ecc-strength.
 				S :	spare size per sector.
@@ -64,6 +69,15 @@ Optional:
 				O :	oob size.
 				P :	page size.
 				Q :	nand-ecc-step-size.
+				B :	number of parity bits needed to correct
+					1 bitflip.
+					According to MTK NAND controller design,
+					this number depends on max ecc step size
+					that MTK NAND controller supports.
+					If max ecc step size supported is 1024,
+					then it should be always 14. And if max
+					ecc step size is 512, then it should be
+					always 13.
 			If the result does not match any one of the listed
 			choices above, please select the smaller valid value from
 			the list.
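
As a worked illustration of the formula above, using assumed numbers that are
not taken from this patch (a 2048-byte page with 64 bytes of OOB,
nand-ecc-step-size = 512, F = 8, and B = 14 for a controller whose maximum
ECC step size is 1024):

#include <stdio.h>

int main(void)
{
	int O = 64, P = 2048, Q = 512, F = 8, B = 14;
	int S = O / (P / Q);		/* 64 / (2048 / 512) = 16 */
	int E = (S - F) * 8 / B;	/* (16 - 8) * 8 / 14 = 4  */

	printf("S = %d, E = %d -> nand-ecc-strength = 4\n", S, E);
	return 0;
}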

+ 1 - 1
Documentation/devicetree/bindings/mtd/partition.txt

@@ -14,7 +14,7 @@ method is used for a given flash device. To describe the method there should be
 a subnode of the flash device that is named 'partitions'. It must have a
 'compatible' property, which is used to identify the method to use.

-We currently only document a binding for fixed layouts.
+Available bindings are listed in the "partitions" subdirectory.


 Fixed Partitions

+ 42 - 0
Documentation/devicetree/bindings/mtd/partitions/brcm,bcm947xx-cfe-partitions.txt

@@ -0,0 +1,42 @@
+Broadcom BCM47xx Partitions
+===========================
+
+Broadcom is one of hardware manufacturers providing SoCs (BCM47xx) used in
+home routers. Their BCM947xx boards using CFE bootloader have several partitions
+without any on-flash partition table. On some devices their sizes and/or
+meanings can also vary so fixed partitioning can't be used.
+
+Discovering partitions on these devices is possible thanks to having a special
+header and/or magic signature at the beginning of each of them. They are also
+block aligned which is important for determinig a size.
+
+Most of partitions use ASCII text based magic for determining a type. More
+complex partitions (like TRX with its HDR0 magic) may include extra header
+containing some details, including a length.
+
+A list of supported partitions includes:
+1) Bootloader with Broadcom's CFE (Common Firmware Environment)
+2) NVRAM with configuration/calibration data
+3) Device manufacturer's data with some default values (e.g. SSIDs)
+4) TRX firmware container which can hold up to 4 subpartitions
+5) Backup TRX firmware used after failed upgrade
+
+As mentioned earlier, role of some partitions may depend on extra configuration.
+For example both: main firmware and backup firmware use the same TRX format with
+the same header. To distinguish currently used firmware a CFE's environment
+variable "bootpartition" is used.
+
+
+Devices using Broadcom partitions described above should should have flash node
+with a subnode named "partitions" using following properties:
+
+Required properties:
+- compatible : (required) must be "brcm,bcm947xx-cfe-partitions"
+
+Example:
+
+flash@0 {
+	partitions {
+		compatible = "brcm,bcm947xx-cfe-partitions";
+	};
+};
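
A hedged sketch of the TRX handling described above (and refined by the
bcm47xxpart.c change later in this merge): recognise the partition by its
"HDR0" magic and bound its size by the highest subpartition offset, since the
length field may cover only part of the data. The struct layout, field names
and magic value here are assumptions for illustration, not the kernel's
definitions.

#include <stdint.h>

#define TRX_MAGIC_ASSUMED	0x30524448	/* "HDR0" read as a LE 32-bit word */

struct trx_header_sketch {
	uint32_t magic;
	uint32_t length;
	uint32_t crc32;
	uint32_t flag_version;
	uint32_t offset[3];	/* offsets of up to 3 subpartitions */
};

static uint32_t trx_size_sketch(const struct trx_header_sketch *trx,
				uint32_t blocksize)
{
	uint32_t last = trx->offset[0];

	if (trx->offset[1] > last)
		last = trx->offset[1];
	if (trx->offset[2] > last)
		last = trx->offset[2];

	/* Trust whichever is larger: the declared length or the start of
	 * the last subpartition plus one erase block. */
	return trx->length > last + blocksize ? trx->length : last + blocksize;
}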

+ 0 - 2
Documentation/devicetree/bindings/mtd/sunxi-nand.txt

@@ -22,8 +22,6 @@ Optional properties:
 - reset : phandle + reset specifier pair
 - reset-names : must contain "ahb"
 - allwinner,rb : shall contain the native Ready/Busy ids.
- or
-- rb-gpios : shall contain the gpios used as R/B pins.
 - nand-ecc-mode : one of the supported ECC modes ("hw", "soft", "soft_bch" or
 		  "none")


+ 8 - 0
MAINTAINERS

@@ -9022,6 +9022,13 @@ L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/mediatek/mt7601u/

+MEDIATEK NAND CONTROLLER DRIVER
+M:	Xiaolei Li <xiaolei.li@mediatek.com>
+L:	linux-mtd@lists.infradead.org
+S:	Maintained
+F:	drivers/mtd/nand/raw/mtk_*
+F:	Documentation/devicetree/bindings/mtd/mtk-nand.txt
+
 MEDIATEK RANDOM NUMBER GENERATOR SUPPORT
 M:	Sean Wang <sean.wang@mediatek.com>
 S:	Maintained
@@ -9666,6 +9673,7 @@ F:	drivers/net/ethernet/myricom/myri10ge/

 NAND FLASH SUBSYSTEM
 M:	Boris Brezillon <boris.brezillon@bootlin.com>
+M:	Miquel Raynal <miquel.raynal@bootlin.com>
 R:	Richard Weinberger <richard@nod.at>
 L:	linux-mtd@lists.infradead.org
 W:	http://www.linux-mtd.infradead.org/

+ 25 - 4
drivers/mtd/bcm47xxpart.c

@@ -186,6 +186,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 		/* TRX */
 		if (buf[0x000 / 4] == TRX_MAGIC) {
 			struct trx_header *trx;
+			uint32_t last_subpart;
+			uint32_t trx_size;

 			if (trx_num >= ARRAY_SIZE(trx_parts))
 				pr_warn("No enough space to store another TRX found at 0x%X\n",
@@ -195,11 +197,23 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 			bcm47xxpart_add_part(&parts[curr_part++], "firmware",
 					     offset, 0);

-			/* Jump to the end of TRX */
+			/*
+			 * Try to find TRX size. The "length" field isn't fully
+			 * reliable as it could be decreased to make CRC32 cover
+			 * only part of TRX data. It's commonly used as checksum
+			 * can't cover e.g. ever-changing rootfs partition.
+			 * Use offsets as helpers for assuming min TRX size.
+			 */
 			trx = (struct trx_header *)buf;
-			offset = roundup(offset + trx->length, blocksize);
-			/* Next loop iteration will increase the offset */
-			offset -= blocksize;
+			last_subpart = max3(trx->offset[0], trx->offset[1],
+					    trx->offset[2]);
+			trx_size = max(trx->length, last_subpart + blocksize);
+
+			/*
+			 * Skip the TRX data. Decrease offset by block size as
+			 * the next loop iteration will increase it.
+			 */
+			offset += roundup(trx_size, blocksize) - blocksize;
 			continue;
 		}

@@ -290,9 +304,16 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 	return curr_part;
 };

+static const struct of_device_id bcm47xxpart_of_match_table[] = {
+	{ .compatible = "brcm,bcm947xx-cfe-partitions" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm47xxpart_of_match_table);
+
 static struct mtd_part_parser bcm47xxpart_mtd_parser = {
 	.parse_fn = bcm47xxpart_parse,
 	.name = "bcm47xxpart",
+	.of_match_table = bcm47xxpart_of_match_table,
 };
 module_mtd_part_parser(bcm47xxpart_mtd_parser);


+ 36 - 26
drivers/mtd/chips/cfi_cmdset_0002.c

@@ -42,10 +42,10 @@
 #define AMD_BOOTLOC_BUG
 #define FORCE_WORD_WRITE 0

-#define MAX_WORD_RETRIES 3
+#define MAX_RETRIES 3

-#define SST49LF004B	        0x0060
-#define SST49LF040B	        0x0050
+#define SST49LF004B		0x0060
+#define SST49LF040B		0x0050
 #define SST49LF008A		0x005a
 #define AT49BV6416		0x00d6

@@ -207,7 +207,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	if (cfi->cfiq->BufWriteTimeoutTyp) {
-		pr_debug("Using buffer write method\n" );
+		pr_debug("Using buffer write method\n");
 		mtd->_write = cfi_amdstd_write_buffers;
 	}
 }
@@ -1563,7 +1563,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 	 * depending of the conditions.	 The ' + 1' is to avoid having a
 	 * timeout of 0 jiffies if HZ is smaller than 1000.
 	 */
-	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
+	unsigned long uWriteTimeout = (HZ / 1000) + 1;
 	int ret = 0;
 	map_word oldd;
 	int retry_cnt = 0;
@@ -1578,7 +1578,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 	}

 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
-	       __func__, adr, datum.x[0] );
+		 __func__, adr, datum.x[0]);

 	if (mode == FL_OTP_WRITE)
 		otp_enter(map, chip, adr, map_bankwidth(map));
@@ -1644,10 +1644,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 	/* Did we succeed? */
 	if (!chip_good(map, adr, datum)) {
 		/* reset on all failures. */
-		map_write( map, CMD(0xF0), chip->start );
+		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */

-		if (++retry_cnt <= MAX_WORD_RETRIES)
+		if (++retry_cnt <= MAX_RETRIES)
 			goto retry;

 		ret = -EIO;
@@ -1822,7 +1822,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	datum = map_word_load(map, buf);

 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
-	       __func__, adr, datum.x[0] );
+		 __func__, adr, datum.x[0]);

 	XIP_INVAL_CACHED_RANGE(map, adr, len);
 	ENABLE_VPP(map);
@@ -1880,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
 			break;

-		if (chip_ready(map, adr)) {
+		if (chip_good(map, adr, datum)) {
 			xip_enable(map, chip, adr);
 			goto op_done;
 		}
@@ -2106,7 +2106,7 @@ retry:
 		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */

-		if (++retry_cnt <= MAX_WORD_RETRIES)
+		if (++retry_cnt <= MAX_RETRIES)
 			goto retry;

 		ret = -EIO;
@@ -2241,6 +2241,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 	unsigned long int adr;
 	DECLARE_WAITQUEUE(wait, current);
 	int ret = 0;
+	int retry_cnt = 0;

 	adr = cfi->addr_unlock1;

@@ -2252,12 +2253,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 	}

 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
-	       __func__, chip->start );
+	       __func__, chip->start);

 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
 	ENABLE_VPP(map);
 	xip_disable(map, chip, adr);

+ retry:
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2294,12 +2296,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 			chip->erase_suspended = 0;
 		}

-		if (chip_ready(map, adr))
+		if (chip_good(map, adr, map_word_ff(map)))
 			break;

 		if (time_after(jiffies, timeo)) {
 			printk(KERN_WARNING "MTD %s(): software timeout\n",
-				__func__ );
+			       __func__);
+			ret = -EIO;
 			break;
 		}

@@ -2307,12 +2310,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 		UDELAY(map, chip, adr, 1000000/HZ);
 	}
 	/* Did we succeed? */
-	if (!chip_good(map, adr, map_word_ff(map))) {
+	if (ret) {
 		/* reset on all failures. */
-		map_write( map, CMD(0xF0), chip->start );
+		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */

-		ret = -EIO;
+		if (++retry_cnt <= MAX_RETRIES) {
+			ret = 0;
+			goto retry;
+		}
 	}

 	chip->state = FL_READY;
@@ -2331,6 +2337,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 	unsigned long timeo = jiffies + HZ;
 	DECLARE_WAITQUEUE(wait, current);
 	int ret = 0;
+	int retry_cnt = 0;

 	adr += chip->start;

@@ -2342,12 +2349,13 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 	}

 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
-	       __func__, adr );
+		 __func__, adr);

 	XIP_INVAL_CACHED_RANGE(map, adr, len);
 	ENABLE_VPP(map);
 	xip_disable(map, chip, adr);

+ retry:
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2384,15 +2392,13 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 			chip->erase_suspended = 0;
 		}

-		if (chip_ready(map, adr)) {
-			xip_enable(map, chip, adr);
+		if (chip_good(map, adr, map_word_ff(map)))
 			break;
-		}

 		if (time_after(jiffies, timeo)) {
-			xip_enable(map, chip, adr);
 			printk(KERN_WARNING "MTD %s(): software timeout\n",
-				__func__ );
+			       __func__);
+			ret = -EIO;
 			break;
 		}

@@ -2400,15 +2406,19 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 		UDELAY(map, chip, adr, 1000000/HZ);
 	}
 	/* Did we succeed? */
-	if (!chip_good(map, adr, map_word_ff(map))) {
+	if (ret) {
 		/* reset on all failures. */
-		map_write( map, CMD(0xF0), chip->start );
+		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */

-		ret = -EIO;
+		if (++retry_cnt <= MAX_RETRIES) {
+			ret = 0;
+			goto retry;
+		}
 	}

 	chip->state = FL_READY;
+	xip_enable(map, chip, adr);
 	DISABLE_VPP(map);
 	put_chip(map, chip, adr);
 	mutex_unlock(&chip->mutex);

+ 42 - 0
drivers/mtd/chips/cfi_probe.c

@@ -63,6 +63,30 @@ do { \

 #endif

+/*
+ * This fixup occurs immediately after reading the CFI structure and can affect
+ * the number of chips detected, unlike cfi_fixup, which occurs after an
+ * mtd_info structure has been created for the chip.
+ */
+struct cfi_early_fixup {
+	uint16_t mfr;
+	uint16_t id;
+	void (*fixup)(struct cfi_private *cfi);
+};
+
+static void cfi_early_fixup(struct cfi_private *cfi,
+			    const struct cfi_early_fixup *fixups)
+{
+	const struct cfi_early_fixup *f;
+
+	for (f = fixups; f->fixup; f++) {
+		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
+		    ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
+			f->fixup(cfi);
+		}
+	}
+}
+
 /* check for QRY.
    in: interleave,type,mode
    ret: table index, <0 for error
@@ -151,6 +175,22 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
 	return 1;
 }

+static void fixup_s70gl02gs_chips(struct cfi_private *cfi)
+{
+	/*
+	 * S70GL02GS flash reports a single 256 MiB chip, but is really made up
+	 * of two 128 MiB chips with 1024 sectors each.
+	 */
+	cfi->cfiq->DevSize = 27;
+	cfi->cfiq->EraseRegionInfo[0] = 0x20003ff;
+	pr_warn("Bad S70GL02GS CFI data; adjust to detect 2 chips\n");
+}
+
+static const struct cfi_early_fixup cfi_early_fixup_table[] = {
+	{ CFI_MFR_AMD, 0x4801, fixup_s70gl02gs_chips },
+	{ },
+};
+
 static int __xipram cfi_chip_setup(struct map_info *map,
 				   struct cfi_private *cfi)
 {
@@ -235,6 +275,8 @@ static int __xipram cfi_chip_setup(struct map_info *map,
 	cfi_qry_mode_off(base, map, cfi);
 	xip_allowed(base, map);

+	cfi_early_fixup(cfi, cfi_early_fixup_table);
+
 	printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. Manufacturer ID %#08x Chip ID %#08x\n",
 	       map->name, cfi->interleave, cfi->device_type*8, base,
 	       map->bankwidth*8, cfi->mfr, cfi->id);

+ 4 - 1
drivers/mtd/cmdlinepart.c

@@ -190,7 +190,10 @@ static struct mtd_partition * newpart(char *s,
 		extra_mem = (unsigned char *)(parts + *num_parts);
 	}

-	/* enter this partition (offset will be calculated later if it is zero at this point) */
+	/*
+	 * enter this partition (offset will be calculated later if it is
+	 * OFFSET_CONTINUOUS at this point)
+	 */
 	parts[this_part].size = size;
 	parts[this_part].offset = offset;
 	parts[this_part].mask_flags = mask_flags;

+ 1 - 2
drivers/mtd/devices/docg3.c

@@ -1470,8 +1470,7 @@ static struct docg3 *sysfs_dev2docg3(struct device *dev,
 				     struct device_attribute *attr)
 {
 	int floor;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+	struct mtd_info **docg3_floors = dev_get_drvdata(dev);

 	floor = attr->attr.name[1] - '0';
 	if (floor < 0 || floor >= DOC_MAX_NBFLOORS)

+ 1 - 1
drivers/mtd/devices/mtd_dataflash.c

@@ -140,7 +140,7 @@ static int dataflash_waitready(struct spi_device *spi)
 		if (status & (1 << 7))	/* RDY/nBSY */
 			return status;

-		msleep(3);
+		usleep_range(3000, 4000);
 	}
 }


+ 16 - 7
drivers/mtd/inftlmount.c

@@ -334,28 +334,37 @@ static int memcmpb(void *a, int c, int n)
 static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
 	int len, int check_oob)
 {
-	u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
 	struct mtd_info *mtd = inftl->mbd.mtd;
 	size_t retlen;
-	int i;
+	int i, ret;
+	u8 *buf;
+
+	buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
+	if (!buf)
+		return -1;

+	ret = -1;
 	for (i = 0; i < len; i += SECTORSIZE) {
 		if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
-			return -1;
+			goto out;
 		if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
-			return -1;
+			goto out;

 		if (check_oob) {
 			if(inftl_read_oob(mtd, address, mtd->oobsize,
 					  &retlen, &buf[SECTORSIZE]) < 0)
-				return -1;
+				goto out;
 			if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
-				return -1;
+				goto out;
 		}
 		address += SECTORSIZE;
 	}

-	return 0;
+	ret = 0;
+
+out:
+	kfree(buf);
+	return ret;
 }

 /*

+ 0 - 1
drivers/mtd/maps/pismo.c

@@ -265,7 +265,6 @@ MODULE_DEVICE_TABLE(i2c, pismo_id);
 static struct i2c_driver pismo_driver = {
 	.driver	= {
 		.name	= "pismo",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= pismo_probe,
 	.remove		= pismo_remove,

+ 14 - 10
drivers/mtd/mtdcore.c

@@ -210,6 +210,15 @@ static ssize_t mtd_oobsize_show(struct device *dev,
 }
 static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

+static ssize_t mtd_oobavail_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
+}
+static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
+
 static ssize_t mtd_numeraseregions_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -327,6 +336,7 @@ static struct attribute *mtd_attrs[] = {
 	&dev_attr_writesize.attr,
 	&dev_attr_subpagesize.attr,
 	&dev_attr_oobsize.attr,
+	&dev_attr_oobavail.attr,
 	&dev_attr_numeraseregions.attr,
 	&dev_attr_name.attr,
 	&dev_attr_ecc_strength.attr,
@@ -690,7 +700,6 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
 			      const struct mtd_partition *parts,
 			      int nr_parts)
 {
-	struct mtd_partitions parsed = { };
 	int ret;

 	mtd_set_dev_defaults(mtd);
@@ -702,13 +711,10 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
 	}

 	/* Prefer parsed partitions over driver-provided fallback */
-	ret = parse_mtd_partitions(mtd, types, &parsed, parser_data);
-	if (!ret && parsed.nr_parts) {
-		parts = parsed.parts;
-		nr_parts = parsed.nr_parts;
-	}
-
-	if (nr_parts)
+	ret = parse_mtd_partitions(mtd, types, parser_data);
+	if (ret > 0)
+		ret = 0;
+	else if (nr_parts)
 		ret = add_mtd_partitions(mtd, parts, nr_parts);
 	else if (!device_is_registered(&mtd->dev))
 		ret = add_mtd_device(mtd);
@@ -734,8 +740,6 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
 	}

 out:
-	/* Cleanup any parsed partitions */
-	mtd_part_parser_cleanup(&parsed);
 	if (ret && device_is_registered(&mtd->dev))
 		del_mtd_device(mtd);


+ 0 - 1
drivers/mtd/mtdcore.h

@@ -15,7 +15,6 @@ int del_mtd_partitions(struct mtd_info *);
 struct mtd_partitions;

 int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
-			 struct mtd_partitions *pparts,
 			 struct mtd_part_parser_data *data);

 void mtd_part_parser_cleanup(struct mtd_partitions *parts);

+ 16 - 28
drivers/mtd/mtdpart.c

@@ -335,20 +335,7 @@ static inline void free_partition(struct mtd_part *p)
  */
 static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
 {
-	struct mtd_partitions parsed;
-	int err;
-
-	err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL);
-	if (err)
-		return err;
-	else if (!parsed.nr_parts)
-		return -ENOENT;
-
-	err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts);
-
-	mtd_part_parser_cleanup(&parsed);
-
-	return err;
+	return parse_mtd_partitions(&slave->mtd, types, NULL);
 }

 static struct mtd_part *allocate_partition(struct mtd_info *parent,
@@ -933,30 +920,27 @@ static int mtd_part_of_parse(struct mtd_info *master,
 }

 /**
- * parse_mtd_partitions - parse MTD partitions
+ * parse_mtd_partitions - parse and register MTD partitions
+ *
  * @master: the master partition (describes whole MTD device)
  * @types: names of partition parsers to try or %NULL
- * @pparts: info about partitions found is returned here
  * @data: MTD partition parser-specific data
  *
- * This function tries to find partition on MTD device @master. It uses MTD
- * partition parsers, specified in @types. However, if @types is %NULL, then
- * the default list of parsers is used. The default list contains only the
+ * This function tries to find & register partitions on MTD device @master. It
+ * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
+ * then the default list of parsers is used. The default list contains only the
  * "cmdlinepart" and "ofpart" parsers ATM.
  * Note: If there are more then one parser in @types, the kernel only takes the
  * partitions parsed out by the first parser.
  *
  * This function may return:
  * o a negative error code in case of failure
- * o zero otherwise, and @pparts will describe the partitions, number of
- *   partitions, and the parser which parsed them. Caller must release
- *   resources with mtd_part_parser_cleanup() when finished with the returned
- *   data.
+ * o number of found partitions otherwise
  */
 int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
-			 struct mtd_partitions *pparts,
 			 struct mtd_part_parser_data *data)
 {
+	struct mtd_partitions pparts = { };
 	struct mtd_part_parser *parser;
 	int ret, err = 0;

@@ -970,7 +954,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
 		 * handled in a separated function.
 		 */
 		if (!strcmp(*types, "ofpart")) {
-			ret = mtd_part_of_parse(master, pparts);
+			ret = mtd_part_of_parse(master, &pparts);
 		} else {
 			pr_debug("%s: parsing partitions %s\n", master->name,
 				 *types);
@@ -981,13 +965,17 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
 				parser ? parser->name : NULL);
 			if (!parser)
 				continue;
-			ret = mtd_part_do_parse(parser, master, pparts, data);
+			ret = mtd_part_do_parse(parser, master, &pparts, data);
 			if (ret <= 0)
 				mtd_part_parser_put(parser);
 		}
 		/* Found partitions! */
-		if (ret > 0)
-			return 0;
+		if (ret > 0) {
+			err = add_mtd_partitions(master, pparts.parts,
+						 pparts.nr_parts);
+			mtd_part_parser_cleanup(&pparts);
+			return err ? err : pparts.nr_parts;
+		}
 		/*
 		 * Stash the first error we see; only report it if no parser
 		 * succeeds

+ 2 - 4
drivers/mtd/nand/onenand/samsung.c

@@ -958,8 +958,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)

 static int s3c_pm_ops_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = dev_get_drvdata(dev);
 	struct onenand_chip *this = mtd->priv;

 	this->wait(mtd, FL_PM_SUSPENDED);
@@ -968,8 +967,7 @@ static int s3c_pm_ops_suspend(struct device *dev)

 static  int s3c_pm_ops_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = dev_get_drvdata(dev);
 	struct onenand_chip *this = mtd->priv;

 	this->unlock_all(mtd);

+ 2 - 6
drivers/mtd/nand/raw/Kconfig

@@ -46,7 +46,7 @@ config MTD_NAND_DENALI
 config MTD_NAND_DENALI_PCI
         tristate "Support Denali NAND controller on Intel Moorestown"
 	select MTD_NAND_DENALI
-	depends on HAS_DMA && PCI
+	depends on PCI
         help
           Enable the driver for NAND flash on Intel Moorestown, using the
           Denali NAND controller core.
@@ -152,7 +152,6 @@ config MTD_NAND_S3C2410_CLKSTOP
 config MTD_NAND_TANGO
 	tristate "NAND Flash support for Tango chips"
 	depends on ARCH_TANGO || COMPILE_TEST
-	depends on HAS_DMA
 	help
 	  Enables the NAND Flash controller on Tango chips.

@@ -285,7 +284,7 @@ config MTD_NAND_MARVELL
 	tristate "NAND controller support on Marvell boards"
 	depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
 		   COMPILE_TEST
-	depends on HAS_IOMEM && HAS_DMA
+	depends on HAS_IOMEM
 	help
 	  This enables the NAND flash controller driver for Marvell boards,
 	  including:
@@ -447,7 +446,6 @@ config MTD_NAND_SH_FLCTL
 	tristate "Support for NAND on Renesas SuperH FLCTL"
 	depends on SUPERH || COMPILE_TEST
 	depends on HAS_IOMEM
-	depends on HAS_DMA
 	help
 	  Several Renesas SuperH CPU has FLCTL. This option enables support
 	  for NAND Flash using FLCTL.
@@ -515,7 +513,6 @@ config MTD_NAND_SUNXI
 config MTD_NAND_HISI504
 	tristate "Support for NAND controller on Hisilicon SoC Hip04"
 	depends on ARCH_HISI || COMPILE_TEST
-	depends on HAS_DMA
 	help
 	  Enables support for NAND controller on Hisilicon SoC Hip04.

@@ -529,7 +526,6 @@ config MTD_NAND_QCOM
 config MTD_NAND_MTK
 	tristate "Support for NAND controller on MTK SoCs"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
-	depends on HAS_DMA
 	help
 	  Enables support for NAND controller on MTK SoCs.
 	  This controller is found on mt27xx, mt81xx, mt65xx SoCs.

+ 1 - 24
drivers/mtd/nand/raw/davinci_nand.c

@@ -27,7 +27,6 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
-#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
@@ -55,7 +54,6 @@ struct davinci_nand_info {
 	struct nand_chip	chip;

 	struct device		*dev;
-	struct clk		*clk;

 	bool			is_readmode;

@@ -703,22 +701,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
 	/* Use board-specific ECC config */
 	info->chip.ecc.mode	= pdata->ecc_mode;

-	ret = -EINVAL;
-
-	info->clk = devm_clk_get(&pdev->dev, "aemif");
-	if (IS_ERR(info->clk)) {
-		ret = PTR_ERR(info->clk);
-		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
-		return ret;
-	}
-
-	ret = clk_prepare_enable(info->clk);
-	if (ret < 0) {
-		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
-			ret);
-		goto err_clk_enable;
-	}
-
 	spin_lock_irq(&davinci_nand_lock);

 	/* put CSxNAND into NAND mode */
@@ -732,7 +714,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
 	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
 	if (ret < 0) {
 		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
-		goto err;
+		return ret;
 	}

 	switch (info->chip.ecc.mode) {
@@ -838,9 +820,6 @@ err_cleanup_nand:
 	nand_cleanup(&info->chip);

 err:
-	clk_disable_unprepare(info->clk);
-
-err_clk_enable:
 	spin_lock_irq(&davinci_nand_lock);
 	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
 		ecc4_busy = false;
@@ -859,8 +838,6 @@ static int nand_davinci_remove(struct platform_device *pdev)

 	nand_release(nand_to_mtd(&info->chip));

-	clk_disable_unprepare(info->clk);
-
 	return 0;
 }


+ 2 - 2
drivers/mtd/nand/raw/diskonchip.c

@@ -1480,12 +1480,12 @@ static int __init doc_probe(unsigned long physadr)
 		WriteDOC(tmp, virtadr, Mplus_DOCControl);
 		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);

-		mdelay(1);
+		usleep_range(1000, 2000);
 		/* Enable the Millennium Plus ASIC */
 		tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
 		WriteDOC(tmp, virtadr, Mplus_DOCControl);
 		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
-		mdelay(1);
+		usleep_range(1000, 2000);

 		ChipID = ReadDOC(virtadr, ChipID);


+ 9 - 4
drivers/mtd/nand/raw/fsl_elbc_nand.c

@@ -813,8 +813,6 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
 	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
 	struct mtd_info *mtd = nand_to_mtd(&priv->chip);

-	nand_release(mtd);
-
 	kfree(mtd->name);

 	if (priv->vbase)
@@ -926,15 +924,20 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)

 	/* First look for RedBoot table or partitions on the command
 	 * line, these take precedence over device tree information */
-	mtd_device_parse_register(mtd, part_probe_types, NULL,
-				  NULL, 0);
+	ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+	if (ret)
+		goto cleanup_nand;

 	pr_info("eLBC NAND device at 0x%llx, bank %d\n",
 		(unsigned long long)res.start, priv->bank);
+
 	return 0;

+cleanup_nand:
+	nand_cleanup(&priv->chip);
 err:
 	fsl_elbc_chip_remove(priv);
+
 	return ret;
 }

@@ -942,7 +945,9 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev)
 {
 	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
 	struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+	struct mtd_info *mtd = nand_to_mtd(&priv->chip);

+	nand_release(mtd);
 	fsl_elbc_chip_remove(priv);

 	mutex_lock(&fsl_elbc_nand_mutex);

+ 19 - 10
drivers/mtd/nand/raw/fsl_ifc_nand.c

@@ -342,9 +342,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,

 	case NAND_CMD_READID:
 	case NAND_CMD_PARAM: {
+		/*
+		 * For READID, read 8 bytes that are currently used.
+		 * For PARAM, read all 3 copies of 256-bytes pages.
+		 */
+		int len = 8;
 		int timing = IFC_FIR_OP_RB;
-		if (command == NAND_CMD_PARAM)
+		if (command == NAND_CMD_PARAM) {
 			timing = IFC_FIR_OP_RBCD;
+			len = 256 * 3;
+		}

 		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
 			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
@@ -354,12 +361,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
 			  &ifc->ifc_nand.nand_fcr0);
 			  &ifc->ifc_nand.nand_fcr0);
 		ifc_out32(column, &ifc->ifc_nand.row3);

-		/*
-		 * although currently it's 8 bytes for READID, we always read
-		 * the maximum 256 bytes(for PARAM)
-		 */
-		ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
-		ifc_nand_ctrl->read_bytes = 256;
+		ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
+		ifc_nand_ctrl->read_bytes = len;

 		set_addr(mtd, 0, 0, 0);
 		fsl_ifc_run_command(mtd);
@@ -924,8 +927,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
 {
 	struct mtd_info *mtd = nand_to_mtd(&priv->chip);

-	nand_release(mtd);
-
 	kfree(mtd->name);

 	if (priv->vbase)
@@ -1059,21 +1060,29 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)

 	/* First look for RedBoot table or partitions on the command
 	 * line, these take precedence over device tree information */
-	mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+	ret = mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+	if (ret)
+		goto cleanup_nand;

 	dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
 		 (unsigned long long)res.start, priv->bank);
+
 	return 0;

+cleanup_nand:
+	nand_cleanup(&priv->chip);
 err:
 	fsl_ifc_chip_remove(priv);
+
 	return ret;
 }

 static int fsl_ifc_nand_remove(struct platform_device *dev)
 {
 	struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+	struct mtd_info *mtd = nand_to_mtd(&priv->chip);

+	nand_release(mtd);
 	fsl_ifc_chip_remove(priv);

 	mutex_lock(&fsl_ifc_nand_mutex);
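
The larger PARAM read above goes hand in hand with the "bit-wise majority"
recovery mentioned in the merge summary: with all three copies of the ONFI
parameter page available, a corrupted page can be reconstructed bit by bit.
The helper below is a generic sketch of that idea, not the kernel's exact
implementation:

#include <stddef.h>
#include <stdint.h>

/* Rebuild one parameter page from ncopies candidate copies: each output bit
 * takes the value seen in the majority of the copies. */
static void bitwise_majority(const uint8_t *const copies[], int ncopies,
			     uint8_t *out, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		uint8_t byte = 0;

		for (int bit = 0; bit < 8; bit++) {
			int votes = 0;

			for (int c = 0; c < ncopies; c++)
				if (copies[c][i] & (1 << bit))
					votes++;

			if (votes > ncopies / 2)
				byte |= 1 << bit;
		}
		out[i] = byte;
	}
}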

+ 15 - 12
drivers/mtd/nand/raw/fsmc_nand.c

@@ -1022,12 +1022,12 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 		host->read_dma_chan = dma_request_channel(mask, filter, NULL);
 		if (!host->read_dma_chan) {
 			dev_err(&pdev->dev, "Unable to get read dma channel\n");
-			goto err_req_read_chnl;
+			goto disable_clk;
 		}
 		host->write_dma_chan = dma_request_channel(mask, filter, NULL);
 		if (!host->write_dma_chan) {
 			dev_err(&pdev->dev, "Unable to get write dma channel\n");
-			goto err_req_write_chnl;
+			goto release_dma_read_chan;
 		}
 	}

@@ -1050,7 +1050,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 	ret = nand_scan_ident(mtd, 1, NULL);
 	if (ret) {
 		dev_err(&pdev->dev, "No NAND Device found!\n");
-		goto err_scan_ident;
+		goto release_dma_write_chan;
 	}

 	if (AMBA_REV_BITS(host->pid) >= 8) {
@@ -1065,7 +1065,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 			dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
 				 mtd->oobsize);
 			ret = -EINVAL;
-			goto err_probe;
+			goto release_dma_write_chan;
 		}

 		mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
@@ -1090,7 +1090,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 
 
 		default:
 			dev_err(&pdev->dev, "Unsupported ECC mode!\n");
-			goto err_probe;
+			goto release_dma_write_chan;
 		}

 		/*
@@ -1110,7 +1110,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 					 "No oob scheme defined for oobsize %d\n",
 					 "No oob scheme defined for oobsize %d\n",
 					 mtd->oobsize);
 				ret = -EINVAL;
-				goto err_probe;
+				goto release_dma_write_chan;
 			}
 		}
 	}
@@ -1118,26 +1118,29 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 	/* Second stage of scan to fill MTD data-structures */
 	ret = nand_scan_tail(mtd);
 	if (ret)
-		goto err_probe;
+		goto release_dma_write_chan;

 	mtd->name = "nand";
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret)
-		goto err_probe;
+		goto cleanup_nand;

 	platform_set_drvdata(pdev, host);
 	dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+
 	return 0;

-err_probe:
-err_scan_ident:
+cleanup_nand:
+	nand_cleanup(nand);
+release_dma_write_chan:
 	if (host->mode == USE_DMA_ACCESS)
 		dma_release_channel(host->write_dma_chan);
-err_req_write_chnl:
+release_dma_read_chan:
 	if (host->mode == USE_DMA_ACCESS)
 		dma_release_channel(host->read_dma_chan);
-err_req_read_chnl:
+disable_clk:
 	clk_disable_unprepare(host->clk);
+
 	return ret;
 }


+ 31 - 23
drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c

@@ -258,8 +258,9 @@ int bch_set_geometry(struct gpmi_nand_data *this)
 	unsigned int gf_len;
 	int ret;

-	if (common_nfc_set_geometry(this))
-		return !0;
+	ret = common_nfc_set_geometry(this);
+	if (ret)
+		return ret;

 	block_count   = bch_geo->ecc_chunk_count - 1;
 	block_size    = bch_geo->ecc_chunk_size;
@@ -544,19 +545,13 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
 	return reg & mask;
 }

-static inline void set_dma_type(struct gpmi_nand_data *this,
-					enum dma_ops_type type)
-{
-	this->last_dma_type = this->dma_type;
-	this->dma_type = type;
-}
-
 int gpmi_send_command(struct gpmi_nand_data *this)
 {
 	struct dma_chan *channel = get_dma_chan(this);
 	struct dma_async_tx_descriptor *desc;
 	struct scatterlist *sgl;
 	int chip = this->current_chip;
+	int ret;
 	u32 pio[3];
 	u32 pio[3];
 
 
 	/* [1] send out the PIO words */
 	/* [1] send out the PIO words */
@@ -586,15 +581,19 @@ int gpmi_send_command(struct gpmi_nand_data *this)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	/* [3] submit the DMA */
 	/* [3] submit the DMA */
-	set_dma_type(this, DMA_FOR_COMMAND);
-	return start_dma_without_bch_irq(this, desc);
+	ret = start_dma_without_bch_irq(this, desc);
+
+	dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+
+	return ret;
 }
 }
 
 
-int gpmi_send_data(struct gpmi_nand_data *this)
+int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len)
 {
 {
 	struct dma_async_tx_descriptor *desc;
 	struct dma_async_tx_descriptor *desc;
 	struct dma_chan *channel = get_dma_chan(this);
 	struct dma_chan *channel = get_dma_chan(this);
 	int chip = this->current_chip;
 	int chip = this->current_chip;
+	int ret;
 	uint32_t command_mode;
 	uint32_t command_mode;
 	uint32_t address;
 	uint32_t address;
 	u32 pio[2];
 	u32 pio[2];
@@ -608,7 +607,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
 		| BF_GPMI_CTRL0_CS(chip, this)
 		| BF_GPMI_CTRL0_CS(chip, this)
 		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
 		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
 		| BF_GPMI_CTRL0_ADDRESS(address)
 		| BF_GPMI_CTRL0_ADDRESS(address)
-		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+		| BF_GPMI_CTRL0_XFER_COUNT(len);
 	pio[1] = 0;
 	pio[1] = 0;
 	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
 	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
 					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
 					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
@@ -616,7 +615,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	/* [2] send DMA request */
 	/* [2] send DMA request */
-	prepare_data_dma(this, DMA_TO_DEVICE);
+	prepare_data_dma(this, buf, len, DMA_TO_DEVICE);
 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
 					1, DMA_MEM_TO_DEV,
 					1, DMA_MEM_TO_DEV,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -624,16 +623,21 @@ int gpmi_send_data(struct gpmi_nand_data *this)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	/* [3] submit the DMA */
 	/* [3] submit the DMA */
-	set_dma_type(this, DMA_FOR_WRITE_DATA);
-	return start_dma_without_bch_irq(this, desc);
+	ret = start_dma_without_bch_irq(this, desc);
+
+	dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+
+	return ret;
 }
 }
 
 
-int gpmi_read_data(struct gpmi_nand_data *this)
+int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len)
 {
 {
 	struct dma_async_tx_descriptor *desc;
 	struct dma_async_tx_descriptor *desc;
 	struct dma_chan *channel = get_dma_chan(this);
 	struct dma_chan *channel = get_dma_chan(this);
 	int chip = this->current_chip;
 	int chip = this->current_chip;
+	int ret;
 	u32 pio[2];
 	u32 pio[2];
+	bool direct;
 
 
 	/* [1] : send PIO */
 	/* [1] : send PIO */
 	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
 	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
@@ -641,7 +645,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
 		| BF_GPMI_CTRL0_CS(chip, this)
 		| BF_GPMI_CTRL0_CS(chip, this)
 		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
 		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
 		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
 		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
-		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+		| BF_GPMI_CTRL0_XFER_COUNT(len);
 	pio[1] = 0;
 	pio[1] = 0;
 	desc = dmaengine_prep_slave_sg(channel,
 	desc = dmaengine_prep_slave_sg(channel,
 					(struct scatterlist *)pio,
 					(struct scatterlist *)pio,
@@ -650,7 +654,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	/* [2] : send DMA request */
 	/* [2] : send DMA request */
-	prepare_data_dma(this, DMA_FROM_DEVICE);
+	direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE);
 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
 	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
 					1, DMA_DEV_TO_MEM,
 					1, DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -658,8 +662,14 @@ int gpmi_read_data(struct gpmi_nand_data *this)
 		return -EINVAL;
 		return -EINVAL;
 
 
 	/* [3] : submit the DMA */
 	/* [3] : submit the DMA */
-	set_dma_type(this, DMA_FOR_READ_DATA);
-	return start_dma_without_bch_irq(this, desc);
+
+	ret = start_dma_without_bch_irq(this, desc);
+
+	dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+	if (!direct)
+		memcpy(buf, this->data_buffer_dma, len);
+
+	return ret;
 }
 }
 
 
 int gpmi_send_page(struct gpmi_nand_data *this,
 int gpmi_send_page(struct gpmi_nand_data *this,
@@ -703,7 +713,6 @@ int gpmi_send_page(struct gpmi_nand_data *this,
 	if (!desc)
 	if (!desc)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
 	return start_dma_with_bch_irq(this, desc);
 	return start_dma_with_bch_irq(this, desc);
 }
 }
 
 
@@ -785,7 +794,6 @@ int gpmi_read_page(struct gpmi_nand_data *this,
 		return -EINVAL;
 		return -EINVAL;
 
 
 	/* [4] submit the DMA */
 	/* [4] submit the DMA */
-	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
 	return start_dma_with_bch_irq(this, desc);
 	return start_dma_with_bch_irq(this, desc);
 }
 }
 
 

+ 59 - 129
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c

@@ -198,17 +198,16 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
  *
  *
  * We may have available oob space in this case.
  * We may have available oob space in this case.
  */
  */
-static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
+				    unsigned int ecc_strength,
+				    unsigned int ecc_step)
 {
 {
 	struct bch_geometry *geo = &this->bch_geometry;
 	struct bch_geometry *geo = &this->bch_geometry;
 	struct nand_chip *chip = &this->nand;
 	struct nand_chip *chip = &this->nand;
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	unsigned int block_mark_bit_offset;
 	unsigned int block_mark_bit_offset;
 
 
-	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
-		return -EINVAL;
-
-	switch (chip->ecc_step_ds) {
+	switch (ecc_step) {
 	case SZ_512:
 	case SZ_512:
 		geo->gf_len = 13;
 		geo->gf_len = 13;
 		break;
 		break;
@@ -221,8 +220,8 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
 			chip->ecc_strength_ds, chip->ecc_step_ds);
 			chip->ecc_strength_ds, chip->ecc_step_ds);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
-	geo->ecc_chunk_size = chip->ecc_step_ds;
-	geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
+	geo->ecc_chunk_size = ecc_step;
+	geo->ecc_strength = round_up(ecc_strength, 2);
 	if (!gpmi_check_ecc(this))
 	if (!gpmi_check_ecc(this))
 		return -EINVAL;
 		return -EINVAL;
 
 
@@ -230,7 +229,7 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
 	if (geo->ecc_chunk_size < mtd->oobsize) {
 	if (geo->ecc_chunk_size < mtd->oobsize) {
 		dev_err(this->dev,
 		dev_err(this->dev,
 			"unsupported nand chip. ecc size: %d, oob size : %d\n",
 			"unsupported nand chip. ecc size: %d, oob size : %d\n",
-			chip->ecc_step_ds, mtd->oobsize);
+			ecc_step, mtd->oobsize);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
@@ -423,9 +422,20 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
 
 
 int common_nfc_set_geometry(struct gpmi_nand_data *this)
 int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
 {
+	struct nand_chip *chip = &this->nand;
+
+	if (chip->ecc.strength > 0 && chip->ecc.size > 0)
+		return set_geometry_by_ecc_info(this, chip->ecc.strength,
+						chip->ecc.size);
+
 	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
 	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
-				|| legacy_set_geometry(this))
-		return set_geometry_by_ecc_info(this);
+				|| legacy_set_geometry(this)) {
+		if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+			return -EINVAL;
+
+		return set_geometry_by_ecc_info(this, chip->ecc_strength_ds,
+						chip->ecc_step_ds);
+	}
 
 
 	return 0;
 	return 0;
 }
 }
@@ -437,33 +447,32 @@ struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 }
 }
 
 
 /* Can we use the upper's buffer directly for DMA? */
 /* Can we use the upper's buffer directly for DMA? */
-void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len,
+		      enum dma_data_direction dr)
 {
 {
 	struct scatterlist *sgl = &this->data_sgl;
 	struct scatterlist *sgl = &this->data_sgl;
 	int ret;
 	int ret;
 
 
 	/* first try to map the upper buffer directly */
 	/* first try to map the upper buffer directly */
-	if (virt_addr_valid(this->upper_buf) &&
-		!object_is_on_stack(this->upper_buf)) {
-		sg_init_one(sgl, this->upper_buf, this->upper_len);
+	if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
+		sg_init_one(sgl, buf, len);
 		ret = dma_map_sg(this->dev, sgl, 1, dr);
 		ret = dma_map_sg(this->dev, sgl, 1, dr);
 		if (ret == 0)
 		if (ret == 0)
 			goto map_fail;
 			goto map_fail;
 
 
-		this->direct_dma_map_ok = true;
-		return;
+		return true;
 	}
 	}
 
 
 map_fail:
 map_fail:
 	/* We have to use our own DMA buffer. */
 	/* We have to use our own DMA buffer. */
-	sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+	sg_init_one(sgl, this->data_buffer_dma, len);
 
 
 	if (dr == DMA_TO_DEVICE)
 	if (dr == DMA_TO_DEVICE)
-		memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+		memcpy(this->data_buffer_dma, buf, len);
 
 
 	dma_map_sg(this->dev, sgl, 1, dr);
 	dma_map_sg(this->dev, sgl, 1, dr);
 
 
-	this->direct_dma_map_ok = false;
+	return false;
 }
 }
 
 
 /* This will be called after the DMA operation is finished. */
 /* This will be called after the DMA operation is finished. */
@@ -472,31 +481,6 @@ static void dma_irq_callback(void *param)
 	struct gpmi_nand_data *this = param;
 	struct gpmi_nand_data *this = param;
 	struct completion *dma_c = &this->dma_done;
 	struct completion *dma_c = &this->dma_done;
 
 
-	switch (this->dma_type) {
-	case DMA_FOR_COMMAND:
-		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
-		break;
-
-	case DMA_FOR_READ_DATA:
-		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
-		if (this->direct_dma_map_ok == false)
-			memcpy(this->upper_buf, this->data_buffer_dma,
-				this->upper_len);
-		break;
-
-	case DMA_FOR_WRITE_DATA:
-		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
-		break;
-
-	case DMA_FOR_READ_ECC_PAGE:
-	case DMA_FOR_WRITE_ECC_PAGE:
-		/* We have to wait the BCH interrupt to finish. */
-		break;
-
-	default:
-		dev_err(this->dev, "in wrong DMA operation.\n");
-	}
-
 	complete(dma_c);
 	complete(dma_c);
 }
 }
 
 
@@ -516,8 +500,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
 	/* Wait for the interrupt from the DMA block. */
 	/* Wait for the interrupt from the DMA block. */
 	timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
 	timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
 	if (!timeout) {
 	if (!timeout) {
-		dev_err(this->dev, "DMA timeout, last DMA :%d\n",
-			this->last_dma_type);
+		dev_err(this->dev, "DMA timeout, last DMA\n");
 		gpmi_dump_info(this);
 		gpmi_dump_info(this);
 		return -ETIMEDOUT;
 		return -ETIMEDOUT;
 	}
 	}
@@ -546,8 +529,7 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
 	/* Wait for the interrupt from the BCH block. */
 	/* Wait for the interrupt from the BCH block. */
 	timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
 	timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
 	if (!timeout) {
 	if (!timeout) {
-		dev_err(this->dev, "BCH timeout, last DMA :%d\n",
-			this->last_dma_type);
+		dev_err(this->dev, "BCH timeout\n");
 		gpmi_dump_info(this);
 		gpmi_dump_info(this);
 		return -ETIMEDOUT;
 		return -ETIMEDOUT;
 	}
 	}
@@ -695,56 +677,6 @@ static void release_resources(struct gpmi_nand_data *this)
 	release_dma_channels(this);
 	release_dma_channels(this);
 }
 }
 
 
-static int read_page_prepare(struct gpmi_nand_data *this,
-			void *destination, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			void **use_virt, dma_addr_t *use_phys)
-{
-	struct device *dev = this->dev;
-
-	if (virt_addr_valid(destination)) {
-		dma_addr_t dest_phys;
-
-		dest_phys = dma_map_single(dev, destination,
-						length, DMA_FROM_DEVICE);
-		if (dma_mapping_error(dev, dest_phys)) {
-			if (alt_size < length) {
-				dev_err(dev, "Alternate buffer is too small\n");
-				return -ENOMEM;
-			}
-			goto map_failed;
-		}
-		*use_virt = destination;
-		*use_phys = dest_phys;
-		this->direct_dma_map_ok = true;
-		return 0;
-	}
-
-map_failed:
-	*use_virt = alt_virt;
-	*use_phys = alt_phys;
-	this->direct_dma_map_ok = false;
-	return 0;
-}
-
-static inline void read_page_end(struct gpmi_nand_data *this,
-			void *destination, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			void *used_virt, dma_addr_t used_phys)
-{
-	if (this->direct_dma_map_ok)
-		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
-}
-
-static inline void read_page_swap_end(struct gpmi_nand_data *this,
-			void *destination, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			void *used_virt, dma_addr_t used_phys)
-{
-	if (!this->direct_dma_map_ok)
-		memcpy(destination, alt_virt, length);
-}
-
 static int send_page_prepare(struct gpmi_nand_data *this,
 static int send_page_prepare(struct gpmi_nand_data *this,
 			const void *source, unsigned length,
 			const void *source, unsigned length,
 			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
 			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
@@ -946,10 +878,8 @@ static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
 
 
 	dev_dbg(this->dev, "len is %d\n", len);
 	dev_dbg(this->dev, "len is %d\n", len);
-	this->upper_buf	= buf;
-	this->upper_len	= len;
 
 
-	gpmi_read_data(this);
+	gpmi_read_data(this, buf, len);
 }
 }
 
 
 static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
@@ -958,10 +888,8 @@ static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
 	struct gpmi_nand_data *this = nand_get_controller_data(chip);
 
 
 	dev_dbg(this->dev, "len is %d\n", len);
 	dev_dbg(this->dev, "len is %d\n", len);
-	this->upper_buf	= (uint8_t *)buf;
-	this->upper_len	= len;
 
 
-	gpmi_send_data(this);
+	gpmi_send_data(this, buf, len);
 }
 }
 
 
 static uint8_t gpmi_read_byte(struct mtd_info *mtd)
 static uint8_t gpmi_read_byte(struct mtd_info *mtd)
@@ -1031,44 +959,46 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	void          *payload_virt;
 	void          *payload_virt;
 	dma_addr_t    payload_phys;
 	dma_addr_t    payload_phys;
-	void          *auxiliary_virt;
-	dma_addr_t    auxiliary_phys;
 	unsigned int  i;
 	unsigned int  i;
 	unsigned char *status;
 	unsigned char *status;
 	unsigned int  max_bitflips = 0;
 	unsigned int  max_bitflips = 0;
 	int           ret;
 	int           ret;
+	bool          direct = false;
 
 
 	dev_dbg(this->dev, "page number is : %d\n", page);
 	dev_dbg(this->dev, "page number is : %d\n", page);
-	ret = read_page_prepare(this, buf, nfc_geo->payload_size,
-					this->payload_virt, this->payload_phys,
-					nfc_geo->payload_size,
-					&payload_virt, &payload_phys);
-	if (ret) {
-		dev_err(this->dev, "Inadequate DMA buffer\n");
-		ret = -ENOMEM;
-		return ret;
+
+	payload_virt = this->payload_virt;
+	payload_phys = this->payload_phys;
+
+	if (virt_addr_valid(buf)) {
+		dma_addr_t dest_phys;
+
+		dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
+					   DMA_FROM_DEVICE);
+		if (!dma_mapping_error(this->dev, dest_phys)) {
+			payload_virt = buf;
+			payload_phys = dest_phys;
+			direct = true;
+		}
 	}
 	}
-	auxiliary_virt = this->auxiliary_virt;
-	auxiliary_phys = this->auxiliary_phys;
 
 
 	/* go! */
 	/* go! */
-	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
-	read_page_end(this, buf, nfc_geo->payload_size,
-			this->payload_virt, this->payload_phys,
-			nfc_geo->payload_size,
-			payload_virt, payload_phys);
+	ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys);
+
+	if (direct)
+		dma_unmap_single(this->dev, payload_phys, nfc_geo->payload_size,
+				 DMA_FROM_DEVICE);
+
 	if (ret) {
 	if (ret) {
 		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
 		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
 		return ret;
 		return ret;
 	}
 	}
 
 
 	/* Loop over status bytes, accumulating ECC status. */
 	/* Loop over status bytes, accumulating ECC status. */
-	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+	status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
 
-	read_page_swap_end(this, buf, nfc_geo->payload_size,
-			   this->payload_virt, this->payload_phys,
-			   nfc_geo->payload_size,
-			   payload_virt, payload_phys);
+	if (!direct)
+		memcpy(buf, this->payload_virt, nfc_geo->payload_size);
 
 
 	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
 	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
 		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
 		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
@@ -1123,7 +1053,7 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
 						buf + i * nfc_geo->ecc_chunk_size,
 						buf + i * nfc_geo->ecc_chunk_size,
 						nfc_geo->ecc_chunk_size,
 						nfc_geo->ecc_chunk_size,
 						eccbuf, eccbytes,
 						eccbuf, eccbytes,
-						auxiliary_virt,
+						this->auxiliary_virt,
 						nfc_geo->metadata_size,
 						nfc_geo->metadata_size,
 						nfc_geo->ecc_strength);
 						nfc_geo->ecc_strength);
 			} else {
 			} else {
@@ -1151,7 +1081,7 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
 	}
 	}
 
 
 	/* handle the block mark swapping */
 	/* handle the block mark swapping */
-	block_mark_swapping(this, buf, auxiliary_virt);
+	block_mark_swapping(this, buf, this->auxiliary_virt);
 
 
 	if (oob_required) {
 	if (oob_required) {
 		/*
 		/*
@@ -1165,7 +1095,7 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
 		 * the block mark.
 		 * the block mark.
 		 */
 		 */
 		memset(chip->oob_poi, ~0, mtd->oobsize);
 		memset(chip->oob_poi, ~0, mtd->oobsize);
-		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+		chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
 	}
 	}
 
 
 	return max_bitflips;
 	return max_bitflips;

+ 4 - 21
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h

@@ -77,15 +77,6 @@ struct boot_rom_geometry {
 	unsigned int  search_area_stride_exponent;
 	unsigned int  search_area_stride_exponent;
 };
 };
 
 
-/* DMA operations types */
-enum dma_ops_type {
-	DMA_FOR_COMMAND = 1,
-	DMA_FOR_READ_DATA,
-	DMA_FOR_WRITE_DATA,
-	DMA_FOR_READ_ECC_PAGE,
-	DMA_FOR_WRITE_ECC_PAGE
-};
-
 enum gpmi_type {
 enum gpmi_type {
 	IS_MX23,
 	IS_MX23,
 	IS_MX28,
 	IS_MX28,
@@ -150,13 +141,6 @@ struct gpmi_nand_data {
 	int			current_chip;
 	int			current_chip;
 	unsigned int		command_length;
 	unsigned int		command_length;
 
 
-	/* passed from upper layer */
-	uint8_t			*upper_buf;
-	int			upper_len;
-
-	/* for DMA operations */
-	bool			direct_dma_map_ok;
-
 	struct scatterlist	cmd_sgl;
 	struct scatterlist	cmd_sgl;
 	char			*cmd_buffer;
 	char			*cmd_buffer;
 
 
@@ -178,8 +162,6 @@ struct gpmi_nand_data {
 	/* DMA channels */
 	/* DMA channels */
 #define DMA_CHANS		8
 #define DMA_CHANS		8
 	struct dma_chan		*dma_chans[DMA_CHANS];
 	struct dma_chan		*dma_chans[DMA_CHANS];
-	enum dma_ops_type	last_dma_type;
-	enum dma_ops_type	dma_type;
 	struct completion	dma_done;
 	struct completion	dma_done;
 
 
 	/* private */
 	/* private */
@@ -189,7 +171,7 @@ struct gpmi_nand_data {
 /* Common Services */
 /* Common Services */
 int common_nfc_set_geometry(struct gpmi_nand_data *);
 int common_nfc_set_geometry(struct gpmi_nand_data *);
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-void prepare_data_dma(struct gpmi_nand_data *,
+bool prepare_data_dma(struct gpmi_nand_data *, const void *buf, int len,
 		      enum dma_data_direction dr);
 		      enum dma_data_direction dr);
 int start_dma_without_bch_irq(struct gpmi_nand_data *,
 int start_dma_without_bch_irq(struct gpmi_nand_data *,
 			      struct dma_async_tx_descriptor *);
 			      struct dma_async_tx_descriptor *);
@@ -208,8 +190,9 @@ int gpmi_disable_clk(struct gpmi_nand_data *this);
 int gpmi_setup_data_interface(struct mtd_info *mtd, int chipnr,
 int gpmi_setup_data_interface(struct mtd_info *mtd, int chipnr,
 			      const struct nand_data_interface *conf);
 			      const struct nand_data_interface *conf);
 void gpmi_nfc_apply_timings(struct gpmi_nand_data *this);
 void gpmi_nfc_apply_timings(struct gpmi_nand_data *this);
-int gpmi_read_data(struct gpmi_nand_data *);
-int gpmi_send_data(struct gpmi_nand_data *);
+int gpmi_read_data(struct gpmi_nand_data *, void *buf, int len);
+int gpmi_send_data(struct gpmi_nand_data *, const void *buf, int len);
+
 int gpmi_send_page(struct gpmi_nand_data *,
 int gpmi_send_page(struct gpmi_nand_data *,
 		   dma_addr_t payload, dma_addr_t auxiliary);
 		   dma_addr_t payload, dma_addr_t auxiliary);
 int gpmi_read_page(struct gpmi_nand_data *,
 int gpmi_read_page(struct gpmi_nand_data *,

+ 12 - 23
drivers/mtd/nand/raw/hisi504_nand.c

@@ -731,23 +731,19 @@ static int hisi_nfc_probe(struct platform_device *pdev)
 	irq = platform_get_irq(pdev, 0);
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 	if (irq < 0) {
 		dev_err(dev, "no IRQ resource defined\n");
 		dev_err(dev, "no IRQ resource defined\n");
-		ret = -ENXIO;
-		goto err_res;
+		return -ENXIO;
 	}
 	}
 
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	host->iobase = devm_ioremap_resource(dev, res);
 	host->iobase = devm_ioremap_resource(dev, res);
-	if (IS_ERR(host->iobase)) {
-		ret = PTR_ERR(host->iobase);
-		goto err_res;
-	}
+	if (IS_ERR(host->iobase))
+		return PTR_ERR(host->iobase);
 
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	host->mmio = devm_ioremap_resource(dev, res);
 	host->mmio = devm_ioremap_resource(dev, res);
 	if (IS_ERR(host->mmio)) {
 	if (IS_ERR(host->mmio)) {
-		ret = PTR_ERR(host->mmio);
 		dev_err(dev, "devm_ioremap_resource[1] fail\n");
 		dev_err(dev, "devm_ioremap_resource[1] fail\n");
-		goto err_res;
+		return PTR_ERR(host->mmio);
 	}
 	}
 
 
 	mtd->name		= "hisi_nand";
 	mtd->name		= "hisi_nand";
@@ -770,19 +766,17 @@ static int hisi_nfc_probe(struct platform_device *pdev)
 	ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
 	ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
 	if (ret) {
 	if (ret) {
 		dev_err(dev, "failed to request IRQ\n");
 		dev_err(dev, "failed to request IRQ\n");
-		goto err_res;
+		return ret;
 	}
 	}
 
 
 	ret = nand_scan_ident(mtd, max_chips, NULL);
 	ret = nand_scan_ident(mtd, max_chips, NULL);
 	if (ret)
 	if (ret)
-		goto err_res;
+		return ret;
 
 
 	host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
 	host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
 		&host->dma_buffer, GFP_KERNEL);
 		&host->dma_buffer, GFP_KERNEL);
-	if (!host->buffer) {
-		ret = -ENOMEM;
-		goto err_res;
-	}
+	if (!host->buffer)
+		return -ENOMEM;
 
 
 	host->dma_oob = host->dma_buffer + mtd->writesize;
 	host->dma_oob = host->dma_buffer + mtd->writesize;
 	memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
 	memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
@@ -798,8 +792,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
 	 */
 	 */
 	default:
 	default:
 		dev_err(dev, "NON-2KB page size nand flash\n");
 		dev_err(dev, "NON-2KB page size nand flash\n");
-		ret = -EINVAL;
-		goto err_res;
+		return -EINVAL;
 	}
 	}
 	hinfc_write(host, flag, HINFC504_CON);
 	hinfc_write(host, flag, HINFC504_CON);
 
 
@@ -809,21 +802,17 @@ static int hisi_nfc_probe(struct platform_device *pdev)
 	ret = nand_scan_tail(mtd);
 	ret = nand_scan_tail(mtd);
 	if (ret) {
 	if (ret) {
 		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
 		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
-		goto err_res;
+		return ret;
 	}
 	}
 
 
 	ret = mtd_device_register(mtd, NULL, 0);
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret) {
 	if (ret) {
 		dev_err(dev, "Err MTD partition=%d\n", ret);
 		dev_err(dev, "Err MTD partition=%d\n", ret);
-		goto err_mtd;
+		nand_cleanup(chip);
+		return ret;
 	}
 	}
 
 
 	return 0;
 	return 0;
-
-err_mtd:
-	nand_release(mtd);
-err_res:
-	return ret;
 }
 }
 
 
 static int hisi_nfc_remove(struct platform_device *pdev)
 static int hisi_nfc_remove(struct platform_device *pdev)

+ 20 - 18
drivers/mtd/nand/raw/lpc32xx_mlc.c

@@ -673,7 +673,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
 	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
 	if (IS_ERR(host->io_base))
 	if (IS_ERR(host->io_base))
 		return PTR_ERR(host->io_base);
 		return PTR_ERR(host->io_base);
-	
+
 	host->io_base_phy = rc->start;
 	host->io_base_phy = rc->start;
 
 
 	nand_chip = &host->nand_chip;
 	nand_chip = &host->nand_chip;
@@ -706,11 +706,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	if (IS_ERR(host->clk)) {
 	if (IS_ERR(host->clk)) {
 		dev_err(&pdev->dev, "Clock initialization failure\n");
 		dev_err(&pdev->dev, "Clock initialization failure\n");
 		res = -ENOENT;
 		res = -ENOENT;
-		goto err_exit1;
+		goto free_gpio;
 	}
 	}
 	res = clk_prepare_enable(host->clk);
 	res = clk_prepare_enable(host->clk);
 	if (res)
 	if (res)
-		goto err_put_clk;
+		goto put_clk;
 
 
 	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
 	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
 	nand_chip->dev_ready = lpc32xx_nand_device_ready;
 	nand_chip->dev_ready = lpc32xx_nand_device_ready;
@@ -744,7 +744,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 		res = lpc32xx_dma_setup(host);
 		res = lpc32xx_dma_setup(host);
 		if (res) {
 		if (res) {
 			res = -EIO;
 			res = -EIO;
-			goto err_exit2;
+			goto unprepare_clk;
 		}
 		}
 	}
 	}
 
 
@@ -754,18 +754,18 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	 */
 	 */
 	res = nand_scan_ident(mtd, 1, NULL);
 	res = nand_scan_ident(mtd, 1, NULL);
 	if (res)
 	if (res)
-		goto err_exit3;
+		goto release_dma_chan;
 
 
 	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
 	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
 	if (!host->dma_buf) {
 	if (!host->dma_buf) {
 		res = -ENOMEM;
 		res = -ENOMEM;
-		goto err_exit3;
+		goto release_dma_chan;
 	}
 	}
 
 
 	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
 	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
 	if (!host->dummy_buf) {
 	if (!host->dummy_buf) {
 		res = -ENOMEM;
 		res = -ENOMEM;
-		goto err_exit3;
+		goto release_dma_chan;
 	}
 	}
 
 
 	nand_chip->ecc.mode = NAND_ECC_HW;
 	nand_chip->ecc.mode = NAND_ECC_HW;
@@ -783,14 +783,14 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	if (host->irq < 0) {
 	if (host->irq < 0) {
 		dev_err(&pdev->dev, "failed to get platform irq\n");
 		dev_err(&pdev->dev, "failed to get platform irq\n");
 		res = -EINVAL;
 		res = -EINVAL;
-		goto err_exit3;
+		goto release_dma_chan;
 	}
 	}
 
 
 	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
 	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
 			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
 			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
 		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
 		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
 		res = -ENXIO;
 		res = -ENXIO;
-		goto err_exit3;
+		goto release_dma_chan;
 	}
 	}
 
 
 	/*
 	/*
@@ -799,27 +799,29 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	 */
 	 */
 	res = nand_scan_tail(mtd);
 	res = nand_scan_tail(mtd);
 	if (res)
 	if (res)
-		goto err_exit4;
+		goto free_irq;
 
 
 	mtd->name = DRV_NAME;
 	mtd->name = DRV_NAME;
 
 
 	res = mtd_device_register(mtd, host->ncfg->parts,
 	res = mtd_device_register(mtd, host->ncfg->parts,
 				  host->ncfg->num_parts);
 				  host->ncfg->num_parts);
-	if (!res)
-		return res;
+	if (res)
+		goto cleanup_nand;
 
 
-	nand_release(mtd);
+	return 0;
 
 
-err_exit4:
+cleanup_nand:
+	nand_cleanup(nand_chip);
+free_irq:
 	free_irq(host->irq, host);
 	free_irq(host->irq, host);
-err_exit3:
+release_dma_chan:
 	if (use_dma)
 	if (use_dma)
 		dma_release_channel(host->dma_chan);
 		dma_release_channel(host->dma_chan);
-err_exit2:
+unprepare_clk:
 	clk_disable_unprepare(host->clk);
 	clk_disable_unprepare(host->clk);
-err_put_clk:
+put_clk:
 	clk_put(host->clk);
 	clk_put(host->clk);
-err_exit1:
+free_gpio:
 	lpc32xx_wp_enable(host);
 	lpc32xx_wp_enable(host);
 	gpio_free(host->ncfg->wp_gpio);
 	gpio_free(host->ncfg->wp_gpio);
 
 

+ 14 - 12
drivers/mtd/nand/raw/lpc32xx_slc.c

@@ -831,11 +831,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	if (IS_ERR(host->clk)) {
 	if (IS_ERR(host->clk)) {
 		dev_err(&pdev->dev, "Clock failure\n");
 		dev_err(&pdev->dev, "Clock failure\n");
 		res = -ENOENT;
 		res = -ENOENT;
-		goto err_exit1;
+		goto enable_wp;
 	}
 	}
 	res = clk_prepare_enable(host->clk);
 	res = clk_prepare_enable(host->clk);
 	if (res)
 	if (res)
-		goto err_exit1;
+		goto enable_wp;
 
 
 	/* Set NAND IO addresses and command/ready functions */
 	/* Set NAND IO addresses and command/ready functions */
 	chip->IO_ADDR_R = SLC_DATA(host->io_base);
 	chip->IO_ADDR_R = SLC_DATA(host->io_base);
@@ -874,19 +874,19 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 				      GFP_KERNEL);
 				      GFP_KERNEL);
 	if (host->data_buf == NULL) {
 	if (host->data_buf == NULL) {
 		res = -ENOMEM;
 		res = -ENOMEM;
-		goto err_exit2;
+		goto unprepare_clk;
 	}
 	}
 
 
 	res = lpc32xx_nand_dma_setup(host);
 	res = lpc32xx_nand_dma_setup(host);
 	if (res) {
 	if (res) {
 		res = -EIO;
 		res = -EIO;
-		goto err_exit2;
+		goto unprepare_clk;
 	}
 	}
 
 
 	/* Find NAND device */
 	/* Find NAND device */
 	res = nand_scan_ident(mtd, 1, NULL);
 	res = nand_scan_ident(mtd, 1, NULL);
 	if (res)
 	if (res)
-		goto err_exit3;
+		goto release_dma;
 
 
 	/* OOB and ECC CPU and DMA work areas */
 	/* OOB and ECC CPU and DMA work areas */
 	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
 	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
@@ -920,21 +920,23 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	 */
 	 */
 	res = nand_scan_tail(mtd);
 	res = nand_scan_tail(mtd);
 	if (res)
 	if (res)
-		goto err_exit3;
+		goto release_dma;
 
 
 	mtd->name = "nxp_lpc3220_slc";
 	mtd->name = "nxp_lpc3220_slc";
 	res = mtd_device_register(mtd, host->ncfg->parts,
 	res = mtd_device_register(mtd, host->ncfg->parts,
 				  host->ncfg->num_parts);
 				  host->ncfg->num_parts);
-	if (!res)
-		return res;
+	if (res)
+		goto cleanup_nand;
 
 
-	nand_release(mtd);
+	return 0;
 
 
-err_exit3:
+cleanup_nand:
+	nand_cleanup(chip);
+release_dma:
 	dma_release_channel(host->dma_chan);
 	dma_release_channel(host->dma_chan);
-err_exit2:
+unprepare_clk:
 	clk_disable_unprepare(host->clk);
 	clk_disable_unprepare(host->clk);
-err_exit1:
+enable_wp:
 	lpc32xx_wp_enable(host);
 	lpc32xx_wp_enable(host);
 
 
 	return res;
 	return res;

+ 1 - 6
drivers/mtd/nand/raw/mtk_ecc.c

@@ -500,7 +500,6 @@ static int mtk_ecc_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device *dev = &pdev->dev;
 	struct mtk_ecc *ecc;
 	struct mtk_ecc *ecc;
 	struct resource *res;
 	struct resource *res;
-	const struct of_device_id *of_ecc_id = NULL;
 	u32 max_eccdata_size;
 	u32 max_eccdata_size;
 	int irq, ret;
 	int irq, ret;
 
 
@@ -508,11 +507,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
 	if (!ecc)
 	if (!ecc)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	of_ecc_id = of_match_device(mtk_ecc_dt_match, &pdev->dev);
-	if (!of_ecc_id)
-		return -ENODEV;
-
-	ecc->caps = of_ecc_id->data;
+	ecc->caps = of_device_get_match_data(dev);
 
 
 	max_eccdata_size = ecc->caps->num_ecc_strength - 1;
 	max_eccdata_size = ecc->caps->num_ecc_strength - 1;
 	max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
 	max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];

+ 1 - 9
drivers/mtd/nand/raw/mtk_nand.c

@@ -1434,7 +1434,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
 	struct device_node *np = dev->of_node;
 	struct device_node *np = dev->of_node;
 	struct mtk_nfc *nfc;
 	struct mtk_nfc *nfc;
 	struct resource *res;
 	struct resource *res;
-	const struct of_device_id *of_nfc_id = NULL;
 	int ret, irq;
 	int ret, irq;
 
 
 	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
 	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
@@ -1452,6 +1451,7 @@ static int mtk_nfc_probe(struct platform_device *pdev)
 	else if (!nfc->ecc)
 	else if (!nfc->ecc)
 		return -ENODEV;
 		return -ENODEV;
 
 
+	nfc->caps = of_device_get_match_data(dev);
 	nfc->dev = dev;
 	nfc->dev = dev;
 
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1498,14 +1498,6 @@ static int mtk_nfc_probe(struct platform_device *pdev)
 		goto clk_disable;
 		goto clk_disable;
 	}
 	}
 
 
-	of_nfc_id = of_match_device(mtk_nfc_id_table, &pdev->dev);
-	if (!of_nfc_id) {
-		ret = -ENODEV;
-		goto clk_disable;
-	}
-
-	nfc->caps = of_nfc_id->data;
-
 	platform_set_drvdata(pdev, nfc);
 	platform_set_drvdata(pdev, nfc);
 
 
 	ret = mtk_nfc_nand_chips_init(dev, nfc);
 	ret = mtk_nfc_nand_chips_init(dev, nfc);

+ 62 - 27
drivers/mtd/nand/raw/nand_base.c

@@ -2174,7 +2174,6 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	const u8 *params = data;
 	const u8 *params = data;
 	int i, ret;
 	int i, ret;
-	u8 status;
 
 
 	if (chip->exec_op) {
 	if (chip->exec_op) {
 		const struct nand_sdr_timings *sdr =
 		const struct nand_sdr_timings *sdr =
@@ -2188,26 +2187,18 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
 		};
 		};
 		struct nand_operation op = NAND_OPERATION(instrs);
 		struct nand_operation op = NAND_OPERATION(instrs);
 
 
-		ret = nand_exec_op(chip, &op);
-		if (ret)
-			return ret;
-
-		ret = nand_status_op(chip, &status);
-		if (ret)
-			return ret;
-	} else {
-		chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
-		for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
-			chip->write_byte(mtd, params[i]);
+		return nand_exec_op(chip, &op);
+	}
 
 
-		ret = chip->waitfunc(mtd, chip);
-		if (ret < 0)
-			return ret;
+	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		chip->write_byte(mtd, params[i]);
 
 
-		status = ret;
-	}
+	ret = chip->waitfunc(mtd, chip);
+	if (ret < 0)
+		return ret;
 
 
-	if (status & NAND_STATUS_FAIL)
+	if (ret & NAND_STATUS_FAIL)
 		return -EIO;
 		return -EIO;
 
 
 	return 0;
 	return 0;
@@ -5091,6 +5082,37 @@ ext_out:
 	return ret;
 	return ret;
 }
 }
 
 
+/*
+ * Recover data with bit-wise majority
+ */
+static void nand_bit_wise_majority(const void **srcbufs,
+				   unsigned int nsrcbufs,
+				   void *dstbuf,
+				   unsigned int bufsize)
+{
+	int i, j, k;
+
+	for (i = 0; i < bufsize; i++) {
+		u8 val = 0;
+
+		for (j = 0; j < 8; j++) {
+			unsigned int cnt = 0;
+
+			for (k = 0; k < nsrcbufs; k++) {
+				const u8 *srcbuf = srcbufs[k];
+
+				if (srcbuf[i] & BIT(j))
+					cnt++;
+			}
+
+			if (cnt > nsrcbufs / 2)
+				val |= BIT(j);
+		}
+
+		((u8 *)dstbuf)[i] = val;
+	}
+}
+
 /*
 /*
  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  */
  */
@@ -5107,7 +5129,7 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
 		return 0;
 		return 0;
 
 
 	/* ONFI chip: allocate a buffer to hold its parameter page */
 	/* ONFI chip: allocate a buffer to hold its parameter page */
-	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
 	if (!p)
 	if (!p)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
@@ -5118,21 +5140,32 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
 	}
 	}
 
 
 	for (i = 0; i < 3; i++) {
 	for (i = 0; i < 3; i++) {
-		ret = nand_read_data_op(chip, p, sizeof(*p), true);
+		ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
 		if (ret) {
 		if (ret) {
 			ret = 0;
 			ret = 0;
 			goto free_onfi_param_page;
 			goto free_onfi_param_page;
 		}
 		}
 
 
-		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
 				le16_to_cpu(p->crc)) {
 				le16_to_cpu(p->crc)) {
+			if (i)
+				memcpy(p, &p[i], sizeof(*p));
 			break;
 			break;
 		}
 		}
 	}
 	}
 
 
 	if (i == 3) {
 	if (i == 3) {
-		pr_err("Could not find valid ONFI parameter page; aborting\n");
-		goto free_onfi_param_page;
+		const void *srcbufs[3] = {p, p + 1, p + 2};
+
+		pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
+		nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
+				       sizeof(*p));
+
+		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
+				le16_to_cpu(p->crc)) {
+			pr_err("ONFI parameter recovery failed, aborting\n");
+			goto free_onfi_param_page;
+		}
 	}
 	}
 
 
 	/* Check version */
 	/* Check version */
@@ -6635,24 +6668,26 @@ EXPORT_SYMBOL(nand_scan_tail);
 #endif
 #endif
 
 
 /**
 /**
- * nand_scan - [NAND Interface] Scan for the NAND device
+ * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
  * @mtd: MTD device structure
  * @mtd: MTD device structure
  * @maxchips: number of chips to scan for
  * @maxchips: number of chips to scan for
+ * @ids: optional flash IDs table
  *
  *
  * This fills out all the uninitialized function pointers with the defaults.
  * This fills out all the uninitialized function pointers with the defaults.
  * The flash ID is read and the mtd/chip structures are filled with the
  * The flash ID is read and the mtd/chip structures are filled with the
  * appropriate values.
  * appropriate values.
  */
  */
-int nand_scan(struct mtd_info *mtd, int maxchips)
+int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
+		       struct nand_flash_dev *ids)
 {
 {
 	int ret;
 	int ret;
 
 
-	ret = nand_scan_ident(mtd, maxchips, NULL);
+	ret = nand_scan_ident(mtd, maxchips, ids);
 	if (!ret)
 	if (!ret)
 		ret = nand_scan_tail(mtd);
 		ret = nand_scan_tail(mtd);
 	return ret;
 	return ret;
 }
 }
-EXPORT_SYMBOL(nand_scan);
+EXPORT_SYMBOL(nand_scan_with_ids);
 
 
 /**
 /**
  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
  * nand_cleanup - [NAND Interface] Free resources held by the NAND device

+ 15 - 76
drivers/mtd/nand/raw/sunxi_nand.c

@@ -165,49 +165,16 @@
 
 
 #define NFC_MAX_CS		7
 #define NFC_MAX_CS		7
 
 
-/*
- * Ready/Busy detection type: describes the Ready/Busy detection modes
- *
- * @RB_NONE:	no external detection available, rely on STATUS command
- *		and software timeouts
- * @RB_NATIVE:	use sunxi NAND controller Ready/Busy support. The Ready/Busy
- *		pin of the NAND flash chip must be connected to one of the
- *		native NAND R/B pins (those which can be muxed to the NAND
- *		Controller)
- * @RB_GPIO:	use a simple GPIO to handle Ready/Busy status. The Ready/Busy
- *		pin of the NAND flash chip must be connected to a GPIO capable
- *		pin.
- */
-enum sunxi_nand_rb_type {
-	RB_NONE,
-	RB_NATIVE,
-	RB_GPIO,
-};
-
-/*
- * Ready/Busy structure: stores information related to Ready/Busy detection
- *
- * @type:	the Ready/Busy detection mode
- * @info:	information related to the R/B detection mode. Either a gpio
- *		id or a native R/B id (those supported by the NAND controller).
- */
-struct sunxi_nand_rb {
-	enum sunxi_nand_rb_type type;
-	union {
-		int gpio;
-		int nativeid;
-	} info;
-};
-
 /*
 /*
  * Chip Select structure: stores information related to NAND Chip Select
  * Chip Select structure: stores information related to NAND Chip Select
  *
  *
  * @cs:		the NAND CS id used to communicate with a NAND Chip
  * @cs:		the NAND CS id used to communicate with a NAND Chip
- * @rb:		the Ready/Busy description
+ * @rb:		the Ready/Busy pin ID. -1 means no R/B pin connected to the
+ *		NFC
  */
  */
 struct sunxi_nand_chip_sel {
 struct sunxi_nand_chip_sel {
 	u8 cs;
 	u8 cs;
-	struct sunxi_nand_rb rb;
+	s8 rb;
 };
 };
 
 
 /*
 /*
@@ -440,30 +407,19 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
 	struct nand_chip *nand = mtd_to_nand(mtd);
 	struct nand_chip *nand = mtd_to_nand(mtd);
 	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
 	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	struct sunxi_nand_rb *rb;
-	int ret;
+	u32 mask;
 
 
 	if (sunxi_nand->selected < 0)
 	if (sunxi_nand->selected < 0)
 		return 0;
 		return 0;
 
 
-	rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
-
-	switch (rb->type) {
-	case RB_NATIVE:
-		ret = !!(readl(nfc->regs + NFC_REG_ST) &
-			 NFC_RB_STATE(rb->info.nativeid));
-		break;
-	case RB_GPIO:
-		ret = gpio_get_value(rb->info.gpio);
-		break;
-	case RB_NONE:
-	default:
-		ret = 0;
+	if (sunxi_nand->sels[sunxi_nand->selected].rb < 0) {
 		dev_err(nfc->dev, "cannot check R/B NAND status!\n");
 		dev_err(nfc->dev, "cannot check R/B NAND status!\n");
-		break;
+		return 0;
 	}
 	}
 
 
-	return ret;
+	mask = NFC_RB_STATE(sunxi_nand->sels[sunxi_nand->selected].rb);
+
+	return !!(readl(nfc->regs + NFC_REG_ST) & mask);
 }
 }
 
 
 static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
 static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -488,12 +444,11 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
 
 
 		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
 		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
 		       NFC_PAGE_SHIFT(nand->page_shift);
 		       NFC_PAGE_SHIFT(nand->page_shift);
-		if (sel->rb.type == RB_NONE) {
+		if (sel->rb < 0) {
 			nand->dev_ready = NULL;
 			nand->dev_ready = NULL;
 		} else {
 		} else {
 			nand->dev_ready = sunxi_nfc_dev_ready;
 			nand->dev_ready = sunxi_nfc_dev_ready;
-			if (sel->rb.type == RB_NATIVE)
-				ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
+			ctl |= NFC_RB_SEL(sel->rb);
 		}
 		}
 
 
 		writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
 		writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
@@ -1946,26 +1901,10 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
 		chip->sels[i].cs = tmp;
 		chip->sels[i].cs = tmp;
 
 
 		if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
 		if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
-		    tmp < 2) {
-			chip->sels[i].rb.type = RB_NATIVE;
-			chip->sels[i].rb.info.nativeid = tmp;
-		} else {
-			ret = of_get_named_gpio(np, "rb-gpios", i);
-			if (ret >= 0) {
-				tmp = ret;
-				chip->sels[i].rb.type = RB_GPIO;
-				chip->sels[i].rb.info.gpio = tmp;
-				ret = devm_gpio_request(dev, tmp, "nand-rb");
-				if (ret)
-					return ret;
-
-				ret = gpio_direction_input(tmp);
-				if (ret)
-					return ret;
-			} else {
-				chip->sels[i].rb.type = RB_NONE;
-			}
-		}
+		    tmp < 2)
+			chip->sels[i].rb = tmp;
+		else
+			chip->sels[i].rb = -1;
 	}
 	}
 
 
 	nand = &chip->nand;
 	nand = &chip->nand;

+ 16 - 7
drivers/mtd/nftlmount.c

@@ -272,28 +272,37 @@ static int memcmpb(void *a, int c, int n)
 static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
 static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
 			      int check_oob)
 			      int check_oob)
 {
 {
-	u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize];
 	struct mtd_info *mtd = nftl->mbd.mtd;
 	struct mtd_info *mtd = nftl->mbd.mtd;
 	size_t retlen;
 	size_t retlen;
-	int i;
+	int i, ret;
+	u8 *buf;
+
+	buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL);
+	if (!buf)
+		return -1;
 
 
+	ret = -1;
 	for (i = 0; i < len; i += SECTORSIZE) {
 	for (i = 0; i < len; i += SECTORSIZE) {
 		if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
 		if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
-			return -1;
+			goto out;
 		if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
 		if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
-			return -1;
+			goto out;
 
 
 		if (check_oob) {
 		if (check_oob) {
 			if(nftl_read_oob(mtd, address, mtd->oobsize,
 			if(nftl_read_oob(mtd, address, mtd->oobsize,
 					 &retlen, &buf[SECTORSIZE]) < 0)
 					 &retlen, &buf[SECTORSIZE]) < 0)
-				return -1;
+				goto out;
 			if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
 			if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
-				return -1;
+				goto out;
 		}
 		}
 		address += SECTORSIZE;
 		address += SECTORSIZE;
 	}
 	}
 
 
-	return 0;
+	ret = 0;
+
+out:
+	kfree(buf);
+	return ret;
 }
 }
 
 
 /* NFTL_format: format a Erase Unit by erasing ALL Erase Zones in the Erase Unit and
 /* NFTL_format: format a Erase Unit by erasing ALL Erase Zones in the Erase Unit and

+ 3 - 3
drivers/mtd/spi-nor/Kconfig

@@ -71,7 +71,7 @@ config SPI_FSL_QUADSPI
 config SPI_HISI_SFC
 config SPI_HISI_SFC
 	tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
 	tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
 	depends on ARCH_HISI || COMPILE_TEST
 	depends on ARCH_HISI || COMPILE_TEST
-	depends on HAS_IOMEM && HAS_DMA
+	depends on HAS_IOMEM
 	help
 	help
 	  This enables support for hisilicon SPI-NOR flash controller.
 	  This enables support for hisilicon SPI-NOR flash controller.
 
 
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI
 	tristate
 	tristate
 
 
 config SPI_INTEL_SPI_PCI
 config SPI_INTEL_SPI_PCI
-	tristate "Intel PCH/PCU SPI flash PCI driver"
+	tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
 	depends on X86 && PCI
 	depends on X86 && PCI
 	select SPI_INTEL_SPI
 	select SPI_INTEL_SPI
 	help
 	help
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
 	  will be called intel-spi-pci.
 	  will be called intel-spi-pci.
 
 
 config SPI_INTEL_SPI_PLATFORM
 config SPI_INTEL_SPI_PLATFORM
-	tristate "Intel PCH/PCU SPI flash platform driver"
+	tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
 	depends on X86
 	depends on X86
 	select SPI_INTEL_SPI
 	select SPI_INTEL_SPI
 	help
 	help

+ 94 - 2
drivers/mtd/spi-nor/cadence-quadspi.c

@@ -18,6 +18,8 @@
 #include <linux/clk.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
@@ -73,6 +75,10 @@ struct cqspi_st {
 	struct completion	transfer_complete;
 	struct completion	transfer_complete;
 	struct mutex		bus_mutex;
 	struct mutex		bus_mutex;
 
 
+	struct dma_chan		*rx_chan;
+	struct completion	rx_dma_complete;
+	dma_addr_t		mmap_phys_base;
+
 	int			current_cs;
 	int			current_cs;
 	int			current_page_size;
 	int			current_page_size;
 	int			current_erase_size;
 	int			current_erase_size;
@@ -930,11 +936,75 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
 	return len;
 	return len;
 }
 }
 
 
+static void cqspi_rx_dma_callback(void *param)
+{
+	struct cqspi_st *cqspi = param;
+
+	complete(&cqspi->rx_dma_complete);
+}
+
+static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
+				     loff_t from, size_t len)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
+	int ret = 0;
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t cookie;
+	dma_addr_t dma_dst;
+
+	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
+		memcpy_fromio(buf, cqspi->ahb_base + from, len);
+		return 0;
+	}
+
+	dma_dst = dma_map_single(nor->dev, buf, len, DMA_DEV_TO_MEM);
+	if (dma_mapping_error(nor->dev, dma_dst)) {
+		dev_err(nor->dev, "dma mapping failed\n");
+		return -ENOMEM;
+	}
+	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
+				       len, flags);
+	if (!tx) {
+		dev_err(nor->dev, "device_prep_dma_memcpy error\n");
+		ret = -EIO;
+		goto err_unmap;
+	}
+
+	tx->callback = cqspi_rx_dma_callback;
+	tx->callback_param = cqspi;
+	cookie = tx->tx_submit(tx);
+	reinit_completion(&cqspi->rx_dma_complete);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(nor->dev, "dma_submit_error %d\n", cookie);
+		ret = -EIO;
+		goto err_unmap;
+	}
+
+	dma_async_issue_pending(cqspi->rx_chan);
+	ret = wait_for_completion_timeout(&cqspi->rx_dma_complete,
+					  msecs_to_jiffies(len));
+	if (ret <= 0) {
+		dmaengine_terminate_sync(cqspi->rx_chan);
+		dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
+		ret = -ETIMEDOUT;
+		goto err_unmap;
+	}
+
+err_unmap:
+	dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM);
+
+	return 0;
+}
+
 static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
 static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
 			  size_t len, u_char *buf)
 			  size_t len, u_char *buf)
 {
 {
 	struct cqspi_flash_pdata *f_pdata = nor->priv;
 	struct cqspi_flash_pdata *f_pdata = nor->priv;
-	struct cqspi_st *cqspi = f_pdata->cqspi;
 	int ret;
 	int ret;
 
 
 	ret = cqspi_set_protocol(nor, 1);
 	ret = cqspi_set_protocol(nor, 1);
@@ -946,7 +1016,7 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
 		return ret;
 		return ret;
 
 
 	if (f_pdata->use_direct_mode)
 	if (f_pdata->use_direct_mode)
-		memcpy_fromio(buf, cqspi->ahb_base + from, len);
+		ret = cqspi_direct_read_execute(nor, buf, from, len);
 	else
 	else
 		ret = cqspi_indirect_read_execute(nor, buf, from, len);
 		ret = cqspi_indirect_read_execute(nor, buf, from, len);
 	if (ret)
 	if (ret)
@@ -1115,6 +1185,21 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
 	cqspi_controller_enable(cqspi, 1);
 	cqspi_controller_enable(cqspi, 1);
 }
 }
 
 
+static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
+	if (IS_ERR(cqspi->rx_chan)) {
+		dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
+		cqspi->rx_chan = NULL;
+	}
+	init_completion(&cqspi->rx_dma_complete);
+}
+
 static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
 static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
 {
 {
 	const struct spi_nor_hwcaps hwcaps = {
 	const struct spi_nor_hwcaps hwcaps = {
@@ -1192,6 +1277,9 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
 			f_pdata->use_direct_mode = true;
 			f_pdata->use_direct_mode = true;
 			dev_dbg(nor->dev, "using direct mode for %s\n",
 			dev_dbg(nor->dev, "using direct mode for %s\n",
 				mtd->name);
 				mtd->name);
+
+			if (!cqspi->rx_chan)
+				cqspi_request_mmap_dma(cqspi);
 		}
 		}
 	}
 	}
 
 
@@ -1252,6 +1340,7 @@ static int cqspi_probe(struct platform_device *pdev)
 		dev_err(dev, "Cannot remap AHB address.\n");
 		dev_err(dev, "Cannot remap AHB address.\n");
 		return PTR_ERR(cqspi->ahb_base);
 		return PTR_ERR(cqspi->ahb_base);
 	}
 	}
+	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
 	cqspi->ahb_size = resource_size(res_ahb);
 	cqspi->ahb_size = resource_size(res_ahb);
 
 
 	init_completion(&cqspi->transfer_complete);
 	init_completion(&cqspi->transfer_complete);
@@ -1322,6 +1411,9 @@ static int cqspi_remove(struct platform_device *pdev)
 
 
 	cqspi_controller_enable(cqspi, 0);
 	cqspi_controller_enable(cqspi, 0);
 
 
+	if (cqspi->rx_chan)
+		dma_release_channel(cqspi->rx_chan);
+
 	clk_disable_unprepare(cqspi->clk);
 	clk_disable_unprepare(cqspi->clk);
 
 
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_put_sync(&pdev->dev);

+ 13 - 2
drivers/mtd/spi-nor/fsl-quadspi.c

@@ -214,6 +214,7 @@ enum fsl_qspi_devtype {
 	FSL_QUADSPI_IMX7D,
 	FSL_QUADSPI_IMX7D,
 	FSL_QUADSPI_IMX6UL,
 	FSL_QUADSPI_IMX6UL,
 	FSL_QUADSPI_LS1021A,
 	FSL_QUADSPI_LS1021A,
+	FSL_QUADSPI_LS2080A,
 };
 };
 
 
 struct fsl_qspi_devtype_data {
 struct fsl_qspi_devtype_data {
@@ -267,6 +268,15 @@ static struct fsl_qspi_devtype_data ls1021a_data = {
 	.driver_data = 0,
 	.driver_data = 0,
 };
 };
 
 
+static const struct fsl_qspi_devtype_data ls2080a_data = {
+	.devtype = FSL_QUADSPI_LS2080A,
+	.rxfifo = 128,
+	.txfifo = 64,
+	.ahb_buf_size = 1024,
+	.driver_data = QUADSPI_QUIRK_TKT253890,
+};
+
+
 #define FSL_QSPI_MAX_CHIP	4
 #define FSL_QSPI_MAX_CHIP	4
 struct fsl_qspi {
 struct fsl_qspi {
 	struct spi_nor nor[FSL_QSPI_MAX_CHIP];
 	struct spi_nor nor[FSL_QSPI_MAX_CHIP];
@@ -661,7 +671,7 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
  * causes the controller to clear the buffer, and use the sequence pointed
  * causes the controller to clear the buffer, and use the sequence pointed
  * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
  * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
  */
  */
-static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
+static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
 {
 {
 	void __iomem *base = q->iobase;
 	void __iomem *base = q->iobase;
 	int seqid;
 	int seqid;
@@ -795,7 +805,7 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
 	fsl_qspi_init_lut(q);
 	fsl_qspi_init_lut(q);
 
 
 	/* Init for AHB read */
 	/* Init for AHB read */
-	fsl_qspi_init_abh_read(q);
+	fsl_qspi_init_ahb_read(q);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -806,6 +816,7 @@ static const struct of_device_id fsl_qspi_dt_ids[] = {
 	{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
 	{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
 	{ .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
 	{ .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
 	{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
 	{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
+	{ .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
 	{ /* sentinel */ }
 	{ /* sentinel */ }
 };
 };
 MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
 MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);

+ 6 - 6
drivers/mtd/spi-nor/hisi-sfc.c

@@ -112,7 +112,7 @@ struct hifmc_host {
 	u32 num_chip;
 	u32 num_chip;
 };
 };
 
 
-static inline int wait_op_finish(struct hifmc_host *host)
+static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host)
 {
 {
 	u32 reg;
 	u32 reg;
 
 
@@ -120,7 +120,7 @@ static inline int wait_op_finish(struct hifmc_host *host)
 		(reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
 		(reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
 }
 }
 
 
-static int get_if_type(enum spi_nor_protocol proto)
+static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto)
 {
 {
 	enum hifmc_iftype if_type;
 	enum hifmc_iftype if_type;
 
 
@@ -208,7 +208,7 @@ static int hisi_spi_nor_op_reg(struct spi_nor *nor,
 	reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
 	reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
 	writel(reg, host->regbase + FMC_OP);
 	writel(reg, host->regbase + FMC_OP);
 
 
-	return wait_op_finish(host);
+	return hisi_spi_nor_wait_op_finish(host);
 }
 
 static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
@@ -259,9 +259,9 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
 
 	reg = OP_CFG_FM_CS(priv->chipselect);
 	if (op_type == FMC_OP_READ)
-		if_type = get_if_type(nor->read_proto);
+		if_type = hisi_spi_nor_get_if_type(nor->read_proto);
 	else
-		if_type = get_if_type(nor->write_proto);
+		if_type = hisi_spi_nor_get_if_type(nor->write_proto);
 	reg |= OP_CFG_MEM_IF_TYPE(if_type);
 	if (op_type == FMC_OP_READ)
 		reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
@@ -274,7 +274,7 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
 		: OP_CTRL_WR_OPCODE(nor->program_opcode);
 	writel(reg, host->regbase + FMC_OP_DMA);
 
-	return wait_op_finish(host);
+	return hisi_spi_nor_wait_op_finish(host);
 }
 
 static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,

+ 69 - 11
drivers/mtd/spi-nor/intel-spi.c

@@ -136,6 +136,7 @@
  * @swseq_reg: Use SW sequencer in register reads/writes
  * @swseq_erase: Use SW sequencer in erase operation
  * @erase_64k: 64k erase supported
+ * @atomic_preopcode: Holds preopcode when atomic sequence is requested
  * @opcodes: Opcodes which are supported. This are programmed by BIOS
  *           before it locks down the controller.
  */
@@ -153,6 +154,7 @@ struct intel_spi {
 	bool swseq_reg;
 	bool swseq_erase;
 	bool erase_64k;
+	u8 atomic_preopcode;
 	u8 opcodes[8];
 };
 
@@ -285,7 +287,7 @@ static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
 	u32 val;
 
 	return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
-				  !(val & HSFSTS_CTL_SCIP), 0,
+				  !(val & HSFSTS_CTL_SCIP), 40,
 				  INTEL_SPI_TIMEOUT * 1000);
 }
 
@@ -294,7 +296,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
 	u32 val;
 
 	return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
-				  !(val & SSFSTS_CTL_SCIP), 0,
+				  !(val & SSFSTS_CTL_SCIP), 40,
 				  INTEL_SPI_TIMEOUT * 1000);
 }
 
@@ -474,7 +476,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
 			      int optype)
 {
 	u32 val = 0, status;
-	u16 preop;
+	u8 atomic_preopcode;
 	int ret;
 
 	ret = intel_spi_opcode_index(ispi, opcode, optype);
@@ -484,17 +486,42 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
 	if (len > INTEL_SPI_FIFO_SZ)
 		return -EINVAL;
 
+	/*
+	 * Always clear it after each SW sequencer operation regardless
+	 * of whether it is successful or not.
+	 */
+	atomic_preopcode = ispi->atomic_preopcode;
+	ispi->atomic_preopcode = 0;
+
 	/* Only mark 'Data Cycle' bit when there is data to be transferred */
 	if (len > 0)
 		val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
 	val |= ret << SSFSTS_CTL_COP_SHIFT;
 	val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
 	val |= SSFSTS_CTL_SCGO;
-	preop = readw(ispi->sregs + PREOP_OPTYPE);
-	if (preop) {
-		val |= SSFSTS_CTL_ACS;
-		if (preop >> 8)
-			val |= SSFSTS_CTL_SPOP;
+	if (atomic_preopcode) {
+		u16 preop;
+
+		switch (optype) {
+		case OPTYPE_WRITE_NO_ADDR:
+		case OPTYPE_WRITE_WITH_ADDR:
+			/* Pick matching preopcode for the atomic sequence */
+			preop = readw(ispi->sregs + PREOP_OPTYPE);
+			if ((preop & 0xff) == atomic_preopcode)
+				; /* Do nothing */
+			else if ((preop >> 8) == atomic_preopcode)
+				val |= SSFSTS_CTL_SPOP;
+			else
+				return -EINVAL;
+
+			/* Enable atomic sequence */
+			val |= SSFSTS_CTL_ACS;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
 	}
 	writel(val, ispi->sregs + SSFSTS_CTL);
 
@@ -538,13 +565,31 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
 
 	/*
 	 * This is handled with atomic operation and preop code in Intel
-	 * controller so skip it here now. If the controller is not locked,
-	 * program the opcode to the PREOP register for later use.
+	 * controller so we only verify that it is available. If the
+	 * controller is not locked, program the opcode to the PREOP
+	 * register for later use.
+	 *
+	 * When hardware sequencer is used there is no need to program
+	 * any opcodes (it handles them automatically as part of a command).
 	 */
 	 */
 	if (opcode == SPINOR_OP_WREN) {
+		u16 preop;
+
+		if (!ispi->swseq_reg)
+			return 0;
+
+		preop = readw(ispi->sregs + PREOP_OPTYPE);
+		if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
+			if (ispi->locked)
+				return -EINVAL;
 			writel(opcode, ispi->sregs + PREOP_OPTYPE);
+		}
 
+		/*
+		 * This enables atomic sequence on next SW sycle. Will
+		 * be cleared after next operation.
+		 */
+		ispi->atomic_preopcode = opcode;
 		return 0;
 	}
 
@@ -569,6 +614,13 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
 	u32 val, status;
 	ssize_t ret;
 
+	/*
+	 * Atomic sequence is not expected with HW sequencer reads. Make
+	 * sure it is cleared regardless.
+	 */
+	if (WARN_ON_ONCE(ispi->atomic_preopcode))
+		ispi->atomic_preopcode = 0;
+
 	switch (nor->read_opcode) {
 	case SPINOR_OP_READ:
 	case SPINOR_OP_READ_FAST:
@@ -627,6 +679,9 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
 	u32 val, status;
 	ssize_t ret;
 
+	/* Not needed with HW sequencer write, make sure it is cleared */
+	ispi->atomic_preopcode = 0;
+
 	while (len > 0) {
 		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
 
@@ -707,6 +762,9 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
 		return 0;
 	}
 
+	/* Not needed with HW sequencer erase, make sure it is cleared */
+	ispi->atomic_preopcode = 0;
+
 	while (len > 0) {
 		writel(offs, ispi->base + FADDR);
 

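The core of the atomic-sequence fix above is a latch-and-consume scheme: a WREN seen by intel_spi_write_reg() is remembered in ispi->atomic_preopcode, and the very next SW sequencer cycle either turns it into an atomic preopcode (setting ACS/SPOP) or fails, clearing the latch either way. The following is a small stand-alone model of just that control flow; it is purely illustrative and not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Models only the latch: 0 means "no atomic sequence pending". */
struct model {
	uint8_t atomic_preopcode;
};

static void model_write_enable(struct model *m)
{
	m->atomic_preopcode = 0x06;	/* SPINOR_OP_WREN */
}

static bool model_sw_cycle(struct model *m)
{
	uint8_t pre = m->atomic_preopcode;

	/* Always consumed, whether or not the cycle succeeds. */
	m->atomic_preopcode = 0;
	return pre != 0;		/* true: ACS would be set for this cycle */
}

int main(void)
{
	struct model m = { 0 };

	model_write_enable(&m);
	printf("cycle 1 atomic: %d\n", model_sw_cycle(&m));	/* prints 1 */
	printf("cycle 2 atomic: %d\n", model_sw_cycle(&m));	/* prints 0 */
	return 0;
}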
+ 33 - 0
drivers/mtd/spi-nor/spi-nor.c

@@ -284,6 +284,20 @@ static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
 		if (need_wren)
 			write_disable(nor);
 
+		if (!status && !enable &&
+		    JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+			/*
+			 * On Winbond W25Q256FV, leaving 4byte mode causes
+			 * the Extended Address Register to be set to 1, so all
+			 * 3-byte-address reads come from the second 16M.
+			 * We must clear the register to enable normal behavior.
+			 */
+			write_enable(nor);
+			nor->cmd_buf[0] = 0;
+			nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
+			write_disable(nor);
+		}
+
 		return status;
 	default:
 		/* Spansion style */
@@ -980,6 +994,7 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
 	{ "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
 	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
+	{ "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64, 0) },
 	{ "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
 	{ "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
 	{ "en25s64",	INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
@@ -1049,6 +1064,14 @@ static const struct flash_info spi_nor_ids[] = {
 			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
 			SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
+			SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 
 	/* Macronix */
 	{ "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
@@ -1087,6 +1110,7 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
 	{ "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
 	{ "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+	{ "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
 
 	/* PMC */
 	{ "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
@@ -1198,6 +1222,11 @@ static const struct flash_info spi_nor_ids[] = {
 			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
 			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
 	},
+	{
+		"w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
 	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
 	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
 	{
@@ -1230,6 +1259,10 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
 	{ "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
 	{ "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+
+	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ },
 };
 

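As a quick check on the Winbond workaround above: after set_4byte() leaves 4-byte mode it writes 0 through the new SPINOR_OP_WREAR opcode so that 3-byte reads address the first 16 MiB again. Below is a hedged sketch of how a driver could read the register back for verification; spi-nor itself does not do this, the helper name is made up, and it assumes the nor->read_reg() hook is populated.

#include <linux/errno.h>
#include <linux/mtd/spi-nor.h>

/* Illustrative only: confirm the Extended Address Register is 0 after
 * leaving 4-byte address mode on a Winbond part. */
static int example_check_ear_cleared(struct spi_nor *nor)
{
	u8 ear;
	int ret;

	ret = nor->read_reg(nor, SPINOR_OP_RDEAR, &ear, 1);
	if (ret)
		return ret;

	return ear == 0 ? 0 : -EIO;
}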
+ 1 - 1
drivers/mtd/spi-nor/stm32-quadspi.c

@@ -656,7 +656,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	rstc = devm_reset_control_get(dev, NULL);
+	rstc = devm_reset_control_get_exclusive(dev, NULL);
 	if (!IS_ERR(rstc)) {
 		reset_control_assert(rstc);
 		udelay(2);

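The stm32 change is part of the tree-wide move to spell out whether a reset line is shared or exclusive. The general pattern it follows is sketched below; this is a generic illustration, not the stm32 probe itself, which treats a missing reset line as non-fatal.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_reset_qspi(struct device *dev)
{
	struct reset_control *rstc;

	/* Exclusive: this driver is the only user allowed to toggle the line. */
	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	reset_control_assert(rstc);
	udelay(2);
	reset_control_deassert(rstc);
	return 0;
}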
+ 3 - 0
include/linux/mtd/nand.h

@@ -86,6 +86,7 @@ struct nand_pos {
  * @ooboffs: the OOB offset within the page
  * @ooblen: the number of OOB bytes to read from/write to this page
  * @oobbuf: buffer to store OOB data in or get OOB data from
+ * @mode: one of the %MTD_OPS_XXX mode
  *
  * This object is used to pass per-page I/O requests to NAND sub-layers. This
  * way all useful information are already formatted in a useful way and
@@ -106,6 +107,7 @@ struct nand_page_io_req {
 		const void *out;
 		void *in;
 	} oobbuf;
+	int mode;
 };
 
 /**
@@ -599,6 +601,7 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
 {
 	struct mtd_info *mtd = nanddev_to_mtd(nand);
 
+	iter->req.mode = req->mode;
 	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
 	iter->req.ooboffs = req->ooboffs;
 	iter->oobbytes_per_page = mtd_oobavail(mtd, req);

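The new nand_page_io_req->mode member mirrors the mode of the originating mtd_oob_ops request, so sub-layers can tell raw accesses from ECC-protected ones per page. Nothing changes on the MTD user side; a request like the sketch below (generic, not tied to any particular driver) simply has its MTD_OPS_RAW mode visible in every per-page request produced by nanddev_io_iter_init().

#include <linux/mtd/mtd.h>

/* Illustrative helper: issue a raw (no ECC) read through the mtd API. */
static int example_raw_read(struct mtd_info *mtd, loff_t from,
			    size_t len, u8 *buf)
{
	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_RAW,	/* ends up in nand_page_io_req.mode */
		.datbuf = buf,
		.len    = len,
	};

	return mtd_read_oob(mtd, from, &ops);
}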
+ 26 - 9
include/linux/mtd/rawnand.h

@@ -28,7 +28,14 @@ struct nand_flash_dev;
 struct device_node;
 
 /* Scan and identify a NAND device */
-int nand_scan(struct mtd_info *mtd, int max_chips);
+int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
+		       struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct mtd_info *mtd, int max_chips)
+{
+	return nand_scan_with_ids(mtd, max_chips, NULL);
+}
+
 /*
  * Separate phases of nand_scan(), allowing board driver to intervene
  * and override command or ECC setup according to flash type.
@@ -740,8 +747,9 @@ enum nand_data_interface_type {
 
 /**
  * struct nand_data_interface - NAND interface timing
- * @type:	type of the timing
- * @timings:	The timing, type according to @type
+ * @type:	 type of the timing
+ * @timings:	 The timing, type according to @type
+ * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
  */
 struct nand_data_interface {
 	enum nand_data_interface_type type;
@@ -798,8 +806,9 @@ struct nand_op_addr_instr {
 /**
  * struct nand_op_data_instr - Definition of a data instruction
  * @len: number of data bytes to move
- * @in: buffer to fill when reading from the NAND chip
- * @out: buffer to read from when writing to the NAND chip
+ * @buf: buffer to fill
+ * @buf.in: buffer to fill when reading from the NAND chip
+ * @buf.out: buffer to read from when writing to the NAND chip
  * @force_8bit: force 8-bit access
  *
  * Please note that "in" and "out" are inverted from the ONFI specification
@@ -842,9 +851,13 @@ enum nand_op_instr_type {
 /**
  * struct nand_op_instr - Instruction object
  * @type: the instruction type
- * @cmd/@addr/@data/@waitrdy: extra data associated to the instruction.
- *                            You'll have to use the appropriate element
- *                            depending on @type
+ * @ctx:  extra data associated to the instruction. You'll have to use the
+ *        appropriate element depending on @type
+ * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
+ * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
+ * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
+ *	      or %NAND_OP_DATA_OUT_INSTR
+ * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
  * @delay_ns: delay the controller should apply after the instruction has been
  *	      issued on the bus. Most modern controllers have internal timings
  *	      control logic, and in this case, the controller driver can ignore
@@ -1003,7 +1016,9 @@ struct nand_op_parser_data_constraints {
  * struct nand_op_parser_pattern_elem - One element of a pattern
  * @type: the instructuction type
  * @optional: whether this element of the pattern is optional or mandatory
- * @addr/@data: address or data constraint (number of cycles or data length)
+ * @ctx: address or data constraint
+ * @ctx.addr: address constraint (number of cycles)
+ * @ctx.data: data constraint (data length)
  */
 struct nand_op_parser_pattern_elem {
 	enum nand_op_instr_type type;
@@ -1230,6 +1245,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
  *			devices.
  * @priv:		[OPTIONAL] pointer to private chip data
  * @manufacturer:	[INTERN] Contains manufacturer information
+ * @manufacturer.desc:	[INTERN] Contains manufacturer's description
+ * @manufacturer.priv:	[INTERN] Contains manufacturer private information
  */
 
 struct nand_chip {

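With nand_scan() reduced to a static inline wrapper, existing controller drivers keep working unchanged, while a driver that needs full-ID entries for chips missing from the generic table can now pass its own list. A hedged sketch of the new entry point in use; the table contents and names are placeholders, not from any in-tree driver.

#include <linux/mtd/rawnand.h>

/* Placeholder table: real full-ID entries for the extra chips would be
 * listed here, terminated by an empty sentinel as in nand_flash_ids[]. */
static struct nand_flash_dev example_extra_ids[] = {
	{ /* sentinel */ },
};

static int example_scan(struct mtd_info *mtd)
{
	/* Passing NULL instead of a table is exactly what nand_scan() does. */
	return nand_scan_with_ids(mtd, 1, example_extra_ids);
}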
+ 2 - 0
include/linux/mtd/spi-nor.h

@@ -62,6 +62,8 @@
 #define SPINOR_OP_RDCR		0x35	/* Read configuration register */
 #define SPINOR_OP_RDFSR		0x70	/* Read flag status register */
 #define SPINOR_OP_CLFSR		0x50	/* Clear flag status register */
+#define SPINOR_OP_RDEAR		0xc8	/* Read Extended Address Register */
+#define SPINOR_OP_WREAR		0xc5	/* Write Extended Address Register */
 
 /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
 #define SPINOR_OP_READ_4B	0x13	/* Read data bytes (low frequency) */