Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (75 commits)
  mmc: core: eMMC bus width may not work on all platforms
  mmc: sdhci: Auto-CMD23 fixes.
  mmc: sdhci: Auto-CMD23 support.
  mmc: core: Block CMD23 support for UHS104/SDXC cards.
  mmc: sdhci: Implement MMC_CAP_CMD23 for SDHCI.
  mmc: core: Use CMD23 for multiblock transfers when we can.
  mmc: quirks: Add/remove quirks conditional support.
  mmc: Add new VUB300 USB-to-SD/SDIO/MMC driver
  mmc: sdhci-pxa: Add quirks for DMA/ADMA to match h/w
  mmc: core: duplicated trial with same freq in mmc_rescan_try_freq()
  mmc: core: add support for eMMC Dual Data Rate
  mmc: core: eMMC signal voltage does not use CMD11
  mmc: sdhci-pxa: add platform code for UHS signaling
  mmc: sdhci: add hooks for setting UHS in platform specific code
  mmc: core: clear MMC_PM_KEEP_POWER flag on resume
  mmc: dw_mmc: fixed wrong regulator_enable in suspend/resume
  mmc: sdhi: allow powering down controller with no card inserted
  mmc: tmio: runtime suspend the controller, where possible
  mmc: sdhi: support up to 3 interrupt sources
  mmc: sdhi: print physical base address and clock rate
  ...
Linus Torvalds, 14 years ago
commit 8c1c77ff9b
49 files changed, 5699 insertions(+), 556 deletions(-)
  1. Documentation/ioctl/ioctl-number.txt (+1, -0)
  2. Documentation/mmc/00-INDEX (+2, -0)
  3. Documentation/mmc/mmc-dev-attrs.txt (+10, -0)
  4. Documentation/mmc/mmc-dev-parts.txt (+27, -0)
  5. MAINTAINERS (+7, -0)
  6. arch/arm/mach-tegra/include/mach/sdhci.h (+1, -0)
  7. drivers/mmc/card/block.c (+646, -66)
  8. drivers/mmc/card/mmc_test.c (+57, -59)
  9. drivers/mmc/card/queue.c (+0, -8)
  10. drivers/mmc/core/bus.c (+8, -3)
  11. drivers/mmc/core/core.c (+71, -40)
  12. drivers/mmc/core/core.h (+3, -4)
  13. drivers/mmc/core/host.c (+2, -2)
  14. drivers/mmc/core/mmc.c (+166, -20)
  15. drivers/mmc/core/mmc_ops.c (+32, -48)
  16. drivers/mmc/core/mmc_ops.h (+0, -1)
  17. drivers/mmc/core/quirks.c (+42, -47)
  18. drivers/mmc/core/sd.c (+370, -35)
  19. drivers/mmc/core/sd.h (+1, -1)
  20. drivers/mmc/core/sd_ops.c (+17, -34)
  21. drivers/mmc/core/sdio.c (+15, -9)
  22. drivers/mmc/core/sdio_irq.c (+32, -1)
  23. drivers/mmc/core/sdio_ops.c (+5, -13)
  24. drivers/mmc/host/Kconfig (+32, -1)
  25. drivers/mmc/host/Makefile (+1, -0)
  26. drivers/mmc/host/dw_mmc.c (+3, -3)
  27. drivers/mmc/host/sdhci-pci.c (+26, -23)
  28. drivers/mmc/host/sdhci-pxa.c (+47, -1)
  29. drivers/mmc/host/sdhci-tegra.c (+2, -0)
  30. drivers/mmc/host/sdhci.c (+786, -68)
  31. drivers/mmc/host/sdhci.h (+55, -4)
  32. drivers/mmc/host/sh_mmcif.c (+110, -16)
  33. drivers/mmc/host/sh_mobile_sdhi.c (+47, -3)
  34. drivers/mmc/host/tmio_mmc.c (+22, -10)
  35. drivers/mmc/host/tmio_mmc.h (+15, -1)
  36. drivers/mmc/host/tmio_mmc_dma.c (+12, -9)
  37. drivers/mmc/host/tmio_mmc_pio.c (+162, -22)
  38. drivers/mmc/host/vub300.c (+2506, -0)
  39. include/linux/Kbuild (+1, -0)
  40. include/linux/mfd/tmio.h (+17, -0)
  41. include/linux/mmc/Kbuild (+1, -0)
  42. include/linux/mmc/card.h (+188, -1)
  43. include/linux/mmc/core.h (+4, -1)
  44. include/linux/mmc/host.h (+48, -1)
  45. include/linux/mmc/ioctl.h (+54, -0)
  46. include/linux/mmc/mmc.h (+18, -0)
  47. include/linux/mmc/sd.h (+8, -1)
  48. include/linux/mmc/sdhci.h (+15, -0)
  49. include/linux/mmc/sh_mobile_sdhi.h (+4, -0)

Documentation/ioctl/ioctl-number.txt (+1, -0)

@@ -304,6 +304,7 @@ Code  Seq#(hex)	Include File		Comments
 0xB0	all	RATIO devices		in development:
 					<mailto:vgo@ratio.de>
 0xB1	00-1F	PPPoX			<mailto:mostrows@styx.uwaterloo.ca>
+0xB3	00	linux/mmc/ioctl.h
 0xC0	00-0F	linux/usb/iowarrior.h
 0xCB	00-1F	CBM serial IEC bus	in development:
 					<mailto:michael.klein@puffin.lb.shuttle.de>

Documentation/mmc/00-INDEX (+2, -0)

@@ -2,3 +2,5 @@
         - this file
 mmc-dev-attrs.txt
         - info on SD and MMC device attributes
+mmc-dev-parts.txt
+        - info on SD and MMC device partitions

Documentation/mmc/mmc-dev-attrs.txt (+10, -0)

@@ -1,3 +1,13 @@
+SD and MMC Block Device Attributes
+==================================
+
+These attributes are defined for the block devices associated with the
+SD or MMC device.
+
+The following attributes are read/write.
+
+	force_ro		Enforce read-only access even if write protect switch is off.
+
 SD and MMC Device Attributes
 ============================
 

Documentation/mmc/mmc-dev-parts.txt (+27, -0)

@@ -0,0 +1,27 @@
+SD and MMC Device Partitions
+============================
+
+Device partitions are additional logical block devices present on the
+SD/MMC device.
+
+As of this writing, MMC boot partitions are supported and exposed as
+/dev/mmcblkXboot0 and /dev/mmcblkXboot1, where X is the index of the
+parent /dev/mmcblkX.
+
+MMC Boot Partitions
+===================
+
+Read and write access is provided to the two MMC boot partitions. Due to
+the sensitive nature of the boot partition contents, which often store
+a bootloader or bootloader configuration tables crucial to booting the
+platform, write access is disabled by default to reduce the chance of
+accidental bricking.
+
+To enable write access to /dev/mmcblkXbootY, disable the forced read-only
+access with:
+
+echo 0 > /sys/block/mmcblkXbootY/force_ro
+
+To re-enable read-only access:
+
+echo 1 > /sys/block/mmcblkXbootY/force_ro
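
Putting the two steps together with the force_ro attribute documented in
mmc-dev-attrs.txt above, here is a minimal user-space sketch (not part of
this series; the device name mmcblk0boot0 and the image file name are
illustrative assumptions) that lifts the forced read-only protection,
writes an image to the boot partition, and restores the protection:

/*
 * Hedged example: write bootloader.img to /dev/mmcblk0boot0.
 * Requires root; adjust the names for the actual card index.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static int set_force_ro(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *ro = "/sys/block/mmcblk0boot0/force_ro";
	char buf[4096];
	ssize_t n;
	int in, out, err = 0;

	if (set_force_ro(ro, "0"))		/* allow writes */
		return 1;

	in = open("bootloader.img", O_RDONLY);
	out = open("/dev/mmcblk0boot0", O_WRONLY);
	if (in < 0 || out < 0) {
		perror("open");
		err = 1;
	} else {
		while ((n = read(in, buf, sizeof(buf))) > 0)
			if (write(out, buf, n) != n) {
				perror("write");
				err = 1;
				break;
			}
		fsync(out);
	}

	if (in >= 0)
		close(in);
	if (out >= 0)
		close(out);

	/* always restore the read-only protection */
	if (set_force_ro(ro, "1"))
		err = 1;
	return err;
}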

MAINTAINERS (+7, -0)

@@ -6800,6 +6800,13 @@ L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	drivers/hwmon/vt8231.c
 
+VUB300 USB to SDIO/SD/MMC bridge chip
+M:	Tony Olech <tony.olech@elandigitalsystems.com>
+L:	linux-mmc@vger.kernel.org
+L:	linux-usb@vger.kernel.org
+S:	Supported
+F:	drivers/mmc/host/vub300.c
+
 W1 DALLAS'S 1-WIRE BUS
 M:	Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 S:	Maintained

arch/arm/mach-tegra/include/mach/sdhci.h (+1, -0)

@@ -24,6 +24,7 @@ struct tegra_sdhci_platform_data {
 	int wp_gpio;
 	int power_gpio;
 	int is_8bit;
+	int pm_flags;
 };
 
 #endif

drivers/mmc/card/block.c (+646, -66)

@@ -31,7 +31,11 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
 
+#include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
@@ -48,6 +52,13 @@ MODULE_ALIAS("mmc:block");
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
 
+#define INAND_CMD38_ARG_EXT_CSD  113
+#define INAND_CMD38_ARG_ERASE    0x00
+#define INAND_CMD38_ARG_TRIM     0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -64,6 +75,7 @@ static int max_devices;
 
 /* 256 minors, so at most 256 separate devices */
 static DECLARE_BITMAP(dev_use, 256);
+static DECLARE_BITMAP(name_use, 256);
 
 /*
  * There is one mmc_blk_data per slot.
@@ -72,9 +84,24 @@ struct mmc_blk_data {
 	spinlock_t	lock;
 	struct gendisk	*disk;
 	struct mmc_queue queue;
+	struct list_head part;
+
+	unsigned int	flags;
+#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
+	unsigned int	part_type;
+	unsigned int	name_idx;
+
+	/*
+	 * Only set in main mmc_blk_data associated
+	 * with mmc_card with mmc_set_drvdata, and keeps
+	 * track of the current selected device partition.
+	 */
+	unsigned int	part_curr;
+	struct device_attribute force_ro;
 };
 
 static DEFINE_MUTEX(open_lock);
@@ -97,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 	return md;
 }
 
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+	int devmaj = MAJOR(disk_devt(disk));
+	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
+
+	if (!devmaj)
+		devidx = disk->first_minor / perdev_minors;
+	return devidx;
+}
+
 static void mmc_blk_put(struct mmc_blk_data *md)
 {
 	mutex_lock(&open_lock);
 	md->usage--;
 	if (md->usage == 0) {
-		int devmaj = MAJOR(disk_devt(md->disk));
-		int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
-
-		if (!devmaj)
-			devidx = md->disk->first_minor / perdev_minors;
-
+		int devidx = mmc_get_devidx(md->disk);
 		blk_cleanup_queue(md->queue.queue);
 
 		__clear_bit(devidx, dev_use);
@@ -118,6 +150,38 @@ static void mmc_blk_put(struct mmc_blk_data *md)
 	mutex_unlock(&open_lock);
 }
 
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+	ret = snprintf(buf, PAGE_SIZE, "%d",
+		       get_disk_ro(dev_to_disk(dev)) ^
+		       md->read_only);
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int ret;
+	char *end;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	unsigned long set = simple_strtoul(buf, &end, 0);
+	if (end == buf) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	set_disk_ro(dev_to_disk(dev), set || md->read_only);
+	ret = count;
+out:
+	mmc_blk_put(md);
+	return ret;
+}
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -158,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
+struct mmc_blk_ioc_data {
+	struct mmc_ioc_cmd ic;
+	unsigned char *buf;
+	u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+	struct mmc_ioc_cmd __user *user)
+{
+	struct mmc_blk_ioc_data *idata;
+	int err;
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+		err = -EFAULT;
+		goto idata_err;
+	}
+
+	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+		err = -EOVERFLOW;
+		goto idata_err;
+	}
+
+	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+	if (!idata->buf) {
+		err = -ENOMEM;
+		goto idata_err;
+	}
+
+	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+					idata->ic.data_ptr, idata->buf_bytes)) {
+		err = -EFAULT;
+		goto copy_err;
+	}
+
+	return idata;
+
+copy_err:
+	kfree(idata->buf);
+idata_err:
+	kfree(idata);
+out:
+	return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+	struct mmc_ioc_cmd __user *ic_ptr)
+{
+	struct mmc_blk_ioc_data *idata;
+	struct mmc_blk_data *md;
+	struct mmc_card *card;
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct mmc_request mrq = {0};
+	struct scatterlist sg;
+	int err;
+
+	/*
+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+	 * whole block device, not on a partition.  This prevents overspray
+	 * between sibling partitions.
+	 */
+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+		return -EPERM;
+
+	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+	if (IS_ERR(idata))
+		return PTR_ERR(idata);
+
+	cmd.opcode = idata->ic.opcode;
+	cmd.arg = idata->ic.arg;
+	cmd.flags = idata->ic.flags;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	data.blksz = idata->ic.blksz;
+	data.blocks = idata->ic.blocks;
+
+	sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+	if (idata->ic.write_flag)
+		data.flags = MMC_DATA_WRITE;
+	else
+		data.flags = MMC_DATA_READ;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		err = -EINVAL;
+		goto cmd_done;
+	}
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto cmd_done;
+	}
+
+	mmc_claim_host(card->host);
+
+	if (idata->ic.is_acmd) {
+		err = mmc_app_cmd(card->host, card);
+		if (err)
+			goto cmd_rel_host;
+	}
+
+	/* data.flags must already be set before doing this. */
+	mmc_set_data_timeout(&data, card);
+	/* Allow overriding the timeout_ns for empirical tuning. */
+	if (idata->ic.data_timeout_ns)
+		data.timeout_ns = idata->ic.data_timeout_ns;
+
+	if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+		/*
+		 * Pretend this is a data transfer and rely on the host driver
+		 * to compute timeout.  When all host drivers support
+		 * cmd.cmd_timeout for R1B, this can be changed to:
+		 *
+		 *     mrq.data = NULL;
+		 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+		 */
+		data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+	}
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (cmd.error) {
+		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+						__func__, cmd.error);
+		err = cmd.error;
+		goto cmd_rel_host;
+	}
+	if (data.error) {
+		dev_err(mmc_dev(card->host), "%s: data error %d\n",
+						__func__, data.error);
+		err = data.error;
+		goto cmd_rel_host;
+	}
+
+	/*
+	 * According to the SD specs, some commands require a delay after
+	 * issuing the command.
+	 */
+	if (idata->ic.postsleep_min_us)
+		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+		err = -EFAULT;
+		goto cmd_rel_host;
+	}
+
+	if (!idata->ic.write_flag) {
+		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+						idata->buf, idata->buf_bytes)) {
+			err = -EFAULT;
+			goto cmd_rel_host;
+		}
+	}
+
+cmd_rel_host:
+	mmc_release_host(card->host);
+
+cmd_done:
+	mmc_blk_put(md);
+	kfree(idata->buf);
+	kfree(idata);
+	return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	int ret = -EINVAL;
+	if (cmd == MMC_IOC_CMD)
+		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
 static const struct block_device_operations mmc_bdops = {
 	.open			= mmc_blk_open,
 	.release		= mmc_blk_release,
 	.getgeo			= mmc_blk_getgeo,
 	.owner			= THIS_MODULE,
+	.ioctl			= mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= mmc_blk_compat_ioctl,
+#endif
 };
 
 struct mmc_blk_request {
 	struct mmc_request	mrq;
+	struct mmc_command	sbc;
 	struct mmc_command	cmd;
 	struct mmc_command	stop;
 	struct mmc_data		data;
 };
 
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+				      struct mmc_blk_data *md)
+{
+	int ret;
+	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+	if (main_md->part_curr == md->part_type)
+		return 0;
+
+	if (mmc_card_mmc(card)) {
+		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		card->ext_csd.part_config |= md->part_type;
+
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+				 card->ext_csd.part_time);
+		if (ret)
+			return ret;
+	}
+
+	main_md->part_curr = md->part_type;
+	return 0;
+}
+
 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 {
 	int err;
 	u32 result;
 	__be32 *blocks;
 
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	unsigned int timeout_us;
 
 	struct scatterlist sg;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_APP_CMD;
 	cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -203,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	data.timeout_ns = card->csd.tacc_ns * 100;
 	data.timeout_clks = card->csd.tacc_clks * 100;
 
@@ -223,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	data.sg = &sg;
 	data.sg_len = 1;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 
@@ -247,10 +527,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 
 static u32 get_card_status(struct mmc_card *card, struct request *req)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int err;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
 	cmd.opcode = MMC_SEND_STATUS;
 	if (!mmc_host_is_spi(card->host))
 		cmd.arg = card->rca << 16;
@@ -269,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	unsigned int from, nr, arg;
 	int err = 0;
 
-	mmc_claim_host(card->host);
-
 	if (!mmc_can_erase(card)) {
 		err = -EOPNOTSUPP;
 		goto out;
@@ -284,14 +561,22 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	else
 		arg = MMC_ERASE_ARG;
 
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_TRIM_ARG ?
+				 INAND_CMD38_ARG_TRIM :
+				 INAND_CMD38_ARG_ERASE,
+				 0);
+		if (err)
+			goto out;
+	}
 	err = mmc_erase(card, from, nr, arg);
 out:
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
 
-	mmc_release_host(card->host);
-
 	return err ? 0 : 1;
 }
 
@@ -303,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	unsigned int from, nr, arg;
 	int err = 0;
 
-	mmc_claim_host(card->host);
-
 	if (!mmc_can_secure_erase_trim(card)) {
 		err = -EOPNOTSUPP;
 		goto out;
@@ -318,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	else
 		arg = MMC_SECURE_ERASE_ARG;
 
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_SECURE_TRIM1_ARG ?
+				 INAND_CMD38_ARG_SECTRIM1 :
+				 INAND_CMD38_ARG_SECERASE,
+				 0);
+		if (err)
+			goto out;
+	}
 	err = mmc_erase(card, from, nr, arg);
-	if (!err && arg == MMC_SECURE_TRIM1_ARG)
+	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
+		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 INAND_CMD38_ARG_EXT_CSD,
+					 INAND_CMD38_ARG_SECTRIM2,
+					 0);
+			if (err)
+				goto out;
+		}
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+	}
 out:
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
 
-	mmc_release_host(card->host);
-
 	return err ? 0 : 1;
 }
 
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+
+	/*
+	 * No-op, only service this because we need REQ_FUA for reliable
+	 * writes.
+	 */
+	spin_lock_irq(&md->lock);
+	__blk_end_request_all(req, 0);
+	spin_unlock_irq(&md->lock);
+
+	return 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finish the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				    struct mmc_card *card,
+				    struct request *req)
+{
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -338,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_request brq;
 	int ret = 1, disable_multi = 0;
 
-	mmc_claim_host(card->host);
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+		(rq_data_dir(req) == WRITE) &&
+		(md->flags & MMC_BLK_REL_WR);
 
 	do {
-		struct mmc_command cmd;
+		struct mmc_command cmd = {0};
 		u32 readcmd, writecmd, status = 0;
 
 		memset(&brq, 0, sizeof(struct mmc_blk_request));
@@ -374,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		if (disable_multi && brq.data.blocks > 1)
 			brq.data.blocks = 1;
 
-		if (brq.data.blocks > 1) {
+		if (brq.data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
 			 */
-			if (!mmc_host_is_spi(card->host)
-					|| rq_data_dir(req) == READ)
+			if (!mmc_host_is_spi(card->host) ||
+			    rq_data_dir(req) == READ)
 				brq.mrq.stop = &brq.stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			brq.data.flags |= MMC_DATA_WRITE;
 		}
 
+		if (do_rel_wr)
+			mmc_apply_rel_rw(&brq, card, req);
+
+		/*
+		 * Pre-defined multi-block transfers are preferable to
+		 * open ended-ones (and necessary for reliable writes).
+		 * However, it is not sufficient to just send CMD23,
+		 * and avoid the final CMD12, as on an error condition
+		 * CMD12 (stop) needs to be sent anyway. This, coupled
+		 * with Auto-CMD23 enhancements provided by some
+		 * hosts, means that the complexity of dealing
+		 * with this is best left to the host. If CMD23 is
+		 * supported by card and host, we'll fill sbc in and let
+		 * the host deal with handling it correctly. This means
+		 * that for hosts that don't expose MMC_CAP_CMD23, no
+		 * change of behavior will be observed.
+		 *
+		 * N.B: Some MMC cards experience perf degradation.
+		 * We'll avoid using CMD23-bounded multiblock writes for
+		 * these, while retaining features like reliable writes.
+		 */
+
+		if ((md->flags & MMC_BLK_CMD23) &&
+		    mmc_op_multi(brq.cmd.opcode) &&
+		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
+			brq.sbc.arg = brq.data.blocks |
+				(do_rel_wr ? (1 << 31) : 0);
+			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			brq.mrq.sbc = &brq.sbc;
+		}
+
 		mmc_set_data_timeout(&brq.data, card);
 
 		brq.data.sg = mq->sg;
@@ -431,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * until later as we need to wait for the card to leave
 		 * programming mode even when things go wrong.
 		 */
-		if (brq.cmd.error || brq.data.error || brq.stop.error) {
+		if (brq.sbc.error || brq.cmd.error ||
+		    brq.data.error || brq.stop.error) {
 			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
 				/* Redo read one sector at a time */
 				printk(KERN_WARNING "%s: retrying using single "
@@ -442,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			status = get_card_status(card, req);
 		}
 
+		if (brq.sbc.error) {
+			printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
+			       "command, response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, brq.sbc.error,
+			       brq.sbc.resp[0], status);
+		}
+
 		if (brq.cmd.error) {
 			printk(KERN_ERR "%s: error %d sending read/write "
 			       "command, response %#x, card status %#x\n",
@@ -520,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		spin_unlock_irq(&md->lock);
 	} while (ret);
 
-	mmc_release_host(card->host);
-
 	return 1;
 
  cmd_err:
@@ -548,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		spin_unlock_irq(&md->lock);
 	}
 
-	mmc_release_host(card->host);
-
 	spin_lock_irq(&md->lock);
 	while (ret)
 		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
@@ -560,14 +941,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
+	int ret;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+
+	mmc_claim_host(card->host);
+	ret = mmc_blk_part_switch(card, md);
+	if (ret) {
+		ret = 0;
+		goto out;
+	}
+
 	if (req->cmd_flags & REQ_DISCARD) {
 		if (req->cmd_flags & REQ_SECURE)
-			return mmc_blk_issue_secdiscard_rq(mq, req);
+			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
-			return mmc_blk_issue_discard_rq(mq, req);
+			ret = mmc_blk_issue_discard_rq(mq, req);
+	} else if (req->cmd_flags & REQ_FLUSH) {
+		ret = mmc_blk_issue_flush(mq, req);
 	} else {
-		return mmc_blk_issue_rw_rq(mq, req);
+		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
+
+out:
+	mmc_release_host(card->host);
+	return ret;
 }
 
 static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -576,7 +974,11 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
 	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
 }
 
-static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+					      struct device *parent,
+					      sector_t size,
+					      bool default_ro,
+					      const char *subname)
 {
 	struct mmc_blk_data *md;
 	int devidx, ret;
@@ -592,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 		goto out;
 	}
 
+	/*
+	 * !subname implies we are creating main mmc_blk_data that will be
+	 * associated with mmc_card with mmc_set_drvdata. Due to device
+	 * partitions, devidx will not coincide with a per-physical card
+	 * index anymore so we keep track of a name index.
+	 */
+	if (!subname) {
+		md->name_idx = find_first_zero_bit(name_use, max_devices);
+		__set_bit(md->name_idx, name_use);
+	}
+	else
+		md->name_idx = ((struct mmc_blk_data *)
+				dev_to_disk(parent)->private_data)->name_idx;
 
 	/*
 	 * Set the read-only status based on the supported commands
@@ -606,6 +1021,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	}
 
 	spin_lock_init(&md->lock);
+	INIT_LIST_HEAD(&md->part);
 	md->usage = 1;
 
 	ret = mmc_init_queue(&md->queue, card, &md->lock);
@@ -620,8 +1036,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	md->disk->fops = &mmc_bdops;
 	md->disk->private_data = md;
 	md->disk->queue = md->queue.queue;
-	md->disk->driverfs_dev = &card->dev;
-	set_disk_ro(md->disk, md->read_only);
+	md->disk->driverfs_dev = parent;
+	set_disk_ro(md->disk, md->read_only || default_ro);
 
 	/*
 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -636,32 +1052,107 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	 */
 
 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
-		"mmcblk%d", devidx);
+		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
 
 	blk_queue_logical_block_size(md->queue.queue, 512);
+	set_capacity(md->disk, size);
+
+	if (mmc_host_cmd23(card->host)) {
+		if (mmc_card_mmc(card) ||
+		    (mmc_card_sd(card) &&
+		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+			md->flags |= MMC_BLK_CMD23;
+	}
+
+	if (mmc_card_mmc(card) &&
+	    md->flags & MMC_BLK_CMD23 &&
+	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+	     card->ext_csd.rel_sectors)) {
+		md->flags |= MMC_BLK_REL_WR;
+		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+	}
+
+	return md;
+
+ err_putdisk:
+	put_disk(md->disk);
+ err_kfree:
+	kfree(md);
+ out:
+	return ERR_PTR(ret);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+	sector_t size;
+	struct mmc_blk_data *md;
 
 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
 		/*
 		 * The EXT_CSD sector count is in number of 512 byte
 		 * sectors.
 		 */
-		set_capacity(md->disk, card->ext_csd.sectors);
+		size = card->ext_csd.sectors;
 	} else {
 		/*
 		 * The CSD capacity field is in units of read_blkbits.
 		 * set_capacity takes units of 512 bytes.
 		 */
-		set_capacity(md->disk,
-			card->csd.capacity << (card->csd.read_blkbits - 9));
+		size = card->csd.capacity << (card->csd.read_blkbits - 9);
 	}
+
+	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
 	return md;
+}
 
- err_putdisk:
-	put_disk(md->disk);
- err_kfree:
-	kfree(md);
- out:
-	return ERR_PTR(ret);
+static int mmc_blk_alloc_part(struct mmc_card *card,
+			      struct mmc_blk_data *md,
+			      unsigned int part_type,
+			      sector_t size,
+			      bool default_ro,
+			      const char *subname)
+{
+	char cap_str[10];
+	struct mmc_blk_data *part_md;
+
+	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+				    subname);
+	if (IS_ERR(part_md))
+		return PTR_ERR(part_md);
+	part_md->part_type = part_type;
+	list_add(&part_md->part, &md->part);
+
+	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+			cap_str, sizeof(cap_str));
+	printk(KERN_INFO "%s: %s %s partition %u %s\n",
+	       part_md->disk->disk_name, mmc_card_id(card),
+	       mmc_card_name(card), part_md->part_type, cap_str);
+	return 0;
+}
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+	int ret = 0;
+
+	if (!mmc_card_mmc(card))
+		return 0;
+
+	if (card->ext_csd.boot_size) {
+		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
+					 card->ext_csd.boot_size >> 9,
+					 true,
+					 "boot0");
+		if (ret)
+			return ret;
+		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
+					 card->ext_csd.boot_size >> 9,
+					 true,
+					 "boot1");
+		if (ret)
+			return ret;
+	}
+
+	return ret;
 }
 
 static int
@@ -682,9 +1173,81 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
 	return 0;
 }
 
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+	if (md) {
+		if (md->disk->flags & GENHD_FL_UP) {
+			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+
+			/* Stop new requests from getting into the queue */
+			del_gendisk(md->disk);
+		}
+
+		/* Then flush out any already in there */
+		mmc_cleanup_queue(&md->queue);
+		mmc_blk_put(md);
+	}
+}
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+				 struct mmc_blk_data *md)
+{
+	struct list_head *pos, *q;
+	struct mmc_blk_data *part_md;
+
+	__clear_bit(md->name_idx, name_use);
+	list_for_each_safe(pos, q, &md->part) {
+		part_md = list_entry(pos, struct mmc_blk_data, part);
+		list_del(pos);
+		mmc_blk_remove_req(part_md);
+	}
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+	int ret;
+
+	add_disk(md->disk);
+	md->force_ro.show = force_ro_show;
+	md->force_ro.store = force_ro_store;
+	sysfs_attr_init(&md->force_ro.attr);
+	md->force_ro.attr.name = "force_ro";
+	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+	if (ret)
+		del_gendisk(md->disk);
+
+	return ret;
+}
+
+static const struct mmc_fixup blk_fixups[] =
+{
+	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+
+	/*
+	 * Some MMC cards experience performance degradation with CMD23
+	 * instead of CMD12-bounded multiblock transfers. For now we'll
+	 * black list what's bad...
+	 * - Certain Toshiba cards.
+	 *
+	 * N.B. This doesn't affect SD cards.
+	 */
+	MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	END_FIXUP
+};
+
 static int mmc_blk_probe(struct mmc_card *card)
 {
-	struct mmc_blk_data *md;
+	struct mmc_blk_data *md, *part_md;
 	int err;
 	char cap_str[10];
 
@@ -708,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card)
 		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
 		cap_str, md->read_only ? "(ro)" : "");
 
+	if (mmc_blk_alloc_parts(card, md))
+		goto out;
+
 	mmc_set_drvdata(card, md);
-	add_disk(md->disk);
+	mmc_fixup_device(card, blk_fixups);
+
+	if (mmc_add_disk(md))
+		goto out;
+
+	list_for_each_entry(part_md, &md->part, part) {
+		if (mmc_add_disk(part_md))
+			goto out;
+	}
 	return 0;
 
  out:
-	mmc_cleanup_queue(&md->queue);
-	mmc_blk_put(md);
-
+	mmc_blk_remove_parts(card, md);
+	mmc_blk_remove_req(md);
 	return err;
 }
 
@@ -723,36 +1296,43 @@ static void mmc_blk_remove(struct mmc_card *card)
 {
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
-	if (md) {
-		/* Stop new requests from getting into the queue */
-		del_gendisk(md->disk);
-
-		/* Then flush out any already in there */
-		mmc_cleanup_queue(&md->queue);
-
-		mmc_blk_put(md);
-	}
+	mmc_blk_remove_parts(card, md);
+	mmc_blk_remove_req(md);
 	mmc_set_drvdata(card, NULL);
 }
 
 #ifdef CONFIG_PM
 static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
 {
+	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
 		mmc_queue_suspend(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_suspend(&part_md->queue);
+		}
 	}
 	return 0;
 }
 
 static int mmc_blk_resume(struct mmc_card *card)
 {
+	struct mmc_blk_data *part_md;
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 
 	if (md) {
 		mmc_blk_set_blksize(md, card);
+
+		/*
+		 * Resume involves the card going into idle state,
+		 * so current partition is always the main one.
+		 */
+		md->part_curr = md->part_type;
 		mmc_queue_resume(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_resume(&part_md->queue);
+		}
 	}
 	return 0;
 }
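
The MMC_IOC_CMD ioctl wired up in block.c above can be exercised from user
space. The following is a hedged sketch, not part of the series: it assumes
the struct mmc_ioc_cmd layout from the new include/linux/mmc/ioctl.h (not
shown in this excerpt), and the opcode and response-flag constants are
copied from the kernel's internal linux/mmc/mmc.h and linux/mmc/core.h as
assumptions rather than exported ABI. It reads the 512-byte EXT_CSD (CMD8,
eMMC only) through the whole-disk node, which mmc_blk_ioctl_cmd() requires
along with CAP_SYS_RAWIO.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/mmc/ioctl.h>	/* exported via the include/linux/mmc/Kbuild change */

/* Assumed mirrors of kernel-internal constants. */
#define MMC_SEND_EXT_CSD	8
#define MMC_RSP_PRESENT		(1 << 0)
#define MMC_RSP_CRC		(1 << 2)
#define MMC_RSP_OPCODE		(1 << 4)
#define MMC_RSP_R1		(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_ADTC		(1 << 5)

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/mmcblk0";
	__u8 ext_csd[512];
	struct mmc_ioc_cmd ic;
	int fd = open(dev, O_RDWR);

	if (fd < 0) {
		perror(dev);
		return 1;
	}

	memset(&ic, 0, sizeof(ic));
	ic.opcode = MMC_SEND_EXT_CSD;	/* CMD8 on eMMC, do not use on SD */
	ic.arg = 0;
	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	ic.blksz = 512;
	ic.blocks = 1;
	ic.write_flag = 0;		/* read from the card */
	ic.data_ptr = (__u64)(unsigned long)ext_csd;

	/* Needs CAP_SYS_RAWIO and the whole block device, per mmc_blk_ioctl_cmd(). */
	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0) {
		perror("MMC_IOC_CMD");
		close(fd);
		return 1;
	}

	/* Byte offsets assumed: 192 = EXT_CSD_REV, 226 = EXT_CSD_BOOT_MULT. */
	printf("EXT_CSD rev %u, boot partition size %u KiB, R1 status %#x\n",
	       (unsigned)ext_csd[192], (unsigned)ext_csd[226] * 128,
	       ic.response[0]);
	close(fd);
	return 0;
}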

drivers/mmc/card/mmc_test.c (+57, -59)

@@ -212,7 +212,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
 static int mmc_test_wait_busy(struct mmc_test_card *test)
 {
 	int ret, busy;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	busy = 0;
 	do {
@@ -246,18 +246,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
 {
 	int ret;
 
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_command stop;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
 
 	struct scatterlist sg;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-	memset(&stop, 0, sizeof(struct mmc_command));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 	mrq.stop = &stop;
@@ -731,15 +726,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
 	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
 	unsigned blocks, unsigned blksz, int write)
 {
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_command stop;
-	struct mmc_data data;
-
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-	memset(&stop, 0, sizeof(struct mmc_command));
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
 
 	mrq.cmd = &cmd;
 	mrq.data = &data;
@@ -761,18 +751,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
 static int mmc_test_broken_transfer(struct mmc_test_card *test,
 	unsigned blocks, unsigned blksz, int write)
 {
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_command stop;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
 
 	struct scatterlist sg;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-	memset(&stop, 0, sizeof(struct mmc_command));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 	mrq.stop = &stop;
@@ -1401,8 +1386,9 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
  */
 static int mmc_test_area_fill(struct mmc_test_card *test)
 {
-	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
-				1, 0, 0);
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
 }
 
 /*
@@ -1415,7 +1401,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
 	if (!mmc_can_erase(test->card))
 		return 0;
 
-	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
+	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
 			 MMC_ERASE_ARG);
 }
 
@@ -1542,8 +1528,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
 				     int max_scatter)
 {
-	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
-				write, max_scatter, 1);
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
+				max_scatter, 1);
 }
 
 /*
@@ -1583,18 +1571,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
  */
 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned long sz;
 	unsigned int dev_addr;
 	int ret;
 
-	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
-		dev_addr = test->area.dev_addr + (sz >> 9);
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
 		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
 		if (ret)
 			return ret;
 	}
-	sz = test->area.max_tfr;
-	dev_addr = test->area.dev_addr;
+	sz = t->max_tfr;
+	dev_addr = t->dev_addr;
 	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
 }
 
@@ -1603,6 +1592,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
  */
 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned long sz;
 	unsigned int dev_addr;
 	int ret;
@@ -1610,8 +1600,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
 	ret = mmc_test_area_erase(test);
 	if (ret)
 		return ret;
-	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
-		dev_addr = test->area.dev_addr + (sz >> 9);
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
 		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
 		if (ret)
 			return ret;
@@ -1619,8 +1609,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
 	ret = mmc_test_area_erase(test);
 	if (ret)
 		return ret;
-	sz = test->area.max_tfr;
-	dev_addr = test->area.dev_addr;
+	sz = t->max_tfr;
+	dev_addr = t->dev_addr;
 	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
 }
 
@@ -1629,6 +1619,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
  */
 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned long sz;
 	unsigned int dev_addr;
 	struct timespec ts1, ts2;
@@ -1640,8 +1631,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
 	if (!mmc_can_erase(test->card))
 		return RESULT_UNSUP_HOST;
 
-	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
-		dev_addr = test->area.dev_addr + (sz >> 9);
+	for (sz = 512; sz < t->max_sz; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
 		getnstimeofday(&ts1);
 		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
 		if (ret)
@@ -1649,7 +1640,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
 		getnstimeofday(&ts2);
 		mmc_test_print_rate(test, sz, &ts1, &ts2);
 	}
-	dev_addr = test->area.dev_addr;
+	dev_addr = t->dev_addr;
 	getnstimeofday(&ts1);
 	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
 	if (ret)
@@ -1661,12 +1652,13 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
 
 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned int dev_addr, i, cnt;
 	struct timespec ts1, ts2;
 	int ret;
 
-	cnt = test->area.max_sz / sz;
-	dev_addr = test->area.dev_addr;
+	cnt = t->max_sz / sz;
+	dev_addr = t->dev_addr;
 	getnstimeofday(&ts1);
 	for (i = 0; i < cnt; i++) {
 		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
@@ -1684,20 +1676,22 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
  */
 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned long sz;
 	int ret;
 
-	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
 		ret = mmc_test_seq_read_perf(test, sz);
 		if (ret)
 			return ret;
 	}
-	sz = test->area.max_tfr;
+	sz = t->max_tfr;
 	return mmc_test_seq_read_perf(test, sz);
 }
 
 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned int dev_addr, i, cnt;
 	struct timespec ts1, ts2;
 	int ret;
@@ -1705,8 +1699,8 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
 	ret = mmc_test_area_erase(test);
 	if (ret)
 		return ret;
-	cnt = test->area.max_sz / sz;
-	dev_addr = test->area.dev_addr;
+	cnt = t->max_sz / sz;
+	dev_addr = t->dev_addr;
 	getnstimeofday(&ts1);
 	for (i = 0; i < cnt; i++) {
 		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
@@ -1724,15 +1718,16 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
  */
 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned long sz;
 	int ret;
 
-	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
 		ret = mmc_test_seq_write_perf(test, sz);
 		if (ret)
 			return ret;
 	}
-	sz = test->area.max_tfr;
+	sz = t->max_tfr;
 	return mmc_test_seq_write_perf(test, sz);
 }
 
@@ -1741,6 +1736,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
  */
 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned long sz;
 	unsigned int dev_addr, i, cnt;
 	struct timespec ts1, ts2;
@@ -1752,15 +1748,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
 	if (!mmc_can_erase(test->card))
 		return RESULT_UNSUP_HOST;
 
-	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
 		ret = mmc_test_area_erase(test);
 		if (ret)
 			return ret;
 		ret = mmc_test_area_fill(test);
 		if (ret)
 			return ret;
-		cnt = test->area.max_sz / sz;
-		dev_addr = test->area.dev_addr;
+		cnt = t->max_sz / sz;
+		dev_addr = t->dev_addr;
 		getnstimeofday(&ts1);
 		for (i = 0; i < cnt; i++) {
 			ret = mmc_erase(test->card, dev_addr, sz >> 9,
@@ -1823,11 +1819,12 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
 
 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned int next;
 	unsigned long sz;
 	int ret;
 
-	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
 		/*
 		 * When writing, try to get more consistent results by running
 		 * the test twice with exactly the same I/O but outputting the
@@ -1844,7 +1841,7 @@ static int mmc_test_random_perf(struct mmc_test_card *test, int write)
 		if (ret)
 			return ret;
 	}
-	sz = test->area.max_tfr;
+	sz = t->max_tfr;
 	if (write) {
 		next = rnd_next;
 		ret = mmc_test_rnd_perf(test, write, 0, sz);
@@ -1874,17 +1871,18 @@ static int mmc_test_random_write_perf(struct mmc_test_card *test)
 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
 			     unsigned int tot_sz, int max_scatter)
 {
+	struct mmc_test_area *t = &test->area;
 	unsigned int dev_addr, i, cnt, sz, ssz;
 	struct timespec ts1, ts2;
 	int ret;
 
-	sz = test->area.max_tfr;
+	sz = t->max_tfr;
+
 	/*
 	 * In the case of a maximally scattered transfer, the maximum transfer
 	 * size is further limited by using PAGE_SIZE segments.
 	 */
 	if (max_scatter) {
-		struct mmc_test_area *t = &test->area;
 		unsigned long max_tfr;
 
 		if (t->max_seg_sz >= PAGE_SIZE)

drivers/mmc/card/queue.c (+0, -8)

@@ -343,18 +343,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
  */
 void mmc_queue_bounce_pre(struct mmc_queue *mq)
 {
-	unsigned long flags;
-
 	if (!mq->bounce_buf)
 		return;
 
 	if (rq_data_dir(mq->req) != WRITE)
 		return;
 
-	local_irq_save(flags);
 	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
 		mq->bounce_buf, mq->sg[0].length);
-	local_irq_restore(flags);
 }
 
 /*
@@ -363,17 +359,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq)
  */
 void mmc_queue_bounce_post(struct mmc_queue *mq)
 {
-	unsigned long flags;
-
 	if (!mq->bounce_buf)
 		return;
 
 	if (rq_data_dir(mq->req) != READ)
 		return;
 
-	local_irq_save(flags);
 	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
 		mq->bounce_buf, mq->sg[0].length);
-	local_irq_restore(flags);
 }
 

drivers/mmc/core/bus.c (+8, -3)

@@ -274,8 +274,12 @@ int mmc_add_card(struct mmc_card *card)
 		break;
 	case MMC_TYPE_SD:
 		type = "SD";
-		if (mmc_card_blockaddr(card))
-			type = "SDHC";
+		if (mmc_card_blockaddr(card)) {
+			if (mmc_card_ext_capacity(card))
+				type = "SDXC";
+			else
+				type = "SDHC";
+		}
 		break;
 	case MMC_TYPE_SDIO:
 		type = "SDIO";
@@ -299,7 +303,8 @@ int mmc_add_card(struct mmc_card *card)
 	} else {
 		printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
 			mmc_hostname(card->host),
-			mmc_card_highspeed(card) ? "high speed " : "",
+			mmc_sd_card_uhs(card) ? "ultra high speed " :
+			(mmc_card_highspeed(card) ? "high speed " : ""),
 			mmc_card_ddr_mode(card) ? "DDR " : "",
 			type, card->rca);
 	}

drivers/mmc/core/core.c (+71, -40)

@@ -236,12 +236,10 @@ EXPORT_SYMBOL(mmc_wait_for_req);
  */
 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 {
-	struct mmc_request mrq;
+	struct mmc_request mrq = {0};
 
 	WARN_ON(!host->claimed);
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-
 	memset(cmd->resp, 0, sizeof(cmd->resp));
 	cmd->retries = retries;
 
@@ -719,23 +717,13 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 	mmc_set_ios(host);
 }
 
-/*
- * Change data bus width and DDR mode of a host.
- */
-void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
-			   unsigned int ddr)
-{
-	host->ios.bus_width = width;
-	host->ios.ddr = ddr;
-	mmc_set_ios(host);
-}
-
 /*
  * Change data bus width of a host.
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
-	mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE);
+	host->ios.bus_width = width;
+	mmc_set_ios(host);
 }
 
 /**
@@ -944,6 +932,38 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
 	return ocr;
 }
 
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
+{
+	struct mmc_command cmd = {0};
+	int err = 0;
+
+	BUG_ON(!host);
+
+	/*
+	 * Send CMD11 only if the request is to switch the card to
+	 * 1.8V signalling.
+	 */
+	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
+		cmd.opcode = SD_SWITCH_VOLTAGE;
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+		err = mmc_wait_for_cmd(host, &cmd, 0);
+		if (err)
+			return err;
+
+		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+			return -EIO;
+	}
+
+	host->ios.signal_voltage = signal_voltage;
+
+	if (host->ops->start_signal_voltage_switch)
+		err = host->ops->start_signal_voltage_switch(host, &host->ios);
+
+	return err;
+}
+
 /*
  * Select timing parameters for host.
  */
@@ -953,6 +973,15 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 	mmc_set_ios(host);
 }
 
+/*
+ * Select appropriate driver type for host.
+ */
+void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
+{
+	host->ios.drv_type = drv_type;
+	mmc_set_ios(host);
+}
+
 /*
  * Apply power to the MMC stack.  This is a two-stage process.
  * First, we enable power to the card without the clock running.
@@ -1187,9 +1216,8 @@ void mmc_init_erase(struct mmc_card *card)
 	}
 }
 
-static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
-				      struct mmc_command *cmd,
-				      unsigned int arg, unsigned int qty)
+static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
+				          unsigned int arg, unsigned int qty)
 {
 	unsigned int erase_timeout;
 
@@ -1246,44 +1274,48 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
 	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
 		erase_timeout = 1000;
 
-	cmd->erase_timeout = erase_timeout;
+	return erase_timeout;
 }
 
-static void mmc_set_sd_erase_timeout(struct mmc_card *card,
-				     struct mmc_command *cmd, unsigned int arg,
-				     unsigned int qty)
+static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
+					 unsigned int arg,
+					 unsigned int qty)
 {
+	unsigned int erase_timeout;
+
 	if (card->ssr.erase_timeout) {
 		/* Erase timeout specified in SD Status Register (SSR) */
-		cmd->erase_timeout = card->ssr.erase_timeout * qty +
-				     card->ssr.erase_offset;
+		erase_timeout = card->ssr.erase_timeout * qty +
+				card->ssr.erase_offset;
 	} else {
 		/*
 		 * Erase timeout not specified in SD Status Register (SSR) so
 		 * use 250ms per write block.
 		 */
-		cmd->erase_timeout = 250 * qty;
+		erase_timeout = 250 * qty;
 	}
 
 	/* Must not be less than 1 second */
-	if (cmd->erase_timeout < 1000)
-		cmd->erase_timeout = 1000;
+	if (erase_timeout < 1000)
+		erase_timeout = 1000;
+
+	return erase_timeout;
 }
 
-static void mmc_set_erase_timeout(struct mmc_card *card,
-				  struct mmc_command *cmd, unsigned int arg,
-				  unsigned int qty)
+static unsigned int mmc_erase_timeout(struct mmc_card *card,
+				      unsigned int arg,
+				      unsigned int qty)
 {
 	if (mmc_card_sd(card))
-		mmc_set_sd_erase_timeout(card, cmd, arg, qty);
+		return mmc_sd_erase_timeout(card, arg, qty);
 	else
-		mmc_set_mmc_erase_timeout(card, cmd, arg, qty);
+		return mmc_mmc_erase_timeout(card, arg, qty);
 }
 
 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
 			unsigned int to, unsigned int arg)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	unsigned int qty = 0;
 	int err;
 
@@ -1317,7 +1349,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
 		to <<= 9;
 	}
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
 	if (mmc_card_sd(card))
 		cmd.opcode = SD_ERASE_WR_BLK_START;
 	else
@@ -1351,7 +1382,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
 	cmd.opcode = MMC_ERASE;
 	cmd.arg = arg;
 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-	mmc_set_erase_timeout(card, &cmd, arg, qty);
+	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err) {
 		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
@@ -1487,12 +1518,11 @@ EXPORT_SYMBOL(mmc_erase_group_aligned);
 
 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
 		return 0;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
 	cmd.opcode = MMC_SET_BLOCKLEN;
 	cmd.arg = blocklen;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -1578,7 +1608,7 @@ void mmc_rescan(struct work_struct *work)
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
 		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
 			break;
-		if (freqs[i] < host->f_min)
+		if (freqs[i] <= host->f_min)
 			break;
 	}
 	mmc_release_host(host);
@@ -1746,7 +1776,7 @@ int mmc_suspend_host(struct mmc_host *host)
 	}
 	mmc_bus_put(host);
 
-	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
+	if (!err && !mmc_card_keep_power(host))
 		mmc_power_off(host);
 
 	return err;
@@ -1764,7 +1794,7 @@ int mmc_resume_host(struct mmc_host *host)
 
 	mmc_bus_get(host);
 	if (host->bus_ops && !host->bus_dead) {
-		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
+		if (!mmc_card_keep_power(host)) {
 			mmc_power_up(host);
 			mmc_select_voltage(host, host->ocr);
 			/*
@@ -1789,6 +1819,7 @@ int mmc_resume_host(struct mmc_host *host)
 			err = 0;
 		}
 	}
+	host->pm_flags &= ~MMC_PM_KEEP_POWER;
 	mmc_bus_put(host);
 
 	return err;
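
The new mmc_set_signal_voltage() above issues CMD11 only when the request
moves the card away from 3.3 V signalling, then hands the switch to the
host driver's ->start_signal_voltage_switch() hook. A hedged sketch of a
caller follows; it is not the series' actual SD code path, and
MMC_SIGNAL_VOLTAGE_180 is assumed to come from the include/linux/mmc/host.h
changes, which are not shown in this excerpt.

#include <linux/kernel.h>
#include <linux/mmc/host.h>

#include "core.h"	/* declares mmc_set_signal_voltage() after this series */

/* Kernel-context sketch of a caller switching to 1.8 V signalling. */
static int example_switch_to_1v8(struct mmc_host *host)
{
	int err;

	/* cmd11 = true: the card takes part in the switch via CMD11 */
	err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
	if (err)
		printk(KERN_WARNING "%s: 1.8V signalling failed, "
		       "staying with 3.3V modes\n", mmc_hostname(host));

	return err;
}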

drivers/mmc/core/core.h (+3, -4)

@@ -38,10 +38,11 @@ void mmc_ungate_clock(struct mmc_host *host);
 void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
-void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
-			   unsigned int ddr);
 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
+int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
+			   bool cmd11);
 void mmc_set_timing(struct mmc_host *host, unsigned int timing);
+void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
 
 static inline void mmc_delay(unsigned int ms)
 {
@@ -61,8 +62,6 @@ int mmc_attach_mmc(struct mmc_host *host);
 int mmc_attach_sd(struct mmc_host *host);
 int mmc_attach_sdio(struct mmc_host *host);
 
-void mmc_fixup_device(struct mmc_card *card);
-
 /* Module parameters */
 extern int use_spi_crc;
 

drivers/mmc/core/host.c (+2, -2)

@@ -325,12 +325,12 @@ int mmc_add_host(struct mmc_host *host)
 	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
 		!host->ops->enable_sdio_irq);
 
-	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
-
 	err = device_add(&host->class_dev);
 	if (err)
 		return err;
 
+	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
+
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_host_debugfs(host);
 #endif

drivers/mmc/core/mmc.c (+166, -20)

@@ -20,6 +20,7 @@
 #include "core.h"
 #include "bus.h"
 #include "mmc_ops.h"
+#include "sd_ops.h"
 
 static const unsigned int tran_exp[] = {
 	10000,		100000,		1000000,	10000000,
@@ -173,14 +174,17 @@ static int mmc_decode_csd(struct mmc_card *card)
 }
 
 /*
- * Read and decode extended CSD.
+ * Read extended CSD.
  */
-static int mmc_read_ext_csd(struct mmc_card *card)
+static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 {
 	int err;
 	u8 *ext_csd;
 
 	BUG_ON(!card);
+	BUG_ON(!new_ext_csd);
+
+	*new_ext_csd = NULL;
 
 	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
 		return 0;
@@ -198,12 +202,15 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 
 	err = mmc_send_ext_csd(card, ext_csd);
 	if (err) {
+		kfree(ext_csd);
+		*new_ext_csd = NULL;
+
 		/* If the host or the card can't do the switch,
 		 * fail more gracefully. */
 		if ((err != -EINVAL)
 		 && (err != -ENOSYS)
 		 && (err != -EFAULT))
-			goto out;
+			return err;
 
 		/*
 		 * High capacity cards should have this "magic" size
@@ -221,9 +228,23 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 				mmc_hostname(card->host));
 			err = 0;
 		}
+	} else
+		*new_ext_csd = ext_csd;
 
-		goto out;
-	}
+	return err;
+}
+
+/*
+ * Decode extended CSD.
+ */
+static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
+{
+	int err = 0;
+
+	BUG_ON(!card);
+
+	if (!ext_csd)
+		return 0;
 
 	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
 	if (card->csd.structure == 3) {
@@ -288,6 +309,10 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 
 	if (card->ext_csd.rev >= 3) {
 		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
+		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
+
+		/* EXT_CSD value is in units of 10ms, but we store in ms */
+		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
 
 		/* Sleep / awake timeout in 100ns units */
 		if (sa_shift > 0 && sa_shift <= 0x17)
@@ -299,6 +324,14 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
 		card->ext_csd.hc_erase_size =
 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
+
+		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
+
+		/*
+		 * There are two boot regions of equal size, defined in
+		 * multiples of 128K.
+		 */
+		card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
 	}
 
 	if (card->ext_csd.rev >= 4) {
@@ -350,14 +383,78 @@ static int mmc_read_ext_csd(struct mmc_card *card)
 			ext_csd[EXT_CSD_TRIM_MULT];
 	}
 
+	if (card->ext_csd.rev >= 5)
+		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
+
 	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
 		card->erased_byte = 0xFF;
 	else
 		card->erased_byte = 0x0;
 
 out:
+	return err;
+}
+
+static inline void mmc_free_ext_csd(u8 *ext_csd)
+{
 	kfree(ext_csd);
+}
+
+
+static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
+			unsigned bus_width)
+{
+	u8 *bw_ext_csd;
+	int err;
+
+	err = mmc_get_ext_csd(card, &bw_ext_csd);
+	if (err)
+		return err;
+
+	if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+		if (bus_width != MMC_BUS_WIDTH_1)
+			err = -EINVAL;
+		goto out;
+	}
 
+	if (bus_width == MMC_BUS_WIDTH_1)
+		goto out;
+
+	/* only compare read only fields */
+	err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
+		(ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
+		(ext_csd[EXT_CSD_REV] ==
+			bw_ext_csd[EXT_CSD_REV]) &&
+		(ext_csd[EXT_CSD_STRUCTURE] ==
+			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
+		(ext_csd[EXT_CSD_CARD_TYPE] ==
+			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
+		(ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
+		(ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
+		(ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
+		(ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
+		(ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
+		(ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
+		(ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
+		(ext_csd[EXT_CSD_TRIM_MULT] ==
+			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
+		memcmp(&ext_csd[EXT_CSD_SEC_CNT],
+		       &bw_ext_csd[EXT_CSD_SEC_CNT],
+		       4) != 0);
+	if (err)
+		err = -EINVAL;
+
+out:
+	mmc_free_ext_csd(bw_ext_csd);
 	return err;
 }
 
@@ -422,6 +519,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	u32 cid[4];
 	unsigned int max_dtr;
 	u32 rocr;
+	u8 *ext_csd = NULL;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
@@ -520,7 +618,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 		/*
 		 * Fetch and process extended CSD.
 		 */
-		err = mmc_read_ext_csd(card);
+
+		err = mmc_get_ext_csd(card, &ext_csd);
+		if (err)
+			goto free_card;
+		err = mmc_read_ext_csd(card, ext_csd);
 		if (err)
 			goto free_card;
 
@@ -542,7 +644,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	 */
 	if (card->ext_csd.enhanced_area_en) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				EXT_CSD_ERASE_GROUP_DEF, 1);
+				 EXT_CSD_ERASE_GROUP_DEF, 1, 0);
 
 		if (err && err != -EBADMSG)
 			goto free_card;
@@ -567,13 +669,25 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 		}
 	}
 
+	/*
+	 * Ensure eMMC user default partition is enabled
+	 */
+	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
+		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
+				 card->ext_csd.part_config,
+				 card->ext_csd.part_time);
+		if (err && err != -EBADMSG)
+			goto free_card;
+	}
+
 	/*
 	 * Activate high speed (if supported)
 	 */
 	if ((card->ext_csd.hs_max_dtr != 0) &&
 		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			EXT_CSD_HS_TIMING, 1);
+				 EXT_CSD_HS_TIMING, 1, 0);
 		if (err && err != -EBADMSG)
 			goto free_card;
 
@@ -606,10 +720,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	 */
 	if (mmc_card_highspeed(card)) {
 		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
-			&& (host->caps & (MMC_CAP_1_8V_DDR)))
+			&& ((host->caps & (MMC_CAP_1_8V_DDR |
+			     MMC_CAP_UHS_DDR50))
+				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
 				ddr = MMC_1_8V_DDR_MODE;
 		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
-			&& (host->caps & (MMC_CAP_1_2V_DDR)))
+			&& ((host->caps & (MMC_CAP_1_2V_DDR |
+			     MMC_CAP_UHS_DDR50))
+				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
 				ddr = MMC_1_2V_DDR_MODE;
 	}
 
@@ -640,18 +758,22 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 				ddr = 0; /* no DDR for 1-bit width */
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 					 EXT_CSD_BUS_WIDTH,
-					 ext_csd_bits[idx][0]);
+					 ext_csd_bits[idx][0],
+					 0);
 			if (!err) {
-				mmc_set_bus_width_ddr(card->host,
-						      bus_width, MMC_SDR_MODE);
+				mmc_set_bus_width(card->host, bus_width);
+
 				/*
 				 * If controller can't handle bus width test,
-				 * use the highest bus width to maintain
-				 * compatibility with previous MMC behavior.
+				 * compare ext_csd previously read in 1 bit mode
+				 * against ext_csd at new bus width
 				 */
 				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
-					break;
-				err = mmc_bus_test(card, bus_width);
+					err = mmc_compare_ext_csds(card,
+						ext_csd,
+						bus_width);
+				else
+					err = mmc_bus_test(card, bus_width);
 				if (!err)
 					break;
 			}
@@ -659,8 +781,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 
 		if (!err && ddr) {
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-					EXT_CSD_BUS_WIDTH,
-					ext_csd_bits[idx][1]);
+					 EXT_CSD_BUS_WIDTH,
+					 ext_csd_bits[idx][1],
+					 0);
 		}
 		if (err) {
 			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
@@ -668,20 +791,43 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 				1 << bus_width, ddr);
 			goto free_card;
 		} else if (ddr) {
+			/*
+			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
+			 * signaling.
+			 *
+			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
+			 *
+			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
+			 * in the JEDEC spec for DDR.
+			 *
+			 * Do not force change in vccq since we are obviously
+			 * working and no change to vccq is needed.
+			 *
+			 * WARNING: eMMC rules are NOT the same as SD DDR
+			 */
+			if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
+				err = mmc_set_signal_voltage(host,
+					MMC_SIGNAL_VOLTAGE_120, 0);
+				if (err)
+					goto err;
+			}
 			mmc_card_set_ddr_mode(card);
-			mmc_set_bus_width_ddr(card->host, bus_width, ddr);
+			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
+			mmc_set_bus_width(card->host, bus_width);
 		}
 	}
 
 	if (!oldcard)
 		host->card = card;
 
+	mmc_free_ext_csd(ext_csd);
 	return 0;
 
 free_card:
 	if (!oldcard)
 		mmc_remove_card(card);
 err:
+	mmc_free_ext_csd(ext_csd);
 
 	return err;
 }

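The new EXT_CSD fields parsed above carry fixed JEDEC scalings: PART_SWITCH_TIME is in 10 ms units and BOOT_MULT in 128 KiB units. A minimal stand-alone sketch of that arithmetic follows; the byte offsets 199 and 226 follow the JEDEC layout and are an assumption here, since the hunks only use the symbolic names.

#include <stdio.h>
#include <stdint.h>

/* Offsets assumed per the JEDEC EXT_CSD layout (512-byte register). */
#define EXT_CSD_PART_SWITCH_TIME 199
#define EXT_CSD_BOOT_MULT        226

int main(void)
{
	uint8_t ext_csd[512] = {0};

	ext_csd[EXT_CSD_PART_SWITCH_TIME] = 2;	/* 2 * 10 ms                */
	ext_csd[EXT_CSD_BOOT_MULT]        = 8;	/* 8 * 128 KiB boot region  */

	unsigned int part_time_ms = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
	unsigned int boot_bytes   = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	printf("partition switch time: %u ms\n", part_time_ms);	/* 20 ms  */
	printf("boot region size:      %u bytes\n", boot_bytes);	/* 1 MiB  */
	return 0;
}
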
+ 32 - 48
drivers/mmc/core/mmc_ops.c

@@ -23,12 +23,10 @@
 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SELECT_CARD;
 
 	if (card) {
@@ -60,15 +58,13 @@ int mmc_deselect_cards(struct mmc_host *host)
 
 int mmc_card_sleepawake(struct mmc_host *host, int sleep)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	struct mmc_card *card = host->card;
 	int err;
 
 	if (sleep)
 		mmc_deselect_cards(host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SLEEP_AWAKE;
 	cmd.arg = card->rca << 16;
 	if (sleep)
@@ -97,7 +93,7 @@ int mmc_card_sleepawake(struct mmc_host *host, int sleep)
 int mmc_go_idle(struct mmc_host *host)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	/*
 	 * Non-SPI hosts need to prevent chipselect going active during
@@ -113,8 +109,6 @@ int mmc_go_idle(struct mmc_host *host)
 		mmc_delay(1);
 	}
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_GO_IDLE_STATE;
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
@@ -135,13 +129,11 @@ int mmc_go_idle(struct mmc_host *host)
 
 int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int i, err = 0;
 
 	BUG_ON(!host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SEND_OP_COND;
 	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
@@ -178,13 +170,11 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!host);
 	BUG_ON(!cid);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_ALL_SEND_CID;
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
@@ -201,13 +191,11 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
 int mmc_set_relative_addr(struct mmc_card *card)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!card);
 	BUG_ON(!card->host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SET_RELATIVE_ADDR;
 	cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -223,13 +211,11 @@ static int
 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!host);
 	BUG_ON(!cxd);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = opcode;
 	cmd.arg = arg;
 	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
@@ -247,9 +233,9 @@ static int
 mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 		u32 opcode, void *buf, unsigned len)
 {
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	struct scatterlist sg;
 	void *data_buf;
 
@@ -260,10 +246,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 	if (data_buf == NULL)
 		return -ENOMEM;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 
@@ -355,11 +337,9 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
 
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int err;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SPI_READ_OCR;
 	cmd.arg = highcap ? (1 << 30) : 0;
 	cmd.flags = MMC_RSP_SPI_R3;
@@ -372,11 +352,9 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 
 int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int err;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SPI_CRC_ON_OFF;
 	cmd.flags = MMC_RSP_SPI_R1;
 	cmd.arg = use_crc;
@@ -387,23 +365,34 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
 	return err;
 }
 
-int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
+/**
+ *	mmc_switch - modify EXT_CSD register
+ *	@card: the MMC card associated with the data transfer
+ *	@set: cmd set values
+ *	@index: EXT_CSD register index
+ *	@value: value to program into EXT_CSD register
+ *	@timeout_ms: timeout (ms) for operation performed by register write,
+ *                   timeout of zero implies maximum possible timeout
+ *
+ *	Modifies the EXT_CSD register for selected card.
+ */
+int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+	       unsigned int timeout_ms)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	u32 status;
 
 	BUG_ON(!card);
 	BUG_ON(!card->host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SWITCH;
 	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
 		  (index << 16) |
 		  (value << 8) |
 		  set;
 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	cmd.cmd_timeout_ms = timeout_ms;
 
 	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 	if (err)
@@ -433,17 +422,16 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mmc_switch);
 
 int mmc_send_status(struct mmc_card *card, u32 *status)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!card);
 	BUG_ON(!card->host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = MMC_SEND_STATUS;
 	if (!mmc_host_is_spi(card->host))
 		cmd.arg = card->rca << 16;
@@ -466,9 +454,9 @@ static int
 mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
 		  u8 len)
 {
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	struct scatterlist sg;
 	u8 *data_buf;
 	u8 *test_buf;
@@ -497,10 +485,6 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
 	if (opcode == MMC_BUS_TEST_W)
 		memcpy(data_buf, test_buf, len);
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 	cmd.opcode = opcode;

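For reference, the CMD6 argument built by mmc_switch() above is a straight byte-field packing: access mode in bits 25:24, EXT_CSD byte index in bits 23:16, value in bits 15:8 and the command set in the low bits. A minimal user-space sketch of that packing; MMC_SWITCH_MODE_WRITE_BYTE = 0x03 and the HS_TIMING offset 185 are mirrored here as assumptions rather than pulled from a kernel header.

#include <stdio.h>
#include <stdint.h>

#define MMC_SWITCH_MODE_WRITE_BYTE 0x03	/* write a single EXT_CSD byte */

/* Mirrors the cmd.arg packing in mmc_switch() above. */
static uint32_t mmc_switch_arg(uint8_t index, uint8_t value, uint8_t set)
{
	return (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
	       ((uint32_t)index << 16) |
	       ((uint32_t)value << 8) |
	       set;
}

int main(void)
{
	/* EXT_CSD_HS_TIMING (byte 185) = 1, normal command set, as in the
	 * high-speed switch performed by mmc_init_card() above. */
	printf("CMD6 arg = 0x%08x\n", (unsigned)mmc_switch_arg(185, 1, 0));
	return 0;
}
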
+ 0 - 1
drivers/mmc/core/mmc_ops.h

@@ -20,7 +20,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
 int mmc_set_relative_addr(struct mmc_card *card);
 int mmc_send_csd(struct mmc_card *card, u32 *csd);
 int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
-int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value);
 int mmc_send_status(struct mmc_card *card, u32 *status);
 int mmc_send_cid(struct mmc_host *host, u32 *cid);
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);

+ 42 - 47
drivers/mmc/core/quirks.c

@@ -1,7 +1,8 @@
 /*
- *  This file contains work-arounds for many known sdio hardware
- *  bugs.
+ *  This file contains work-arounds for many known SD/MMC
+ *  and SDIO hardware bugs.
  *
+ *  Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
  *  Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
  *  Inspired from pci fixup code:
  *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
@@ -11,34 +12,14 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mmc/card.h>
-#include <linux/mod_devicetable.h>
 
-/*
- *  The world is not perfect and supplies us with broken mmc/sdio devices.
- *  For at least a part of these bugs we need a work-around
- */
-
-struct mmc_fixup {
-	u16 vendor, device;	/* You can use SDIO_ANY_ID here of course */
-	void (*vendor_fixup)(struct mmc_card *card, int data);
-	int data;
-};
-
-/*
- * This hook just adds a quirk unconditionnally
- */
-static void __maybe_unused add_quirk(struct mmc_card *card, int data)
-{
-	card->quirks |= data;
-}
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI		0x0097
+#endif
 
-/*
- * This hook just removes a quirk unconditionnally
- */
-static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
-{
-	card->quirks &= ~data;
-}
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271	0x4076
+#endif
 
 /*
  * This hook just adds a quirk for all sdio devices
@@ -49,33 +30,47 @@ static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
 		card->quirks |= data;
 }
 
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI		0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271	0x4076
-#endif
-
 static const struct mmc_fixup mmc_fixup_methods[] = {
 	/* by default sdio devices are considered CLK_GATING broken */
 	/* good cards will be whitelisted as they are tested */
-	{ SDIO_ANY_ID, SDIO_ANY_ID,
-		add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING },
-	{ SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
-		remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING },
-	{ 0 }
+	SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
+		   add_quirk_for_sdio_devices,
+		   MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   add_quirk, MMC_QUIRK_DISABLE_CD),
+
+	END_FIXUP
 };
 
-void mmc_fixup_device(struct mmc_card *card)
+void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
 {
 	const struct mmc_fixup *f;
+	u64 rev = cid_rev_card(card);
+
+	/* Non-core specific workarounds. */
+	if (!table)
+		table = mmc_fixup_methods;
 
-	for (f = mmc_fixup_methods; f->vendor_fixup; f++) {
-		if ((f->vendor == card->cis.vendor
-		     || f->vendor == (u16) SDIO_ANY_ID) &&
-		    (f->device == card->cis.device
-		     || f->device == (u16) SDIO_ANY_ID)) {
+	for (f = table; f->vendor_fixup; f++) {
+		if ((f->manfid == CID_MANFID_ANY ||
+		     f->manfid == card->cid.manfid) &&
+		    (f->oemid == CID_OEMID_ANY ||
+		     f->oemid == card->cid.oemid) &&
+		    (f->name == CID_NAME_ANY ||
+		     !strncmp(f->name, card->cid.prod_name,
+			      sizeof(card->cid.prod_name))) &&
+		    (f->cis_vendor == card->cis.vendor ||
+		     f->cis_vendor == (u16) SDIO_ANY_ID) &&
+		    (f->cis_device == card->cis.device ||
+		     f->cis_device == (u16) SDIO_ANY_ID) &&
+		    rev >= f->rev_start && rev <= f->rev_end) {
 			dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
 			f->vendor_fixup(card, f->data);
 		}

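The table-driven mmc_fixup_device() above matches a card against every row, where each field may be a wildcard (CID_MANFID_ANY, SDIO_ANY_ID, an open revision range) and the row's hook runs on a hit. A reduced sketch of that matching style, with hypothetical IDs and only two match fields:

#include <stdio.h>
#include <stdint.h>

#define ANY_ID 0xFFFF	/* wildcard, in the spirit of SDIO_ANY_ID */

struct fixup {
	uint16_t vendor, device;
	unsigned int quirk;		/* flag to OR into card quirks */
};

struct card {
	uint16_t vendor, device;
	unsigned int quirks;
};

static const struct fixup table[] = {
	{ ANY_ID, ANY_ID, 0x1 },	/* applies to every card       */
	{ 0x0097, 0x4076, 0x2 },	/* applies to one chip only    */
	{ 0 }				/* end marker, like END_FIXUP  */
};

static void apply_fixups(struct card *c)
{
	const struct fixup *f;

	for (f = table; f->quirk; f++)
		if ((f->vendor == ANY_ID || f->vendor == c->vendor) &&
		    (f->device == ANY_ID || f->device == c->device))
			c->quirks |= f->quirk;
}

int main(void)
{
	struct card c = { .vendor = 0x0097, .device = 0x4076, .quirks = 0 };

	apply_fixups(&c);
	printf("quirks = 0x%x\n", c.quirks);	/* both rows match: 0x3 */
	return 0;
}
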
+ 370 - 35
drivers/mmc/core/sd.c

@@ -130,7 +130,7 @@ static int mmc_decode_csd(struct mmc_card *card)
 		break;
 	case 1:
 		/*
-		 * This is a block-addressed SDHC card. Most
+		 * This is a block-addressed SDHC or SDXC card. Most
 		 * interesting fields are unused and have fixed
 		 * values. To avoid getting tripped by buggy cards,
 		 * we assume those fixed values ourselves.
@@ -144,6 +144,11 @@ static int mmc_decode_csd(struct mmc_card *card)
 		e = UNSTUFF_BITS(resp, 96, 3);
 		csd->max_dtr	  = tran_exp[e] * tran_mant[m];
 		csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);
+		csd->c_size	  = UNSTUFF_BITS(resp, 48, 22);
+
+		/* SDXC cards have a minimum C_SIZE of 0x00FFFF */
+		if (csd->c_size >= 0xFFFF)
+			mmc_card_set_ext_capacity(card);
 
 		m = UNSTUFF_BITS(resp, 48, 22);
 		csd->capacity     = (1 + m) << 10;
@@ -189,12 +194,17 @@ static int mmc_decode_scr(struct mmc_card *card)
 
 	scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
 	scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
+	if (scr->sda_vsn == SCR_SPEC_VER_2)
+		/* Check if Physical Layer Spec v3.0 is supported */
+		scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
 
 	if (UNSTUFF_BITS(resp, 55, 1))
 		card->erased_byte = 0xFF;
 	else
 		card->erased_byte = 0x0;
 
+	if (scr->sda_spec3)
+		scr->cmds = UNSTUFF_BITS(resp, 32, 2);
 	return 0;
 }
 
@@ -274,29 +284,74 @@ static int mmc_read_switch(struct mmc_card *card)
 	status = kmalloc(64, GFP_KERNEL);
 	if (!status) {
 		printk(KERN_ERR "%s: could not allocate a buffer for "
-			"switch capabilities.\n", mmc_hostname(card->host));
+			"switch capabilities.\n",
+			mmc_hostname(card->host));
 		return -ENOMEM;
 	}
 
+	/* Find out the supported Bus Speed Modes. */
 	err = mmc_sd_switch(card, 0, 0, 1, status);
 	if (err) {
-		/* If the host or the card can't do the switch,
-		 * fail more gracefully. */
-		if ((err != -EINVAL)
-		 && (err != -ENOSYS)
-		 && (err != -EFAULT))
+		/*
+		 * If the host or the card can't do the switch,
+		 * fail more gracefully.
+		 */
+		if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
 			goto out;
 
-		printk(KERN_WARNING "%s: problem reading switch "
-			"capabilities, performance might suffer.\n",
+		printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n",
 			mmc_hostname(card->host));
 		err = 0;
 
 		goto out;
 	}
 
-	if (status[13] & 0x02)
-		card->sw_caps.hs_max_dtr = 50000000;
+	if (card->scr.sda_spec3) {
+		card->sw_caps.sd3_bus_mode = status[13];
+
+		/* Find out Driver Strengths supported by the card */
+		err = mmc_sd_switch(card, 0, 2, 1, status);
+		if (err) {
+			/*
+			 * If the host or the card can't do the switch,
+			 * fail more gracefully.
+			 */
+			if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
+				goto out;
+
+			printk(KERN_WARNING "%s: problem reading "
+				"Driver Strength.\n",
+				mmc_hostname(card->host));
+			err = 0;
+
+			goto out;
+		}
+
+		card->sw_caps.sd3_drv_type = status[9];
+
+		/* Find out Current Limits supported by the card */
+		err = mmc_sd_switch(card, 0, 3, 1, status);
+		if (err) {
+			/*
+			 * If the host or the card can't do the switch,
+			 * fail more gracefully.
+			 */
+			if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
+				goto out;
+
+			printk(KERN_WARNING "%s: problem reading "
+				"Current Limit.\n",
+				mmc_hostname(card->host));
+			err = 0;
+
+			goto out;
+		}
+
+		card->sw_caps.sd3_curr_limit = status[7];
+	} else {
+		if (status[13] & 0x02)
+			card->sw_caps.hs_max_dtr = 50000000;
+	}
 
 out:
 	kfree(status);
@@ -352,6 +407,232 @@ out:
 	return err;
 }
 
+static int sd_select_driver_type(struct mmc_card *card, u8 *status)
+{
+	int host_drv_type = 0, card_drv_type = 0;
+	int err;
+
+	/*
+	 * If the host doesn't support any of the Driver Types A, C or D,
+	 * default Driver Type B is used.
+	 */
+	if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
+	    | MMC_CAP_DRIVER_TYPE_D)))
+		return 0;
+
+	if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) {
+		host_drv_type = MMC_SET_DRIVER_TYPE_A;
+		if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+			card_drv_type = MMC_SET_DRIVER_TYPE_A;
+		else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
+			card_drv_type = MMC_SET_DRIVER_TYPE_B;
+		else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+			card_drv_type = MMC_SET_DRIVER_TYPE_C;
+	} else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) {
+		host_drv_type = MMC_SET_DRIVER_TYPE_C;
+		if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+			card_drv_type = MMC_SET_DRIVER_TYPE_C;
+	} else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
+		/*
+		 * If we are here, that means only the default driver type
+		 * B is supported by the host.
+		 */
+		host_drv_type = MMC_SET_DRIVER_TYPE_B;
+		if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
+			card_drv_type = MMC_SET_DRIVER_TYPE_B;
+		else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+			card_drv_type = MMC_SET_DRIVER_TYPE_C;
+	}
+
+	err = mmc_sd_switch(card, 1, 2, card_drv_type, status);
+	if (err)
+		return err;
+
+	if ((status[15] & 0xF) != card_drv_type) {
+		printk(KERN_WARNING "%s: Problem setting driver strength!\n",
+			mmc_hostname(card->host));
+		return 0;
+	}
+
+	mmc_set_driver_type(card->host, host_drv_type);
+
+	return 0;
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+	unsigned int bus_speed = 0, timing = 0;
+	int err;
+
+	/*
+	 * If the host doesn't support any of the UHS-I modes, fallback on
+	 * default speed.
+	 */
+	if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+		return 0;
+
+	if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+			bus_speed = UHS_SDR104_BUS_SPEED;
+			timing = MMC_TIMING_UHS_SDR104;
+			card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+			bus_speed = UHS_DDR50_BUS_SPEED;
+			timing = MMC_TIMING_UHS_DDR50;
+			card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+		    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+		    SD_MODE_UHS_SDR50)) {
+			bus_speed = UHS_SDR50_BUS_SPEED;
+			timing = MMC_TIMING_UHS_SDR50;
+			card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+			bus_speed = UHS_SDR25_BUS_SPEED;
+			timing = MMC_TIMING_UHS_SDR25;
+			card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+		    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+		    SD_MODE_UHS_SDR12)) {
+			bus_speed = UHS_SDR12_BUS_SPEED;
+			timing = MMC_TIMING_UHS_SDR12;
+			card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+	}
+
+	card->sd_bus_speed = bus_speed;
+	err = mmc_sd_switch(card, 1, 0, bus_speed, status);
+	if (err)
+		return err;
+
+	if ((status[16] & 0xF) != bus_speed)
+		printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
+			mmc_hostname(card->host));
+	else {
+		mmc_set_timing(card->host, timing);
+		mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+	}
+
+	return 0;
+}
+
+static int sd_set_current_limit(struct mmc_card *card, u8 *status)
+{
+	int current_limit = 0;
+	int err;
+
+	/*
+	 * Current limit switch is only defined for SDR50, SDR104, and DDR50
+	 * bus speed modes. For other bus speed modes, we set the default
+	 * current limit of 200mA.
+	 */
+	if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) ||
+	    (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) ||
+	    (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) {
+		if (card->host->caps & MMC_CAP_MAX_CURRENT_800) {
+			if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
+				current_limit = SD_SET_CURRENT_LIMIT_800;
+			else if (card->sw_caps.sd3_curr_limit &
+					SD_MAX_CURRENT_600)
+				current_limit = SD_SET_CURRENT_LIMIT_600;
+			else if (card->sw_caps.sd3_curr_limit &
+					SD_MAX_CURRENT_400)
+				current_limit = SD_SET_CURRENT_LIMIT_400;
+			else if (card->sw_caps.sd3_curr_limit &
+					SD_MAX_CURRENT_200)
+				current_limit = SD_SET_CURRENT_LIMIT_200;
+		} else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) {
+			if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
+				current_limit = SD_SET_CURRENT_LIMIT_600;
+			else if (card->sw_caps.sd3_curr_limit &
+					SD_MAX_CURRENT_400)
+				current_limit = SD_SET_CURRENT_LIMIT_400;
+			else if (card->sw_caps.sd3_curr_limit &
+					SD_MAX_CURRENT_200)
+				current_limit = SD_SET_CURRENT_LIMIT_200;
+		} else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) {
+			if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
+				current_limit = SD_SET_CURRENT_LIMIT_400;
+			else if (card->sw_caps.sd3_curr_limit &
+					SD_MAX_CURRENT_200)
+				current_limit = SD_SET_CURRENT_LIMIT_200;
+		} else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) {
+			if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
+				current_limit = SD_SET_CURRENT_LIMIT_200;
+		}
+	} else
+		current_limit = SD_SET_CURRENT_LIMIT_200;
+
+	err = mmc_sd_switch(card, 1, 3, current_limit, status);
+	if (err)
+		return err;
+
+	if (((status[15] >> 4) & 0x0F) != current_limit)
+		printk(KERN_WARNING "%s: Problem setting current limit!\n",
+			mmc_hostname(card->host));
+
+	return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sd_init_uhs_card(struct mmc_card *card)
+{
+	int err;
+	u8 *status;
+
+	if (!card->scr.sda_spec3)
+		return 0;
+
+	if (!(card->csd.cmdclass & CCC_SWITCH))
+		return 0;
+
+	status = kmalloc(64, GFP_KERNEL);
+	if (!status) {
+		printk(KERN_ERR "%s: could not allocate a buffer for "
+			"switch capabilities.\n", mmc_hostname(card->host));
+		return -ENOMEM;
+	}
+
+	/* Set 4-bit bus width */
+	if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
+	    (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+		err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+		if (err)
+			goto out;
+
+		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+	}
+
+	/* Set the driver strength for the card */
+	err = sd_select_driver_type(card, status);
+	if (err)
+		goto out;
+
+	/* Set bus speed mode of the card */
+	err = sd_set_bus_speed_mode(card, status);
+	if (err)
+		goto out;
+
+	/* Set current limit for the card */
+	err = sd_set_current_limit(card, status);
+	if (err)
+		goto out;
+
+	/* SPI mode doesn't define CMD19 */
+	if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
+		err = card->host->ops->execute_tuning(card->host);
+
+out:
+	kfree(status);
+
+	return err;
+}
+
 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
 	card->raw_cid[2], card->raw_cid[3]);
 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
@@ -400,7 +681,7 @@ struct device_type sd_type = {
 /*
  * Fetch CID from card.
  */
-int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid)
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
 {
 	int err;
 
@@ -420,12 +701,39 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid)
 	 */
 	err = mmc_send_if_cond(host, ocr);
 	if (!err)
-		ocr |= 1 << 30;
+		ocr |= SD_OCR_CCS;
+
+	/*
+	 * If the host supports one of UHS-I modes, request the card
+	 * to switch to 1.8V signaling level.
+	 */
+	if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
+		ocr |= SD_OCR_S18R;
+
+	/* If the host can supply more than 150mA, XPC should be set to 1. */
+	if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
+	    MMC_CAP_SET_XPC_180))
+		ocr |= SD_OCR_XPC;
 
-	err = mmc_send_app_op_cond(host, ocr, NULL);
+try_again:
+	err = mmc_send_app_op_cond(host, ocr, rocr);
 	if (err)
 		return err;
 
+	/*
+	 * In case CCS and S18A in the response is set, start Signal Voltage
+	 * Switch procedure. SPI mode doesn't support CMD11.
+	 */
+	if (!mmc_host_is_spi(host) && rocr &&
+	   ((*rocr & 0x41000000) == 0x41000000)) {
+		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
+		if (err) {
+			ocr &= ~SD_OCR_S18R;
+			goto try_again;
+		}
+	}
+
 	if (mmc_host_is_spi(host))
 		err = mmc_send_cid(host, cid);
 	else
@@ -553,11 +861,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
 	struct mmc_card *card;
 	int err;
 	u32 cid[4];
+	u32 rocr = 0;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
-	err = mmc_sd_get_cid(host, ocr, cid);
+	err = mmc_sd_get_cid(host, ocr, cid, &rocr);
 	if (err)
 		return err;
 
@@ -610,30 +919,47 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
 	if (err)
 		goto free_card;
 
-	/*
-	 * Attempt to change to high-speed (if supported)
-	 */
-	err = mmc_sd_switch_hs(card);
-	if (err > 0)
-		mmc_sd_go_highspeed(card);
-	else if (err)
-		goto free_card;
+	/* Initialization sequence for UHS-I cards */
+	if (rocr & SD_ROCR_S18A) {
+		err = mmc_sd_init_uhs_card(card);
+		if (err)
+			goto free_card;
 
-	/*
-	 * Set bus speed.
-	 */
-	mmc_set_clock(host, mmc_sd_get_max_clock(card));
+		/* Card is an ultra-high-speed card */
+		mmc_sd_card_set_uhs(card);
 
-	/*
-	 * Switch to wider bus (if supported).
-	 */
-	if ((host->caps & MMC_CAP_4_BIT_DATA) &&
-		(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
-		err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
-		if (err)
+		/*
+		 * Since initialization is now complete, enable preset
+		 * value registers for UHS-I cards.
+		 */
+		if (host->ops->enable_preset_value)
+			host->ops->enable_preset_value(host, true);
+	} else {
+		/*
+		 * Attempt to change to high-speed (if supported)
+		 */
+		err = mmc_sd_switch_hs(card);
+		if (err > 0)
+			mmc_sd_go_highspeed(card);
+		else if (err)
 			goto free_card;
 
-		mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+		/*
+		 * Set bus speed.
+		 */
+		mmc_set_clock(host, mmc_sd_get_max_clock(card));
+
+		/*
+		 * Switch to wider bus (if supported).
+		 */
+		if ((host->caps & MMC_CAP_4_BIT_DATA) &&
+			(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
+			err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
+			if (err)
+				goto free_card;
+
+			mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+		}
 	}
 
 	host->card = card;
@@ -773,6 +1099,15 @@ int mmc_attach_sd(struct mmc_host *host)
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	/* Make sure we are at 3.3V signalling voltage */
+	err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
+	if (err)
+		return err;
+
+	/* Disable preset value enable if already set since last time */
+	if (host->ops->enable_preset_value)
+		host->ops->enable_preset_value(host, false);
+
 	err = mmc_send_app_op_cond(host, 0, &ocr);
 	if (err)
 		return err;

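The UHS-I handshake added to mmc_sd_get_cid() above asks for features by setting bits in the ACMD41 argument (S18R for 1.8 V signaling, XPC for higher power, CCS for high capacity) and then checks the response: the 0x41000000 test is simply "CCS and S18A both set". A minimal sketch of that bit logic; the bit positions mirror the sd.h additions in this merge and should be treated as assumptions.

#include <stdio.h>

#define SD_OCR_S18R	(1u << 24)	/* request 1.8V signaling        */
#define SD_OCR_XPC	(1u << 28)	/* request maximum performance   */
#define SD_OCR_CCS	(1u << 30)	/* host handles SDHC/SDXC sizing */
#define SD_ROCR_S18A	SD_OCR_S18R	/* card accepted 1.8V signaling  */

/* Same test as "(*rocr & 0x41000000) == 0x41000000" above. */
static int card_accepted_uhs(unsigned int rocr)
{
	return (rocr & (SD_OCR_CCS | SD_ROCR_S18A)) ==
	       (SD_OCR_CCS | SD_ROCR_S18A);
}

int main(void)
{
	unsigned int ocr = 0x00300000;	/* e.g. a 3.2-3.4V voltage window */

	ocr |= SD_OCR_CCS | SD_OCR_S18R | SD_OCR_XPC;
	printf("ACMD41 argument: 0x%08x\n", ocr);

	printf("UHS handshake:   %d\n", card_accepted_uhs(0x41300000));	/* 1 */
	printf("UHS handshake:   %d\n", card_accepted_uhs(0x40300000));	/* 0 */
	return 0;
}
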
+ 1 - 1
drivers/mmc/core/sd.h

@@ -5,7 +5,7 @@
 
 extern struct device_type sd_type;
 
-int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid);
+int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
 int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
 void mmc_decode_cid(struct mmc_card *card);
 int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,

+ 17 - 34
drivers/mmc/core/sd_ops.c

@@ -21,10 +21,10 @@
 #include "core.h"
 #include "sd_ops.h"
 
-static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
+int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!host);
 	BUG_ON(card && (card->host != host));
@@ -49,6 +49,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mmc_app_cmd);
 
 /**
  *	mmc_wait_for_app_cmd - start an application command and wait for
@@ -66,7 +67,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
 int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
 	struct mmc_command *cmd, int retries)
 {
-	struct mmc_request mrq;
+	struct mmc_request mrq = {0};
 
 	int i, err;
 
@@ -119,13 +120,11 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
 int mmc_app_set_bus_width(struct mmc_card *card, int width)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!card);
 	BUG_ON(!card->host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = SD_APP_SET_BUS_WIDTH;
 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
@@ -149,13 +148,11 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
 
 int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int i, err = 0;
 
 	BUG_ON(!host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = SD_APP_OP_COND;
 	if (mmc_host_is_spi(host))
 		cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
@@ -194,7 +191,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 
 int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int err;
 	static const u8 test_pattern = 0xAA;
 	u8 result_pattern;
@@ -226,13 +223,11 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
 int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
 {
 	int err;
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 
 	BUG_ON(!host);
 	BUG_ON(!rca);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = SD_SEND_RELATIVE_ADDR;
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
@@ -249,9 +244,9 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
 int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 {
 	int err;
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	struct scatterlist sg;
 	void *data_buf;
 
@@ -272,10 +267,6 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 	if (data_buf == NULL)
 		return -ENOMEM;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 
@@ -312,9 +303,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
 int mmc_sd_switch(struct mmc_card *card, int mode, int group,
 	u8 value, u8 *resp)
 {
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	struct scatterlist sg;
 
 	BUG_ON(!card);
@@ -325,10 +316,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
 	mode = !!mode;
 	value &= 0xF;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 
@@ -361,9 +348,9 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
 int mmc_app_sd_status(struct mmc_card *card, void *ssr)
 {
 	int err;
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	struct scatterlist sg;
 
 	BUG_ON(!card);
@@ -376,10 +363,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
 	if (err)
 		return err;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 

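The change repeated through sd_ops.c above (and throughout mmc_ops.c and sdio_ops.c in this merge) replaces memset() with an "= {0}" definition-time initializer, which zeroes every member of the aggregate. A trivial illustration of the idiom:

#include <stdio.h>

struct cmd {
	unsigned int opcode;
	unsigned int arg;
	unsigned int flags;
};

int main(void)
{
	struct cmd a = {0};	/* every member starts out zero, no memset needed */

	printf("%u %u %u\n", a.opcode, a.arg, a.flags);	/* prints: 0 0 0 */
	return 0;
}
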
+ 15 - 9
drivers/mmc/core/sdio.c

@@ -16,6 +16,7 @@
 #include <linux/mmc/card.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
 
 #include "core.h"
 #include "bus.h"
@@ -31,6 +32,11 @@ static int sdio_read_fbr(struct sdio_func *func)
 	int ret;
 	unsigned char data;
 
+	if (mmc_card_nonstd_func_interface(func->card)) {
+		func->class = SDIO_CLASS_NONE;
+		return 0;
+	}
+
 	ret = mmc_io_rw_direct(func->card, 0, 0,
 		SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
 	if (ret)
@@ -181,7 +187,7 @@ static int sdio_disable_cd(struct mmc_card *card)
 	int ret;
 	u8 ctrl;
 
-	if (!card->cccr.disable_cd)
+	if (!mmc_card_disable_cd(card))
 		return 0;
 
 	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
@@ -363,8 +369,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
 		goto err;
 	}
 
-	if (ocr & R4_MEMORY_PRESENT
-	    && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
+	if ((ocr & R4_MEMORY_PRESENT) &&
+	    mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) {
 		card->type = MMC_TYPE_SD_COMBO;
 
 		if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
@@ -466,7 +472,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
 
 		card = oldcard;
 	}
-	mmc_fixup_device(card);
+	mmc_fixup_device(card, NULL);
 
 	if (card->type == MMC_TYPE_SD_COMBO) {
 		err = mmc_sd_setup_card(host, card, oldcard != NULL);
@@ -625,7 +631,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
 		}
 	}
 
-	if (!err && host->pm_flags & MMC_PM_KEEP_POWER) {
+	if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
 		mmc_claim_host(host);
 		sdio_disable_wide(host->card);
 		mmc_release_host(host);
@@ -645,10 +651,10 @@ static int mmc_sdio_resume(struct mmc_host *host)
 	mmc_claim_host(host);
 
 	/* No need to reinitialize powered-resumed nonremovable cards */
-	if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
+	if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
 		err = mmc_sdio_init_card(host, host->ocr, host->card,
-				 (host->pm_flags & MMC_PM_KEEP_POWER));
-	else if (mmc_card_is_powered_resumed(host)) {
+					mmc_card_keep_power(host));
+	else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
 		/* We may have switched to 1-bit mode during suspend */
 		err = sdio_enable_4bit_bus(host->card);
 		if (err > 0) {
@@ -691,7 +697,7 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
 
 	mmc_claim_host(host);
 	ret = mmc_sdio_init_card(host, host->ocr, host->card,
-			(host->pm_flags & MMC_PM_KEEP_POWER));
+				mmc_card_keep_power(host));
 	if (!ret && host->sdio_irqs)
 		mmc_signal_sdio_irq(host);
 	mmc_release_host(host);

+ 32 - 1
drivers/mmc/core/sdio_irq.c

@@ -31,6 +31,17 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
 {
 	int i, ret, count;
 	unsigned char pending;
+	struct sdio_func *func;
+
+	/*
+	 * Optimization: if there is only one function interrupt registered,
+	 * call its irq handler directly.
+	 */
+	func = card->sdio_single_irq;
+	if (func) {
+		func->irq_handler(func);
+		return 1;
+	}
 
 	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
 	if (ret) {
@@ -42,7 +53,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
 	count = 0;
 	for (i = 1; i <= 7; i++) {
 		if (pending & (1 << i)) {
-			struct sdio_func *func = card->sdio_func[i - 1];
+			func = card->sdio_func[i - 1];
 			if (!func) {
 				printk(KERN_WARNING "%s: pending IRQ for "
 					"non-existent function\n",
@@ -186,6 +197,24 @@ static int sdio_card_irq_put(struct mmc_card *card)
 	return 0;
 }
 
+/* If there is only 1 function registered set sdio_single_irq */
+static void sdio_single_irq_set(struct mmc_card *card)
+{
+	struct sdio_func *func;
+	int i;
+
+	card->sdio_single_irq = NULL;
+	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
+	    card->host->sdio_irqs == 1)
+		for (i = 0; i < card->sdio_funcs; i++) {
+		       func = card->sdio_func[i];
+		       if (func && func->irq_handler) {
+			       card->sdio_single_irq = func;
+			       break;
+		       }
+	       }
+}
+
 /**
  *	sdio_claim_irq - claim the IRQ for a SDIO function
  *	@func: SDIO function
@@ -227,6 +256,7 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
 	ret = sdio_card_irq_get(func->card);
 	if (ret)
 		func->irq_handler = NULL;
+	sdio_single_irq_set(func->card);
 
 	return ret;
 }
@@ -251,6 +281,7 @@ int sdio_release_irq(struct sdio_func *func)
 	if (func->irq_handler) {
 		func->irq_handler = NULL;
 		sdio_card_irq_put(func->card);
+		sdio_single_irq_set(func->card);
 	}
 
 	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);

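The sdio_single_irq fast path above skips the CCCR INTx read entirely when exactly one handler is registered; otherwise the pending byte is walked one bit per function (functions 1 to 7). A reduced user-space sketch of that dispatch, with a toy handler table standing in for the sdio_func array:

#include <stdio.h>

typedef void (*irq_handler_t)(int func_num);

static void wlan_irq(int func_num)
{
	printf("handling IRQ for function %d\n", func_num);
}

/* handlers[1..7] stand in for card->sdio_func[i]->irq_handler */
static irq_handler_t handlers[8] = { [2] = wlan_irq };

/* cached fast-path handler, like card->sdio_single_irq */
static irq_handler_t single_irq;
static int single_irq_num;

static int process_pending_irqs(unsigned char pending)
{
	int i, count = 0;

	/* Fast path: exactly one handler registered, skip the INTx decode. */
	if (single_irq) {
		single_irq(single_irq_num);
		return 1;
	}

	for (i = 1; i <= 7; i++)
		if ((pending & (1 << i)) && handlers[i]) {
			handlers[i](i);
			count++;
		}
	return count;
}

int main(void)
{
	/* pending = 0x04 -> bit 2 set, i.e. function 2 raised the IRQ */
	printf("%d interrupt(s) handled\n", process_pending_irqs(0x04));
	return 0;
}
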
+ 5 - 13
drivers/mmc/core/sdio_ops.c

@@ -21,13 +21,11 @@
 
 int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int i, err = 0;
 
 	BUG_ON(!host);
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = SD_IO_SEND_OP_COND;
 	cmd.arg = ocr;
 	cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
@@ -70,7 +68,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
 	unsigned addr, u8 in, u8 *out)
 {
-	struct mmc_command cmd;
+	struct mmc_command cmd = {0};
 	int err;
 
 	BUG_ON(!host);
@@ -80,8 +78,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
 	if (addr & ~0x1FFFF)
 		return -EINVAL;
 
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
 	cmd.opcode = SD_IO_RW_DIRECT;
 	cmd.arg = write ? 0x80000000 : 0x00000000;
 	cmd.arg |= fn << 28;
@@ -125,9 +121,9 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
 int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
 {
-	struct mmc_request mrq;
-	struct mmc_command cmd;
-	struct mmc_data data;
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
 	struct scatterlist sg;
 
 	BUG_ON(!card);
@@ -140,10 +136,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	if (addr & ~0x1FFFF)
 		return -EINVAL;
 
-	memset(&mrq, 0, sizeof(struct mmc_request));
-	memset(&cmd, 0, sizeof(struct mmc_command));
-	memset(&data, 0, sizeof(struct mmc_data));
-
 	mrq.cmd = &cmd;
 	mrq.data = &data;
 

+ 32 - 1
drivers/mmc/host/Kconfig

@@ -154,7 +154,7 @@ config MMC_SDHCI_DOVE
 	  If unsure, say N.
 
 config MMC_SDHCI_TEGRA
-	tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+	bool "SDHCI platform support for the Tegra SD/MMC Controller"
 	depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
 	select MMC_SDHCI_IO_ACCESSORS
 	help
@@ -535,6 +535,37 @@ config MMC_JZ4740
 	  If you have a board based on such a SoC and with a SD/MMC slot,
 	  say Y or M here.
 
+config MMC_VUB300
+	tristate "VUB300 USB to SDIO/SD/MMC Host Controller support"
+	depends on USB
+	help
+	  This selects support for Elan Digital Systems' VUB300 chip.
+
+	  The VUB300 is a USB-SDIO Host Controller Interface chip
+	  that enables the host computer to use SDIO/SD/MMC cards
+	  via a USB 2.0 or USB 1.1 host.
+
+	  The VUB300 chip will be found in both physically separate
+	  USB to SDIO/SD/MMC adapters and embedded on some motherboards.
+
+	  The VUB300 chip supports SD and MMC memory cards in addition
+	  to single and multifunction SDIO cards.
+
+	  Some SDIO cards need a firmware file to be loaded and sent to
+	  the VUB300 chip in order to achieve better data throughput.
+	  Download these "Offload Pseudocode" files from Elan Digital
+	  Systems' website
+	  http://www.elandigitalsystems.com/support/downloads.php
+	  and put them in /lib/firmware. Without these additional firmware
+	  files the VUB300 chip will still function, but not at the best
+	  obtainable data rate.
+
+	  To compile this mmc host controller driver as a module,
+	  choose M here: the module will be called vub300.
+
+	  If you have a computer with an embedded VUB300 chip, or if
+	  you intend to connect a USB adapter based on a VUB300 chip,
+	  say Y or M here.
+
 config MMC_USHC
 	tristate "USB SD Host Controller (USHC) support"
 	depends on USB

+ 1 - 0
drivers/mmc/host/Makefile

@@ -41,6 +41,7 @@ obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
 obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
 obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
 obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o
+obj-$(CONFIG_MMC_VUB300)	+= vub300.o
 obj-$(CONFIG_MMC_USHC)		+= ushc.o
 
 obj-$(CONFIG_MMC_SDHCI_PLTFM)			+= sdhci-platform.o

+ 3 - 3
drivers/mmc/host/dw_mmc.c

@@ -1769,9 +1769,6 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
 	int i, ret;
 	struct dw_mci *host = platform_get_drvdata(pdev);
 
-	if (host->vmmc)
-		regulator_enable(host->vmmc);
-
 	for (i = 0; i < host->num_slots; i++) {
 		struct dw_mci_slot *slot = host->slot[i];
 		if (!slot)
@@ -1798,6 +1795,9 @@ static int dw_mci_resume(struct platform_device *pdev)
 	int i, ret;
 	struct dw_mci *host = platform_get_drvdata(pdev);
 
+	if (host->vmmc)
+		regulator_enable(host->vmmc);
+
 	if (host->dma_ops->init)
 		host->dma_ops->init(host);
 

+ 26 - 23
drivers/mmc/host/sdhci-pci.c

@@ -18,11 +18,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/device.h>
-
 #include <linux/mmc/host.h>
-
-#include <asm/scatterlist.h>
-#include <asm/io.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
 
 #include "sdhci.h"
 
@@ -46,14 +44,14 @@ struct sdhci_pci_slot;
 struct sdhci_pci_fixes {
 	unsigned int		quirks;
 
-	int			(*probe)(struct sdhci_pci_chip*);
+	int			(*probe) (struct sdhci_pci_chip *);
 
-	int			(*probe_slot)(struct sdhci_pci_slot*);
-	void			(*remove_slot)(struct sdhci_pci_slot*, int);
+	int			(*probe_slot) (struct sdhci_pci_slot *);
+	void			(*remove_slot) (struct sdhci_pci_slot *, int);
 
-	int			(*suspend)(struct sdhci_pci_chip*,
+	int			(*suspend) (struct sdhci_pci_chip *,
 					pm_message_t);
-	int			(*resume)(struct sdhci_pci_chip*);
+	int			(*resume) (struct sdhci_pci_chip *);
 };
 
 struct sdhci_pci_slot {
@@ -329,6 +327,11 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
 		return ret;
 	}
 
+	/* quirk for unstable RO-detection on JM388 chips */
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
+		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
+
 	return 0;
 }
 
@@ -402,7 +405,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
 
 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
-		for (i = 0;i < chip->num_slots;i++)
+		for (i = 0; i < chip->num_slots; i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 0);
 	}
 
@@ -415,7 +418,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
 
 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
-		for (i = 0;i < chip->num_slots;i++)
+		for (i = 0; i < chip->num_slots; i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 1);
 	}
 
@@ -798,7 +801,7 @@ static struct sdhci_ops sdhci_pci_ops = {
 
 #ifdef CONFIG_PM
 
-static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct sdhci_pci_chip *chip;
 	struct sdhci_pci_slot *slot;
@@ -810,7 +813,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
 	if (!chip)
 		return 0;
 
-	for (i = 0;i < chip->num_slots;i++) {
+	for (i = 0; i < chip->num_slots; i++) {
 		slot = chip->slots[i];
 		if (!slot)
 			continue;
@@ -818,7 +821,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
 		ret = sdhci_suspend_host(slot->host, state);
 
 		if (ret) {
-			for (i--;i >= 0;i--)
+			for (i--; i >= 0; i--)
 				sdhci_resume_host(chip->slots[i]->host);
 			return ret;
 		}
@@ -833,7 +836,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
 	if (chip->fixes && chip->fixes->suspend) {
 		ret = chip->fixes->suspend(chip, state);
 		if (ret) {
-			for (i = chip->num_slots - 1;i >= 0;i--)
+			for (i = chip->num_slots - 1; i >= 0; i--)
 				sdhci_resume_host(chip->slots[i]->host);
 			return ret;
 		}
@@ -855,7 +858,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
 	return 0;
 }
 
-static int sdhci_pci_resume (struct pci_dev *pdev)
+static int sdhci_pci_resume(struct pci_dev *pdev)
 {
 	struct sdhci_pci_chip *chip;
 	struct sdhci_pci_slot *slot;
@@ -877,7 +880,7 @@ static int sdhci_pci_resume (struct pci_dev *pdev)
 			return ret;
 	}
 
-	for (i = 0;i < chip->num_slots;i++) {
+	for (i = 0; i < chip->num_slots; i++) {
 		slot = chip->slots[i];
 		if (!slot)
 			continue;
@@ -1059,7 +1062,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
 	}
 
 	chip->pdev = pdev;
-	chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data;
+	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
 	if (chip->fixes)
 		chip->quirks = chip->fixes->quirks;
 	chip->num_slots = slots;
@@ -1074,10 +1077,10 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
 
 	slots = chip->num_slots;	/* Quirk may have changed this */
 
-	for (i = 0;i < slots;i++) {
+	for (i = 0; i < slots; i++) {
 		slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
 		if (IS_ERR(slot)) {
-			for (i--;i >= 0;i--)
+			for (i--; i >= 0; i--)
 				sdhci_pci_remove_slot(chip->slots[i]);
 			ret = PTR_ERR(slot);
 			goto free;
@@ -1105,7 +1108,7 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
 	chip = pci_get_drvdata(pdev);
 
 	if (chip) {
-		for (i = 0;i < chip->num_slots; i++)
+		for (i = 0; i < chip->num_slots; i++)
 			sdhci_pci_remove_slot(chip->slots[i]);
 
 		pci_set_drvdata(pdev, NULL);
@@ -1116,9 +1119,9 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
 }
 
 static struct pci_driver sdhci_driver = {
-	.name = 	"sdhci-pci",
+	.name =		"sdhci-pci",
 	.id_table =	pci_ids,
-	.probe = 	sdhci_pci_probe,
+	.probe =	sdhci_pci_probe,
 	.remove =	__devexit_p(sdhci_pci_remove),
 	.suspend =	sdhci_pci_suspend,
 	.resume	=	sdhci_pci_resume,

+ 47 - 1
drivers/mmc/host/sdhci-pxa.c

@@ -69,7 +69,45 @@ static void set_clock(struct sdhci_host *host, unsigned int clock)
 	}
 }
 
+static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+{
+	u16 ctrl_2;
+
+	/*
+	 * Set V18_EN -- UHS modes do not work without this.
+	 * This does not change the signaling voltage.
+	 */
+	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+	/* Select Bus Speed Mode for host */
+	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+	switch (uhs) {
+	case MMC_TIMING_UHS_SDR12:
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+		break;
+	case MMC_TIMING_UHS_SDR25:
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
+		break;
+	case MMC_TIMING_UHS_DDR50:
+		ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
+		break;
+	}
+
+	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+	pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n",
+		__func__, mmc_hostname(host->mmc), uhs, ctrl_2);
+
+	return 0;
+}
+
 static struct sdhci_ops sdhci_pxa_ops = {
+	.set_uhs_signaling = set_uhs_signaling,
 	.set_clock = set_clock,
 };
 
@@ -136,11 +174,19 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
 	host->hw_name = "MMC";
 	host->ops = &sdhci_pxa_ops;
 	host->irq = irq;
-	host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+	host->quirks = SDHCI_QUIRK_BROKEN_ADMA
+		| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
+		| SDHCI_QUIRK_32BIT_DMA_ADDR
+		| SDHCI_QUIRK_32BIT_DMA_SIZE
+		| SDHCI_QUIRK_32BIT_ADMA_SIZE
+		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
 
 	if (pdata->quirks)
 		host->quirks |= pdata->quirks;
 
+	/* enable 1.8V DDR capable */
+	host->mmc->caps |= MMC_CAP_1_8V_DDR;
+
 	/* If slot design supports 8 bit data, indicate this to MMC. */
 	if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
 		host->mmc->caps |= MMC_CAP_8_BIT_DATA;

+ 2 - 0
drivers/mmc/host/sdhci-tegra.c

@@ -184,6 +184,8 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
 	clk_enable(clk);
 	pltfm_host->clk = clk;
 
+	host->mmc->pm_caps = plat->pm_flags;
+
 	if (plat->is_8bit)
 		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
 

+ 786 - 68
drivers/mmc/host/sdhci.c

@@ -38,13 +38,16 @@
 #define SDHCI_USE_LEDS_CLASS
 #endif
 
+#define MAX_TUNING_LOOP 40
+
 static unsigned int debug_quirks = 0;
 
-static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
 static void sdhci_finish_data(struct sdhci_host *);
 
 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
 static void sdhci_finish_command(struct sdhci_host *);
+static int sdhci_execute_tuning(struct mmc_host *mmc);
+static void sdhci_tuning_timer(unsigned long data);
 
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
@@ -84,6 +87,8 @@ static void sdhci_dumpregs(struct sdhci_host *host)
 	printk(KERN_DEBUG DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
 		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
+	printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
+		sdhci_readw(host, SDHCI_HOST_CONTROL2));
 
 	if (host->flags & SDHCI_USE_ADMA)
 		printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
@@ -157,6 +162,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
 	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 		ier = sdhci_readl(host, SDHCI_INT_ENABLE);
 
+	if (host->ops->platform_reset_enter)
+		host->ops->platform_reset_enter(host, mask);
+
 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
 
 	if (mask & SDHCI_RESET_ALL)
@@ -177,6 +185,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
 		mdelay(1);
 	}
 
+	if (host->ops->platform_reset_exit)
+		host->ops->platform_reset_exit(host, mask);
+
 	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
 }
@@ -591,9 +602,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 		data->sg_len, direction);
 }
 
-static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
+static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 count;
+	struct mmc_data *data = cmd->data;
 	unsigned target_timeout, current_timeout;
 
 	/*
@@ -605,9 +617,16 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
 		return 0xE;
 
+	/* Unspecified timeout, assume max */
+	if (!data && !cmd->cmd_timeout_ms)
+		return 0xE;
+
 	/* timeout in us */
-	target_timeout = data->timeout_ns / 1000 +
-		data->timeout_clks / host->clock;
+	if (!data)
+		target_timeout = cmd->cmd_timeout_ms * 1000;
+	else
+		target_timeout = data->timeout_ns / 1000 +
+			data->timeout_clks / host->clock;
 
 	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
 		host->timeout_clk = host->clock / 1000;
@@ -622,6 +641,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 	 *     =>
 	 *     (1) / (2) > 2^6
 	 */
+	BUG_ON(!host->timeout_clk);
 	count = 0;
 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
 	while (current_timeout < target_timeout) {
@@ -632,8 +652,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
 	}
 
 	if (count >= 0xF) {
-		printk(KERN_WARNING "%s: Too large timeout requested!\n",
-			mmc_hostname(host->mmc));
+		printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
+		       mmc_hostname(host->mmc), cmd->opcode);
 		count = 0xE;
 	}
 
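The count written to SDHCI_TIMEOUT_CONTROL above encodes a timeout of 2^(13 + count) timeout-clock cycles, so the loop just doubles until the requested timeout is covered and clamps at 0xE. A stand-alone sketch of that search with plain integers, using the same units as the driver (microseconds and kHz):

#include <stdio.h>

/* Mirrors sdhci_calc_timeout(): smallest count such that
 * 2^(13 + count) cycles of the timeout clock cover the target. */
static unsigned char calc_timeout_count(unsigned int target_timeout_us,
					unsigned int timeout_clk_khz)
{
	unsigned char count = 0;
	unsigned int current_timeout = (1 << 13) * 1000 / timeout_clk_khz;

	while (current_timeout < target_timeout_us) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}
	if (count >= 0xF)
		count = 0xE;	/* clamp, as the warning path above does */
	return count;
}

int main(void)
{
	/* e.g. 100 ms requested over a 50 MHz (50000 kHz) timeout clock */
	printf("count = %u\n", calc_timeout_count(100 * 1000, 50000));
	return 0;
}
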
@@ -651,15 +671,21 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
 		sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
 }
 
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 count;
 	u8 ctrl;
+	struct mmc_data *data = cmd->data;
 	int ret;
 
 	WARN_ON(host->data);
 
-	if (data == NULL)
+	if (data || (cmd->flags & MMC_RSP_BUSY)) {
+		count = sdhci_calc_timeout(host, cmd);
+		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+	}
+
+	if (!data)
 		return;
 
 	/* Sanity checks */
@@ -669,9 +695,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 
 	host->data = data;
 	host->data_early = 0;
-
-	count = sdhci_calc_timeout(host, data);
-	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+	host->data->bytes_xfered = 0;
 
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
 		host->flags |= SDHCI_REQ_USE_DMA;
@@ -807,15 +831,17 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
 
 	sdhci_set_transfer_irqs(host);
 
-	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
-	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
+	/* Set the DMA boundary value and block size */
+	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
+		data->blksz), SDHCI_BLOCK_SIZE);
 	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
 }
 
 static void sdhci_set_transfer_mode(struct sdhci_host *host,
-	struct mmc_data *data)
+	struct mmc_command *cmd)
 {
 	u16 mode;
+	struct mmc_data *data = cmd->data;
 
 	if (data == NULL)
 		return;
@@ -823,12 +849,20 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
 	WARN_ON(!host->data);
 
 	mode = SDHCI_TRNS_BLK_CNT_EN;
-	if (data->blocks > 1) {
-		if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
-			mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12;
-		else
-			mode |= SDHCI_TRNS_MULTI;
+	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
+		mode |= SDHCI_TRNS_MULTI;
+		/*
+		 * If we are sending CMD23, CMD12 never gets sent
+		 * on successful completion (so no Auto-CMD12).
+		 */
+		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
+			mode |= SDHCI_TRNS_AUTO_CMD12;
+		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+			mode |= SDHCI_TRNS_AUTO_CMD23;
+			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
+		}
 	}
+
 	if (data->flags & MMC_DATA_READ)
 		mode |= SDHCI_TRNS_READ;
 	if (host->flags & SDHCI_REQ_USE_DMA)
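As an aside, the resulting Transfer Mode bits can be checked in isolation. This is a stand-alone sketch (not driver code) using the SDHCI_TRNS_* values from sdhci.h, where the multi/sbc/auto_cmd12/auto_cmd23 parameters stand in for the request and host-flag state examined above:

#include <stdio.h>

#define SDHCI_TRNS_BLK_CNT_EN	0x02
#define SDHCI_TRNS_AUTO_CMD12	0x04
#define SDHCI_TRNS_AUTO_CMD23	0x08
#define SDHCI_TRNS_MULTI	0x20

static unsigned int transfer_mode(int multi, int sbc, int auto_cmd12,
				  int auto_cmd23)
{
	unsigned int mode = SDHCI_TRNS_BLK_CNT_EN;

	if (multi) {
		mode |= SDHCI_TRNS_MULTI;
		/* CMD23 present: never Auto-CMD12, possibly Auto-CMD23 */
		if (!sbc && auto_cmd12)
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (sbc && auto_cmd23)
			mode |= SDHCI_TRNS_AUTO_CMD23;
	}
	return mode;
}

int main(void)
{
	printf("open-ended multi-block: 0x%02x\n", transfer_mode(1, 0, 1, 0));
	printf("CMD23 multi-block:      0x%02x\n", transfer_mode(1, 1, 1, 1));
	return 0;
}
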
@@ -868,7 +902,15 @@ static void sdhci_finish_data(struct sdhci_host *host)
 	else
 		data->bytes_xfered = data->blksz * data->blocks;
 
-	if (data->stop) {
+	/*
+	 * Need to send CMD12 if -
+	 * a) open-ended multiblock transfer (no CMD23)
+	 * b) error in multiblock transfer
+	 */
+	if (data->stop &&
+	    (data->error ||
+	     !host->mrq->sbc)) {
+
 		/*
 		 * The controller needs a reset of internal state machines
 		 * upon error conditions.
@@ -920,11 +962,11 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
 
 	host->cmd = cmd;
 
-	sdhci_prepare_data(host, cmd->data);
+	sdhci_prepare_data(host, cmd);
 
 	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
 
-	sdhci_set_transfer_mode(host, cmd->data);
+	sdhci_set_transfer_mode(host, cmd);
 
 	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
 		printk(KERN_ERR "%s: Unsupported response type!\n",
@@ -947,7 +989,9 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
 		flags |= SDHCI_CMD_CRC;
 	if (cmd->flags & MMC_RSP_OPCODE)
 		flags |= SDHCI_CMD_INDEX;
-	if (cmd->data)
+
+	/* CMD19 is special in that the Data Present Select should be set */
+	if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
 		flags |= SDHCI_CMD_DATA;
 
 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -977,19 +1021,27 @@ static void sdhci_finish_command(struct sdhci_host *host)
 
 	host->cmd->error = 0;
 
-	if (host->data && host->data_early)
-		sdhci_finish_data(host);
+	/* Finished CMD23, now send actual command. */
+	if (host->cmd == host->mrq->sbc) {
+		host->cmd = NULL;
+		sdhci_send_command(host, host->mrq->cmd);
+	} else {
 
-	if (!host->cmd->data)
-		tasklet_schedule(&host->finish_tasklet);
+		/* Processed actual command. */
+		if (host->data && host->data_early)
+			sdhci_finish_data(host);
 
-	host->cmd = NULL;
+		if (!host->cmd->data)
+			tasklet_schedule(&host->finish_tasklet);
+
+		host->cmd = NULL;
+	}
 }
 
 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 {
-	int div;
-	u16 clk;
+	int div = 0; /* Initialized for compiler warning */
+	u16 clk = 0;
 	unsigned long timeout;
 
 	if (clock == host->clock)
@@ -1007,14 +1059,45 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 		goto out;
 
 	if (host->version >= SDHCI_SPEC_300) {
-		/* Version 3.00 divisors must be a multiple of 2. */
-		if (host->max_clk <= clock)
-			div = 1;
-		else {
-			for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) {
-				if ((host->max_clk / div) <= clock)
-					break;
+		/*
+		 * Check if the Host Controller supports Programmable Clock
+		 * Mode.
+		 */
+		if (host->clk_mul) {
+			u16 ctrl;
+
+			/*
+			 * We need to figure out whether the Host Driver needs
+			 * to select Programmable Clock Mode, or the value can
+			 * be set automatically by the Host Controller based on
+			 * the Preset Value registers.
+			 */
+			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+				for (div = 1; div <= 1024; div++) {
+					if (((host->max_clk * host->clk_mul) /
+					      div) <= clock)
+						break;
+				}
+				/*
+				 * Set Programmable Clock Mode in the Clock
+				 * Control register.
+				 */
+				clk = SDHCI_PROG_CLOCK_MODE;
+				div--;
 			}
+		} else {
+			/* Version 3.00 divisors must be a multiple of 2. */
+			if (host->max_clk <= clock)
+				div = 1;
+			else {
+				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
+				     div += 2) {
+					if ((host->max_clk / div) <= clock)
+						break;
+				}
+			}
+			div >>= 1;
 		}
 	} else {
 		/* Version 2.00 divisors must be a power of 2. */
@@ -1022,10 +1105,10 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 			if ((host->max_clk / div) <= clock)
 				break;
 		}
+		div >>= 1;
 	}
-	div >>= 1;
 
-	clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
+	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
 		<< SDHCI_DIVIDER_HI_SHIFT;
 	clk |= SDHCI_CLOCK_INT_EN;
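A stand-alone sketch of the v3.00 divisor selection above (ignoring the Preset Value Enable case), assuming max_clk is the base clock in Hz and clk_mul is the already-incremented clock multiplier, with 0 meaning programmable clock mode is unsupported:

#include <stdio.h>

#define MAX_DIV_SPEC_300	2046

static unsigned int pick_div(unsigned int max_clk, unsigned int clk_mul,
			     unsigned int clock, int *prog_mode)
{
	unsigned int div;

	*prog_mode = 0;

	if (clk_mul) {
		/* programmable clock mode: register holds divisor - 1 */
		for (div = 1; div <= 1024; div++)
			if ((max_clk * clk_mul) / div <= clock)
				break;
		*prog_mode = 1;
		return div - 1;
	}

	/* divided clock mode: even divisors, register holds divisor / 2 */
	if (max_clk <= clock)
		div = 1;
	else
		for (div = 2; div < MAX_DIV_SPEC_300; div += 2)
			if (max_clk / div <= clock)
				break;

	return div >> 1;
}

int main(void)
{
	int prog;

	/* 200 MHz base clock, no multiplier, 50 MHz target */
	printf("divider field = %u\n",
	       pick_div(200000000, 0, 50000000, &prog));
	return 0;
}
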
@@ -1131,7 +1214,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 #ifndef SDHCI_USE_LEDS_CLASS
 	sdhci_activate_led(host);
 #endif
-	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) {
+
+	/*
+	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
+	 * requests if Auto-CMD12 is enabled.
+	 */
+	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
 		if (mrq->stop) {
 			mrq->data->stop = NULL;
 			mrq->stop = NULL;
@@ -1150,8 +1238,30 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
 		host->mrq->cmd->error = -ENOMEDIUM;
 		tasklet_schedule(&host->finish_tasklet);
-	} else
-		sdhci_send_command(host, mrq->cmd);
+	} else {
+		u32 present_state;
+
+		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+		/*
+		 * Check if the re-tuning timer has already expired and there
+		 * is no on-going data transfer. If so, we need to execute the
+		 * tuning procedure before sending the command.
+		 */
+		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
+		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			sdhci_execute_tuning(mmc);
+			spin_lock_irqsave(&host->lock, flags);
+
+			/* Restore original mmc_request structure */
+			host->mrq = mrq;
+		}
+
+		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
+			sdhci_send_command(host, mrq->sbc);
+		else
+			sdhci_send_command(host, mrq->cmd);
+	}
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
@@ -1222,7 +1332,84 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	else
 		ctrl &= ~SDHCI_CTRL_HISPD;
 
-	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	if (host->version >= SDHCI_SPEC_300) {
+		u16 clk, ctrl_2;
+		unsigned int clock;
+
+		/* In case of UHS-I modes, set High Speed Enable */
+		if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
+		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
+		    (ios->timing == MMC_TIMING_UHS_SDR25) ||
+		    (ios->timing == MMC_TIMING_UHS_SDR12))
+			ctrl |= SDHCI_CTRL_HISPD;
+
+		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+			/*
+			 * We only need to set Driver Strength if the
+			 * preset value enable is not set.
+			 */
+			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
+			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
+			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
+				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
+
+			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+		} else {
+			/*
+			 * According to SDHC Spec v3.00, if the Preset Value
+			 * Enable in the Host Control 2 register is set, we
+			 * need to reset SD Clock Enable before changing High
+			 * Speed Enable to avoid generating clock glitches.
+			 */
+
+			/* Reset SD Clock Enable */
+			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+			clk &= ~SDHCI_CLOCK_CARD_EN;
+			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+			/* Re-enable SD Clock */
+			clock = host->clock;
+			host->clock = 0;
+			sdhci_set_clock(host, clock);
+		}
+
+
+		/* Reset SD Clock Enable */
+		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+		clk &= ~SDHCI_CLOCK_CARD_EN;
+		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+		if (host->ops->set_uhs_signaling)
+			host->ops->set_uhs_signaling(host, ios->timing);
+		else {
+			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			/* Select Bus Speed Mode for host */
+			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+			if (ios->timing == MMC_TIMING_UHS_SDR12)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+			else if (ios->timing == MMC_TIMING_UHS_SDR25)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+			else if (ios->timing == MMC_TIMING_UHS_SDR50)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+			else if (ios->timing == MMC_TIMING_UHS_SDR104)
+				ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+			else if (ios->timing == MMC_TIMING_UHS_DDR50)
+				ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+		}
+
+		/* Re-enable SD Clock */
+		clock = host->clock;
+		host->clock = 0;
+		sdhci_set_clock(host, clock);
+	} else
+		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
 	/*
 	 * Some (ENE) controllers go apeshit on some ios operation,
@@ -1237,14 +1424,11 @@ out:
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static int sdhci_get_ro(struct mmc_host *mmc)
+static int check_ro(struct sdhci_host *host)
 {
-	struct sdhci_host *host;
 	unsigned long flags;
 	int is_readonly;
 
-	host = mmc_priv(mmc);
-
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->flags & SDHCI_DEVICE_DEAD)
@@ -1262,6 +1446,29 @@ static int sdhci_get_ro(struct mmc_host *mmc)
 		!is_readonly : is_readonly;
 }
 
+#define SAMPLE_COUNT	5
+
+static int sdhci_get_ro(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	int i, ro_count;
+
+	host = mmc_priv(mmc);
+
+	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
+		return check_ro(host);
+
+	ro_count = 0;
+	for (i = 0; i < SAMPLE_COUNT; i++) {
+		if (check_ro(host)) {
+			if (++ro_count > SAMPLE_COUNT / 2)
+				return 1;
+		}
+		msleep(30);
+	}
+	return 0;
+}
+
 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
 	struct sdhci_host *host;
@@ -1284,11 +1491,322 @@ out:
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+	struct mmc_ios *ios)
+{
+	struct sdhci_host *host;
+	u8 pwr;
+	u16 clk, ctrl;
+	u32 present_state;
+
+	host = mmc_priv(mmc);
+
+	/*
+	 * Signal Voltage Switching is only applicable for Host Controllers
+	 * v3.00 and above.
+	 */
+	if (host->version < SDHCI_SPEC_300)
+		return 0;
+
+	/*
+	 * We first check whether the request is to set signalling voltage
+	 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
+	 */
+	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
+		ctrl &= ~SDHCI_CTRL_VDD_180;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+		/* Wait for 5ms */
+		usleep_range(5000, 5500);
+
+		/* 3.3V regulator output should be stable within 5 ms */
+		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (!(ctrl & SDHCI_CTRL_VDD_180))
+			return 0;
+		else {
+			printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
+				"signalling voltage failed\n");
+			return -EIO;
+		}
+	} else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
+		  (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
+		/* Stop SDCLK */
+		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+		clk &= ~SDHCI_CLOCK_CARD_EN;
+		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+		/* Check whether DAT[3:0] is 0000 */
+		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
+		if (!((present_state & SDHCI_DATA_LVL_MASK) >>
+		       SDHCI_DATA_LVL_SHIFT)) {
+			/*
+			 * Enable 1.8V Signal Enable in the Host Control2
+			 * register
+			 */
+			ctrl |= SDHCI_CTRL_VDD_180;
+			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+			/* Wait for 5ms */
+			usleep_range(5000, 5500);
+
+			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			if (ctrl & SDHCI_CTRL_VDD_180) {
+				/* Provide SDCLK again and wait for 1ms*/
+				clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+				clk |= SDHCI_CLOCK_CARD_EN;
+				sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+				usleep_range(1000, 1500);
+
+				/*
+				 * If DAT[3:0] level is 1111b, then the card
+				 * was successfully switched to 1.8V signaling.
+				 */
+				present_state = sdhci_readl(host,
+							SDHCI_PRESENT_STATE);
+				if ((present_state & SDHCI_DATA_LVL_MASK) ==
+				     SDHCI_DATA_LVL_MASK)
+					return 0;
+			}
+		}
+
+		/*
+		 * If we are here, the switch to 1.8V signaling failed. We
+		 * power cycle the card, and retry the initialization
+		 * sequence by setting S18R to 0.
+		 */
+		pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
+		pwr &= ~SDHCI_POWER_ON;
+		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+		/* Wait for 1ms as per the spec */
+		usleep_range(1000, 1500);
+		pwr |= SDHCI_POWER_ON;
+		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+		printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
+			"voltage failed, retrying with S18R set to 0\n");
+		return -EAGAIN;
+	} else
+		/* No signal voltage switch required */
+		return 0;
+}
+
+static int sdhci_execute_tuning(struct mmc_host *mmc)
+{
+	struct sdhci_host *host;
+	u16 ctrl;
+	u32 ier;
+	int tuning_loop_counter = MAX_TUNING_LOOP;
+	unsigned long timeout;
+	int err = 0;
+
+	host = mmc_priv(mmc);
+
+	disable_irq(host->irq);
+	spin_lock(&host->lock);
+
+	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+	/*
+	 * Host Controller needs tuning only in case of SDR104 mode
+	 * and for SDR50 mode when Use Tuning for SDR50 is set in
+	 * Capabilities register.
+	 */
+	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
+	    (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+	    (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
+		ctrl |= SDHCI_CTRL_EXEC_TUNING;
+	else {
+		spin_unlock(&host->lock);
+		enable_irq(host->irq);
+		return 0;
+	}
+
+	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+	/*
+	 * As per the Host Controller spec v3.00, the tuning command
+	 * generates a Buffer Read Ready interrupt, so enable that.
+	 *
+	 * Note: The spec clearly says that when tuning sequence
+	 * is being performed, the controller does not generate
+	 * interrupts other than Buffer Read Ready interrupt. But
+	 * to make sure we don't hit a controller bug, we _only_
+	 * enable Buffer Read Ready interrupt here.
+	 */
+	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
+	sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
+
+	/*
+	 * Issue CMD19 repeatedly until Execute Tuning is cleared, the loop
+	 * count reaches 40, or a timeout of 150ms occurs.
+	 */
+	timeout = 150;
+	do {
+		struct mmc_command cmd = {0};
+		struct mmc_request mrq = {0};
+
+		if (!tuning_loop_counter && !timeout)
+			break;
+
+		cmd.opcode = MMC_SEND_TUNING_BLOCK;
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+		cmd.retries = 0;
+		cmd.data = NULL;
+		cmd.error = 0;
+
+		mrq.cmd = &cmd;
+		host->mrq = &mrq;
+
+		/*
+		 * In response to CMD19, the card sends a 64-byte tuning
+		 * block to the Host Controller, so we set the block size
+		 * to 64 here.
+		 */
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+
+		/*
+		 * The tuning block is sent by the card to the host controller.
+		 * So we set the TRNS_READ bit in the Transfer Mode register.
+		 * This also takes care of setting DMA Enable and Multi Block
+		 * Select in the same register to 0.
+		 */
+		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+		sdhci_send_command(host, &cmd);
+
+		host->cmd = NULL;
+		host->mrq = NULL;
+
+		spin_unlock(&host->lock);
+		enable_irq(host->irq);
+
+		/* Wait for Buffer Read Ready interrupt */
+		wait_event_interruptible_timeout(host->buf_ready_int,
+					(host->tuning_done == 1),
+					msecs_to_jiffies(50));
+		disable_irq(host->irq);
+		spin_lock(&host->lock);
+
+		if (!host->tuning_done) {
+			printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
+				"Buffer Read Ready interrupt during tuning "
+				"procedure, falling back to fixed sampling "
+				"clock\n");
+			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+			err = -EIO;
+			goto out;
+		}
+
+		host->tuning_done = 0;
+
+		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		tuning_loop_counter--;
+		timeout--;
+		mdelay(1);
+	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);
+
+	/*
+	 * The Host Driver has exhausted the maximum number of loops allowed,
+	 * so use fixed sampling frequency.
+	 */
+	if (!tuning_loop_counter || !timeout) {
+		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+	} else {
+		if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
+			printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
+				" failed, falling back to fixed sampling"
+				" clock\n");
+			err = -EIO;
+		}
+	}
+
+out:
+	/*
+	 * If this is the very first time we are here, start the re-tuning
+	 * timer. The SDHCI_NEEDS_RETUNING flag is only clear on that first
+	 * pass, so check it before actually starting the timer.
+	 */
+	if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
+	    (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+		mod_timer(&host->tuning_timer, jiffies +
+			host->tuning_count * HZ);
+		/* Tuning mode 1 limits the maximum data length to 4MB */
+		mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
+	} else {
+		host->flags &= ~SDHCI_NEEDS_RETUNING;
+		/* Reload the new initial value for timer */
+		if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+			mod_timer(&host->tuning_timer, jiffies +
+				host->tuning_count * HZ);
+	}
+
+	/*
+	 * In case tuning fails, host controllers that support re-tuning can
+	 * try again later, when the re-tuning timer expires, so we return 0
+	 * for them. Controllers without this capability get the error
+	 * returned instead.
+	 */
+	if (err && host->tuning_count &&
+	    host->tuning_mode == SDHCI_TUNING_MODE_1)
+		err = 0;
+
+	sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
+	spin_unlock(&host->lock);
+	enable_irq(host->irq);
+
+	return err;
+}
+
+static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
+{
+	struct sdhci_host *host;
+	u16 ctrl;
+	unsigned long flags;
+
+	host = mmc_priv(mmc);
+
+	/* Host Controller v3.00 defines preset value registers */
+	if (host->version < SDHCI_SPEC_300)
+		return;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+	/*
+	 * We only enable or disable Preset Value if it is not already in
+	 * the requested state. Otherwise, we bail out.
+	 */
+	if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+		ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+	} else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
+		ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+	}
+
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
 static const struct mmc_host_ops sdhci_ops = {
 	.request	= sdhci_request,
 	.set_ios	= sdhci_set_ios,
 	.get_ro		= sdhci_get_ro,
 	.enable_sdio_irq = sdhci_enable_sdio_irq,
+	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
+	.execute_tuning			= sdhci_execute_tuning,
+	.enable_preset_value		= sdhci_enable_preset_value,
 };
 
 /*****************************************************************************\
@@ -1345,6 +1863,9 @@ static void sdhci_tasklet_finish(unsigned long param)
 
 	del_timer(&host->timer);
 
+	if (host->version >= SDHCI_SPEC_300)
+		del_timer(&host->tuning_timer);
+
 	mrq = host->mrq;
 
 	/*
@@ -1418,6 +1939,20 @@ static void sdhci_timeout_timer(unsigned long data)
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static void sdhci_tuning_timer(unsigned long data)
+{
+	struct sdhci_host *host;
+	unsigned long flags;
+
+	host = (struct sdhci_host *)data;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	host->flags |= SDHCI_NEEDS_RETUNING;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
 /*****************************************************************************\
  *                                                                           *
  * Interrupt handling                                                        *
@@ -1506,6 +2041,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
 	BUG_ON(intmask == 0);
 
+	/* CMD19 generates _only_ Buffer Read Ready interrupt */
+	if (intmask & SDHCI_INT_DATA_AVAIL) {
+		if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
+		    MMC_SEND_TUNING_BLOCK) {
+			host->tuning_done = 1;
+			wake_up(&host->buf_ready_int);
+			return;
+		}
+	}
+
 	if (!host->data) {
 		/*
 		 * The "data complete" interrupt is also used to
@@ -1551,10 +2096,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 		 * We currently don't do anything fancy with DMA
 		 * boundaries, but as we can't disable the feature
 		 * we need to at least restart the transfer.
+		 *
+		 * According to the spec, sdhci_readl(host, SDHCI_DMA_ADDRESS)
+		 * should return a valid address to continue from, but as
+		 * some controllers are faulty, don't trust them.
 		 */
-		if (intmask & SDHCI_INT_DMA_END)
-			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
-				SDHCI_DMA_ADDRESS);
+		if (intmask & SDHCI_INT_DMA_END) {
+			u32 dmastart, dmanow;
+			dmastart = sg_dma_address(host->data->sg);
+			dmanow = dmastart + host->data->bytes_xfered;
+			/*
+			 * Force update to the next DMA block boundary.
+			 */
+			dmanow = (dmanow &
+				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
+				SDHCI_DEFAULT_BOUNDARY_SIZE;
+			host->data->bytes_xfered = dmanow - dmastart;
+			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
+				" next 0x%08x\n",
+				mmc_hostname(host->mmc), dmastart,
+				host->data->bytes_xfered, dmanow);
+			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
+		}
 
 		if (intmask & SDHCI_INT_DATA_END) {
 			if (host->cmd) {
@@ -1664,6 +2227,14 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
 
 	sdhci_disable_card_detection(host);
 
+	/* Disable tuning since we are suspending */
+	if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
+	    host->tuning_mode == SDHCI_TUNING_MODE_1) {
+		host->flags &= ~SDHCI_NEEDS_RETUNING;
+		mod_timer(&host->tuning_timer, jiffies +
+			host->tuning_count * HZ);
+	}
+
 	ret = mmc_suspend_host(host->mmc);
 	if (ret)
 		return ret;
@@ -1705,6 +2276,11 @@ int sdhci_resume_host(struct sdhci_host *host)
 	ret = mmc_resume_host(host->mmc);
 	sdhci_enable_card_detection(host);
 
+	/* Set the re-tuning expiration flag */
+	if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
+	    (host->tuning_mode == SDHCI_TUNING_MODE_1))
+		host->flags |= SDHCI_NEEDS_RETUNING;
+
 	return ret;
 }
 
@@ -1751,7 +2327,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
-	unsigned int caps, ocr_avail;
+	u32 caps[2];
+	u32 max_current_caps;
+	unsigned int ocr_avail;
 	int ret;
 
 	WARN_ON(host == NULL);
@@ -1774,12 +2352,15 @@ int sdhci_add_host(struct sdhci_host *host)
 			host->version);
 	}
 
-	caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
 		sdhci_readl(host, SDHCI_CAPABILITIES);
 
+	caps[1] = (host->version >= SDHCI_SPEC_300) ?
+		sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
+
 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
 		host->flags |= SDHCI_USE_SDMA;
-	else if (!(caps & SDHCI_CAN_DO_SDMA))
+	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
 		DBG("Controller doesn't have SDMA capability\n");
 	else
 		host->flags |= SDHCI_USE_SDMA;
@@ -1790,7 +2371,8 @@ int sdhci_add_host(struct sdhci_host *host)
 		host->flags &= ~SDHCI_USE_SDMA;
 	}
 
-	if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
+	if ((host->version >= SDHCI_SPEC_200) &&
+		(caps[0] & SDHCI_CAN_DO_ADMA2))
 		host->flags |= SDHCI_USE_ADMA;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
@@ -1840,10 +2422,10 @@ int sdhci_add_host(struct sdhci_host *host)
 	}
 
 	if (host->version >= SDHCI_SPEC_300)
-		host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK)
+		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
 			>> SDHCI_CLOCK_BASE_SHIFT;
 	else
-		host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK)
+		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
 			>> SDHCI_CLOCK_BASE_SHIFT;
 
 	host->max_clk *= 1000000;
@@ -1859,7 +2441,7 @@ int sdhci_add_host(struct sdhci_host *host)
 	}
 
 	host->timeout_clk =
-		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
+		(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
 	if (host->timeout_clk == 0) {
 		if (host->ops->get_timeout_clock) {
 			host->timeout_clk = host->ops->get_timeout_clock(host);
@@ -1871,22 +2453,55 @@ int sdhci_add_host(struct sdhci_host *host)
 			return -ENODEV;
 		}
 	}
-	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
+	if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
 		host->timeout_clk *= 1000;
 
+	/*
+	 * For Host Controller v3.00, find out whether a clock
+	 * multiplier is supported.
+	 */
+	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
+			SDHCI_CLOCK_MUL_SHIFT;
+
+	/*
+	 * If the Clock Multiplier value is 0, programmable clock mode is
+	 * not supported; otherwise the actual clock multiplier is one more
+	 * than the value read from the Capabilities register.
+	 */
+	if (host->clk_mul)
+		host->clk_mul += 1;
+
 	/*
 	 * Set host parameters.
 	 */
 	mmc->ops = &sdhci_ops;
+	mmc->f_max = host->max_clk;
 	if (host->ops->get_min_clock)
 		mmc->f_min = host->ops->get_min_clock(host);
-	else if (host->version >= SDHCI_SPEC_300)
-		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
-	else
+	else if (host->version >= SDHCI_SPEC_300) {
+		if (host->clk_mul) {
+			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+			mmc->f_max = host->max_clk * host->clk_mul;
+		} else
+			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+	} else
 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
 
-	mmc->f_max = host->max_clk;
-	mmc->caps |= MMC_CAP_SDIO_IRQ;
+	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+
+	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
+		host->flags |= SDHCI_AUTO_CMD12;
+
+	/* Auto-CMD23 stuff only works in ADMA or PIO. */
+	if ((host->version >= SDHCI_SPEC_300) &&
+	    ((host->flags & SDHCI_USE_ADMA) ||
+	     !(host->flags & SDHCI_USE_SDMA))) {
+		host->flags |= SDHCI_AUTO_CMD23;
+		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
+	} else {
+		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
+	}
 
 	/*
 	 * A controller may support 8-bit width, but the board itself
@@ -1898,21 +2513,113 @@ int sdhci_add_host(struct sdhci_host *host)
 	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
 		mmc->caps |= MMC_CAP_4_BIT_DATA;
 
-	if (caps & SDHCI_CAN_DO_HISPD)
+	if (caps[0] & SDHCI_CAN_DO_HISPD)
 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
 	    mmc_card_is_removable(mmc))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
+	/* UHS-I mode(s) supported by the host controller. */
+	if (host->version >= SDHCI_SPEC_300)
+		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+
+	/* SDR104 support also implies SDR50 support */
+	if (caps[1] & SDHCI_SUPPORT_SDR104)
+		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
+	else if (caps[1] & SDHCI_SUPPORT_SDR50)
+		mmc->caps |= MMC_CAP_UHS_SDR50;
+
+	if (caps[1] & SDHCI_SUPPORT_DDR50)
+		mmc->caps |= MMC_CAP_UHS_DDR50;
+
+	/* Does the host need tuning for SDR50? */
+	if (caps[1] & SDHCI_USE_SDR50_TUNING)
+		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
+
+	/* Driver Type(s) (A, C, D) supported by the host */
+	if (caps[1] & SDHCI_DRIVER_TYPE_A)
+		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
+	if (caps[1] & SDHCI_DRIVER_TYPE_C)
+		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
+	if (caps[1] & SDHCI_DRIVER_TYPE_D)
+		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
+
+	/* Initial value for re-tuning timer count */
+	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
+			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+
+	/*
+	 * If the Re-tuning Timer is not disabled, the actual re-tuning
+	 * period is 2 ^ (n - 1) seconds.
+	 */
+	if (host->tuning_count)
+		host->tuning_count = 1 << (host->tuning_count - 1);
+
+	/* Re-tuning mode supported by the Host Controller */
+	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
+			     SDHCI_RETUNING_MODE_SHIFT;
+
 	ocr_avail = 0;
-	if (caps & SDHCI_CAN_VDD_330)
+	/*
+	 * According to the SD Host Controller spec v3.00, if the Host System
+	 * can supply more than 150mA, the Host Driver should set XPC to 1.
+	 * The value is only meaningful if the corresponding Voltage Support
+	 * bit in the Capabilities register is set; the actual current is
+	 * 4 times the register value.
+	 */
+	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
+
+	if (caps[0] & SDHCI_CAN_VDD_330) {
+		int max_current_330;
+
 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
-	if (caps & SDHCI_CAN_VDD_300)
+
+		max_current_330 = ((max_current_caps &
+				   SDHCI_MAX_CURRENT_330_MASK) >>
+				   SDHCI_MAX_CURRENT_330_SHIFT) *
+				   SDHCI_MAX_CURRENT_MULTIPLIER;
+
+		if (max_current_330 > 150)
+			mmc->caps |= MMC_CAP_SET_XPC_330;
+	}
+	if (caps[0] & SDHCI_CAN_VDD_300) {
+		int max_current_300;
+
 		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
-	if (caps & SDHCI_CAN_VDD_180)
+
+		max_current_300 = ((max_current_caps &
+				   SDHCI_MAX_CURRENT_300_MASK) >>
+				   SDHCI_MAX_CURRENT_300_SHIFT) *
+				   SDHCI_MAX_CURRENT_MULTIPLIER;
+
+		if (max_current_300 > 150)
+			mmc->caps |= MMC_CAP_SET_XPC_300;
+	}
+	if (caps[0] & SDHCI_CAN_VDD_180) {
+		int max_current_180;
+
 		ocr_avail |= MMC_VDD_165_195;
 
+		max_current_180 = ((max_current_caps &
+				   SDHCI_MAX_CURRENT_180_MASK) >>
+				   SDHCI_MAX_CURRENT_180_SHIFT) *
+				   SDHCI_MAX_CURRENT_MULTIPLIER;
+
+		if (max_current_180 > 150)
+			mmc->caps |= MMC_CAP_SET_XPC_180;
+
+		/* Maximum current capabilities of the host at 1.8V */
+		if (max_current_180 >= 800)
+			mmc->caps |= MMC_CAP_MAX_CURRENT_800;
+		else if (max_current_180 >= 600)
+			mmc->caps |= MMC_CAP_MAX_CURRENT_600;
+		else if (max_current_180 >= 400)
+			mmc->caps |= MMC_CAP_MAX_CURRENT_400;
+		else
+			mmc->caps |= MMC_CAP_MAX_CURRENT_200;
+	}
+
 	mmc->ocr_avail = ocr_avail;
 	mmc->ocr_avail_sdio = ocr_avail;
 	if (host->ocr_avail_sdio)
@@ -1972,7 +2679,7 @@ int sdhci_add_host(struct sdhci_host *host)
 	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
 		mmc->max_blk_size = 2;
 	} else {
-		mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
+		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
 				SDHCI_MAX_BLOCK_SHIFT;
 		if (mmc->max_blk_size >= 3) {
 			printk(KERN_WARNING "%s: Invalid maximum block size, "
@@ -1998,6 +2705,15 @@ int sdhci_add_host(struct sdhci_host *host)
 
 	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
 
+	if (host->version >= SDHCI_SPEC_300) {
+		init_waitqueue_head(&host->buf_ready_int);
+
+		/* Initialize re-tuning timer */
+		init_timer(&host->tuning_timer);
+		host->tuning_timer.data = (unsigned long)host;
+		host->tuning_timer.function = sdhci_tuning_timer;
+	}
+
 	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
 		mmc_hostname(mmc), host);
 	if (ret)
@@ -2091,6 +2807,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
 	free_irq(host->irq, host);
 
 	del_timer_sync(&host->timer);
+	if (host->version >= SDHCI_SPEC_300)
+		del_timer_sync(&host->tuning_timer);
 
 	tasklet_kill(&host->card_tasklet);
 	tasklet_kill(&host->finish_tasklet);

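As a quick reference for the re-tuning setup above: a Re-tuning Timer Count field of n in the Capabilities register yields a 2^(n - 1) second interval (0 means the timer is disabled), which is what the mod_timer() calls use. A trivial stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned int field;

	/* field value 0 means re-tuning is disabled */
	for (field = 1; field <= 11; field++)
		printf("count field %2u -> re-tune every %4u s\n",
		       field, 1u << (field - 1));
	return 0;
}
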
+ 55 - 4
drivers/mmc/host/sdhci.h

@@ -25,6 +25,7 @@
  */
 
 #define SDHCI_DMA_ADDRESS	0x00
+#define SDHCI_ARGUMENT2		SDHCI_DMA_ADDRESS
 
 #define SDHCI_BLOCK_SIZE	0x04
 #define  SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
@@ -36,7 +37,8 @@
 #define SDHCI_TRANSFER_MODE	0x0C
 #define  SDHCI_TRNS_DMA		0x01
 #define  SDHCI_TRNS_BLK_CNT_EN	0x02
-#define  SDHCI_TRNS_ACMD12	0x04
+#define  SDHCI_TRNS_AUTO_CMD12	0x04
+#define  SDHCI_TRNS_AUTO_CMD23	0x08
 #define  SDHCI_TRNS_READ	0x10
 #define  SDHCI_TRNS_MULTI	0x20
 
@@ -68,8 +70,10 @@
 #define  SDHCI_DATA_AVAILABLE	0x00000800
 #define  SDHCI_CARD_PRESENT	0x00010000
 #define  SDHCI_WRITE_PROTECT	0x00080000
+#define  SDHCI_DATA_LVL_MASK	0x00F00000
+#define   SDHCI_DATA_LVL_SHIFT	20
 
-#define SDHCI_HOST_CONTROL 	0x28
+#define SDHCI_HOST_CONTROL	0x28
 #define  SDHCI_CTRL_LED		0x01
 #define  SDHCI_CTRL_4BITBUS	0x02
 #define  SDHCI_CTRL_HISPD	0x04
@@ -99,6 +103,7 @@
 #define  SDHCI_DIV_MASK	0xFF
 #define  SDHCI_DIV_MASK_LEN	8
 #define  SDHCI_DIV_HI_MASK	0x300
+#define  SDHCI_PROG_CLOCK_MODE	0x0020
 #define  SDHCI_CLOCK_CARD_EN	0x0004
 #define  SDHCI_CLOCK_INT_STABLE	0x0002
 #define  SDHCI_CLOCK_INT_EN	0x0001
@@ -146,7 +151,22 @@
 
 #define SDHCI_ACMD12_ERR	0x3C
 
-/* 3E-3F reserved */
+#define SDHCI_HOST_CONTROL2		0x3E
+#define  SDHCI_CTRL_UHS_MASK		0x0007
+#define   SDHCI_CTRL_UHS_SDR12		0x0000
+#define   SDHCI_CTRL_UHS_SDR25		0x0001
+#define   SDHCI_CTRL_UHS_SDR50		0x0002
+#define   SDHCI_CTRL_UHS_SDR104		0x0003
+#define   SDHCI_CTRL_UHS_DDR50		0x0004
+#define  SDHCI_CTRL_VDD_180		0x0008
+#define  SDHCI_CTRL_DRV_TYPE_MASK	0x0030
+#define   SDHCI_CTRL_DRV_TYPE_B		0x0000
+#define   SDHCI_CTRL_DRV_TYPE_A		0x0010
+#define   SDHCI_CTRL_DRV_TYPE_C		0x0020
+#define   SDHCI_CTRL_DRV_TYPE_D		0x0030
+#define  SDHCI_CTRL_EXEC_TUNING		0x0040
+#define  SDHCI_CTRL_TUNED_CLK		0x0080
+#define  SDHCI_CTRL_PRESET_VAL_ENABLE	0x8000
 
 #define SDHCI_CAPABILITIES	0x40
 #define  SDHCI_TIMEOUT_CLK_MASK	0x0000003F
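A quick illustration of decoding the new Host Control 2 fields, using a hypothetical register readout and the mask values defined above:

#include <stdio.h>

#define SDHCI_CTRL_UHS_MASK		0x0007
#define SDHCI_CTRL_UHS_SDR50		0x0002
#define SDHCI_CTRL_DRV_TYPE_MASK	0x0030
#define SDHCI_CTRL_DRV_TYPE_A		0x0010

int main(void)
{
	/* Host Control 2 is a 16-bit register; 0x0012 is a made-up value */
	unsigned int ctrl2 = 0x0012;

	/* 0x0012: UHS mode field = 2 (SDR50), driver strength = Type A */
	printf("UHS mode field:      %u\n", ctrl2 & SDHCI_CTRL_UHS_MASK);
	printf("driver type field: 0x%02x\n", ctrl2 & SDHCI_CTRL_DRV_TYPE_MASK);
	return 0;
}
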
@@ -167,9 +187,30 @@
 #define  SDHCI_CAN_VDD_180	0x04000000
 #define  SDHCI_CAN_64BIT	0x10000000
 
+#define  SDHCI_SUPPORT_SDR50	0x00000001
+#define  SDHCI_SUPPORT_SDR104	0x00000002
+#define  SDHCI_SUPPORT_DDR50	0x00000004
+#define  SDHCI_DRIVER_TYPE_A	0x00000010
+#define  SDHCI_DRIVER_TYPE_C	0x00000020
+#define  SDHCI_DRIVER_TYPE_D	0x00000040
+#define  SDHCI_RETUNING_TIMER_COUNT_MASK	0x00000F00
+#define  SDHCI_RETUNING_TIMER_COUNT_SHIFT	8
+#define  SDHCI_USE_SDR50_TUNING			0x00002000
+#define  SDHCI_RETUNING_MODE_MASK		0x0000C000
+#define  SDHCI_RETUNING_MODE_SHIFT		14
+#define  SDHCI_CLOCK_MUL_MASK	0x00FF0000
+#define  SDHCI_CLOCK_MUL_SHIFT	16
+
 #define SDHCI_CAPABILITIES_1	0x44
 
-#define SDHCI_MAX_CURRENT	0x48
+#define SDHCI_MAX_CURRENT		0x48
+#define  SDHCI_MAX_CURRENT_330_MASK	0x0000FF
+#define  SDHCI_MAX_CURRENT_330_SHIFT	0
+#define  SDHCI_MAX_CURRENT_300_MASK	0x00FF00
+#define  SDHCI_MAX_CURRENT_300_SHIFT	8
+#define  SDHCI_MAX_CURRENT_180_MASK	0xFF0000
+#define  SDHCI_MAX_CURRENT_180_SHIFT	16
+#define   SDHCI_MAX_CURRENT_MULTIPLIER	4
 
 /* 4C-4F reserved for more max current */
 
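A worked example of the maximum-current decode done in sdhci_add_host(): each 8-bit field of the Maximum Current Capabilities register is in units of 4 mA, so a hypothetical readout of 0x32 in the 3.3V field corresponds to 200 mA:

#include <stdio.h>

#define SDHCI_MAX_CURRENT_330_MASK	0x0000FF
#define SDHCI_MAX_CURRENT_330_SHIFT	0
#define SDHCI_MAX_CURRENT_MULTIPLIER	4

int main(void)
{
	unsigned int max_current_caps = 0x00000032;	/* hypothetical readout */
	unsigned int ma = ((max_current_caps & SDHCI_MAX_CURRENT_330_MASK) >>
			    SDHCI_MAX_CURRENT_330_SHIFT) *
			    SDHCI_MAX_CURRENT_MULTIPLIER;

	printf("max current at 3.3V: %u mA\n", ma);	/* 0x32 * 4 = 200 mA */
	return 0;
}
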
@@ -202,6 +243,12 @@
 #define SDHCI_MAX_DIV_SPEC_200	256
 #define SDHCI_MAX_DIV_SPEC_300	2046
 
+/*
+ * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
+ */
+#define SDHCI_DEFAULT_BOUNDARY_SIZE  (512 * 1024)
+#define SDHCI_DEFAULT_BOUNDARY_ARG   (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
+
 struct sdhci_ops {
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
 	u32		(*read_l)(struct sdhci_host *host, int reg);
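For example, with the 512 KiB default boundary, SDHCI_DEFAULT_BOUNDARY_ARG works out to log2(524288) - 12 = 7, which sdhci_prepare_data() packs into bits 14:12 of the Block Size register via SDHCI_MAKE_BLKSZ(). A quick stand-alone check:

#include <stdio.h>

#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))

int main(void)
{
	unsigned int boundary_arg = 7;	/* log2(512 * 1024) - 12 */
	unsigned int blksz = 512;

	/* prints 0x7200: boundary code 7 in bits 14:12, block size 512 */
	printf("BLOCK_SIZE register: 0x%04x\n",
	       SDHCI_MAKE_BLKSZ(boundary_arg, blksz));
	return 0;
}
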
@@ -223,6 +270,10 @@ struct sdhci_ops {
 	void (*platform_send_init_74_clocks)(struct sdhci_host *host,
 					     u8 power_mode);
 	unsigned int    (*get_ro)(struct sdhci_host *host);
+	void	(*platform_reset_enter)(struct sdhci_host *host, u8 mask);
+	void	(*platform_reset_exit)(struct sdhci_host *host, u8 mask);
+	int	(*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS

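The three new sdhci_ops hooks (platform_reset_enter, platform_reset_exit and set_uhs_signaling) let platform glue run code around a controller soft reset and take over UHS mode programming. A hypothetical, minimal wiring, assuming it is compiled next to sdhci.h in drivers/mmc/host and with placeholder callback bodies:

#include "sdhci.h"

static void my_platform_reset_enter(struct sdhci_host *host, u8 mask)
{
	/* e.g. quiesce platform-specific state before the soft reset */
}

static void my_platform_reset_exit(struct sdhci_host *host, u8 mask)
{
	/* e.g. restore platform-specific registers after the soft reset */
}

static int my_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
{
	/* program the UHS mode in a platform-specific way; 0 on success */
	return 0;
}

static struct sdhci_ops my_sdhci_ops = {
	.platform_reset_enter	= my_platform_reset_enter,
	.platform_reset_exit	= my_platform_reset_exit,
	.set_uhs_signaling	= my_set_uhs_signaling,
};
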
+ 110 - 16
drivers/mmc/host/sh_mmcif.c

@@ -29,6 +29,8 @@
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/pagemap.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
 
 #define DRIVER_NAME	"sh_mmcif"
 #define DRIVER_VERSION	"2010-04-28"
@@ -153,6 +155,12 @@
 #define CLKDEV_MMC_DATA		20000000 /* 20MHz */
 #define CLKDEV_INIT		400000   /* 400 KHz */
 
+enum mmcif_state {
+	STATE_IDLE,
+	STATE_REQUEST,
+	STATE_IOS,
+};
+
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
@@ -164,6 +172,9 @@ struct sh_mmcif_host {
 	long timeout;
 	void __iomem *addr;
 	struct completion intr_wait;
+	enum mmcif_state state;
+	spinlock_t lock;
+	bool power;
 
 	/* DMA support */
 	struct dma_chan		*chan_rx;
@@ -798,17 +809,31 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct sh_mmcif_host *host = mmc_priv(mmc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->state != STATE_IDLE) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		mrq->cmd->error = -EAGAIN;
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+
+	host->state = STATE_REQUEST;
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	switch (mrq->cmd->opcode) {
 	/* MMCIF does not support SD/SDIO command */
 	case SD_IO_SEND_OP_COND:
 	case MMC_APP_CMD:
+		host->state = STATE_IDLE;
 		mrq->cmd->error = -ETIMEDOUT;
 		mmc_request_done(mmc, mrq);
 		return;
 	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
 		if (!mrq->data) {
 			/* send_if_cond cmd (not support) */
+			host->state = STATE_IDLE;
 			mrq->cmd->error = -ETIMEDOUT;
 			mmc_request_done(mmc, mrq);
 			return;
@@ -830,12 +855,9 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
 	host->data = NULL;
 
-	if (mrq->cmd->error != 0) {
-		mmc_request_done(mmc, mrq);
-		return;
-	}
-	if (mrq->stop)
+	if (!mrq->cmd->error && mrq->stop)
 		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
+	host->state = STATE_IDLE;
 	mmc_request_done(mmc, mrq);
 }
 
@@ -843,15 +865,39 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct sh_mmcif_host *host = mmc_priv(mmc);
 	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->state != STATE_IDLE) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	host->state = STATE_IOS;
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	if (ios->power_mode == MMC_POWER_UP) {
 		if (p->set_pwr)
 			p->set_pwr(host->pd, ios->power_mode);
+		if (!host->power) {
+			/* See if we also get DMA */
+			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
+			pm_runtime_get_sync(&host->pd->dev);
+			host->power = true;
+		}
 	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
 		/* clock stop */
 		sh_mmcif_clock_control(host, 0);
-		if (ios->power_mode == MMC_POWER_OFF && p->down_pwr)
-			p->down_pwr(host->pd);
+		if (ios->power_mode == MMC_POWER_OFF) {
+			if (host->power) {
+				pm_runtime_put(&host->pd->dev);
+				sh_mmcif_release_dma(host);
+				host->power = false;
+			}
+			if (p->down_pwr)
+				p->down_pwr(host->pd);
+		}
+		host->state = STATE_IDLE;
 		return;
 	}
 
@@ -859,6 +905,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		sh_mmcif_clock_control(host, ios->clock);
 
 	host->bus_width = ios->bus_width;
+	host->state = STATE_IDLE;
 }
 
 static int sh_mmcif_get_cd(struct mmc_host *mmc)
@@ -925,7 +972,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
 		err = 1;
 	} else {
-		dev_dbg(&host->pd->dev, "Not support int\n");
+		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
 		err = 1;
@@ -996,6 +1043,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	host->pd = pdev;
 
 	init_completion(&host->intr_wait);
+	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
 	mmc->f_max = host->clk;
@@ -1020,24 +1068,29 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	sh_mmcif_sync_reset(host);
 	platform_set_drvdata(pdev, host);
 
-	/* See if we also get DMA */
-	sh_mmcif_request_dma(host, pd);
+	pm_runtime_enable(&pdev->dev);
+	host->power = false;
+
+	ret = pm_runtime_resume(&pdev->dev);
+	if (ret < 0)
+		goto clean_up2;
 
 	mmc_add_host(mmc);
 
+	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+
 	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
-		goto clean_up2;
+		goto clean_up3;
 	}
 	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
-		goto clean_up2;
+		goto clean_up3;
 	}
 
-	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 	sh_mmcif_detect(host->mmc);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1045,7 +1098,11 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
 	return ret;
 
+clean_up3:
+	mmc_remove_host(mmc);
+	pm_runtime_suspend(&pdev->dev);
 clean_up2:
+	pm_runtime_disable(&pdev->dev);
 	clk_disable(host->hclk);
 clean_up1:
 	mmc_free_host(mmc);
@@ -1060,14 +1117,14 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	pm_runtime_get_sync(&pdev->dev);
+
 	mmc_remove_host(host->mmc);
-	sh_mmcif_release_dma(host);
+	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
 	if (host->addr)
 		iounmap(host->addr);
 
-	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
-
 	irq[0] = platform_get_irq(pdev, 0);
 	irq[1] = platform_get_irq(pdev, 1);
 
@@ -1078,15 +1135,52 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 
 	clk_disable(host->hclk);
 	mmc_free_host(host->mmc);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int sh_mmcif_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+	int ret = mmc_suspend_host(host->mmc);
+
+	if (!ret) {
+		sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+		clk_disable(host->hclk);
+	}
+
+	return ret;
+}
+
+static int sh_mmcif_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+
+	clk_enable(host->hclk);
+
+	return mmc_resume_host(host->mmc);
+}
+#else
+#define sh_mmcif_suspend	NULL
+#define sh_mmcif_resume		NULL
+#endif	/* CONFIG_PM */
+
+static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
+	.suspend = sh_mmcif_suspend,
+	.resume = sh_mmcif_resume,
+};
+
 static struct platform_driver sh_mmcif_driver = {
 	.probe		= sh_mmcif_probe,
 	.remove		= sh_mmcif_remove,
 	.driver		= {
 		.name	= DRIVER_NAME,
+		.pm	= &sh_mmcif_dev_pm_ops,
 	},
 };
 

+ 47 - 3
drivers/mmc/host/sh_mobile_sdhi.c

@@ -62,7 +62,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
 	struct tmio_mmc_host *host;
 	char clk_name[8];
-	int ret;
+	int i, irq, ret;
 
 	priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
 	if (priv == NULL) {
@@ -71,6 +71,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 	}
 
 	mmc_data = &priv->mmc_data;
+	p->pdata = mmc_data;
 
 	snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
 	priv->clk = clk_get(&pdev->dev, clk_name);
@@ -116,11 +117,36 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto eprobe;
 
-	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
-		(unsigned long)host->ctl, host->irq);
+	for (i = 0; i < 3; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			if (i) {
+				continue;
+			} else {
+				ret = irq;
+				goto eirq;
+			}
+		}
+		ret = request_irq(irq, tmio_mmc_irq, 0,
+				  dev_name(&pdev->dev), host);
+		if (ret) {
+			while (i--) {
+				irq = platform_get_irq(pdev, i);
+				if (irq >= 0)
+					free_irq(irq, host);
+			}
+			goto eirq;
+		}
+	}
+	dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
+		 mmc_hostname(host->mmc), (unsigned long)
+		 (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
+		 mmc_data->hclk / 1000000);
 
 	return ret;
 
+eirq:
+	tmio_mmc_host_remove(host);
 eprobe:
 	clk_disable(priv->clk);
 	clk_put(priv->clk);
@@ -134,6 +160,16 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
 	struct mmc_host *mmc = platform_get_drvdata(pdev);
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 	struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+	int i, irq;
+
+	p->pdata = NULL;
+
+	for (i = 0; i < 3; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq >= 0)
+			free_irq(irq, host);
+	}
 
 	tmio_mmc_host_remove(host);
 	clk_disable(priv->clk);
@@ -143,10 +179,18 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
+	.suspend = tmio_mmc_host_suspend,
+	.resume = tmio_mmc_host_resume,
+	.runtime_suspend = tmio_mmc_host_runtime_suspend,
+	.runtime_resume = tmio_mmc_host_runtime_resume,
+};
+
 static struct platform_driver sh_mobile_sdhi_driver = {
 	.driver		= {
 		.name	= "sh_mobile_sdhi",
 		.owner	= THIS_MODULE,
+		.pm	= &tmio_mmc_dev_pm_ops,
 	},
 	.probe		= sh_mobile_sdhi_probe,
 	.remove		= __devexit_p(sh_mobile_sdhi_remove),

+ 22 - 10
drivers/mmc/host/tmio_mmc.c

@@ -30,7 +30,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
 	struct mmc_host *mmc = platform_get_drvdata(dev);
 	int ret;
 
-	ret = mmc_suspend_host(mmc);
+	ret = tmio_mmc_host_suspend(&dev->dev);
 
 	/* Tell MFD core it can disable us now.*/
 	if (!ret && cell->disable)
@@ -46,15 +46,12 @@ static int tmio_mmc_resume(struct platform_device *dev)
 	int ret = 0;
 
 	/* Tell the MFD core we are ready to be enabled */
-	if (cell->resume) {
+	if (cell->resume)
 		ret = cell->resume(dev);
-		if (ret)
-			goto out;
-	}
 
-	mmc_resume_host(mmc);
+	if (!ret)
+		ret = tmio_mmc_host_resume(&dev->dev);
 
-out:
 	return ret;
 }
 #else
@@ -67,7 +64,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
 	const struct mfd_cell *cell = mfd_get_cell(pdev);
 	struct tmio_mmc_data *pdata;
 	struct tmio_mmc_host *host;
-	int ret = -EINVAL;
+	int ret = -EINVAL, irq;
 
 	if (pdev->num_resources != 2)
 		goto out;
@@ -76,6 +73,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
 	if (!pdata || !pdata->hclk)
 		goto out;
 
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto out;
+	}
+
 	/* Tell the MFD core we are ready to be enabled */
 	if (cell->enable) {
 		ret = cell->enable(pdev);
@@ -87,11 +90,18 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
 	if (ret)
 		goto cell_disable;
 
+	ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED |
+			  IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host);
+	if (ret)
+		goto host_remove;
+
 	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
-		(unsigned long)host->ctl, host->irq);
+		(unsigned long)host->ctl, irq);
 
 	return 0;
 
+host_remove:
+	tmio_mmc_host_remove(host);
 cell_disable:
 	if (cell->disable)
 		cell->disable(pdev);
@@ -107,7 +117,9 @@ static int __devexit tmio_mmc_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 
 	if (mmc) {
-		tmio_mmc_host_remove(mmc_priv(mmc));
+		struct tmio_mmc_host *host = mmc_priv(mmc);
+		free_irq(platform_get_irq(pdev, 0), host);
+		tmio_mmc_host_remove(host);
 		if (cell->disable)
 			cell->disable(pdev);
 	}

+ 15 - 1
drivers/mmc/host/tmio_mmc.h

@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/mmc/tmio.h>
 #include <linux/pagemap.h>
+#include <linux/spinlock.h>
 
 /* Definitions for values the CTRL_SDIO_STATUS register can take. */
 #define TMIO_SDIO_STAT_IOIRQ	0x0001
@@ -44,13 +45,14 @@ struct tmio_mmc_host {
 	struct mmc_request      *mrq;
 	struct mmc_data         *data;
 	struct mmc_host         *mmc;
-	int                     irq;
 	unsigned int		sdio_irq_enabled;
 
 	/* Callbacks for clock / power control */
 	void (*set_pwr)(struct platform_device *host, int state);
 	void (*set_clk_div)(struct platform_device *host, int state);
 
+	int			pm_error;
+
 	/* pio related stuff */
 	struct scatterlist      *sg_ptr;
 	struct scatterlist      *sg_orig;
@@ -83,6 +85,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
 
 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
+irqreturn_t tmio_mmc_irq(int irq, void *devid);
 
 static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
 					 unsigned long *flags)
@@ -120,4 +123,15 @@ static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
 }
 #endif
 
+#ifdef CONFIG_PM
+int tmio_mmc_host_suspend(struct device *dev);
+int tmio_mmc_host_resume(struct device *dev);
+#else
+#define tmio_mmc_host_suspend NULL
+#define tmio_mmc_host_resume NULL
+#endif
+
+int tmio_mmc_host_runtime_suspend(struct device *dev);
+int tmio_mmc_host_runtime_resume(struct device *dev);
+
 #endif

+ 12 - 9
drivers/mmc/host/tmio_mmc_dma.c

@@ -256,7 +256,10 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
 void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
 {
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
-	if (pdata->dma) {
+	if (!pdata->dma)
+		return;
+
+	if (!host->chan_tx && !host->chan_rx) {
 		dma_cap_mask_t mask;
 
 		dma_cap_zero(mask);
@@ -284,18 +287,18 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
 
 		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
 		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
+	}
 
-		tmio_mmc_enable_dma(host, true);
+	tmio_mmc_enable_dma(host, true);
+
+	return;
 
-		return;
 ebouncebuf:
-		dma_release_channel(host->chan_rx);
-		host->chan_rx = NULL;
+	dma_release_channel(host->chan_rx);
+	host->chan_rx = NULL;
 ereqrx:
-		dma_release_channel(host->chan_tx);
-		host->chan_tx = NULL;
-		return;
-	}
+	dma_release_channel(host->chan_tx);
+	host->chan_tx = NULL;
 }
 
 void tmio_mmc_release_dma(struct tmio_mmc_host *host)

+ 162 - 22
drivers/mmc/host/tmio_mmc_pio.c

@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/scatterlist.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
@@ -243,8 +244,12 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 	spin_lock_irqsave(&host->lock, flags);
 	mrq = host->mrq;
 
-	/* request already finished */
-	if (!mrq
+	/*
+	 * Is the request already finished? Since we use a non-blocking
+	 * cancel_delayed_work(), it can happen that a .set_ios() call
+	 * preempts us, so we have to check for IS_ERR(host->mrq).
+	 */
+	if (IS_ERR_OR_NULL(mrq)
 	    || time_is_after_jiffies(host->last_req_ts +
 		msecs_to_jiffies(2000))) {
 		spin_unlock_irqrestore(&host->lock, flags);
@@ -264,16 +269,19 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 
 	host->cmd = NULL;
 	host->data = NULL;
-	host->mrq = NULL;
 	host->force_pio = false;
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
 	tmio_mmc_reset(host);
 
+	/* Ready for new calls */
+	host->mrq = NULL;
+
 	mmc_request_done(host->mmc, mrq);
 }
 
+/* called with host->lock held, interrupts disabled */
 static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
 	struct mmc_request *mrq = host->mrq;
@@ -281,13 +289,15 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
 	if (!mrq)
 		return;
 
-	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
 	host->force_pio = false;
 
 	cancel_delayed_work(&host->delayed_reset_work);
 
+	host->mrq = NULL;
+
+	/* FIXME: mmc_request_done() can schedule! */
 	mmc_request_done(host->mmc, mrq);
 }
 
@@ -554,7 +564,7 @@ out:
 	spin_unlock(&host->lock);
 }
 
-static irqreturn_t tmio_mmc_irq(int irq, void *devid)
+irqreturn_t tmio_mmc_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
 	struct tmio_mmc_data *pdata = host->pdata;
@@ -649,6 +659,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 out:
 	return IRQ_HANDLED;
 }
+EXPORT_SYMBOL(tmio_mmc_irq);
 
 static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 	struct mmc_data *data)
@@ -685,15 +696,27 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
+	unsigned long flags;
 	int ret;
 
-	if (host->mrq)
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (host->mrq) {
 		pr_debug("request not null\n");
+		if (IS_ERR(host->mrq)) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			mrq->cmd->error = -EAGAIN;
+			mmc_request_done(mmc, mrq);
+			return;
+		}
+	}
 
 	host->last_req_ts = jiffies;
 	wmb();
 	host->mrq = mrq;
 
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	if (mrq->data) {
 		ret = tmio_mmc_start_data(host, mrq->data);
 		if (ret)
@@ -708,8 +731,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 fail:
-	host->mrq = NULL;
 	host->force_pio = false;
+	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }
@@ -723,19 +746,54 @@ fail:
 static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
+	struct tmio_mmc_data *pdata = host->pdata;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->mrq) {
+		if (IS_ERR(host->mrq)) {
+			dev_dbg(&host->pdev->dev,
+				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
+				current->comm, task_pid_nr(current),
+				ios->clock, ios->power_mode);
+			host->mrq = ERR_PTR(-EINTR);
+		} else {
+			dev_dbg(&host->pdev->dev,
+				"%s.%d: CMD%u active since %lu, now %lu!\n",
+				current->comm, task_pid_nr(current),
+				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	host->mrq = ERR_PTR(-EBUSY);
+
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	if (ios->clock)
 		tmio_mmc_set_clock(host, ios->clock);
 
 	/* Power sequence - OFF -> UP -> ON */
 	if (ios->power_mode == MMC_POWER_UP) {
+		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
+			pm_runtime_get_sync(&host->pdev->dev);
+			pdata->power = true;
+		}
 		/* power up SD bus */
 		if (host->set_pwr)
 			host->set_pwr(host->pdev, 1);
 	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
 		/* power down SD bus */
-		if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
-			host->set_pwr(host->pdev, 0);
+		if (ios->power_mode == MMC_POWER_OFF) {
+			if (host->set_pwr)
+				host->set_pwr(host->pdev, 0);
+			if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
+			    pdata->power) {
+				pdata->power = false;
+				pm_runtime_put(&host->pdev->dev);
+			}
+		}
 		tmio_mmc_clk_stop(host);
 	} else {
 		/* start bus clock */
@@ -753,6 +811,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	/* Let things settle. delay taken from winCE driver */
 	udelay(140);
+	if (PTR_ERR(host->mrq) == -EINTR)
+		dev_dbg(&host->pdev->dev,
+			"%s.%d: IOS interrupted: clk %u, mode %u",
+			current->comm, task_pid_nr(current),
+			ios->clock, ios->power_mode);
+	host->mrq = NULL;
 }
 
 static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -801,6 +865,7 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
 	if (!mmc)
 		return -ENOMEM;
 
+	pdata->dev = &pdev->dev;
 	_host = mmc_priv(mmc);
 	_host->pdata = pdata;
 	_host->mmc = mmc;
@@ -834,24 +899,19 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
 	else
 		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
-	tmio_mmc_clk_stop(_host);
-	tmio_mmc_reset(_host);
-
-	ret = platform_get_irq(pdev, 0);
+	pdata->power = false;
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_resume(&pdev->dev);
 	if (ret < 0)
-		goto unmap_ctl;
+		goto pm_disable;
 
-	_host->irq = ret;
+	tmio_mmc_clk_stop(_host);
+	tmio_mmc_reset(_host);
 
 	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
 	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
 		tmio_mmc_enable_sdio_irq(mmc, 0);
 
-	ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
-		IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
-	if (ret)
-		goto unmap_ctl;
-
 	spin_lock_init(&_host->lock);
 
 	/* Init delayed work for request timeouts */
@@ -860,6 +920,10 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
 	/* See if we also get DMA */
 	tmio_mmc_request_dma(_host, pdata);
 
+	/* We have to keep the device powered for its card detection to work */
+	if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
+		pm_runtime_get_noresume(&pdev->dev);
+
 	mmc_add_host(mmc);
 
 	/* Unmask the IRQs we want to know about */
@@ -874,7 +938,8 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
 
 	return 0;
 
-unmap_ctl:
+pm_disable:
+	pm_runtime_disable(&pdev->dev);
 	iounmap(_host->ctl);
 host_free:
 	mmc_free_host(mmc);
@@ -885,13 +950,88 @@ EXPORT_SYMBOL(tmio_mmc_host_probe);
 
 void tmio_mmc_host_remove(struct tmio_mmc_host *host)
 {
+	struct platform_device *pdev = host->pdev;
+
+	/*
+	 * We don't have to manipulate pdata->power here: if there is a card in
+	 * the slot, the runtime PM is active and our .runtime_resume() will not
+	 * be run. If there is no card in the slot and the platform can suspend
+	 * the controller, the runtime PM is suspended and pdata->power == false,
+	 * so our .runtime_resume() will not try to detect a card in the slot.
+	 */
+	if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
+		pm_runtime_get_sync(&pdev->dev);
+
 	mmc_remove_host(host->mmc);
 	cancel_delayed_work_sync(&host->delayed_reset_work);
 	tmio_mmc_release_dma(host);
-	free_irq(host->irq, host);
+
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	iounmap(host->ctl);
 	mmc_free_host(host->mmc);
 }
 EXPORT_SYMBOL(tmio_mmc_host_remove);
 
+#ifdef CONFIG_PM
+int tmio_mmc_host_suspend(struct device *dev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+	int ret = mmc_suspend_host(mmc);
+
+	if (!ret)
+		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
+
+	host->pm_error = pm_runtime_put_sync(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(tmio_mmc_host_suspend);
+
+int tmio_mmc_host_resume(struct device *dev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+
+	/* The MMC core will perform the complete setup */
+	host->pdata->power = false;
+
+	if (!host->pm_error)
+		pm_runtime_get_sync(dev);
+
+	tmio_mmc_reset(host);
+	tmio_mmc_request_dma(host, host->pdata);
+
+	return mmc_resume_host(mmc);
+}
+EXPORT_SYMBOL(tmio_mmc_host_resume);
+
+#endif	/* CONFIG_PM */
+
+int tmio_mmc_host_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
+
+int tmio_mmc_host_runtime_resume(struct device *dev)
+{
+	struct mmc_host *mmc = dev_get_drvdata(dev);
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+	struct tmio_mmc_data *pdata = host->pdata;
+
+	tmio_mmc_reset(host);
+
+	if (pdata->power) {
+		/* Only entered after a card-insert interrupt */
+		tmio_mmc_set_ios(mmc, &mmc->ios);
+		mmc_detect_change(mmc, msecs_to_jiffies(100));
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
+
 MODULE_LICENSE("GPL v2");
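
The suspend/resume and runtime PM helpers exported above are intended to be used by platform glue drivers built on top of this core (sh_mobile_sdhi and tmio_mmc in this series). Below is a minimal, hypothetical sketch of how such a glue driver might wire them into a dev_pm_ops table; the "example_sdhi" name and the empty probe/remove stubs are placeholders, and the local "tmio_mmc.h" include is assumed to declare the helpers, as it does for the in-tree glue drivers.

/*
 * Hypothetical glue-driver sketch (not part of this commit): wiring the
 * exported tmio_mmc_host_* callbacks into a dev_pm_ops table. The name
 * "example_sdhi" and the empty probe/remove bodies are placeholders.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

#include "tmio_mmc.h"	/* tmio_mmc_host_suspend() and friends */

static int example_sdhi_probe(struct platform_device *pdev)
{
	/* a real driver would fill a tmio_mmc_data and call tmio_mmc_host_probe() */
	return 0;
}

static int example_sdhi_remove(struct platform_device *pdev)
{
	/* a real driver would call tmio_mmc_host_remove() on the host from probe */
	return 0;
}

static const struct dev_pm_ops example_sdhi_pm_ops = {
	.suspend	 = tmio_mmc_host_suspend,
	.resume		 = tmio_mmc_host_resume,
	.runtime_suspend = tmio_mmc_host_runtime_suspend,
	.runtime_resume	 = tmio_mmc_host_runtime_resume,
};

static struct platform_driver example_sdhi_driver = {
	.driver		= {
		.name	= "example_sdhi",
		.owner	= THIS_MODULE,
		.pm	= &example_sdhi_pm_ops,
	},
	.probe		= example_sdhi_probe,
	.remove		= example_sdhi_remove,
};

static int __init example_sdhi_init(void)
{
	return platform_driver_register(&example_sdhi_driver);
}
module_init(example_sdhi_init);

static void __exit example_sdhi_exit(void)
{
	platform_driver_unregister(&example_sdhi_driver);
}
module_exit(example_sdhi_exit);

MODULE_LICENSE("GPL v2");

The sh_mobile_sdhi changes in this merge take essentially this approach; tmio_mmc_irq is exported above for the same reason, so that glue drivers such as sh_mobile_sdhi can request their (up to three) interrupt sources themselves.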

+ 2506 - 0
drivers/mmc/host/vub300.c

@@ -0,0 +1,2506 @@
+/*
+ * Remote VUB300 SDIO/SDmem Host Controller Driver
+ *
+ * Copyright (C) 2010 Elan Digital Systems Limited
+ *
+ * based on USB Skeleton driver - 2.2
+ *
+ * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot
+ *         Any SDIO/SDmem/MMC device plugged into the VUB300 will appear,
+ *         by virtue of this driver, to have been plugged into a local
+ *         SDIO host controller, similar to, say, a PCI Ricoh controller.
+ *         This is because this kernel device driver is both a USB 2.0
+ *         client device driver AND an MMC host controller driver. Thus
+ *         if there is an existing driver for the inserted SDIO/SDmem/MMC
+ *         device then that driver will be used by the kernel to manage
+ *         the device in exactly the same fashion as if it had been
+ *         directly plugged into, say, a local PCI bus Ricoh controller.
+ *
+ * RANT: this driver was written using a 128x48 display - converting it
+ *       to a line width of 80 makes it very difficult to support. In
+ *       particular, functions have been broken down into sub-functions
+ *       and the original meaningful names have been shortened into
+ *       cryptic ones.
+ *       The problem is that executing a fragment of code subject to
+ *       two conditions means an indentation of 24, thus leaving only
+ *       56 characters for a C statement. And that is quite ridiculous!
+ *
+ * Data types: data passed to/from the VUB300 is fixed to a number of
+ *             bits and driver data fields reflect that limit by using
+ *             u8, u16, u32
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/workqueue.h>
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include <linux/scatterlist.h>
+
+struct host_controller_info {
+	u8 info_size;
+	u16 firmware_version;
+	u8 number_of_ports;
+} __packed;
+
+#define FIRMWARE_BLOCK_BOUNDARY 1024
+struct sd_command_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+	u8 command_type; /* Bit7 - Rd/Wr */
+	u8 command_index;
+	u8 transfer_size[4]; /* ReadSize + ReadSize */
+	u8 response_type;
+	u8 arguments[4];
+	u8 block_count[2];
+	u8 block_size[2];
+	u8 block_boundary[2];
+	u8 reserved[44]; /* to pad out to 64 bytes */
+} __packed;
+
+struct sd_irqpoll_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+	u8 command_type; /* Bit7 - Rd/Wr */
+	u8 padding[16]; /* don't ask why !! */
+	u8 poll_timeout_msb;
+	u8 poll_timeout_lsb;
+	u8 reserved[42]; /* to pad out to 64 bytes */
+} __packed;
+
+struct sd_common_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+} __packed;
+
+struct sd_response_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+	u8 command_type;
+	u8 command_index;
+	u8 command_response[0];
+} __packed;
+
+struct sd_status_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+	u16 port_flags;
+	u32 sdio_clock;
+	u16 host_header_size;
+	u16 func_header_size;
+	u16 ctrl_header_size;
+} __packed;
+
+struct sd_error_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+	u8 error_code;
+} __packed;
+
+struct sd_interrupt_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+} __packed;
+
+struct offload_registers_access {
+	u8 command_byte[4];
+	u8 Respond_Byte[4];
+} __packed;
+
+#define INTERRUPT_REGISTER_ACCESSES 15
+struct sd_offloaded_interrupt {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+	struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES];
+} __packed;
+
+struct sd_register_header {
+	u8 header_size;
+	u8 header_type;
+	u8 port_number;
+	u8 command_type;
+	u8 command_index;
+	u8 command_response[6];
+} __packed;
+
+#define PIGGYBACK_REGISTER_ACCESSES 14
+struct sd_offloaded_piggyback {
+	struct sd_register_header sdio;
+	struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES];
+} __packed;
+
+union sd_response {
+	struct sd_common_header common;
+	struct sd_status_header status;
+	struct sd_error_header error;
+	struct sd_interrupt_header interrupt;
+	struct sd_response_header response;
+	struct sd_offloaded_interrupt irq;
+	struct sd_offloaded_piggyback pig;
+} __packed;
+
+union sd_command {
+	struct sd_command_header head;
+	struct sd_irqpoll_header poll;
+} __packed;
+
+enum SD_RESPONSE_TYPE {
+	SDRT_UNSPECIFIED = 0,
+	SDRT_NONE,
+	SDRT_1,
+	SDRT_1B,
+	SDRT_2,
+	SDRT_3,
+	SDRT_4,
+	SDRT_5,
+	SDRT_5B,
+	SDRT_6,
+	SDRT_7,
+};
+
+#define RESPONSE_INTERRUPT			0x01
+#define RESPONSE_ERROR				0x02
+#define RESPONSE_STATUS				0x03
+#define RESPONSE_IRQ_DISABLED			0x05
+#define RESPONSE_IRQ_ENABLED			0x06
+#define RESPONSE_PIGGYBACKED			0x07
+#define RESPONSE_NO_INTERRUPT			0x08
+#define RESPONSE_PIG_DISABLED			0x09
+#define RESPONSE_PIG_ENABLED			0x0A
+#define SD_ERROR_1BIT_TIMEOUT			0x01
+#define SD_ERROR_4BIT_TIMEOUT			0x02
+#define SD_ERROR_1BIT_CRC_WRONG			0x03
+#define SD_ERROR_4BIT_CRC_WRONG			0x04
+#define SD_ERROR_1BIT_CRC_ERROR			0x05
+#define SD_ERROR_4BIT_CRC_ERROR			0x06
+#define SD_ERROR_NO_CMD_ENDBIT			0x07
+#define SD_ERROR_NO_1BIT_DATEND			0x08
+#define SD_ERROR_NO_4BIT_DATEND			0x09
+#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT	0x0A
+#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT	0x0B
+#define SD_ERROR_ILLEGAL_COMMAND		0x0C
+#define SD_ERROR_NO_DEVICE			0x0D
+#define SD_ERROR_TRANSFER_LENGTH		0x0E
+#define SD_ERROR_1BIT_DATA_TIMEOUT		0x0F
+#define SD_ERROR_4BIT_DATA_TIMEOUT		0x10
+#define SD_ERROR_ILLEGAL_STATE			0x11
+#define SD_ERROR_UNKNOWN_ERROR			0x12
+#define SD_ERROR_RESERVED_ERROR			0x13
+#define SD_ERROR_INVALID_FUNCTION		0x14
+#define SD_ERROR_OUT_OF_RANGE			0x15
+#define SD_ERROR_STAT_CMD			0x16
+#define SD_ERROR_STAT_DATA			0x17
+#define SD_ERROR_STAT_CMD_TIMEOUT		0x18
+#define SD_ERROR_SDCRDY_STUCK			0x19
+#define SD_ERROR_UNHANDLED			0x1A
+#define SD_ERROR_OVERRUN			0x1B
+#define SD_ERROR_PIO_TIMEOUT			0x1C
+
+#define FUN(c) (0x000007 & (c->arg>>28))
+#define REG(c) (0x01FFFF & (c->arg>>9))
+
+static int limit_speed_to_24_MHz;
+module_param(limit_speed_to_24_MHz, bool, 0644);
+MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
+
+static int pad_input_to_usb_pkt;
+module_param(pad_input_to_usb_pkt, bool, 0644);
+MODULE_PARM_DESC(pad_input_to_usb_pkt,
+		 "Pad USB data input transfers to whole USB Packet");
+
+static int disable_offload_processing;
+module_param(disable_offload_processing, bool, 0644);
+MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
+
+static int force_1_bit_data_xfers;
+module_param(force_1_bit_data_xfers, bool, 0644);
+MODULE_PARM_DESC(force_1_bit_data_xfers,
+		 "Force SDIO Data Transfers to 1-bit Mode");
+
+static int force_polling_for_irqs;
+module_param(force_polling_for_irqs, bool, 0644);
+MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
+
+static int firmware_irqpoll_timeout = 1024;
+module_param(firmware_irqpoll_timeout, int, 0644);
+MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout");
+
+static int force_max_req_size = 128;
+module_param(force_max_req_size, int, 0644);
+MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes");
+
+#ifdef SMSC_DEVELOPMENT_BOARD
+static int firmware_rom_wait_states = 0x04;
+#else
+static int firmware_rom_wait_states = 0x1C;
+#endif
+
+module_param(firmware_rom_wait_states, int, 0644);
+MODULE_PARM_DESC(firmware_rom_wait_states,
+		 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
+
+#define ELAN_VENDOR_ID		0x2201
+#define VUB300_VENDOR_ID	0x0424
+#define VUB300_PRODUCT_ID	0x012C
+static struct usb_device_id vub300_table[] = {
+	{USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)},
+	{USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)},
+	{} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, vub300_table);
+
+static struct workqueue_struct *cmndworkqueue;
+static struct workqueue_struct *pollworkqueue;
+static struct workqueue_struct *deadworkqueue;
+
+static inline int interface_to_InterfaceNumber(struct usb_interface *interface)
+{
+	if (!interface)
+		return -1;
+	if (!interface->cur_altsetting)
+		return -1;
+	return interface->cur_altsetting->desc.bInterfaceNumber;
+}
+
+struct sdio_register {
+	unsigned func_num:3;
+	unsigned sdio_reg:17;
+	unsigned activate:1;
+	unsigned prepared:1;
+	unsigned regvalue:8;
+	unsigned response:8;
+	unsigned sparebit:26;
+};
+
+struct vub300_mmc_host {
+	struct usb_device *udev;
+	struct usb_interface *interface;
+	struct kref kref;
+	struct mutex cmd_mutex;
+	struct mutex irq_mutex;
+	char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */
+	u8 cmnd_out_ep; /* EndPoint for commands */
+	u8 cmnd_res_ep; /* EndPoint for responses */
+	u8 data_out_ep; /* EndPoint for out data */
+	u8 data_inp_ep; /* EndPoint for inp data */
+	bool card_powered;
+	bool card_present;
+	bool read_only;
+	bool large_usb_packets;
+	bool app_spec; /* ApplicationSpecific */
+	bool irq_enabled; /* by the MMC CORE */
+	bool irq_disabled; /* in the firmware */
+	unsigned bus_width:4;
+	u8 total_offload_count;
+	u8 dynamic_register_count;
+	u8 resp_len;
+	u32 datasize;
+	int errors;
+	int usb_transport_fail;
+	int usb_timed_out;
+	int irqs_queued;
+	struct sdio_register sdio_register[16];
+	struct offload_interrupt_function_register {
+#define MAXREGBITS 4
+#define MAXREGS (1<<MAXREGBITS)
+#define MAXREGMASK (MAXREGS-1)
+		u8 offload_count;
+		u32 offload_point;
+		struct offload_registers_access reg[MAXREGS];
+	} fn[8];
+	u16 fbs[8]; /* Function Block Size */
+	struct mmc_command *cmd;
+	struct mmc_request *req;
+	struct mmc_data *data;
+	struct mmc_host *mmc;
+	struct urb *urb;
+	struct urb *command_out_urb;
+	struct urb *command_res_urb;
+	struct completion command_complete;
+	struct completion irqpoll_complete;
+	union sd_command cmnd;
+	union sd_response resp;
+	struct timer_list sg_transfer_timer;
+	struct usb_sg_request sg_request;
+	struct timer_list inactivity_timer;
+	struct work_struct deadwork;
+	struct work_struct cmndwork;
+	struct delayed_work pollwork;
+	struct host_controller_info hc_info;
+	struct sd_status_header system_port_status;
+	u8 padded_buffer[64];
+};
+
+#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref)
+#define SET_TRANSFER_PSEUDOCODE		21
+#define SET_INTERRUPT_PSEUDOCODE	20
+#define SET_FAILURE_MODE		18
+#define SET_ROM_WAIT_STATES		16
+#define SET_IRQ_ENABLE			13
+#define SET_CLOCK_SPEED			11
+#define SET_FUNCTION_BLOCK_SIZE		9
+#define SET_SD_DATA_MODE		6
+#define SET_SD_POWER			4
+#define ENTER_DFU_MODE			3
+#define GET_HC_INF0			1
+#define GET_SYSTEM_PORT_STATUS		0
+
+static void vub300_delete(struct kref *kref)
+{				/* kref callback - softirq */
+	struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
+	struct mmc_host *mmc = vub300->mmc;
+	usb_free_urb(vub300->command_out_urb);
+	vub300->command_out_urb = NULL;
+	usb_free_urb(vub300->command_res_urb);
+	vub300->command_res_urb = NULL;
+	usb_put_dev(vub300->udev);
+	mmc_free_host(mmc);
+	/*
+	 * and hence also frees vub300
+	 * which is contained at the end of struct mmc
+	 */
+}
+
+static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300)
+{
+	kref_get(&vub300->kref);
+	if (queue_work(cmndworkqueue, &vub300->cmndwork)) {
+		/*
+		 * then the cmndworkqueue was not previously
+		 * running and the above get ref is obviously
+		 * required and will be put when the thread
+		 * terminates by a specific call
+		 */
+	} else {
+		/*
+		 * the cmndworkqueue was already running from
+		 * a previous invocation and thus to keep the
+		 * kref counts correct we must undo the get
+		 */
+		kref_put(&vub300->kref, vub300_delete);
+	}
+}
+
+static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay)
+{
+	kref_get(&vub300->kref);
+	if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) {
+		/*
+		 * then the pollworkqueue was not previously
+		 * running and the above get ref is obviously
+		 * required and will be put when the thread
+		 * terminates by a specific call
+		 */
+	} else {
+		/*
+		 * the pollworkqueue was already running from
+		 * a previous invocation and thus to keep the
+		 * kref counts correct we must undo the get
+		 */
+		kref_put(&vub300->kref, vub300_delete);
+	}
+}
+
+static void vub300_queue_dead_work(struct vub300_mmc_host *vub300)
+{
+	kref_get(&vub300->kref);
+	if (queue_work(deadworkqueue, &vub300->deadwork)) {
+		/*
+		 * then the deadworkqueue was not previously
+		 * running and the above get ref is obviously
+		 * required and will be put when the thread
+		 * terminates by a specific call
+		 */
+	} else {
+		/*
+		 * the deadworkqueue was already running from
+		 * a previous invocation and thus to keep the
+		 * kref counts correct we must undo the get
+		 */
+		kref_put(&vub300->kref, vub300_delete);
+	}
+}
+
+static void irqpoll_res_completed(struct urb *urb)
+{				/* urb completion handler - hardirq */
+	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+	if (urb->status)
+		vub300->usb_transport_fail = urb->status;
+	complete(&vub300->irqpoll_complete);
+}
+
+static void irqpoll_out_completed(struct urb *urb)
+{				/* urb completion handler - hardirq */
+	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+	if (urb->status) {
+		vub300->usb_transport_fail = urb->status;
+		complete(&vub300->irqpoll_complete);
+		return;
+	} else {
+		int ret;
+		unsigned int pipe =
+			usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
+		usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
+				  &vub300->resp, sizeof(vub300->resp),
+				  irqpoll_res_completed, vub300);
+		vub300->command_res_urb->actual_length = 0;
+		ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
+		if (ret) {
+			vub300->usb_transport_fail = ret;
+			complete(&vub300->irqpoll_complete);
+		}
+		return;
+	}
+}
+
+static void send_irqpoll(struct vub300_mmc_host *vub300)
+{
+	/* cmd_mutex is held by vub300_pollwork_thread */
+	int retval;
+	int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout);
+	vub300->cmnd.poll.header_size = 22;
+	vub300->cmnd.poll.header_type = 1;
+	vub300->cmnd.poll.port_number = 0;
+	vub300->cmnd.poll.command_type = 2;
+	vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout;
+	vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8);
+	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
+			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep)
+			  , &vub300->cmnd, sizeof(vub300->cmnd)
+			  , irqpoll_out_completed, vub300);
+	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
+	if (0 > retval) {
+		vub300->usb_transport_fail = retval;
+		vub300_queue_poll_work(vub300, 1);
+		complete(&vub300->irqpoll_complete);
+		return;
+	} else {
+		return;
+	}
+}
+
+static void new_system_port_status(struct vub300_mmc_host *vub300)
+{
+	int old_card_present = vub300->card_present;
+	int new_card_present =
+		(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
+	vub300->read_only =
+		(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+	if (new_card_present && !old_card_present) {
+		dev_info(&vub300->udev->dev, "card just inserted\n");
+		vub300->card_present = 1;
+		vub300->bus_width = 0;
+		if (disable_offload_processing)
+			strncpy(vub300->vub_name, "EMPTY Processing Disabled",
+				sizeof(vub300->vub_name));
+		else
+			vub300->vub_name[0] = 0;
+		mmc_detect_change(vub300->mmc, 1);
+	} else if (!new_card_present && old_card_present) {
+		dev_info(&vub300->udev->dev, "card just ejected\n");
+		vub300->card_present = 0;
+		mmc_detect_change(vub300->mmc, 0);
+	} else {
+		/* no change */
+	}
+}
+
+static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300,
+					struct offload_registers_access
+					*register_access, u8 func)
+{
+	u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count;
+	memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access,
+	       sizeof(struct offload_registers_access));
+	vub300->fn[func].offload_count += 1;
+	vub300->total_offload_count += 1;
+}
+
+static void add_offloaded_reg(struct vub300_mmc_host *vub300,
+			      struct offload_registers_access *register_access)
+{
+	u32 Register = ((0x03 & register_access->command_byte[0]) << 15)
+			| ((0xFF & register_access->command_byte[1]) << 7)
+			| ((0xFE & register_access->command_byte[2]) >> 1);
+	u8 func = ((0x70 & register_access->command_byte[0]) >> 4);
+	u8 regs = vub300->dynamic_register_count;
+	u8 i = 0;
+	while (0 < regs-- && 1 == vub300->sdio_register[i].activate) {
+		if (vub300->sdio_register[i].func_num == func &&
+		    vub300->sdio_register[i].sdio_reg == Register) {
+			if (vub300->sdio_register[i].prepared == 0)
+				vub300->sdio_register[i].prepared = 1;
+			vub300->sdio_register[i].response =
+				register_access->Respond_Byte[2];
+			vub300->sdio_register[i].regvalue =
+				register_access->Respond_Byte[3];
+			return;
+		} else {
+			i += 1;
+			continue;
+		}
+	};
+	__add_offloaded_reg_to_fifo(vub300, register_access, func);
+}
+
+static void check_vub300_port_status(struct vub300_mmc_host *vub300)
+{
+	/*
+	 * cmd_mutex is held by vub300_pollwork_thread,
+	 * vub300_deadwork_thread or vub300_cmndwork_thread
+	 */
+	int retval;
+	retval =
+		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+				GET_SYSTEM_PORT_STATUS,
+				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0000, 0x0000, &vub300->system_port_status,
+				sizeof(vub300->system_port_status), HZ);
+	if (sizeof(vub300->system_port_status) == retval)
+		new_system_port_status(vub300);
+}
+
+static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
+{
+	/* cmd_mutex is held by vub300_pollwork_thread */
+	if (vub300->command_res_urb->actual_length == 0)
+		return;
+
+	switch (vub300->resp.common.header_type) {
+	case RESPONSE_INTERRUPT:
+		mutex_lock(&vub300->irq_mutex);
+		if (vub300->irq_enabled)
+			mmc_signal_sdio_irq(vub300->mmc);
+		else
+			vub300->irqs_queued += 1;
+		vub300->irq_disabled = 1;
+		mutex_unlock(&vub300->irq_mutex);
+		break;
+	case RESPONSE_ERROR:
+		if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE)
+			check_vub300_port_status(vub300);
+		break;
+	case RESPONSE_STATUS:
+		vub300->system_port_status = vub300->resp.status;
+		new_system_port_status(vub300);
+		if (!vub300->card_present)
+			vub300_queue_poll_work(vub300, HZ / 5);
+		break;
+	case RESPONSE_IRQ_DISABLED:
+	{
+		int offloaded_data_length = vub300->resp.common.header_size - 3;
+		int register_count = offloaded_data_length >> 3;
+		int ri = 0;
+		while (register_count--) {
+			add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
+			ri += 1;
+		}
+		mutex_lock(&vub300->irq_mutex);
+		if (vub300->irq_enabled)
+			mmc_signal_sdio_irq(vub300->mmc);
+		else
+			vub300->irqs_queued += 1;
+		vub300->irq_disabled = 1;
+		mutex_unlock(&vub300->irq_mutex);
+		break;
+	}
+	case RESPONSE_IRQ_ENABLED:
+	{
+		int offloaded_data_length = vub300->resp.common.header_size - 3;
+		int register_count = offloaded_data_length >> 3;
+		int ri = 0;
+		while (register_count--) {
+			add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
+			ri += 1;
+		}
+		mutex_lock(&vub300->irq_mutex);
+		if (vub300->irq_enabled)
+			mmc_signal_sdio_irq(vub300->mmc);
+		else
+			vub300->irqs_queued += 1;
+		vub300->irq_disabled = 0;
+		mutex_unlock(&vub300->irq_mutex);
+		break;
+	}
+	case RESPONSE_NO_INTERRUPT:
+		vub300_queue_poll_work(vub300, 1);
+		break;
+	default:
+		break;
+	}
+}
+
+static void __do_poll(struct vub300_mmc_host *vub300)
+{
+	/* cmd_mutex is held by vub300_pollwork_thread */
+	long commretval;
+	mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+	init_completion(&vub300->irqpoll_complete);
+	send_irqpoll(vub300);
+	commretval = wait_for_completion_timeout(&vub300->irqpoll_complete,
+						 msecs_to_jiffies(500));
+	if (vub300->usb_transport_fail) {
+		/* no need to do anything */
+	} else if (commretval == 0) {
+		vub300->usb_timed_out = 1;
+		usb_kill_urb(vub300->command_out_urb);
+		usb_kill_urb(vub300->command_res_urb);
+	} else if (commretval < 0) {
+		vub300_queue_poll_work(vub300, 1);
+	} else { /* commretval > 0 */
+		__vub300_irqpoll_response(vub300);
+	}
+}
+
+/* this thread runs only when the driver
+ * is trying to poll the device for an IRQ
+ */
+static void vub300_pollwork_thread(struct work_struct *work)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = container_of(work,
+			      struct vub300_mmc_host, pollwork.work);
+	if (!vub300->interface) {
+		kref_put(&vub300->kref, vub300_delete);
+		return;
+	}
+	mutex_lock(&vub300->cmd_mutex);
+	if (vub300->cmd) {
+		vub300_queue_poll_work(vub300, 1);
+	} else if (!vub300->card_present) {
+		/* no need to do anything */
+	} else { /* vub300->card_present */
+		mutex_lock(&vub300->irq_mutex);
+		if (!vub300->irq_enabled) {
+			mutex_unlock(&vub300->irq_mutex);
+		} else if (vub300->irqs_queued) {
+			vub300->irqs_queued -= 1;
+			mmc_signal_sdio_irq(vub300->mmc);
+			mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+			mutex_unlock(&vub300->irq_mutex);
+		} else { /* NOT vub300->irqs_queued */
+			mutex_unlock(&vub300->irq_mutex);
+			__do_poll(vub300);
+		}
+	}
+	mutex_unlock(&vub300->cmd_mutex);
+	kref_put(&vub300->kref, vub300_delete);
+}
+
+static void vub300_deadwork_thread(struct work_struct *work)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 =
+		container_of(work, struct vub300_mmc_host, deadwork);
+	if (!vub300->interface) {
+		kref_put(&vub300->kref, vub300_delete);
+		return;
+	}
+	mutex_lock(&vub300->cmd_mutex);
+	if (vub300->cmd) {
+		/*
+		 * a command got in as the inactivity
+		 * timer expired - so we just let the
+		 * processing of the command show if
+		 * the device is dead
+		 */
+	} else if (vub300->card_present) {
+		check_vub300_port_status(vub300);
+	} else if (vub300->mmc && vub300->mmc->card &&
+		   mmc_card_present(vub300->mmc->card)) {
+		/*
+		 * the MMC core must not have responded
+		 * to the previous indication - let's
+		 * hope that it eventually does, so we
+		 * will just ignore this for now
+		 */
+	} else {
+		check_vub300_port_status(vub300);
+	}
+	mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+	mutex_unlock(&vub300->cmd_mutex);
+	kref_put(&vub300->kref, vub300_delete);
+}
+
+static void vub300_inactivity_timer_expired(unsigned long data)
+{				/* softirq */
+	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+	if (!vub300->interface) {
+		kref_put(&vub300->kref, vub300_delete);
+	} else if (vub300->cmd) {
+		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+	} else {
+		vub300_queue_dead_work(vub300);
+		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+	}
+}
+
+static int vub300_response_error(u8 error_code)
+{
+	switch (error_code) {
+	case SD_ERROR_PIO_TIMEOUT:
+	case SD_ERROR_1BIT_TIMEOUT:
+	case SD_ERROR_4BIT_TIMEOUT:
+		return -ETIMEDOUT;
+	case SD_ERROR_STAT_DATA:
+	case SD_ERROR_OVERRUN:
+	case SD_ERROR_STAT_CMD:
+	case SD_ERROR_STAT_CMD_TIMEOUT:
+	case SD_ERROR_SDCRDY_STUCK:
+	case SD_ERROR_UNHANDLED:
+	case SD_ERROR_1BIT_CRC_WRONG:
+	case SD_ERROR_4BIT_CRC_WRONG:
+	case SD_ERROR_1BIT_CRC_ERROR:
+	case SD_ERROR_4BIT_CRC_ERROR:
+	case SD_ERROR_NO_CMD_ENDBIT:
+	case SD_ERROR_NO_1BIT_DATEND:
+	case SD_ERROR_NO_4BIT_DATEND:
+	case SD_ERROR_1BIT_DATA_TIMEOUT:
+	case SD_ERROR_4BIT_DATA_TIMEOUT:
+	case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT:
+	case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT:
+		return -EILSEQ;
+	case 33:
+		return -EILSEQ;
+	case SD_ERROR_ILLEGAL_COMMAND:
+		return -EINVAL;
+	case SD_ERROR_NO_DEVICE:
+		return -ENOMEDIUM;
+	default:
+		return -ENODEV;
+	}
+}
+
+static void command_res_completed(struct urb *urb)
+{				/* urb completion handler - hardirq */
+	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+	if (urb->status) {
+		/* we have to let the initiator handle the error */
+	} else if (vub300->command_res_urb->actual_length == 0) {
+		/*
+		 * we have seen this happen once or twice and
+		 * we suspect a buggy USB host controller
+		 */
+	} else if (!vub300->data) {
+		/* this means that the command (typically CMD52) succeeded */
+	} else if (vub300->resp.common.header_type != 0x02) {
+		/*
+		 * this is an error response from the VUB300 chip
+		 * and we let the initiator handle it
+		 */
+	} else if (vub300->urb) {
+		vub300->cmd->error =
+			vub300_response_error(vub300->resp.error.error_code);
+		usb_unlink_urb(vub300->urb);
+	} else {
+		vub300->cmd->error =
+			vub300_response_error(vub300->resp.error.error_code);
+		usb_sg_cancel(&vub300->sg_request);
+	}
+	complete(&vub300->command_complete);	/* got_response_in */
+}
+
+static void command_out_completed(struct urb *urb)
+{				/* urb completion handler - hardirq */
+	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
+	if (urb->status) {
+		complete(&vub300->command_complete);
+	} else {
+		int ret;
+		unsigned int pipe =
+			usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
+		usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
+				  &vub300->resp, sizeof(vub300->resp),
+				  command_res_completed, vub300);
+		vub300->command_res_urb->actual_length = 0;
+		ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
+		if (ret == 0) {
+			/*
+			 * the urb completion handler will call
+			 * our completion handler
+			 */
+		} else {
+			/*
+			 * and thus we only call it directly
+			 * when it would not otherwise be called
+			 */
+			complete(&vub300->command_complete);
+		}
+	}
+}
+
+/*
+ * the STUFF bits are masked out for the comparisons
+ */
+static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300,
+					   u32 cmd_arg)
+{
+	if ((0xFBFFFE00 & cmd_arg) == 0x80022200)
+		vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x80022000)
+		vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x80042200)
+		vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x80042000)
+		vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x80062200)
+		vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x80062000)
+		vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x80082200)
+		vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x80082000)
+		vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200)
+		vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000)
+		vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200)
+		vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000)
+		vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200)
+		vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]);
+	else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000)
+		vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]);
+	else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00)
+		vub300->bus_width = 1;
+	else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02)
+		vub300->bus_width = 4;
+}
+
+static void send_command(struct vub300_mmc_host *vub300)
+{
+	/* cmd_mutex is held by vub300_cmndwork_thread */
+	struct mmc_command *cmd = vub300->cmd;
+	struct mmc_data *data = vub300->data;
+	int retval;
+	int i;
+	u8 response_type;
+	if (vub300->app_spec) {
+		switch (cmd->opcode) {
+		case 6:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			if (0x00000000 == (0x00000003 & cmd->arg))
+				vub300->bus_width = 1;
+			else if (0x00000002 == (0x00000003 & cmd->arg))
+				vub300->bus_width = 4;
+			else
+				dev_err(&vub300->udev->dev,
+					"unexpected ACMD6 bus_width=%d\n",
+					0x00000003 & cmd->arg);
+			break;
+		case 13:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 22:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 23:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 41:
+			response_type = SDRT_3;
+			vub300->resp_len = 6;
+			break;
+		case 42:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 51:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 55:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		default:
+			vub300->resp_len = 0;
+			cmd->error = -EINVAL;
+			complete(&vub300->command_complete);
+			return;
+		}
+		vub300->app_spec = 0;
+	} else {
+		switch (cmd->opcode) {
+		case 0:
+			response_type = SDRT_NONE;
+			vub300->resp_len = 0;
+			break;
+		case 1:
+			response_type = SDRT_3;
+			vub300->resp_len = 6;
+			break;
+		case 2:
+			response_type = SDRT_2;
+			vub300->resp_len = 17;
+			break;
+		case 3:
+			response_type = SDRT_6;
+			vub300->resp_len = 6;
+			break;
+		case 4:
+			response_type = SDRT_NONE;
+			vub300->resp_len = 0;
+			break;
+		case 5:
+			response_type = SDRT_4;
+			vub300->resp_len = 6;
+			break;
+		case 6:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 7:
+			response_type = SDRT_1B;
+			vub300->resp_len = 6;
+			break;
+		case 8:
+			response_type = SDRT_7;
+			vub300->resp_len = 6;
+			break;
+		case 9:
+			response_type = SDRT_2;
+			vub300->resp_len = 17;
+			break;
+		case 10:
+			response_type = SDRT_2;
+			vub300->resp_len = 17;
+			break;
+		case 12:
+			response_type = SDRT_1B;
+			vub300->resp_len = 6;
+			break;
+		case 13:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 15:
+			response_type = SDRT_NONE;
+			vub300->resp_len = 0;
+			break;
+		case 16:
+			for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
+				vub300->fbs[i] = 0xFFFF & cmd->arg;
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 17:
+		case 18:
+		case 24:
+		case 25:
+		case 27:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 28:
+		case 29:
+			response_type = SDRT_1B;
+			vub300->resp_len = 6;
+			break;
+		case 30:
+		case 32:
+		case 33:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 38:
+			response_type = SDRT_1B;
+			vub300->resp_len = 6;
+			break;
+		case 42:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		case 52:
+			response_type = SDRT_5;
+			vub300->resp_len = 6;
+			snoop_block_size_and_bus_width(vub300, cmd->arg);
+			break;
+		case 53:
+			response_type = SDRT_5;
+			vub300->resp_len = 6;
+			break;
+		case 55:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			vub300->app_spec = 1;
+			break;
+		case 56:
+			response_type = SDRT_1;
+			vub300->resp_len = 6;
+			break;
+		default:
+			vub300->resp_len = 0;
+			cmd->error = -EINVAL;
+			complete(&vub300->command_complete);
+			return;
+		}
+	}
+	/*
+	 * it is a shame that we cannot use "sizeof(struct sd_command_header)"
+	 * here, because the packet _must_ be padded to 64 bytes
+	 */
+	vub300->cmnd.head.header_size = 20;
+	vub300->cmnd.head.header_type = 0x00;
+	vub300->cmnd.head.port_number = 0; /* "0" means port 1 */
+	vub300->cmnd.head.command_type = 0x00; /* standard read command */
+	vub300->cmnd.head.response_type = response_type;
+	vub300->cmnd.head.command_index = cmd->opcode;
+	vub300->cmnd.head.arguments[0] = cmd->arg >> 24;
+	vub300->cmnd.head.arguments[1] = cmd->arg >> 16;
+	vub300->cmnd.head.arguments[2] = cmd->arg >> 8;
+	vub300->cmnd.head.arguments[3] = cmd->arg >> 0;
+	if (cmd->opcode == 52) {
+		int fn = 0x7 & (cmd->arg >> 28);
+		vub300->cmnd.head.block_count[0] = 0;
+		vub300->cmnd.head.block_count[1] = 0;
+		vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF;
+		vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF;
+		vub300->cmnd.head.command_type = 0x00;
+		vub300->cmnd.head.transfer_size[0] = 0;
+		vub300->cmnd.head.transfer_size[1] = 0;
+		vub300->cmnd.head.transfer_size[2] = 0;
+		vub300->cmnd.head.transfer_size[3] = 0;
+	} else if (!data) {
+		vub300->cmnd.head.block_count[0] = 0;
+		vub300->cmnd.head.block_count[1] = 0;
+		vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF;
+		vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF;
+		vub300->cmnd.head.command_type = 0x00;
+		vub300->cmnd.head.transfer_size[0] = 0;
+		vub300->cmnd.head.transfer_size[1] = 0;
+		vub300->cmnd.head.transfer_size[2] = 0;
+		vub300->cmnd.head.transfer_size[3] = 0;
+	} else if (cmd->opcode == 53) {
+		int fn = 0x7 & (cmd->arg >> 28);
+		if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */
+			vub300->cmnd.head.block_count[0] =
+				(data->blocks >> 8) & 0xFF;
+			vub300->cmnd.head.block_count[1] =
+				(data->blocks >> 0) & 0xFF;
+			vub300->cmnd.head.block_size[0] =
+				(data->blksz >> 8) & 0xFF;
+			vub300->cmnd.head.block_size[1] =
+				(data->blksz >> 0) & 0xFF;
+		} else {	/* BYTE MODE */
+			vub300->cmnd.head.block_count[0] = 0;
+			vub300->cmnd.head.block_count[1] = 0;
+			vub300->cmnd.head.block_size[0] =
+				(vub300->datasize >> 8) & 0xFF;
+			vub300->cmnd.head.block_size[1] =
+				(vub300->datasize >> 0) & 0xFF;
+		}
+		vub300->cmnd.head.command_type =
+			(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
+		vub300->cmnd.head.transfer_size[0] =
+			(vub300->datasize >> 24) & 0xFF;
+		vub300->cmnd.head.transfer_size[1] =
+			(vub300->datasize >> 16) & 0xFF;
+		vub300->cmnd.head.transfer_size[2] =
+			(vub300->datasize >> 8) & 0xFF;
+		vub300->cmnd.head.transfer_size[3] =
+			(vub300->datasize >> 0) & 0xFF;
+		if (vub300->datasize < vub300->fbs[fn]) {
+			vub300->cmnd.head.block_count[0] = 0;
+			vub300->cmnd.head.block_count[1] = 0;
+		}
+	} else {
+		vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF;
+		vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF;
+		vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF;
+		vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF;
+		vub300->cmnd.head.command_type =
+			(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
+		vub300->cmnd.head.transfer_size[0] =
+			(vub300->datasize >> 24) & 0xFF;
+		vub300->cmnd.head.transfer_size[1] =
+			(vub300->datasize >> 16) & 0xFF;
+		vub300->cmnd.head.transfer_size[2] =
+			(vub300->datasize >> 8) & 0xFF;
+		vub300->cmnd.head.transfer_size[3] =
+			(vub300->datasize >> 0) & 0xFF;
+		if (vub300->datasize < vub300->fbs[0]) {
+			vub300->cmnd.head.block_count[0] = 0;
+			vub300->cmnd.head.block_count[1] = 0;
+		}
+	}
+	if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
+		u16 block_size = vub300->cmnd.head.block_size[1] |
+			(vub300->cmnd.head.block_size[0] << 8);
+		u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
+			(FIRMWARE_BLOCK_BOUNDARY % block_size);
+		vub300->cmnd.head.block_boundary[0] =
+			(block_boundary >> 8) & 0xFF;
+		vub300->cmnd.head.block_boundary[1] =
+			(block_boundary >> 0) & 0xFF;
+	} else {
+		vub300->cmnd.head.block_boundary[0] = 0;
+		vub300->cmnd.head.block_boundary[1] = 0;
+	}
+	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
+			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
+			  &vub300->cmnd, sizeof(vub300->cmnd),
+			  command_out_completed, vub300);
+	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
+	if (retval < 0) {
+		cmd->error = retval;
+		complete(&vub300->command_complete);
+		return;
+	} else {
+		return;
+	}
+}
+
+/*
+ * timer callback runs in atomic mode
+ *       so it cannot call usb_kill_urb()
+ */
+static void vub300_sg_timed_out(unsigned long data)
+{
+	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+	vub300->usb_timed_out = 1;
+	usb_sg_cancel(&vub300->sg_request);
+	usb_unlink_urb(vub300->command_out_urb);
+	usb_unlink_urb(vub300->command_res_urb);
+}
+
+static u16 roundup_to_multiple_of_64(u16 number)
+{
+	return 0xFFC0 & (0x3F + number);
+}
+
+/*
+ * this is a separate function to solve the 80 column width restriction
+ */
+static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
+					  const struct firmware *fw)
+{
+	u8 register_count = 0;
+	u16 ts = 0;
+	u16 interrupt_size = 0;
+	const u8 *data = fw->data;
+	int size = fw->size;
+	u8 c;
+	dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
+		 vub300->vub_name);
+	do {
+		c = *data++;
+	} while (size-- && c); /* skip comment */
+	dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
+		 vub300->vub_name);
+	if (size < 4) {
+		dev_err(&vub300->udev->dev,
+			"corrupt offload pseudocode in firmware %s\n",
+			vub300->vub_name);
+		strncpy(vub300->vub_name, "corrupt offload pseudocode",
+			sizeof(vub300->vub_name));
+		return;
+	}
+	interrupt_size += *data++;
+	size -= 1;
+	interrupt_size <<= 8;
+	interrupt_size += *data++;
+	size -= 1;
+	if (interrupt_size < size) {
+		u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
+		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
+		if (xfer_buffer) {
+			int retval;
+			memcpy(xfer_buffer, data, interrupt_size);
+			memset(xfer_buffer + interrupt_size, 0,
+			       xfer_length - interrupt_size);
+			size -= interrupt_size;
+			data += interrupt_size;
+			retval =
+				usb_control_msg(vub300->udev,
+						usb_sndctrlpipe(vub300->udev, 0),
+						SET_INTERRUPT_PSEUDOCODE,
+						USB_DIR_OUT | USB_TYPE_VENDOR |
+						USB_RECIP_DEVICE, 0x0000, 0x0000,
+						xfer_buffer, xfer_length, HZ);
+			kfree(xfer_buffer);
+			if (retval < 0) {
+				strncpy(vub300->vub_name,
+					"SDIO pseudocode download failed",
+					sizeof(vub300->vub_name));
+				return;
+			}
+		} else {
+			dev_err(&vub300->udev->dev,
+				"not enough memory for xfer buffer to send"
+				" INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
+				vub300->vub_name);
+			strncpy(vub300->vub_name,
+				"SDIO interrupt pseudocode download failed",
+				sizeof(vub300->vub_name));
+			return;
+		}
+	} else {
+		dev_err(&vub300->udev->dev,
+			"corrupt interrupt pseudocode in firmware %s %s\n",
+			fw->data, vub300->vub_name);
+		strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
+			sizeof(vub300->vub_name));
+		return;
+	}
+	ts += *data++;
+	size -= 1;
+	ts <<= 8;
+	ts += *data++;
+	size -= 1;
+	if (ts < size) {
+		u16 xfer_length = roundup_to_multiple_of_64(ts);
+		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
+		if (xfer_buffer) {
+			int retval;
+			memcpy(xfer_buffer, data, ts);
+			memset(xfer_buffer + ts, 0,
+			       xfer_length - ts);
+			size -= ts;
+			data += ts;
+			retval =
+				usb_control_msg(vub300->udev,
+						usb_sndctrlpipe(vub300->udev, 0),
+						SET_TRANSFER_PSEUDOCODE,
+						USB_DIR_OUT | USB_TYPE_VENDOR |
+						USB_RECIP_DEVICE, 0x0000, 0x0000,
+						xfer_buffer, xfer_length, HZ);
+			kfree(xfer_buffer);
+			if (retval < 0) {
+				strncpy(vub300->vub_name,
+					"SDIO pseudocode download failed",
+					sizeof(vub300->vub_name));
+				return;
+			}
+		} else {
+			dev_err(&vub300->udev->dev,
+				"not enough memory for xfer buffer to send"
+				" TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
+				vub300->vub_name);
+			strncpy(vub300->vub_name,
+				"SDIO transfer pseudocode download failed",
+				sizeof(vub300->vub_name));
+			return;
+		}
+	} else {
+		dev_err(&vub300->udev->dev,
+			"corrupt transfer pseudocode in firmware %s %s\n",
+			fw->data, vub300->vub_name);
+		strncpy(vub300->vub_name, "corrupt transfer pseudocode",
+			sizeof(vub300->vub_name));
+		return;
+	}
+	register_count += *data++;
+	size -= 1;
+	if (register_count * 4 == size) {
+		int I = vub300->dynamic_register_count = register_count;
+		int i = 0;
+		while (I--) {
+			unsigned int func_num = 0;
+			vub300->sdio_register[i].func_num = *data++;
+			size -= 1;
+			func_num += *data++;
+			size -= 1;
+			func_num <<= 8;
+			func_num += *data++;
+			size -= 1;
+			func_num <<= 8;
+			func_num += *data++;
+			size -= 1;
+			vub300->sdio_register[i].sdio_reg = func_num;
+			vub300->sdio_register[i].activate = 1;
+			vub300->sdio_register[i].prepared = 0;
+			i += 1;
+		}
+		dev_info(&vub300->udev->dev,
+			 "initialized %d dynamic pseudocode registers\n",
+			 vub300->dynamic_register_count);
+		return;
+	} else {
+		dev_err(&vub300->udev->dev,
+			"corrupt dynamic registers in firmware %s\n",
+			vub300->vub_name);
+		strncpy(vub300->vub_name, "corrupt dynamic registers",
+			sizeof(vub300->vub_name));
+		return;
+	}
+}
+
+/*
+ * if the binary containing the EMPTY PseudoCode cannot be found,
+ * vub300->vub_name is set anyway in order to prevent an automatic retry
+ */
+static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
+{
+	struct mmc_card *card = vub300->mmc->card;
+	int sdio_funcs = card->sdio_funcs;
+	const struct firmware *fw = NULL;
+	int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
+			 "vub_%04X%04X", card->cis.vendor, card->cis.device);
+	int n = 0;
+	int retval;
+	for (n = 0; n < sdio_funcs; n++) {
+		struct sdio_func *sf = card->sdio_func[n];
+		l += snprintf(vub300->vub_name + l,
+			      sizeof(vub300->vub_name) - l, "_%04X%04X",
+			      sf->vendor, sf->device);
+	};
+	snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
+	dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
+		 vub300->vub_name);
+	retval = request_firmware(&fw, vub300->vub_name, &card->dev);
+	if (retval < 0) {
+		strncpy(vub300->vub_name, "vub_default.bin",
+			sizeof(vub300->vub_name));
+		retval = request_firmware(&fw, vub300->vub_name, &card->dev);
+		if (retval < 0) {
+			strncpy(vub300->vub_name,
+				"no SDIO offload firmware found",
+				sizeof(vub300->vub_name));
+		} else {
+			__download_offload_pseudocode(vub300, fw);
+			release_firmware(fw);
+		}
+	} else {
+		__download_offload_pseudocode(vub300, fw);
+		release_firmware(fw);
+	}
+}
+
+static void vub300_usb_bulk_msg_completion(struct urb *urb)
+{				/* urb completion handler - hardirq */
+	complete((struct completion *)urb->context);
+}
+
+static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
+			       unsigned int pipe, void *data, int len,
+			       int *actual_length, int timeout_msecs)
+{
+	/* cmd_mutex is held by vub300_cmndwork_thread */
+	struct usb_device *usb_dev = vub300->udev;
+	struct completion done;
+	int retval;
+	vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!vub300->urb)
+		return -ENOMEM;
+	usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
+			  vub300_usb_bulk_msg_completion, NULL);
+	init_completion(&done);
+	vub300->urb->context = &done;
+	vub300->urb->actual_length = 0;
+	retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
+	if (unlikely(retval))
+		goto out;
+	if (!wait_for_completion_timeout
+	    (&done, msecs_to_jiffies(timeout_msecs))) {
+		retval = -ETIMEDOUT;
+		usb_kill_urb(vub300->urb);
+	} else {
+		retval = vub300->urb->status;
+	}
+out:
+	*actual_length = vub300->urb->actual_length;
+	usb_free_urb(vub300->urb);
+	vub300->urb = NULL;
+	return retval;
+}
+
+static int __command_read_data(struct vub300_mmc_host *vub300,
+			       struct mmc_command *cmd, struct mmc_data *data)
+{
+	/* cmd_mutex is held by vub300_cmndwork_thread */
+	int linear_length = vub300->datasize;
+	int padded_length = vub300->large_usb_packets ?
+		((511 + linear_length) >> 9) << 9 :
+		((63 + linear_length) >> 6) << 6;
+	if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
+		int result;
+		unsigned pipe;
+		pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
+		result = usb_sg_init(&vub300->sg_request, vub300->udev,
+				     pipe, 0, data->sg,
+				     data->sg_len, 0, GFP_KERNEL);
+		if (result < 0) {
+			usb_unlink_urb(vub300->command_out_urb);
+			usb_unlink_urb(vub300->command_res_urb);
+			cmd->error = result;
+			data->bytes_xfered = 0;
+			return 0;
+		} else {
+			vub300->sg_transfer_timer.expires =
+				jiffies + msecs_to_jiffies(2000 +
+						  (linear_length / 16384));
+			add_timer(&vub300->sg_transfer_timer);
+			usb_sg_wait(&vub300->sg_request);
+			del_timer(&vub300->sg_transfer_timer);
+			if (vub300->sg_request.status < 0) {
+				cmd->error = vub300->sg_request.status;
+				data->bytes_xfered = 0;
+				return 0;
+			} else {
+				data->bytes_xfered = vub300->datasize;
+				return linear_length;
+			}
+		}
+	} else {
+		u8 *buf = kmalloc(padded_length, GFP_KERNEL);
+		if (buf) {
+			int result;
+			unsigned pipe = usb_rcvbulkpipe(vub300->udev,
+							vub300->data_inp_ep);
+			int actual_length = 0;
+			result = vub300_usb_bulk_msg(vub300, pipe, buf,
+					     padded_length, &actual_length,
+					     2000 + (padded_length / 16384));
+			if (result < 0) {
+				cmd->error = result;
+				data->bytes_xfered = 0;
+				kfree(buf);
+				return 0;
+			} else if (actual_length < linear_length) {
+				cmd->error = -EREMOTEIO;
+				data->bytes_xfered = 0;
+				kfree(buf);
+				return 0;
+			} else {
+				sg_copy_from_buffer(data->sg, data->sg_len, buf,
+						    linear_length);
+				kfree(buf);
+				data->bytes_xfered = vub300->datasize;
+				return linear_length;
+			}
+		} else {
+			cmd->error = -ENOMEM;
+			data->bytes_xfered = 0;
+			return 0;
+		}
+	}
+}
+
+static int __command_write_data(struct vub300_mmc_host *vub300,
+				struct mmc_command *cmd, struct mmc_data *data)
+{
+	/* cmd_mutex is held by vub300_cmndwork_thread */
+	unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
+	int linear_length = vub300->datasize;
+	int modulo_64_length = linear_length & 0x003F;
+	int modulo_512_length = linear_length & 0x01FF;
+	if (linear_length < 64) {
+		int result;
+		int actual_length;
+		sg_copy_to_buffer(data->sg, data->sg_len,
+				  vub300->padded_buffer,
+				  sizeof(vub300->padded_buffer));
+		memset(vub300->padded_buffer + linear_length, 0,
+		       sizeof(vub300->padded_buffer) - linear_length);
+		result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer,
+					     sizeof(vub300->padded_buffer),
+					     &actual_length, 2000 +
+					     (sizeof(vub300->padded_buffer) /
+					      16384));
+		if (result < 0) {
+			cmd->error = result;
+			data->bytes_xfered = 0;
+		} else {
+			data->bytes_xfered = vub300->datasize;
+		}
+	} else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
+		    (vub300->large_usb_packets && (64 > modulo_512_length))
+		) {		/* don't you just love these workarounds */
+		int padded_length = ((63 + linear_length) >> 6) << 6;
+		u8 *buf = kmalloc(padded_length, GFP_KERNEL);
+		if (buf) {
+			int result;
+			int actual_length;
+			sg_copy_to_buffer(data->sg, data->sg_len, buf,
+					  padded_length);
+			memset(buf + linear_length, 0,
+			       padded_length - linear_length);
+			result =
+				vub300_usb_bulk_msg(vub300, pipe, buf,
+						    padded_length, &actual_length,
+						    2000 + padded_length / 16384);
+			kfree(buf);
+			if (result < 0) {
+				cmd->error = result;
+				data->bytes_xfered = 0;
+			} else {
+				data->bytes_xfered = vub300->datasize;
+			}
+		} else {
+			cmd->error = -ENOMEM;
+			data->bytes_xfered = 0;
+		}
+	} else {		/* no data padding required */
+		int result;
+		unsigned char buf[64 * 4];
+		sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
+		result = usb_sg_init(&vub300->sg_request, vub300->udev,
+				     pipe, 0, data->sg,
+				     data->sg_len, 0, GFP_KERNEL);
+		if (result < 0) {
+			usb_unlink_urb(vub300->command_out_urb);
+			usb_unlink_urb(vub300->command_res_urb);
+			cmd->error = result;
+			data->bytes_xfered = 0;
+		} else {
+			vub300->sg_transfer_timer.expires =
+				jiffies + msecs_to_jiffies(2000 +
+							   linear_length / 16384);
+			add_timer(&vub300->sg_transfer_timer);
+			usb_sg_wait(&vub300->sg_request);
+			if (cmd->error) {
+				data->bytes_xfered = 0;
+			} else {
+				del_timer(&vub300->sg_transfer_timer);
+				if (vub300->sg_request.status < 0) {
+					cmd->error = vub300->sg_request.status;
+					data->bytes_xfered = 0;
+				} else {
+					data->bytes_xfered = vub300->datasize;
+				}
+			}
+		}
+	}
+	return linear_length;
+}
+
+static void __vub300_command_response(struct vub300_mmc_host *vub300,
+				      struct mmc_command *cmd,
+				      struct mmc_data *data, int data_length)
+{
+	/* cmd_mutex is held by vub300_cmndwork_thread */
+	long respretval;
+	int msec_timeout = 1000 + data_length / 4;
+	respretval =
+		wait_for_completion_timeout(&vub300->command_complete,
+					    msecs_to_jiffies(msec_timeout));
+	if (respretval == 0) { /* TIMED OUT */
+		/* we don't know which of "out" and "res" if any failed */
+		int result;
+		vub300->usb_timed_out = 1;
+		usb_kill_urb(vub300->command_out_urb);
+		usb_kill_urb(vub300->command_res_urb);
+		cmd->error = -ETIMEDOUT;
+		result = usb_lock_device_for_reset(vub300->udev,
+						   vub300->interface);
+		if (result == 0) {
+			result = usb_reset_device(vub300->udev);
+			usb_unlock_device(vub300->udev);
+		}
+	} else if (respretval < 0) {
+		/* we don't know which of "out" and "res" if any failed */
+		usb_kill_urb(vub300->command_out_urb);
+		usb_kill_urb(vub300->command_res_urb);
+		cmd->error = respretval;
+	} else if (cmd->error) {
+		/*
+		 * the error occurred sending the command
+		 * or receiving the response
+		 */
+	} else if (vub300->command_out_urb->status) {
+		vub300->usb_transport_fail = vub300->command_out_urb->status;
+		cmd->error = -EPROTO == vub300->command_out_urb->status ?
+			-ESHUTDOWN : vub300->command_out_urb->status;
+	} else if (vub300->command_res_urb->status) {
+		vub300->usb_transport_fail = vub300->command_res_urb->status;
+		cmd->error = -EPROTO == vub300->command_res_urb->status ?
+			-ESHUTDOWN : vub300->command_res_urb->status;
+	} else if (vub300->resp.common.header_type == 0x00) {
+		/*
+		 * the command completed successfully
+		 * and there was no piggybacked data
+		 */
+	} else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
+		cmd->error =
+			vub300_response_error(vub300->resp.error.error_code);
+		if (vub300->data)
+			usb_sg_cancel(&vub300->sg_request);
+	} else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
+		int offloaded_data_length =
+			vub300->resp.common.header_size -
+			sizeof(struct sd_register_header);
+		int register_count = offloaded_data_length >> 3;
+		int ri = 0;
+		while (register_count--) {
+			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
+			ri += 1;
+		}
+		vub300->resp.common.header_size =
+			sizeof(struct sd_register_header);
+		vub300->resp.common.header_type = 0x00;
+		cmd->error = 0;
+	} else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
+		int offloaded_data_length =
+			vub300->resp.common.header_size -
+			sizeof(struct sd_register_header);
+		int register_count = offloaded_data_length >> 3;
+		int ri = 0;
+		while (register_count--) {
+			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
+			ri += 1;
+		}
+		mutex_lock(&vub300->irq_mutex);
+		if (vub300->irqs_queued) {
+			vub300->irqs_queued += 1;
+		} else if (vub300->irq_enabled) {
+			vub300->irqs_queued += 1;
+			vub300_queue_poll_work(vub300, 0);
+		} else {
+			vub300->irqs_queued += 1;
+		}
+		vub300->irq_disabled = 1;
+		mutex_unlock(&vub300->irq_mutex);
+		vub300->resp.common.header_size =
+			sizeof(struct sd_register_header);
+		vub300->resp.common.header_type = 0x00;
+		cmd->error = 0;
+	} else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
+		int offloaded_data_length =
+			vub300->resp.common.header_size -
+			sizeof(struct sd_register_header);
+		int register_count = offloaded_data_length >> 3;
+		int ri = 0;
+		while (register_count--) {
+			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
+			ri += 1;
+		}
+		mutex_lock(&vub300->irq_mutex);
+		if (vub300->irqs_queued) {
+			vub300->irqs_queued += 1;
+		} else if (vub300->irq_enabled) {
+			vub300->irqs_queued += 1;
+			vub300_queue_poll_work(vub300, 0);
+		} else {
+			vub300->irqs_queued += 1;
+		}
+		vub300->irq_disabled = 0;
+		mutex_unlock(&vub300->irq_mutex);
+		vub300->resp.common.header_size =
+			sizeof(struct sd_register_header);
+		vub300->resp.common.header_type = 0x00;
+		cmd->error = 0;
+	} else {
+		cmd->error = -EINVAL;
+	}
+}
+
+static void construct_request_response(struct vub300_mmc_host *vub300,
+				       struct mmc_command *cmd)
+{
+	int resp_len = vub300->resp_len;
+	int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1;
+	int bytes = 3 & less_cmd;
+	int words = less_cmd >> 2;
+	u8 *r = vub300->resp.response.command_response;
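+	/*
+	 * assemble the 32-bit response words from the big-endian bytes
+	 * in r[], skipping the leading command byte r[0]; any partial
+	 * trailing word is filled first, then the complete words
+	 */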
+	if (bytes == 3) {
+		cmd->resp[words] = (r[1 + (words << 2)] << 24)
+			| (r[2 + (words << 2)] << 16)
+			| (r[3 + (words << 2)] << 8);
+	} else if (bytes == 2) {
+		cmd->resp[words] = (r[1 + (words << 2)] << 24)
+			| (r[2 + (words << 2)] << 16);
+	} else if (bytes == 1) {
+		cmd->resp[words] = (r[1 + (words << 2)] << 24);
+	}
+	while (words-- > 0) {
+		cmd->resp[words] = (r[1 + (words << 2)] << 24)
+			| (r[2 + (words << 2)] << 16)
+			| (r[3 + (words << 2)] << 8)
+			| (r[4 + (words << 2)] << 0);
+	}
+	if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
+		cmd->resp[0] &= 0xFFFFFF00;
+}
+
+/* this thread runs only when there is an upper level command req outstanding */
+static void vub300_cmndwork_thread(struct work_struct *work)
+{
+	struct vub300_mmc_host *vub300 =
+		container_of(work, struct vub300_mmc_host, cmndwork);
+	if (!vub300->interface) {
+		kref_put(&vub300->kref, vub300_delete);
+		return;
+	} else {
+		struct mmc_request *req = vub300->req;
+		struct mmc_command *cmd = vub300->cmd;
+		struct mmc_data *data = vub300->data;
+		int data_length;
+		mutex_lock(&vub300->cmd_mutex);
+		init_completion(&vub300->command_complete);
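+		/*
+		 * make sure the offload pseudocode firmware has been sent
+		 * to the VUB300 before the first real command goes out:
+		 * SDIO cards need it, plain SD memory cards do not
+		 */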
+		if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
+		    !mmc_card_present(vub300->mmc->card)) {
+			/*
+			 * the name of the EMPTY Pseudo firmware file
+			 * is used as a flag to indicate that the file
+			 * has already been downloaded to the VUB300 chip
+			 */
+		} else if (0 == vub300->mmc->card->sdio_funcs) {
+			strncpy(vub300->vub_name, "SD memory device",
+				sizeof(vub300->vub_name));
+		} else {
+			download_offload_pseudocode(vub300);
+		}
+		send_command(vub300);
+		if (!data)
+			data_length = 0;
+		else if (MMC_DATA_READ & data->flags)
+			data_length = __command_read_data(vub300, cmd, data);
+		else
+			data_length = __command_write_data(vub300, cmd, data);
+		__vub300_command_response(vub300, cmd, data, data_length);
+		vub300->req = NULL;
+		vub300->cmd = NULL;
+		vub300->data = NULL;
+		if (cmd->error) {
+			if (cmd->error == -ENOMEDIUM)
+				check_vub300_port_status(vub300);
+			mutex_unlock(&vub300->cmd_mutex);
+			mmc_request_done(vub300->mmc, req);
+			kref_put(&vub300->kref, vub300_delete);
+			return;
+		} else {
+			construct_request_response(vub300, cmd);
+			vub300->resp_len = 0;
+			mutex_unlock(&vub300->cmd_mutex);
+			kref_put(&vub300->kref, vub300_delete);
+			mmc_request_done(vub300->mmc, req);
+			return;
+		}
+	}
+}
+
+static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
+				 struct mmc_command *cmd, u8 Function)
+{
+	/* cmd_mutex is held by vub300_mmc_request */
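+	/*
+	 * search the per-function ring of offloaded register accesses
+	 * for an entry whose four command bytes match this CMD52
+	 * argument; on a match, synthesize the response and consume
+	 * every entry up to and including it
+	 */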
+	u8 cmd0 = 0xFF & (cmd->arg >> 24);
+	u8 cmd1 = 0xFF & (cmd->arg >> 16);
+	u8 cmd2 = 0xFF & (cmd->arg >> 8);
+	u8 cmd3 = 0xFF & (cmd->arg >> 0);
+	int first = MAXREGMASK & vub300->fn[Function].offload_point;
+	struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
+	if (cmd0 == rf->command_byte[0] &&
+	    cmd1 == rf->command_byte[1] &&
+	    cmd2 == rf->command_byte[2] &&
+	    cmd3 == rf->command_byte[3]) {
+		u8 checksum = 0x00;
+		cmd->resp[1] = checksum << 24;
+		cmd->resp[0] = (rf->Respond_Byte[0] << 24)
+			| (rf->Respond_Byte[1] << 16)
+			| (rf->Respond_Byte[2] << 8)
+			| (rf->Respond_Byte[3] << 0);
+		vub300->fn[Function].offload_point += 1;
+		vub300->fn[Function].offload_count -= 1;
+		vub300->total_offload_count -= 1;
+		return 1;
+	} else {
+		int delta = 1;	/* because it does not match the first one */
+		u8 register_count = vub300->fn[Function].offload_count - 1;
+		u32 register_point = vub300->fn[Function].offload_point + 1;
+		while (0 < register_count) {
+			int point = MAXREGMASK & register_point;
+			struct offload_registers_access *r =
+				&vub300->fn[Function].reg[point];
+			if (cmd0 == r->command_byte[0] &&
+			    cmd1 == r->command_byte[1] &&
+			    cmd2 == r->command_byte[2] &&
+			    cmd3 == r->command_byte[3]) {
+				u8 checksum = 0x00;
+				cmd->resp[1] = checksum << 24;
+				cmd->resp[0] = (r->Respond_Byte[0] << 24)
+					| (r->Respond_Byte[1] << 16)
+					| (r->Respond_Byte[2] << 8)
+					| (r->Respond_Byte[3] << 0);
+				vub300->fn[Function].offload_point += delta;
+				vub300->fn[Function].offload_count -= delta;
+				vub300->total_offload_count -= delta;
+				return 1;
+			} else {
+				register_point += 1;
+				register_count -= 1;
+				delta += 1;
+				continue;
+			}
+		}
+		return 0;
+	}
+}
+
+static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
+					       struct mmc_command *cmd)
+{
+	/* cmd_mutex is held by vub300_mmc_request */
+	u8 regs = vub300->dynamic_register_count;
+	u8 i = 0;
+	u8 func = FUN(cmd);
+	u32 reg = REG(cmd);
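+	/* check the prepared dynamic registers before the cyclic buffer */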
+	while (0 < regs--) {
+		if ((vub300->sdio_register[i].func_num == func) &&
+		    (vub300->sdio_register[i].sdio_reg == reg)) {
+			if (!vub300->sdio_register[i].prepared) {
+				return 0;
+			} else if ((0x80000000 & cmd->arg) == 0x80000000) {
+				/*
+				 * a write to a dynamic register
+				 * nullifies our offloaded value
+				 */
+				vub300->sdio_register[i].prepared = 0;
+				return 0;
+			} else {
+				u8 checksum = 0x00;
+				u8 rsp0 = 0x00;
+				u8 rsp1 = 0x00;
+				u8 rsp2 = vub300->sdio_register[i].response;
+				u8 rsp3 = vub300->sdio_register[i].regvalue;
+				vub300->sdio_register[i].prepared = 0;
+				cmd->resp[1] = checksum << 24;
+				cmd->resp[0] = (rsp0 << 24)
+					| (rsp1 << 16)
+					| (rsp2 << 8)
+					| (rsp3 << 0);
+				return 1;
+			}
+		} else {
+			i += 1;
+			continue;
+		}
+	}
+	if (vub300->total_offload_count == 0)
+		return 0;
+	else if (vub300->fn[func].offload_count == 0)
+		return 0;
+	else
+		return examine_cyclic_buffer(vub300, cmd, func);
+}
+
+static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+{				/* NOT irq */
+	struct mmc_command *cmd = req->cmd;
+	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+	if (!vub300->interface) {
+		cmd->error = -ESHUTDOWN;
+		mmc_request_done(mmc, req);
+		return;
+	} else {
+		struct mmc_data *data = req->data;
+		if (!vub300->card_powered) {
+			cmd->error = -ENOMEDIUM;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		if (!vub300->card_present) {
+			cmd->error = -ENOMEDIUM;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		if (vub300->usb_transport_fail) {
+			cmd->error = vub300->usb_transport_fail;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		if (!vub300->interface) {
+			cmd->error = -ENODEV;
+			mmc_request_done(mmc, req);
+			return;
+		}
+		kref_get(&vub300->kref);
+		mutex_lock(&vub300->cmd_mutex);
+		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
+		/*
+		 * for performance we have to return immediately
+		 * if the requested data has been offloaded
+		 */
+		if (cmd->opcode == 52 &&
+		    satisfy_request_from_offloaded_data(vub300, cmd)) {
+			cmd->error = 0;
+			mutex_unlock(&vub300->cmd_mutex);
+			kref_put(&vub300->kref, vub300_delete);
+			mmc_request_done(mmc, req);
+			return;
+		} else {
+			vub300->cmd = cmd;
+			vub300->req = req;
+			vub300->data = data;
+			if (data)
+				vub300->datasize = data->blksz * data->blocks;
+			else
+				vub300->datasize = 0;
+			vub300_queue_cmnd_work(vub300);
+			mutex_unlock(&vub300->cmd_mutex);
+			kref_put(&vub300->kref, vub300_delete);
+			/*
+			 * the kernel lock diagnostics complain
+			 * if the cmd_mutex is "passed on"
+			 * to the cmndwork thread,
+			 * so we must release it now
+			 * and re-acquire it in the cmndwork thread
+			 */
+		}
+	}
+}
+
+static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
+			      struct mmc_ios *ios)
+{
+	/* ARRAY_SIZE(buf) does not work: buf has decayed to a pointer here */
+	int buf_array_size = 8;
+	int retval;
+	u32 kHzClock;
+	if (ios->clock >= 48000000)
+		kHzClock = 48000;
+	else if (ios->clock >= 24000000)
+		kHzClock = 24000;
+	else if (ios->clock >= 20000000)
+		kHzClock = 20000;
+	else if (ios->clock >= 15000000)
+		kHzClock = 15000;
+	else if (ios->clock >= 200000)
+		kHzClock = 200;
+	else
+		kHzClock = 0;
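+	/* encode the kHz clock rate little-endian into the 8-byte buffer */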
+	{
+		int i;
+		u64 c = kHzClock;
+		for (i = 0; i < buf_array_size; i++) {
+			buf[i] = c;
+			c >>= 8;
+		}
+	}
+	retval =
+		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+				SET_CLOCK_SPEED,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x00, 0x00, buf, buf_array_size, HZ);
+	if (retval != 8) {
+		dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
+			" %dkHz failed with retval=%d\n", kHzClock, retval);
+	} else {
+		dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED"
+			" %dkHz\n", kHzClock);
+	}
+}
+
+static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+	if (!vub300->interface)
+		return;
+	kref_get(&vub300->kref);
+	mutex_lock(&vub300->cmd_mutex);
+	if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) {
+		vub300->card_powered = 0;
+		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+				SET_SD_POWER,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0000, 0x0000, NULL, 0, HZ);
+		/* must wait for the VUB300 u-proc to boot up */
+		msleep(600);
+	} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
+		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
+				SET_SD_POWER,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0001, 0x0000, NULL, 0, HZ);
+		msleep(600);
+		vub300->card_powered = 1;
+	} else if (ios->power_mode == MMC_POWER_ON) {
+		u8 *buf = kmalloc(8, GFP_KERNEL);
+		if (buf) {
+			__set_clock_speed(vub300, buf, ios);
+			kfree(buf);
+		}
+	} else {
+		/* this should mean no change of state */
+	}
+	mutex_unlock(&vub300->cmd_mutex);
+	kref_put(&vub300->kref, vub300_delete);
+}
+
+static int vub300_mmc_get_ro(struct mmc_host *mmc)
+{
+	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+	return vub300->read_only;
+}
+
+static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+	if (!vub300->interface)
+		return;
+	kref_get(&vub300->kref);
+	if (enable) {
+		mutex_lock(&vub300->irq_mutex);
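+		/*
+		 * deliver an interrupt queued while the SDIO IRQ was
+		 * disabled, otherwise make sure the polling work is running
+		 */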
+		if (vub300->irqs_queued) {
+			vub300->irqs_queued -= 1;
+			mmc_signal_sdio_irq(vub300->mmc);
+		} else if (vub300->irq_disabled) {
+			vub300->irq_disabled = 0;
+			vub300->irq_enabled = 1;
+			vub300_queue_poll_work(vub300, 0);
+		} else if (vub300->irq_enabled) {
+			/* this should not happen, so we will just ignore it */
+		} else {
+			vub300->irq_enabled = 1;
+			vub300_queue_poll_work(vub300, 0);
+		}
+		mutex_unlock(&vub300->irq_mutex);
+	} else {
+		vub300->irq_enabled = 0;
+	}
+	kref_put(&vub300->kref, vub300_delete);
+}
+
+static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = mmc_priv(mmc);
+	dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
+}
+
+static struct mmc_host_ops vub300_mmc_ops = {
+	.request = vub300_mmc_request,
+	.set_ios = vub300_mmc_set_ios,
+	.get_ro = vub300_mmc_get_ro,
+	.enable_sdio_irq = vub300_enable_sdio_irq,
+	.init_card = vub300_init_card,
+};
+
+static int vub300_probe(struct usb_interface *interface,
+			const struct usb_device_id *id)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = NULL;
+	struct usb_host_interface *iface_desc;
+	struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
+	int i;
+	int retval = -ENOMEM;
+	struct urb *command_out_urb;
+	struct urb *command_res_urb;
+	struct mmc_host *mmc;
+	char manufacturer[48];
+	char product[32];
+	char serial_number[32];
+	usb_string(udev, udev->descriptor.iManufacturer, manufacturer,
+		   sizeof(manufacturer));
+	usb_string(udev, udev->descriptor.iProduct, product, sizeof(product));
+	usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
+		   sizeof(serial_number));
+	dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
+		 udev->descriptor.idVendor, udev->descriptor.idProduct,
+		 manufacturer, product, serial_number);
+	command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!command_out_urb) {
+		retval = -ENOMEM;
+		dev_err(&udev->dev,
+			"not enough memory for the command_out_urb\n");
+		goto error0;
+	}
+	command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!command_res_urb) {
+		retval = -ENOMEM;
+		dev_err(&udev->dev,
+			"not enough memory for the command_res_urb\n");
+		goto error1;
+	}
+	/* this also allocates memory for our VUB300 mmc host device */
+	mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
+	if (!mmc) {
+		retval = -ENOMEM;
+		dev_err(&udev->dev,
+			"not enough memory for the mmc_host\n");
+		goto error4;
+	}
+	/* MMC core transfer sizes tunable parameters */
+	mmc->caps = 0;
+	if (!force_1_bit_data_xfers)
+		mmc->caps |= MMC_CAP_4_BIT_DATA;
+	if (!force_polling_for_irqs)
+		mmc->caps |= MMC_CAP_SDIO_IRQ;
+	mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+	/*
+	 * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll
+	 * for devices, which results in spurious CMD7s being
+	 * issued and stops some SDIO cards from working
+	 */
+	if (limit_speed_to_24_MHz) {
+		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+		mmc->f_max = 24000000;
+		dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n");
+	} else {
+		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+		mmc->f_max = 48000000;
+	}
+	mmc->f_min = 200000;
+	mmc->max_blk_count = 511;
+	mmc->max_blk_size = 512;
+	mmc->max_segs = 128;
+	if (force_max_req_size)
+		mmc->max_req_size = force_max_req_size * 1024;
+	else
+		mmc->max_req_size = 64 * 1024;
+	mmc->max_seg_size = mmc->max_req_size;
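+	/* advertise every OCR voltage window from 1.65V up to 3.6V */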
+	mmc->ocr_avail = 0;
+	mmc->ocr_avail |= MMC_VDD_165_195;
+	mmc->ocr_avail |= MMC_VDD_20_21;
+	mmc->ocr_avail |= MMC_VDD_21_22;
+	mmc->ocr_avail |= MMC_VDD_22_23;
+	mmc->ocr_avail |= MMC_VDD_23_24;
+	mmc->ocr_avail |= MMC_VDD_24_25;
+	mmc->ocr_avail |= MMC_VDD_25_26;
+	mmc->ocr_avail |= MMC_VDD_26_27;
+	mmc->ocr_avail |= MMC_VDD_27_28;
+	mmc->ocr_avail |= MMC_VDD_28_29;
+	mmc->ocr_avail |= MMC_VDD_29_30;
+	mmc->ocr_avail |= MMC_VDD_30_31;
+	mmc->ocr_avail |= MMC_VDD_31_32;
+	mmc->ocr_avail |= MMC_VDD_32_33;
+	mmc->ocr_avail |= MMC_VDD_33_34;
+	mmc->ocr_avail |= MMC_VDD_34_35;
+	mmc->ocr_avail |= MMC_VDD_35_36;
+	mmc->ops = &vub300_mmc_ops;
+	vub300 = mmc_priv(mmc);
+	vub300->mmc = mmc;
+	vub300->card_powered = 0;
+	vub300->bus_width = 0;
+	vub300->cmnd.head.block_size[0] = 0x00;
+	vub300->cmnd.head.block_size[1] = 0x00;
+	vub300->app_spec = 0;
+	mutex_init(&vub300->cmd_mutex);
+	mutex_init(&vub300->irq_mutex);
+	vub300->command_out_urb = command_out_urb;
+	vub300->command_res_urb = command_res_urb;
+	vub300->usb_timed_out = 0;
+	vub300->dynamic_register_count = 0;
+
+	for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) {
+		vub300->fn[i].offload_point = 0;
+		vub300->fn[i].offload_count = 0;
+	}
+
+	vub300->total_offload_count = 0;
+	vub300->irq_enabled = 0;
+	vub300->irq_disabled = 0;
+	vub300->irqs_queued = 0;
+
+	for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++)
+		vub300->sdio_register[i].activate = 0;
+
+	vub300->udev = udev;
+	vub300->interface = interface;
+	vub300->cmnd_res_ep = 0;
+	vub300->cmnd_out_ep = 0;
+	vub300->data_inp_ep = 0;
+	vub300->data_out_ep = 0;
+
+	for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
+		vub300->fbs[i] = 512;
+
+	/*
+	 *      set up the endpoint information
+	 *
+	 * use the first pair of bulk-in and bulk-out
+	 *     endpoints for Command/Response+Interrupt
+	 *
+	 * use the second pair of bulk-in and bulk-out
+	 *     endpoints for Data In/Out
+	 */
+	vub300->large_usb_packets = 0;
+	iface_desc = interface->cur_altsetting;
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		struct usb_endpoint_descriptor *endpoint =
+			&iface_desc->endpoint[i].desc;
+		dev_info(&vub300->udev->dev,
+			 "vub300 testing %s EndPoint(%d) %02X\n",
+			 usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" :
+			 usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" :
+			 "UNKNOWN", i, endpoint->bEndpointAddress);
+		if (endpoint->wMaxPacketSize > 64)
+			vub300->large_usb_packets = 1;
+		if (usb_endpoint_is_bulk_in(endpoint)) {
+			if (!vub300->cmnd_res_ep) {
+				vub300->cmnd_res_ep =
+					endpoint->bEndpointAddress;
+			} else if (!vub300->data_inp_ep) {
+				vub300->data_inp_ep =
+					endpoint->bEndpointAddress;
+			} else {
+				dev_warn(&vub300->udev->dev,
+					 "ignoring"
+					 " unexpected bulk_in endpoint");
+			}
+		} else if (usb_endpoint_is_bulk_out(endpoint)) {
+			if (!vub300->cmnd_out_ep) {
+				vub300->cmnd_out_ep =
+					endpoint->bEndpointAddress;
+			} else if (!vub300->data_out_ep) {
+				vub300->data_out_ep =
+					endpoint->bEndpointAddress;
+			} else {
+				dev_warn(&vub300->udev->dev,
+					 "ignoring"
+					 " unexpected bulk_out endpoint");
+			}
+		} else {
+			dev_warn(&vub300->udev->dev,
+				 "vub300 ignoring EndPoint(%d) %02X", i,
+				 endpoint->bEndpointAddress);
+		}
+	}
+	if (vub300->cmnd_res_ep && vub300->cmnd_out_ep &&
+	    vub300->data_inp_ep && vub300->data_out_ep) {
+		dev_info(&vub300->udev->dev,
+			 "vub300 %s packets"
+			 " using EndPoints %02X %02X %02X %02X\n",
+			 vub300->large_usb_packets ? "LARGE" : "SMALL",
+			 vub300->cmnd_out_ep, vub300->cmnd_res_ep,
+			 vub300->data_out_ep, vub300->data_inp_ep);
+		/* we have the expected EndPoints */
+	} else {
+		dev_err(&vub300->udev->dev,
+		    "Could not find two sets of bulk-in/out endpoint pairs\n");
+		retval = -EINVAL;
+		goto error5;
+	}
+	retval =
+		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+				GET_HC_INF0,
+				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0000, 0x0000, &vub300->hc_info,
+				sizeof(vub300->hc_info), HZ);
+	if (retval < 0)
+		goto error5;
+	retval =
+		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+				SET_ROM_WAIT_STATES,
+				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
+	if (retval < 0)
+		goto error5;
+	dev_info(&vub300->udev->dev,
+		 "operating_mode = %s %s %d MHz %s %d byte USB packets\n",
+		 (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
+		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit",
+		 mmc->f_max / 1000000,
+		 pad_input_to_usb_pkt ? "padding input data to" : "with",
+		 vub300->large_usb_packets ? 512 : 64);
+	retval =
+		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
+				GET_SYSTEM_PORT_STATUS,
+				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				0x0000, 0x0000, &vub300->system_port_status,
+				sizeof(vub300->system_port_status), HZ);
+	if (retval < 0) {
+		goto error5;
+	} else if (sizeof(vub300->system_port_status) == retval) {
+		vub300->card_present =
+			(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
+		vub300->read_only =
+			(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+	} else {
+		retval = -EINVAL;
+		goto error5;
+	}
+	usb_set_intfdata(interface, vub300);
+	INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
+	INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
+	INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
+	kref_init(&vub300->kref);
+	init_timer(&vub300->sg_transfer_timer);
+	vub300->sg_transfer_timer.data = (unsigned long)vub300;
+	vub300->sg_transfer_timer.function = vub300_sg_timed_out;
+	kref_get(&vub300->kref);
+	init_timer(&vub300->inactivity_timer);
+	vub300->inactivity_timer.data = (unsigned long)vub300;
+	vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
+	vub300->inactivity_timer.expires = jiffies + HZ;
+	add_timer(&vub300->inactivity_timer);
+	if (vub300->card_present)
+		dev_info(&vub300->udev->dev,
+			 "USB vub300 remote SDIO host controller[%d]"
+			 "connected with SD/SDIO card inserted\n",
+			 interface_to_InterfaceNumber(interface));
+	else
+		dev_info(&vub300->udev->dev,
+			 "USB vub300 remote SDIO host controller[%d]"
+			 "connected with no SD/SDIO card inserted\n",
+			 interface_to_InterfaceNumber(interface));
+	mmc_add_host(mmc);
+	return 0;
+error5:
+	mmc_free_host(mmc);
+	/*
+	 * and hence also frees vub300
+	 * which is contained at the end of struct mmc
+	 */
+error4:
+	usb_free_urb(command_res_urb);
+error1:
+	usb_free_urb(command_out_urb);
+error0:
+	usb_put_dev(udev);
+	return retval;
+}
+
+static void vub300_disconnect(struct usb_interface *interface)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(interface);
+	if (!vub300 || !vub300->mmc) {
+		return;
+	} else {
+		struct mmc_host *mmc = vub300->mmc;
+		if (!vub300->mmc) {
+			return;
+		} else {
+			int ifnum = interface_to_InterfaceNumber(interface);
+			usb_set_intfdata(interface, NULL);
+			/* prevent more I/O from starting */
+			vub300->interface = NULL;
+			kref_put(&vub300->kref, vub300_delete);
+			mmc_remove_host(mmc);
+			pr_info("USB vub300 remote SDIO host controller[%d]"
+				" now disconnected", ifnum);
+			return;
+		}
+	}
+}
+
+#ifdef CONFIG_PM
+static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
+	if (!vub300 || !vub300->mmc) {
+		return 0;
+	} else {
+		struct mmc_host *mmc = vub300->mmc;
+		mmc_suspend_host(mmc);
+		return 0;
+	}
+}
+
+static int vub300_resume(struct usb_interface *intf)
+{
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
+	if (!vub300 || !vub300->mmc) {
+		return 0;
+	} else {
+		struct mmc_host *mmc = vub300->mmc;
+		mmc_resume_host(mmc);
+		return 0;
+	}
+}
+#else
+#define vub300_suspend NULL
+#define vub300_resume NULL
+#endif
+
+static int vub300_pre_reset(struct usb_interface *intf)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
+	mutex_lock(&vub300->cmd_mutex);
+	return 0;
+}
+
+static int vub300_post_reset(struct usb_interface *intf)
+{				/* NOT irq */
+	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
+	/* we are sure no URBs are active - no locking needed */
+	vub300->errors = -EPIPE;
+	mutex_unlock(&vub300->cmd_mutex);
+	return 0;
+}
+
+static struct usb_driver vub300_driver = {
+	.name = "vub300",
+	.probe = vub300_probe,
+	.disconnect = vub300_disconnect,
+	.suspend = vub300_suspend,
+	.resume = vub300_resume,
+	.pre_reset = vub300_pre_reset,
+	.post_reset = vub300_post_reset,
+	.id_table = vub300_table,
+	.supports_autosuspend = 1,
+};
+
+static int __init vub300_init(void)
+{				/* NOT irq */
+	int result;
+
+	pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X",
+		firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
+	cmndworkqueue = create_singlethread_workqueue("kvub300c");
+	if (!cmndworkqueue) {
+		pr_err("not enough memory for the REQUEST workqueue");
+		result = -ENOMEM;
+		goto out1;
+	}
+	pollworkqueue = create_singlethread_workqueue("kvub300p");
+	if (!pollworkqueue) {
+		pr_err("not enough memory for the IRQPOLL workqueue");
+		result = -ENOMEM;
+		goto out2;
+	}
+	deadworkqueue = create_singlethread_workqueue("kvub300d");
+	if (!deadworkqueue) {
+		pr_err("not enough memory for the EXPIRED workqueue");
+		result = -ENOMEM;
+		goto out3;
+	}
+	result = usb_register(&vub300_driver);
+	if (result) {
+		pr_err("usb_register failed. Error number %d", result);
+		goto out4;
+	}
+	return 0;
+out4:
+	destroy_workqueue(deadworkqueue);
+out3:
+	destroy_workqueue(pollworkqueue);
+out2:
+	destroy_workqueue(cmndworkqueue);
+out1:
+	return result;
+}
+
+static void __exit vub300_exit(void)
+{
+	usb_deregister(&vub300_driver);
+	flush_workqueue(cmndworkqueue);
+	flush_workqueue(pollworkqueue);
+	flush_workqueue(deadworkqueue);
+	destroy_workqueue(cmndworkqueue);
+	destroy_workqueue(pollworkqueue);
+	destroy_workqueue(deadworkqueue);
+}
+
+module_init(vub300_init);
+module_exit(vub300_exit);
+
+MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>");
+MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
+MODULE_LICENSE("GPL");

+ 1 - 0
include/linux/Kbuild

@@ -4,6 +4,7 @@ header-y += caif/
 header-y += dvb/
 header-y += hdlc/
 header-y += isdn/
+header-y += mmc/
 header-y += nfsd/
 header-y += raid/
 header-y += spi/

+ 17 - 0
include/linux/mfd/tmio.h

@@ -4,6 +4,7 @@
 #include <linux/fb.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 
 #define tmio_ioread8(addr) readb(addr)
 #define tmio_ioread16(addr) readw(addr)
@@ -61,6 +62,12 @@
  * Some controllers can support SDIO IRQ signalling.
  */
 #define TMIO_MMC_SDIO_IRQ		(1 << 2)
+/*
+ * Some platforms can detect card insertion events with controller powered
+ * down, in which case they have to call tmio_mmc_cd_wakeup() to power up the
+ * controller and report the event to the driver.
+ */
+#define TMIO_MMC_HAS_COLD_CD		(1 << 3)
 
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -82,11 +89,21 @@ struct tmio_mmc_data {
 	unsigned long			flags;
 	u32				ocr_mask;	/* available voltages */
 	struct tmio_mmc_dma		*dma;
+	struct device			*dev;
+	bool				power;
 	void (*set_pwr)(struct platform_device *host, int state);
 	void (*set_clk_div)(struct platform_device *host, int state);
 	int (*get_cd)(struct platform_device *host);
 };
 
+static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata)
+{
+	if (pdata && !pdata->power) {
+		pdata->power = true;
+		pm_runtime_get(pdata->dev);
+	}
+}
+
 /*
  * data for the NAND controller
  */

+ 1 - 0
include/linux/mmc/Kbuild

@@ -0,0 +1 @@
+header-y += ioctl.h

+ 188 - 1
include/linux/mmc/card.h

@@ -11,6 +11,7 @@
 #define LINUX_MMC_CARD_H
 
 #include <linux/mmc/core.h>
+#include <linux/mod_devicetable.h>
 
 struct mmc_cid {
 	unsigned int		manfid;
@@ -29,6 +30,7 @@ struct mmc_csd {
 	unsigned short		cmdclass;
 	unsigned short		tacc_clks;
 	unsigned int		tacc_ns;
+	unsigned int		c_size;
 	unsigned int		r2w_factor;
 	unsigned int		max_dtr;
 	unsigned int		erase_size;		/* In sectors */
@@ -45,6 +47,10 @@ struct mmc_ext_csd {
 	u8			rev;
 	u8			erase_group_def;
 	u8			sec_feature_support;
+	u8			rel_sectors;
+	u8			rel_param;
+	u8			part_config;
+	unsigned int		part_time;		/* Units: ms */
 	unsigned int		sa_timeout;		/* Units: 100ns */
 	unsigned int		hs_max_dtr;
 	unsigned int		sectors;
@@ -57,13 +63,18 @@ struct mmc_ext_csd {
 	bool			enhanced_area_en;	/* enable bit */
 	unsigned long long	enhanced_area_offset;	/* Units: Byte */
 	unsigned int		enhanced_area_size;	/* Units: KB */
+	unsigned int		boot_size;		/* in bytes */
 };
 
 struct sd_scr {
 	unsigned char		sda_vsn;
+	unsigned char		sda_spec3;
 	unsigned char		bus_widths;
 #define SD_SCR_BUS_WIDTH_1	(1<<0)
 #define SD_SCR_BUS_WIDTH_4	(1<<2)
+	unsigned char		cmds;
+#define SD_SCR_CMD20_SUPPORT   (1<<0)
+#define SD_SCR_CMD23_SUPPORT   (1<<1)
 };
 
 struct sd_ssr {
@@ -74,6 +85,39 @@ struct sd_ssr {
 
 struct sd_switch_caps {
 	unsigned int		hs_max_dtr;
+	unsigned int		uhs_max_dtr;
+#define UHS_SDR104_MAX_DTR	208000000
+#define UHS_SDR50_MAX_DTR	100000000
+#define UHS_DDR50_MAX_DTR	50000000
+#define UHS_SDR25_MAX_DTR	UHS_DDR50_MAX_DTR
+#define UHS_SDR12_MAX_DTR	25000000
+	unsigned int		sd3_bus_mode;
+#define UHS_SDR12_BUS_SPEED	0
+#define UHS_SDR25_BUS_SPEED	1
+#define UHS_SDR50_BUS_SPEED	2
+#define UHS_SDR104_BUS_SPEED	3
+#define UHS_DDR50_BUS_SPEED	4
+
+#define SD_MODE_UHS_SDR12	(1 << UHS_SDR12_BUS_SPEED)
+#define SD_MODE_UHS_SDR25	(1 << UHS_SDR25_BUS_SPEED)
+#define SD_MODE_UHS_SDR50	(1 << UHS_SDR50_BUS_SPEED)
+#define SD_MODE_UHS_SDR104	(1 << UHS_SDR104_BUS_SPEED)
+#define SD_MODE_UHS_DDR50	(1 << UHS_DDR50_BUS_SPEED)
+	unsigned int		sd3_drv_type;
+#define SD_DRIVER_TYPE_B	0x01
+#define SD_DRIVER_TYPE_A	0x02
+#define SD_DRIVER_TYPE_C	0x04
+#define SD_DRIVER_TYPE_D	0x08
+	unsigned int		sd3_curr_limit;
+#define SD_SET_CURRENT_LIMIT_200	0
+#define SD_SET_CURRENT_LIMIT_400	1
+#define SD_SET_CURRENT_LIMIT_600	2
+#define SD_SET_CURRENT_LIMIT_800	3
+
+#define SD_MAX_CURRENT_200	(1 << SD_SET_CURRENT_LIMIT_200)
+#define SD_MAX_CURRENT_400	(1 << SD_SET_CURRENT_LIMIT_400)
+#define SD_MAX_CURRENT_600	(1 << SD_SET_CURRENT_LIMIT_600)
+#define SD_MAX_CURRENT_800	(1 << SD_SET_CURRENT_LIMIT_800)
 };
 
 struct sdio_cccr {
@@ -118,6 +162,8 @@ struct mmc_card {
 #define MMC_STATE_HIGHSPEED	(1<<2)		/* card is in high speed mode */
 #define MMC_STATE_BLOCKADDR	(1<<3)		/* card uses block-addressing */
 #define MMC_STATE_HIGHSPEED_DDR (1<<4)		/* card is in high speed mode */
+#define MMC_STATE_ULTRAHIGHSPEED (1<<5)		/* card is in ultra high speed mode */
+#define MMC_CARD_SDXC		(1<<6)		/* card is SDXC */
 	unsigned int		quirks; 	/* card quirks */
 #define MMC_QUIRK_LENIENT_FN0	(1<<0)		/* allow SDIO FN0 writes outside of the VS CCCR range */
 #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1)	/* use func->cur_blksize */
@@ -125,6 +171,10 @@ struct mmc_card {
 #define MMC_QUIRK_NONSTD_SDIO	(1<<2)		/* non-standard SDIO card attached */
 						/* (missing CIA registers) */
 #define MMC_QUIRK_BROKEN_CLK_GATING (1<<3)	/* clock gating the sdio bus will make card fail */
+#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4)		/* SDIO card has nonstd function interfaces */
+#define MMC_QUIRK_DISABLE_CD	(1<<5)		/* disconnect CD/DAT[3] resistor */
+#define MMC_QUIRK_INAND_CMD38	(1<<6)		/* iNAND devices have broken CMD38 */
+#define MMC_QUIRK_BLK_NO_CMD23	(1<<7)		/* Avoid CMD23 for regular multiblock */
 
 	unsigned int		erase_size;	/* erase size in sectors */
  	unsigned int		erase_shift;	/* if erase unit is power 2 */
@@ -145,14 +195,100 @@ struct mmc_card {
 	struct sdio_cccr	cccr;		/* common card info */
 	struct sdio_cis		cis;		/* common tuple info */
 	struct sdio_func	*sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
+	struct sdio_func	*sdio_single_irq; /* SDIO function when only one IRQ active */
 	unsigned		num_info;	/* number of info strings */
 	const char		**info;		/* info strings */
 	struct sdio_func_tuple	*tuples;	/* unknown common tuples */
 
+	unsigned int		sd_bus_speed;	/* Bus Speed Mode set for the card */
+
 	struct dentry		*debugfs_root;
 };
 
-void mmc_fixup_device(struct mmc_card *dev);
+/*
+ *  The world is not perfect and supplies us with broken mmc/sdio devices.
+ *  For at least some of these bugs we need a work-around.
+ */
+
+struct mmc_fixup {
+	/* CID-specific fields. */
+	const char *name;
+
+	/* Valid revision range */
+	u64 rev_start, rev_end;
+
+	unsigned int manfid;
+	unsigned short oemid;
+
+	/* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
+	u16 cis_vendor, cis_device;
+
+	void (*vendor_fixup)(struct mmc_card *card, int data);
+	int data;
+};
+
+#define CID_MANFID_ANY (-1u)
+#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_NAME_ANY (NULL)
+
+#define END_FIXUP { 0 }
+
+#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end,	\
+		   _cis_vendor, _cis_device,				\
+		   _fixup, _data)					\
+	{						   \
+		.name = (_name),			   \
+		.manfid = (_manfid),			   \
+		.oemid = (_oemid),			   \
+		.rev_start = (_rev_start),		   \
+		.rev_end = (_rev_end),			   \
+		.cis_vendor = (_cis_vendor),		   \
+		.cis_device = (_cis_device),		   \
+		.vendor_fixup = (_fixup),		   \
+		.data = (_data),			   \
+	 }
+
+#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end,	\
+		      _fixup, _data)					\
+	_FIXUP_EXT(_name, _manfid,					\
+		   _oemid, _rev_start, _rev_end,			\
+		   SDIO_ANY_ID, SDIO_ANY_ID,				\
+		   _fixup, _data)					\
+
+#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
+	MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
+
+#define SDIO_FIXUP(_vendor, _device, _fixup, _data)			\
+	_FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY,			\
+		    CID_OEMID_ANY, 0, -1ull,				\
+		   _vendor, _device,					\
+		   _fixup, _data)					\
+
+#define cid_rev(hwrev, fwrev, year, month)	\
+	(((u64) hwrev) << 40 |                  \
+	 ((u64) fwrev) << 32 |                  \
+	 ((u64) year) << 16 |                   \
+	 ((u64) month))
+
+#define cid_rev_card(card)		  \
+	cid_rev(card->cid.hwrev,	  \
+		    card->cid.fwrev,      \
+		    card->cid.year,	  \
+		    card->cid.month)
+
+/*
+ * Unconditionally quirk add/remove.
+ */
+
+static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+	card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+	card->quirks &= ~data;
+}
 
 #define mmc_card_mmc(c)		((c)->type == MMC_TYPE_MMC)
 #define mmc_card_sd(c)		((c)->type == MMC_TYPE_SD)
@@ -163,12 +299,50 @@ void mmc_fixup_device(struct mmc_card *dev);
 #define mmc_card_highspeed(c)	((c)->state & MMC_STATE_HIGHSPEED)
 #define mmc_card_blockaddr(c)	((c)->state & MMC_STATE_BLOCKADDR)
 #define mmc_card_ddr_mode(c)	((c)->state & MMC_STATE_HIGHSPEED_DDR)
+#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
+#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
 
 #define mmc_card_set_present(c)	((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
 #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
 #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
 #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
+#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
+#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+
+/*
+ * Quirk add/remove for MMC products.
+ */
+
+static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
+{
+	if (mmc_card_mmc(card))
+		card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
+						   int data)
+{
+	if (mmc_card_mmc(card))
+		card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for SD products.
+ */
+
+static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
+{
+	if (mmc_card_sd(card))
+		card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
+						   int data)
+{
+	if (mmc_card_sd(card))
+		card->quirks &= ~data;
+}
 
 static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
 {
@@ -180,6 +354,16 @@ static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
 	return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 }
 
+static inline int mmc_card_disable_cd(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_DISABLE_CD;
+}
+
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
 #define mmc_card_name(c)	((c)->cid.prod_name)
 #define mmc_card_id(c)		(dev_name(&(c)->dev))
 
@@ -203,4 +387,7 @@ struct mmc_driver {
 extern int mmc_register_driver(struct mmc_driver *);
 extern void mmc_unregister_driver(struct mmc_driver *);
 
+extern void mmc_fixup_device(struct mmc_card *card,
+			     const struct mmc_fixup *table);
+
 #endif

+ 4 - 1
include/linux/mmc/core.h

@@ -92,7 +92,7 @@ struct mmc_command {
  *              actively failing requests
  */
 
-	unsigned int		erase_timeout;	/* in milliseconds */
+	unsigned int		cmd_timeout_ms;	/* in milliseconds */
 
 	struct mmc_data		*data;		/* data segment associated with cmd */
 	struct mmc_request	*mrq;		/* associated request */
@@ -120,6 +120,7 @@ struct mmc_data {
 };
 
 struct mmc_request {
+	struct mmc_command	*sbc;		/* SET_BLOCK_COUNT for multiblock */
 	struct mmc_command	*cmd;
 	struct mmc_data		*data;
 	struct mmc_command	*stop;
@@ -133,8 +134,10 @@ struct mmc_card;
 
 extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
 extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
+extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
 extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
 	struct mmc_command *, int);
+extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
 
 #define MMC_ERASE_ARG		0x00000000
 #define MMC_SECURE_ERASE_ARG	0x80000000

+ 48 - 1
include/linux/mmc/host.h

@@ -50,12 +50,30 @@ struct mmc_ios {
 #define MMC_TIMING_LEGACY	0
 #define MMC_TIMING_MMC_HS	1
 #define MMC_TIMING_SD_HS	2
+#define MMC_TIMING_UHS_SDR12	MMC_TIMING_LEGACY
+#define MMC_TIMING_UHS_SDR25	MMC_TIMING_SD_HS
+#define MMC_TIMING_UHS_SDR50	3
+#define MMC_TIMING_UHS_SDR104	4
+#define MMC_TIMING_UHS_DDR50	5
 
 	unsigned char	ddr;			/* dual data rate used */
 
 #define MMC_SDR_MODE		0
 #define MMC_1_2V_DDR_MODE	1
 #define MMC_1_8V_DDR_MODE	2
+
+	unsigned char	signal_voltage;		/* signalling voltage (1.8V or 3.3V) */
+
+#define MMC_SIGNAL_VOLTAGE_330	0
+#define MMC_SIGNAL_VOLTAGE_180	1
+#define MMC_SIGNAL_VOLTAGE_120	2
+
+	unsigned char	drv_type;		/* driver type (A, B, C, D) */
+
+#define MMC_SET_DRIVER_TYPE_B	0
+#define MMC_SET_DRIVER_TYPE_A	1
+#define MMC_SET_DRIVER_TYPE_C	2
+#define MMC_SET_DRIVER_TYPE_D	3
 };
 
 struct mmc_host_ops {
@@ -117,6 +135,10 @@ struct mmc_host_ops {
 
 	/* optional callback for HC quirks */
 	void	(*init_card)(struct mmc_host *host, struct mmc_card *card);
+
+	int	(*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
+	int	(*execute_tuning)(struct mmc_host *host);
+	void	(*enable_preset_value)(struct mmc_host *host, bool enable);
 };
 
 struct mmc_card;
@@ -173,6 +195,22 @@ struct mmc_host {
 						/* DDR mode at 1.2V */
 #define MMC_CAP_POWER_OFF_CARD	(1 << 13)	/* Can power off after boot */
 #define MMC_CAP_BUS_WIDTH_TEST	(1 << 14)	/* CMD14/CMD19 bus width ok */
+#define MMC_CAP_UHS_SDR12	(1 << 15)	/* Host supports UHS SDR12 mode */
+#define MMC_CAP_UHS_SDR25	(1 << 16)	/* Host supports UHS SDR25 mode */
+#define MMC_CAP_UHS_SDR50	(1 << 17)	/* Host supports UHS SDR50 mode */
+#define MMC_CAP_UHS_SDR104	(1 << 18)	/* Host supports UHS SDR104 mode */
+#define MMC_CAP_UHS_DDR50	(1 << 19)	/* Host supports UHS DDR50 mode */
+#define MMC_CAP_SET_XPC_330	(1 << 20)	/* Host supports >150mA current at 3.3V */
+#define MMC_CAP_SET_XPC_300	(1 << 21)	/* Host supports >150mA current at 3.0V */
+#define MMC_CAP_SET_XPC_180	(1 << 22)	/* Host supports >150mA current at 1.8V */
+#define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
+#define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
+#define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */
+#define MMC_CAP_MAX_CURRENT_200	(1 << 26)	/* Host max current limit is 200mA */
+#define MMC_CAP_MAX_CURRENT_400	(1 << 27)	/* Host max current limit is 400mA */
+#define MMC_CAP_MAX_CURRENT_600	(1 << 28)	/* Host max current limit is 600mA */
+#define MMC_CAP_MAX_CURRENT_800	(1 << 29)	/* Host max current limit is 800mA */
+#define MMC_CAP_CMD23		(1 << 30)	/* CMD23 supported. */
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
@@ -321,10 +359,19 @@ static inline int mmc_card_is_removable(struct mmc_host *host)
 	return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
 }
 
-static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
+static inline int mmc_card_keep_power(struct mmc_host *host)
 {
 	return host->pm_flags & MMC_PM_KEEP_POWER;
 }
 
+static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
+{
+	return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
+}
+
+static inline int mmc_host_cmd23(struct mmc_host *host)
+{
+	return host->caps & MMC_CAP_CMD23;
+}
 #endif
 

+ 54 - 0
include/linux/mmc/ioctl.h

@@ -0,0 +1,54 @@
+#ifndef LINUX_MMC_IOCTL_H
+#define LINUX_MMC_IOCTL_H
+struct mmc_ioc_cmd {
+	/* Implies direction of data.  true = write, false = read */
+	int write_flag;
+
+	/* Application-specific command.  true = precede with CMD55 */
+	int is_acmd;
+
+	__u32 opcode;
+	__u32 arg;
+	__u32 response[4];  /* CMD response */
+	unsigned int flags;
+	unsigned int blksz;
+	unsigned int blocks;
+
+	/*
+	 * Sleep at least postsleep_min_us useconds, and at most
+	 * postsleep_max_us useconds *after* issuing command.  Needed for
+	 * some read commands for which cards have no other way of indicating
+	 * they're ready for the next command (i.e. there is no equivalent of
+	 * a "busy" indicator for read operations).
+	 */
+	unsigned int postsleep_min_us;
+	unsigned int postsleep_max_us;
+
+	/*
+	 * Override driver-computed timeouts.  Note the difference in units!
+	 */
+	unsigned int data_timeout_ns;
+	unsigned int cmd_timeout_ms;
+
+	/*
+	 * For 64-bit machines, the next member, ``__u64 data_ptr``, wants to
+	 * be 8-byte aligned.  Make sure this struct is the same size when
+	 * built for 32-bit.
+	 */
+	__u32 __pad;
+
+	/* DAT buffer */
+	__u64 data_ptr;
+};
+#define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr
+
+#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
+
+/*
+ * Since this ioctl is only meant to enhance (and not replace) normal access
+ * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
+ * is enforced per ioctl call.  For larger data transfers, use the normal
+ * block device operations.
+ */
+#define MMC_IOC_MAX_BYTES  (512L * 256)
+#endif  /* LINUX_MMC_IOCTL_H */

+ 18 - 0
include/linux/mmc/mmc.h

@@ -50,6 +50,7 @@
 #define MMC_SET_BLOCKLEN         16   /* ac   [31:0] block len   R1  */
 #define MMC_READ_SINGLE_BLOCK    17   /* adtc [31:0] data addr   R1  */
 #define MMC_READ_MULTIPLE_BLOCK  18   /* adtc [31:0] data addr   R1  */
+#define MMC_SEND_TUNING_BLOCK    19   /* adtc                    R1  */
 
   /* class 3 */
 #define MMC_WRITE_DAT_UNTIL_STOP 20   /* adtc [31:0] data addr   R1  */
@@ -82,6 +83,12 @@
 #define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
 #define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
 
+static inline bool mmc_op_multi(u32 opcode)
+{
+	return opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+	       opcode == MMC_READ_MULTIPLE_BLOCK;
+}
+
 /*
  * MMC_SWITCH argument format:
  *
@@ -255,18 +262,23 @@ struct _mmc_csd {
 
 #define EXT_CSD_PARTITION_ATTRIBUTE	156	/* R/W */
 #define EXT_CSD_PARTITION_SUPPORT	160	/* RO */
+#define EXT_CSD_WR_REL_PARAM		166	/* RO */
 #define EXT_CSD_ERASE_GROUP_DEF		175	/* R/W */
+#define EXT_CSD_PART_CONFIG		179	/* R/W */
 #define EXT_CSD_ERASED_MEM_CONT		181	/* RO */
 #define EXT_CSD_BUS_WIDTH		183	/* R/W */
 #define EXT_CSD_HS_TIMING		185	/* R/W */
 #define EXT_CSD_REV			192	/* RO */
 #define EXT_CSD_STRUCTURE		194	/* RO */
 #define EXT_CSD_CARD_TYPE		196	/* RO */
+#define EXT_CSD_PART_SWITCH_TIME        199     /* RO */
 #define EXT_CSD_SEC_CNT			212	/* RO, 4 bytes */
 #define EXT_CSD_S_A_TIMEOUT		217	/* RO */
+#define EXT_CSD_REL_WR_SEC_C		222	/* RO */
 #define EXT_CSD_HC_WP_GRP_SIZE		221	/* RO */
 #define EXT_CSD_ERASE_TIMEOUT_MULT	223	/* RO */
 #define EXT_CSD_HC_ERASE_GRP_SIZE	224	/* RO */
+#define EXT_CSD_BOOT_MULT		226	/* RO */
 #define EXT_CSD_SEC_TRIM_MULT		229	/* RO */
 #define EXT_CSD_SEC_ERASE_MULT		230	/* RO */
 #define EXT_CSD_SEC_FEATURE_SUPPORT	231	/* RO */
@@ -276,6 +288,12 @@ struct _mmc_csd {
  * EXT_CSD field definitions
  */
 
+#define EXT_CSD_WR_REL_PARAM_EN		(1<<2)
+
+#define EXT_CSD_PART_CONFIG_ACC_MASK	(0x7)
+#define EXT_CSD_PART_CONFIG_ACC_BOOT0	(0x1)
+#define EXT_CSD_PART_CONFIG_ACC_BOOT1	(0x2)
+
 #define EXT_CSD_CMD_SET_NORMAL		(1<<0)
 #define EXT_CSD_CMD_SET_SECURE		(1<<1)
 #define EXT_CSD_CMD_SET_CPSECURE	(1<<2)

+ 8 - 1
include/linux/mmc/sd.h

@@ -17,6 +17,7 @@
 /* This is basically the same command as for MMC with some quirks. */
 #define SD_SEND_RELATIVE_ADDR     3   /* bcr                     R6  */
 #define SD_SEND_IF_COND           8   /* bcr  [11:0] See below   R7  */
+#define SD_SWITCH_VOLTAGE         11  /* ac                      R1  */
 
   /* class 10 */
 #define SD_SWITCH                 6   /* adtc [31:0] See below   R1  */
@@ -32,6 +33,12 @@
 #define SD_APP_OP_COND           41   /* bcr  [31:0] OCR         R3  */
 #define SD_APP_SEND_SCR          51   /* adtc                    R1  */
 
+/* OCR bit definitions */
+#define SD_OCR_S18R		(1 << 24)    /* 1.8V switching request */
+#define SD_ROCR_S18A		SD_OCR_S18R  /* 1.8V switching accepted by card */
+#define SD_OCR_XPC		(1 << 28)    /* SDXC power control */
+#define SD_OCR_CCS		(1 << 30)    /* Card Capacity Status */
+
 /*
  * SD_SWITCH argument format:
  *
@@ -59,7 +66,7 @@
 
 #define SCR_SPEC_VER_0		0	/* Implements system specification 1.0 - 1.01 */
 #define SCR_SPEC_VER_1		1	/* Implements system specification 1.10 */
-#define SCR_SPEC_VER_2		2	/* Implements system specification 2.00 */
+#define SCR_SPEC_VER_2		2	/* Implements system specification 2.00-3.0X */
 
 /*
  * SD bus widths

+ 15 - 0
include/linux/mmc/sdhci.h

@@ -85,6 +85,8 @@ struct sdhci_host {
 #define SDHCI_QUIRK_NO_HISPD_BIT			(1<<29)
 /* Controller treats ADMA descriptors with length 0000h incorrectly */
 #define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC		(1<<30)
+/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
+#define SDHCI_QUIRK_UNSTABLE_RO_DETECT			(1<<31)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -109,11 +111,16 @@ struct sdhci_host {
 #define SDHCI_USE_ADMA		(1<<1)	/* Host is ADMA capable */
 #define SDHCI_REQ_USE_DMA	(1<<2)	/* Use DMA for this req. */
 #define SDHCI_DEVICE_DEAD	(1<<3)	/* Device unresponsive */
+#define SDHCI_SDR50_NEEDS_TUNING (1<<4)	/* SDR50 needs tuning */
+#define SDHCI_NEEDS_RETUNING	(1<<5)	/* Host needs retuning */
+#define SDHCI_AUTO_CMD12	(1<<6)	/* Auto CMD12 support */
+#define SDHCI_AUTO_CMD23	(1<<7)	/* Auto CMD23 support */
 
 	unsigned int version;	/* SDHCI spec. version */
 
 	unsigned int max_clk;	/* Max possible freq (MHz) */
 	unsigned int timeout_clk;	/* Timeout freq (KHz) */
+	unsigned int clk_mul;	/* Clock Multiplier value */
 
 	unsigned int clock;	/* Current clock (MHz) */
 	u8 pwr;			/* Current voltage */
@@ -145,6 +152,14 @@ struct sdhci_host {
 	unsigned int            ocr_avail_sd;
 	unsigned int            ocr_avail_mmc;
 
+	wait_queue_head_t	buf_ready_int;	/* Waitqueue for Buffer Read Ready interrupt */
+	unsigned int		tuning_done;	/* Condition flag set when CMD19 succeeds */
+
+	unsigned int		tuning_count;	/* Timer count for re-tuning */
+	unsigned int		tuning_mode;	/* Re-tuning mode supported by host */
+#define SDHCI_TUNING_MODE_1	0
+	struct timer_list	tuning_timer;	/* Timer for tuning */
+
 	unsigned long private[0] ____cacheline_aligned;
 };
 #endif /* __SDHCI_H */

+ 4 - 0
include/linux/mmc/sh_mobile_sdhi.h

@@ -3,12 +3,16 @@
 
 #include <linux/types.h>
 
+struct platform_device;
+struct tmio_mmc_data;
+
 struct sh_mobile_sdhi_info {
 	int dma_slave_tx;
 	int dma_slave_rx;
 	unsigned long tmio_flags;
 	unsigned long tmio_caps;
 	u32 tmio_ocr_mask;	/* available MMC voltages */
+	struct tmio_mmc_data *pdata;
 	void (*set_pwr)(struct platform_device *pdev, int state);
 	int (*get_cd)(struct platform_device *pdev);
 };