@@ -15,6 +15,7 @@
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/clk-lpss.h>
 #include <linux/pm_runtime.h>
@@ -26,6 +27,10 @@ ACPI_MODULE_NAME("acpi_lpss");
 
 #ifdef CONFIG_X86_INTEL_LPSS
 
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#include <asm/pmc_atom.h>
+
 #define LPSS_ADDR(desc) ((unsigned long)&desc)
 
 #define LPSS_CLK_SIZE	0x04
@@ -71,7 +76,7 @@ struct lpss_device_desc {
 	void (*setup)(struct lpss_private_data *pdata);
 };
 
-static struct lpss_device_desc lpss_dma_desc = {
+static const struct lpss_device_desc lpss_dma_desc = {
 	.flags = LPSS_CLK,
 };
 
@@ -84,6 +89,23 @@ struct lpss_private_data {
 	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
 };
 
+/* LPSS run time quirks */
+static unsigned int lpss_quirks;
+
+/*
+ * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
+ *
+ * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover
+ * it can be powered off automatically whenever the last LPSS device goes down.
+ * In case of no power any access to the DMA controller will hang the system.
+ * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
+ * well as on ASuS T100TA transformer.
+ *
+ * This quirk overrides power state of entire LPSS island to keep DMA powered
+ * on whenever we have at least one other device in use.
+ */
+#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)
+
 /* UART Component Parameter Register */
 #define LPSS_UART_CPR			0xF4
 #define LPSS_UART_CPR_AFCE		BIT(4)
@@ -196,13 +218,21 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
 	.setup = byt_i2c_setup,
 };
 
-static struct lpss_device_desc bsw_spi_dev_desc = {
+static const struct lpss_device_desc bsw_spi_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
 			| LPSS_NO_D3_DELAY,
 	.prv_offset = 0x400,
 	.setup = lpss_deassert_reset,
 };
 
+#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id lpss_cpu_ids[] = {
+	ICPU(0x37),	/* Valleyview, Bay Trail */
+	ICPU(0x4c),	/* Braswell, Cherry Trail */
+	{}
+};
+
 #else
 
 #define LPSS_ADDR(desc) (0UL)
@@ -574,6 +604,17 @@ static void acpi_lpss_restore_ctx(struct device *dev,
 {
 	unsigned int i;
 
+	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
+		unsigned long offset = i * sizeof(u32);
+
+		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
+		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
+			pdata->prv_reg_ctx[i], offset);
+	}
+}
+
+static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
+{
 	/*
 	 * The following delay is needed or the subsequent write operations may
 	 * fail. The LPSS devices are actually PCI devices and the PCI spec
@@ -586,14 +627,34 @@ static void acpi_lpss_restore_ctx(struct device *dev,
 		delay = 0;
 
 	msleep(delay);
+}
 
-	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
-		unsigned long offset = i * sizeof(u32);
+static int acpi_lpss_activate(struct device *dev)
+{
+	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+	int ret;
 
-		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
-		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
-			pdata->prv_reg_ctx[i], offset);
-	}
+	ret = acpi_dev_runtime_resume(dev);
+	if (ret)
+		return ret;
+
+	acpi_lpss_d3_to_d0_delay(pdata);
+
+	/*
+	 * This is called only on ->probe() stage where a device is either in
+	 * known state defined by BIOS or most likely powered off. Due to this
+	 * we have to deassert reset line to be sure that ->probe() will
+	 * recognize the device.
+	 */
+	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
+		lpss_deassert_reset(pdata);
+
+	return 0;
+}
+
+static void acpi_lpss_dismiss(struct device *dev)
+{
+	acpi_dev_runtime_suspend(dev);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -621,6 +682,8 @@ static int acpi_lpss_resume_early(struct device *dev)
 	if (ret)
 		return ret;
 
+	acpi_lpss_d3_to_d0_delay(pdata);
+
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_restore_ctx(dev, pdata);
 
@@ -628,6 +691,89 @@ static int acpi_lpss_resume_early(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+/* IOSF SB for LPSS island */
+#define LPSS_IOSF_UNIT_LPIOEP		0xA0
+#define LPSS_IOSF_UNIT_LPIO1		0xAB
+#define LPSS_IOSF_UNIT_LPIO2		0xAC
+
+#define LPSS_IOSF_PMCSR			0x84
+#define LPSS_PMCSR_D0			0
+#define LPSS_PMCSR_D3hot		3
+#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)
+
+#define LPSS_IOSF_GPIODEF0		0x154
+#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
+#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
+#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
+
+static DEFINE_MUTEX(lpss_iosf_mutex);
+
+static void lpss_iosf_enter_d3_state(void)
+{
+	u32 value1 = 0;
+	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+	u32 value2 = LPSS_PMCSR_D3hot;
+	u32 mask2 = LPSS_PMCSR_Dx_MASK;
+	/*
+	 * PMC provides an information about actual status of the LPSS devices.
+	 * Here we read the values related to LPSS power island, i.e. LPSS
+	 * devices, excluding both LPSS DMA controllers, along with SCC domain.
+	 */
+	u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
+	int ret;
+
+	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
+	if (ret)
+		return;
+
+	mutex_lock(&lpss_iosf_mutex);
+
+	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
+	if (ret)
+		goto exit;
+
+	/*
+	 * Get the status of entire LPSS power island per device basis.
+	 * Shutdown both LPSS DMA controllers if and only if all other devices
+	 * are already in D3hot.
+	 */
+	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
+	if (pmc_status)
+		goto exit;
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
+			LPSS_IOSF_GPIODEF0, value1, mask1);
+exit:
+	mutex_unlock(&lpss_iosf_mutex);
+}
+
+static void lpss_iosf_exit_d3_state(void)
+{
+	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3;
+	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+	u32 value2 = LPSS_PMCSR_D0;
+	u32 mask2 = LPSS_PMCSR_Dx_MASK;
+
+	mutex_lock(&lpss_iosf_mutex);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
+			LPSS_IOSF_GPIODEF0, value1, mask1);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
+			LPSS_IOSF_PMCSR, value2, mask2);
+
+	mutex_unlock(&lpss_iosf_mutex);
+}
+
 static int acpi_lpss_runtime_suspend(struct device *dev)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
@@ -640,7 +786,17 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_save_ctx(dev, pdata);
 
-	return acpi_dev_runtime_suspend(dev);
+	ret = acpi_dev_runtime_suspend(dev);
+
+	/*
+	 * This call must be last in the sequence, otherwise PMC will return
+	 * wrong status for devices being about to be powered off. See
+	 * lpss_iosf_enter_d3_state() for further information.
+	 */
+	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+		lpss_iosf_enter_d3_state();
+
+	return ret;
 }
 
 static int acpi_lpss_runtime_resume(struct device *dev)
@@ -648,10 +804,19 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
 
+	/*
+	 * This call is kept first to be in symmetry with
+	 * acpi_lpss_runtime_suspend() one.
+	 */
+	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+		lpss_iosf_exit_d3_state();
+
 	ret = acpi_dev_runtime_resume(dev);
 	if (ret)
 		return ret;
 
+	acpi_lpss_d3_to_d0_delay(pdata);
+
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
 		acpi_lpss_restore_ctx(dev, pdata);
 
@@ -660,6 +825,10 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 #endif /* CONFIG_PM */
 
 static struct dev_pm_domain acpi_lpss_pm_domain = {
+#ifdef CONFIG_PM
+	.activate = acpi_lpss_activate,
+	.dismiss = acpi_lpss_dismiss,
+#endif
 	.ops = {
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
@@ -705,8 +874,14 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
 	}
 
 	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
+	case BUS_NOTIFY_BIND_DRIVER:
 		pdev->dev.pm_domain = &acpi_lpss_pm_domain;
+		break;
+	case BUS_NOTIFY_DRIVER_NOT_BOUND:
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		pdev->dev.pm_domain = NULL;
+		break;
+	case BUS_NOTIFY_ADD_DEVICE:
 		if (pdata->dev_desc->flags & LPSS_LTR)
 			return sysfs_create_group(&pdev->dev.kobj,
 						  &lpss_attr_group);
@@ -714,7 +889,6 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
 	case BUS_NOTIFY_DEL_DEVICE:
 		if (pdata->dev_desc->flags & LPSS_LTR)
 			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
-		pdev->dev.pm_domain = NULL;
 		break;
 	default:
 		break;
@@ -754,10 +928,19 @@ static struct acpi_scan_handler lpss_handler = {
 
 void __init acpi_lpss_init(void)
 {
-	if (!lpt_clk_init()) {
-		bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
-		acpi_scan_add_handler(&lpss_handler);
-	}
+	const struct x86_cpu_id *id;
+	int ret;
+
+	ret = lpt_clk_init();
+	if (ret)
+		return;
+
+	id = x86_match_cpu(lpss_cpu_ids);
+	if (id)
+		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;
+
+	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
+	acpi_scan_add_handler(&lpss_handler);
 }
 
 #else