@@ -75,6 +75,20 @@
 #include "iwl-agn-hw.h"
 #include "internal.h"
 
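+/*
+ * Helpers for indirect access to shared (SHR) registers: the target
+ * register address goes in the low 16 bits of the HEEP control word,
+ * bits 31:28 carry the access type used here (2 for a read, 3 for a
+ * write), and data is transferred through the HEEP data register.
+ */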
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+                    ((reg & 0x0000ffff) | (2 << 28)));
+        return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+                    ((reg & 0x0000ffff) | (3 << 28)));
+}
+
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
         if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -229,6 +243,116 @@ out:
         return ret;
 }
 
+/*
+ * Enable LP XTAL to avoid a HW bug where the device can consume considerable
+ * power when no FW is loaded after a device reset. LP XTAL is disabled by
+ * default after a device HW reset; enable it only when the XTAL is fed by the
+ * internal source. Also configure the device's "persistence" mode so that the
+ * XTAL is not reset again when SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+        int ret;
+        u32 apmg_gp1_reg;
+        u32 apmg_xtal_cfg_reg;
+        u32 dl_cfg_reg;
+
+        /* Force XTAL ON */
+        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+                                 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+        /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+        udelay(10);
+
+        /*
+         * Set "initialization complete" bit to move adapter from
+         * D0U* --> D0A* (powered-up active) state.
+         */
+        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+        /*
+         * Wait for clock stabilization; once stabilized, access to
+         * device-internal resources is possible.
+         */
+        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                           25000);
+        if (WARN_ON(ret < 0)) {
+                IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+                /* Release XTAL ON request */
+                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                           CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+                return;
+        }
+
+        /*
+         * Clear "disable persistence" to avoid LP XTAL resetting when
+         * SHRD_HW_RST is applied in S3.
+         */
+        iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+                            APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+        /*
+         * Force APMG XTAL to be active to prevent its disabling by HW
+         * caused by APMG idle state.
+         */
+        apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+                                                    SHR_APMG_XTAL_CFG_REG);
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+                                 apmg_xtal_cfg_reg |
+                                 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+        /*
+         * Reset entire device again - do controller reset (results in
+         * SHRD_HW_RST). Turn MAC off before proceeding.
+         */
+        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+        udelay(10);
+
+        /* Enable LP XTAL by indirect access through CSR */
+        apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+                                 SHR_APMG_GP1_WF_XTAL_LP_EN |
+                                 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+        /* Clear delay line clock power up */
+        dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+                                 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+        /*
+         * Enable persistence mode to avoid LP XTAL resetting when
+         * SHRD_HW_RST is applied in S3.
+         */
+        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+        /*
+         * Clear "initialization complete" bit to move adapter from
+         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+         */
+        iwl_clear_bit(trans, CSR_GP_CNTRL,
+                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+        /* Activates XTAL resources monitor */
+        __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+                                 CSR_MONITOR_XTAL_RESOURCES);
+
+        /* Release XTAL ON request */
+        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+        udelay(10);
+
+        /* Release APMG XTAL */
+        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+                                 apmg_xtal_cfg_reg &
+                                 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
 static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 {
         int ret = 0;
@@ -256,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
         /* Stop device's DMA activity */
         iwl_pcie_apm_stop_master(trans);
 
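+        /*
+         * Devices that need the LP XTAL workaround enable it here;
+         * iwl_pcie_apm_lp_xtal_enable() performs its own SW resets,
+         * so the plain reset below is skipped.
+         */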
+        if (trans->cfg->lp_xtal_workaround) {
+                iwl_pcie_apm_lp_xtal_enable(trans);
+                return;
+        }
+
         /* Reset the entire device */
         iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 
@@ -1208,6 +1337,7 @@ static const char *get_csr_string(int cmd)
         IWL_CMD(CSR_GIO_CHICKEN_BITS);
         IWL_CMD(CSR_ANA_PLL_CFG);
         IWL_CMD(CSR_HW_REV_WA_REG);
+        IWL_CMD(CSR_MONITOR_STATUS_REG);
         IWL_CMD(CSR_DBG_HPET_MEM_REG);
         default:
                 return "UNKNOWN";
@@ -1240,6 +1370,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
                 CSR_DRAM_INT_TBL_REG,
                 CSR_GIO_CHICKEN_BITS,
                 CSR_ANA_PLL_CFG,
+                CSR_MONITOR_STATUS_REG,
                 CSR_HW_REV_WA_REG,
                 CSR_DBG_HPET_MEM_REG
         };