@@ -124,3 +124,126 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
 }
+
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
+{
+	int i = pcie->phy_count;
+
+	while (i--) {
+		phy_power_off(pcie->phy[i]);
+		phy_exit(pcie->phy[i]);
+	}
+}
+
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < pcie->phy_count; i++) {
+		ret = phy_init(pcie->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_power_on(pcie->phy[i]);
+		if (ret < 0) {
+			phy_exit(pcie->phy[i]);
+			goto err_phy;
+		}
+	}
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		phy_power_off(pcie->phy[i]);
+		phy_exit(pcie->phy[i]);
+	}
+
+	return ret;
+}
+
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
+{
+	struct device_node *np = dev->of_node;
+	int phy_count;
+	struct phy **phy;
+	struct device_link **link;
+	int i;
+	int ret;
+	const char *name;
+
+	phy_count = of_property_count_strings(np, "phy-names");
+	if (phy_count < 1) {
+		dev_err(dev, "no phy-names. PHY will not be initialized\n");
+		pcie->phy_count = 0;
+		return 0;
+	}
+
+	phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	for (i = 0; i < phy_count; i++) {
+		of_property_read_string_index(np, "phy-names", i, &name);
+		phy[i] = devm_phy_optional_get(dev, name);
+		if (IS_ERR(phy[i]))
+			return PTR_ERR(phy[i]);
+
+		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+		if (!link[i]) {
+			ret = -EINVAL;
+			goto err_link;
+		}
+	}
+
+	pcie->phy_count = phy_count;
+	pcie->phy = phy;
+	pcie->link = link;
+
+	ret = cdns_pcie_enable_phy(pcie);
+	if (ret)
+		goto err_link;
+
+	return 0;
+
+err_link:
+	while (--i >= 0)
+		device_link_del(link[i]);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cdns_pcie_suspend_noirq(struct device *dev)
+{
+	struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+	cdns_pcie_disable_phy(pcie);
+
+	return 0;
+}
+
+static int cdns_pcie_resume_noirq(struct device *dev)
+{
+	struct cdns_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	ret = cdns_pcie_enable_phy(pcie);
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+const struct dev_pm_ops cdns_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+				      cdns_pcie_resume_noirq)
+};
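
For context, a minimal sketch of how a platform glue driver might consume the helpers added above. It is illustrative only and not part of the patch: the driver name and probe contents are hypothetical, struct cdns_pcie is allocated directly here rather than embedded in an RC/EP specific structure, and it assumes the new symbols are declared in pcie-cadence.h. The point it highlights is that drvdata must point at the struct cdns_pcie, because cdns_pcie_suspend_noirq() and cdns_pcie_resume_noirq() retrieve it with dev_get_drvdata().

/*
 * Hypothetical glue driver, for illustration only.  Register mapping,
 * address translation setup and link bring-up are omitted.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"

static int example_cdns_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	/*
	 * The noirq PM callbacks look the PHYs up through drvdata, so it
	 * must point to the struct cdns_pcie.
	 */
	platform_set_drvdata(pdev, pcie);

	/* Get, init and power on every PHY listed in "phy-names". */
	ret = cdns_pcie_init_phy(dev, pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}

	/* ... map registers, program address translation, start the link ... */

	return 0;
}

static struct platform_driver example_cdns_pcie_driver = {
	.driver = {
		.name = "example-cdns-pcie",
		.pm = &cdns_pcie_pm_ops,	/* PHYs off/on across system sleep */
	},
	.probe = example_cdns_pcie_probe,
};
builtin_platform_driver(example_cdns_pcie_driver);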