@@ -73,10 +73,38 @@ struct rockchip_pcie_data {
 struct rockchip_pcie_phy {
 	struct rockchip_pcie_data *phy_data;
 	struct regmap *reg_base;
+	struct phy_pcie_instance {
+		struct phy *phy;
+		u32 index;
+	} phys[PHY_MAX_LANE_NUM];
+	struct mutex pcie_mutex;
 	struct reset_control *phy_rst;
 	struct clk *clk_pciephy_ref;
+	int pwr_cnt;
+	int init_cnt;
 };
 
+static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst)
+{
+	return container_of(inst, struct rockchip_pcie_phy,
+			    phys[inst->index]);
+}
+
+static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev,
+					      struct of_phandle_args *args)
+{
+	struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev);
+
+	if (args->args_count == 0)
+		return rk_phy->phys[0].phy;
+
+	if (WARN_ON(args->args[0] >= PHY_MAX_LANE_NUM))
+		return ERR_PTR(-ENODEV);
+
+	return rk_phy->phys[args->args[0]].phy;
+}
+
+
 static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
 			      u32 addr, u32 data)
 {
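
The xlate callback above keeps the legacy binding working: a phandle with no cells (#phy-cells = <0>) resolves to lane 0, while a one-cell phandle selects the lane directly, bounds-checked against PHY_MAX_LANE_NUM. A per-lane consumer could then look roughly like the sketch below; the "pcie-phy-%d" names, MAX_LANE_NUM, and the DT fragment in the comment are illustrative assumptions, not part of this patch:

/* Hypothetical per-lane consumer, e.g. in a PCIe host driver.
 * Assumes a DT node along the lines of:
 *	phys = <&pcie_phy 0>, <&pcie_phy 1>,
 *	       <&pcie_phy 2>, <&pcie_phy 3>;
 *	phy-names = "pcie-phy-0", "pcie-phy-1",
 *		    "pcie-phy-2", "pcie-phy-3";
 */
static int get_lane_phys(struct device *dev,
			 struct phy *phys[MAX_LANE_NUM])
{
	char name[16];
	int i;

	for (i = 0; i < MAX_LANE_NUM; i++) {
		snprintf(name, sizeof(name), "pcie-phy-%d", i);
		phys[i] = devm_of_phy_get(dev, dev->of_node, name);
		if (IS_ERR(phys[i]))
			return PTR_ERR(phys[i]);
	}
	return 0;
}
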
@@ -116,29 +144,59 @@ static inline u32 phy_rd_cfg(struct rockchip_pcie_phy *rk_phy,
 
 static int rockchip_pcie_phy_power_off(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
 	int err = 0;
 
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	regmap_write(rk_phy->reg_base,
+		     rk_phy->phy_data->pcie_laneoff,
+		     HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
+				   PHY_LANE_IDLE_MASK,
+				   PHY_LANE_IDLE_A_SHIFT + inst->index));
+
+	if (--rk_phy->pwr_cnt)
+		goto err_out;
+
 	err = reset_control_assert(rk_phy->phy_rst);
 	if (err) {
 		dev_err(&phy->dev, "assert phy_rst err %d\n", err);
-		return err;
+		goto err_restore;
 	}
 
+err_out:
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return 0;
+
+err_restore:
+	rk_phy->pwr_cnt++;
+	regmap_write(rk_phy->reg_base,
+		     rk_phy->phy_data->pcie_laneoff,
+		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
+				   PHY_LANE_IDLE_MASK,
+				   PHY_LANE_IDLE_A_SHIFT + inst->index));
+	mutex_unlock(&rk_phy->pcie_mutex);
+	return err;
 }
 
 static int rockchip_pcie_phy_power_on(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
 	int err = 0;
 	u32 status;
 	unsigned long timeout;
 
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	if (rk_phy->pwr_cnt++)
+		goto err_out;
+
 	err = reset_control_deassert(rk_phy->phy_rst);
 	if (err) {
 		dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
-		return err;
+		goto err_pwr_cnt;
 	}
 
 	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
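
With the per-lane model, power_off first parks only the caller's lane via the GRF lane-idle field and asserts the shared PHY reset only once pwr_cnt drops to zero; the err_restore path re-activates the lane and restores the count if the reset assert fails. The register value follows the Rockchip GRF high-word write-enable scheme: the upper 16 bits gate which of the lower 16 bits the write may change. A minimal sketch of the arithmetic, assuming the values of the defines earlier in this driver (PHY_LANE_IDLE_OFF = 0x1, PHY_LANE_IDLE_MASK = 0x1, PHY_LANE_IDLE_A_SHIFT = 3; verify against the actual header block):

#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))

/* idle lane 2: value bit 5, write-enable bit 21 */
u32 v = HIWORD_UPDATE(0x1, 0x1, 3 + 2);	/* v == 0x00200020 */
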
@@ -146,6 +204,12 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
 				   PHY_CFG_ADDR_MASK,
 				   PHY_CFG_ADDR_SHIFT));
 
+	regmap_write(rk_phy->reg_base,
+		     rk_phy->phy_data->pcie_laneoff,
+		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
+				   PHY_LANE_IDLE_MASK,
+				   PHY_LANE_IDLE_A_SHIFT + inst->index));
+
 	/*
 	 * No documented timeout value for phy operation below,
 	 * so we make it large enough here. And we use loop-break
@@ -214,18 +278,29 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
 		goto err_pll_lock;
 	}
 
+err_out:
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return 0;
 
 err_pll_lock:
 	reset_control_assert(rk_phy->phy_rst);
+err_pwr_cnt:
+	rk_phy->pwr_cnt--;
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return err;
 }
 
 static int rockchip_pcie_phy_init(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
 	int err = 0;
 
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	if (rk_phy->init_cnt++)
+		goto err_out;
+
 	err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
 	if (err) {
 		dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
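
power_on and init share one idiom: take the mutex, let only the zero-to-one transition of the counter do the real bring-up, and have every error path undo its own increment so the count stays balanced. Distilled into a minimal standalone sketch (assumed helper name, not driver code):

static int shared_enable(struct rockchip_pcie_phy *rk_phy)
{
	int err = 0;

	mutex_lock(&rk_phy->pcie_mutex);
	if (rk_phy->pwr_cnt++ == 0) {
		/* first user performs the shared bring-up */
		err = reset_control_deassert(rk_phy->phy_rst);
		if (err)
			rk_phy->pwr_cnt--;	/* undo on failure */
	}
	mutex_unlock(&rk_phy->pcie_mutex);
	return err;
}
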
@@ -238,20 +313,33 @@ static int rockchip_pcie_phy_init(struct phy *phy)
 		goto err_reset;
 	}
 
-	return err;
+err_out:
+	mutex_unlock(&rk_phy->pcie_mutex);
+	return 0;
 
 err_reset:
+
 	clk_disable_unprepare(rk_phy->clk_pciephy_ref);
 err_refclk:
+	rk_phy->init_cnt--;
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return err;
 }
 
 static int rockchip_pcie_phy_exit(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
+
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	if (--rk_phy->init_cnt)
+		goto err_init_cnt;
 
 	clk_disable_unprepare(rk_phy->clk_pciephy_ref);
 
+err_init_cnt:
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return 0;
 }
 
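
exit mirrors init with the same counter: only the last caller disables the reference clock, and the err_init_cnt label is just the unlock path, not an error. The teardown half of the idiom, again as a sketch with an assumed helper name:

static void shared_disable(struct rockchip_pcie_phy *rk_phy)
{
	mutex_lock(&rk_phy->pcie_mutex);
	if (--rk_phy->init_cnt == 0)	/* last user tears down */
		clk_disable_unprepare(rk_phy->clk_pciephy_ref);
	mutex_unlock(&rk_phy->pcie_mutex);
}
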
@@ -283,10 +371,11 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rockchip_pcie_phy *rk_phy;
-	struct phy *generic_phy;
 	struct phy_provider *phy_provider;
 	struct regmap *grf;
 	const struct of_device_id *of_id;
+	int i;
+	u32 phy_num;
 
 	grf = syscon_node_to_regmap(dev->parent->of_node);
 	if (IS_ERR(grf)) {
@@ -305,6 +394,8 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
 	rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
 	rk_phy->reg_base = grf;
 
+	mutex_init(&rk_phy->pcie_mutex);
+
 	rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
 	if (IS_ERR(rk_phy->phy_rst)) {
 		if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
@@ -319,14 +410,26 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(rk_phy->clk_pciephy_ref);
 	}
 
-	generic_phy = devm_phy_create(dev, dev->of_node, &ops);
-	if (IS_ERR(generic_phy)) {
-		dev_err(dev, "failed to create PHY\n");
-		return PTR_ERR(generic_phy);
+	/* parse #phy-cells to see if it's legacy PHY model */
+	if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
+		return -ENOENT;
+
+	phy_num = (phy_num == 0) ? 1 : PHY_MAX_LANE_NUM;
+	dev_dbg(dev, "phy number is %d\n", phy_num);
+
+	for (i = 0; i < phy_num; i++) {
+		rk_phy->phys[i].phy = devm_phy_create(dev, dev->of_node, &ops);
+		if (IS_ERR(rk_phy->phys[i].phy)) {
+			dev_err(dev, "failed to create PHY%d\n", i);
+			return PTR_ERR(rk_phy->phys[i].phy);
+		}
+		rk_phy->phys[i].index = i;
+		phy_set_drvdata(rk_phy->phys[i].phy, &rk_phy->phys[i]);
 	}
 
-	phy_set_drvdata(generic_phy, rk_phy);
-	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	platform_set_drvdata(pdev, rk_phy);
+	phy_provider = devm_of_phy_provider_register(dev,
						     rockchip_pcie_phy_of_xlate);
 
 	return PTR_ERR_OR_ZERO(phy_provider);
 }
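
One subtlety in the probe changes: rockchip_pcie_phy_of_xlate() retrieves the driver state with dev_get_drvdata(), so switching from phy_set_drvdata(generic_phy, rk_phy) to platform_set_drvdata(pdev, rk_phy) is what feeds the xlate callback, while each PHY's own drvdata now carries its phy_pcie_instance instead. Both calls land in the same per-device slot, since platform_set_drvdata() is a thin wrapper, per include/linux/platform_device.h:

static inline void platform_set_drvdata(struct platform_device *pdev,
					void *data)
{
	dev_set_drvdata(&pdev->dev, data);
}
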