@@ -70,20 +70,27 @@ static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
 }
 
 static int ufs_qcom_host_clk_get(struct device *dev,
-		const char *name, struct clk **clk_out)
+		const char *name, struct clk **clk_out, bool optional)
 {
 	struct clk *clk;
 	int err = 0;
 
 	clk = devm_clk_get(dev, name);
-	if (IS_ERR(clk)) {
-		err = PTR_ERR(clk);
-		dev_err(dev, "%s: failed to get %s err %d",
-				__func__, name, err);
-	} else {
+	if (!IS_ERR(clk)) {
 		*clk_out = clk;
+		return 0;
 	}
 
+	err = PTR_ERR(clk);
+
+	if (optional && err == -ENOENT) {
+		*clk_out = NULL;
+		return 0;
+	}
+
+	if (err != -EPROBE_DEFER)
+		dev_err(dev, "failed to get %s err %d\n", name, err);
+
 	return err;
 }
 
@@ -104,11 +111,9 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 	if (!host->is_lane_clks_enabled)
 		return;
 
-	if (host->hba->lanes_per_direction > 1)
-		clk_disable_unprepare(host->tx_l1_sync_clk);
+	clk_disable_unprepare(host->tx_l1_sync_clk);
 	clk_disable_unprepare(host->tx_l0_sync_clk);
-	if (host->hba->lanes_per_direction > 1)
-		clk_disable_unprepare(host->rx_l1_sync_clk);
+	clk_disable_unprepare(host->rx_l1_sync_clk);
 	clk_disable_unprepare(host->rx_l0_sync_clk);
 
 	host->is_lane_clks_enabled = false;
@@ -132,24 +137,21 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
 	if (err)
 		goto disable_rx_l0;
 
-	if (host->hba->lanes_per_direction > 1) {
-		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
 			host->rx_l1_sync_clk);
-		if (err)
-			goto disable_tx_l0;
+	if (err)
+		goto disable_tx_l0;
 
-		err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
 			host->tx_l1_sync_clk);
-		if (err)
-			goto disable_rx_l1;
-	}
+	if (err)
+		goto disable_rx_l1;
 
 	host->is_lane_clks_enabled = true;
 	goto out;
 
 disable_rx_l1:
-	if (host->hba->lanes_per_direction > 1)
-		clk_disable_unprepare(host->rx_l1_sync_clk);
+	clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
 	clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
@@ -163,25 +165,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 	int err = 0;
 	struct device *dev = host->hba->dev;
 
-	err = ufs_qcom_host_clk_get(dev,
-			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
+			&host->rx_l0_sync_clk, false);
 	if (err)
 		goto out;
 
-	err = ufs_qcom_host_clk_get(dev,
-			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
+			&host->tx_l0_sync_clk, false);
 	if (err)
 		goto out;
 
 	/* In case of single lane per direction, don't read lane1 clocks */
 	if (host->hba->lanes_per_direction > 1) {
 		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-					&host->rx_l1_sync_clk, false);
+					&host->rx_l1_sync_clk, false);
 		if (err)
 			goto out;
 
 		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-					&host->tx_l1_sync_clk);
+					&host->tx_l1_sync_clk, true);
 	}
 out:
 	return err;
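
Note (not part of the patch): the simplification above relies on the common clk API treating a NULL struct clk pointer as a dummy clock, i.e. clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is a no-op. Once an absent optional clock is stored as NULL by ufs_qcom_host_clk_get(), the lanes_per_direction > 1 guards around the lane-1 enable/disable calls become unnecessary. A minimal sketch of the same pattern follows; the demo_* names and the "demo_optional_clk" identifier are made up for illustration and do not exist in this driver.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct demo_host {
	struct clk *opt_clk;	/* clock that may legitimately be absent */
};

static int demo_get_optional_clk(struct device *dev, struct demo_host *host)
{
	struct clk *clk = devm_clk_get(dev, "demo_optional_clk");

	if (IS_ERR(clk)) {
		/* Clock not provided: remember it as NULL and carry on. */
		if (PTR_ERR(clk) == -ENOENT) {
			host->opt_clk = NULL;
			return 0;
		}
		/* Real failure (including -EPROBE_DEFER): propagate. */
		return PTR_ERR(clk);
	}

	host->opt_clk = clk;
	return 0;
}

static int demo_enable(struct demo_host *host)
{
	/* Returns 0 when opt_clk is NULL; no conditional needed. */
	return clk_prepare_enable(host->opt_clk);
}

static void demo_disable(struct demo_host *host)
{
	/* Likewise a no-op for a NULL clock. */
	clk_disable_unprepare(host->opt_clk);
}

Newer kernels also provide devm_clk_get_optional(), which performs this missing-clock-to-NULL mapping inside the clk framework itself.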