@@ -45,6 +45,9 @@
 #include "ufs_quirks.h"
 #include "unipro.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
 #define UFSHCD_REQ_SENSE_SIZE	18
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
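Defining CREATE_TRACE_POINTS before including the trace header is the standard tracepoint idiom: it must be done in exactly one compilation unit so that the TRACE_EVENT() macros in <trace/events/ufs.h> expand into the tracepoint definitions rather than mere declarations. The header itself is not part of this patch excerpt; as a minimal sketch of what it could contain for the clock-gating event used throughout the hunks below — only the call signature is fixed by the call sites, the field layout and print format here are assumptions:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM ufs

	#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_UFS_H

	#include <linux/tracepoint.h>

	/* Sketch: records the device name and the new clock-gating state. */
	TRACE_EVENT(ufshcd_clk_gating,
		TP_PROTO(const char *dev_name, int state),
		TP_ARGS(dev_name, state),
		TP_STRUCT__entry(
			__string(dev_name, dev_name)
			__field(int, state)
		),
		TP_fast_assign(
			__assign_str(dev_name, dev_name);
			__entry->state = state;
		),
		TP_printk("%s: gating state changed to %d",
			__get_str(dev_name), __entry->state)
	);

	#endif /* _TRACE_UFS_H */

	/* This part must be outside the include-guard protection. */
	#include <trace/define_trace.h>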
@@ -801,6 +804,8 @@ start:
 	case REQ_CLKS_OFF:
 		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
 			break;
 		}
 		/*
@@ -811,6 +816,8 @@ start:
 	case CLKS_OFF:
 		scsi_block_requests(hba->host);
 		hba->clk_gating.state = REQ_CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
 		schedule_work(&hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
@@ -855,6 +862,8 @@ static void ufshcd_gate_work(struct work_struct *work)
 	if (hba->clk_gating.is_suspended ||
 		(hba->clk_gating.state == REQ_CLKS_ON)) {
 		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
 		goto rel_lock;
 	}
 
@@ -870,6 +879,8 @@ static void ufshcd_gate_work(struct work_struct *work)
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		if (ufshcd_uic_hibern8_enter(hba)) {
 			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
 			goto out;
 		}
 		ufshcd_set_link_hibern8(hba);
@@ -893,9 +904,11 @@ static void ufshcd_gate_work(struct work_struct *work)
 	 * new requests arriving before the current cancel work is done.
 	 */
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == REQ_CLKS_OFF)
+	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
-
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+	}
 rel_lock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
@@ -918,6 +931,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
 		return;
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
+	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 	schedule_delayed_work(&hba->clk_gating.gate_work,
 			msecs_to_jiffies(hba->clk_gating.delay_ms));
 }
@@ -3932,6 +3946,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
 	}
 
 	hba->auto_bkops_enabled = true;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
 
 	/* No need of URGENT_BKOPS exception from the device */
 	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3982,6 +3997,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
 	}
 
 	hba->auto_bkops_enabled = false;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
 out:
 	return err;
 }
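Unlike the gating event, the two auto-BKOPS call sites above pass a human-readable string ("Enabled"/"Disabled") rather than an integer state. A sketch of a matching event definition, under the same assumptions as the earlier sketch:

	TRACE_EVENT(ufshcd_auto_bkops_state,
		TP_PROTO(const char *dev_name, const char *state),
		TP_ARGS(dev_name, state),
		TP_STRUCT__entry(
			__string(dev_name, dev_name)
			__string(state, state)
		),
		TP_fast_assign(
			__assign_str(dev_name, dev_name);
			__assign_str(state, state);
		),
		TP_printk("%s: auto bkops - %s",
			__get_str(dev_name), __get_str(state))
	);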
@@ -5377,6 +5393,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 static int ufshcd_probe_hba(struct ufs_hba *hba)
 {
 	int ret;
+	ktime_t start = ktime_get();
 
 	ret = ufshcd_link_startup(hba);
 	if (ret)
@@ -5468,6 +5485,9 @@ out:
 		ufshcd_hba_exit(hba);
 	}
 
+	trace_ufshcd_init(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->uic_link_state, hba->curr_dev_pwr_mode);
 	return ret;
 }
 
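trace_ufshcd_init() and the four PM tracepoints added below all emit the same tuple: device name, return code, elapsed microseconds, link state, and device power mode. In a trace header that shape is naturally written once as an event class plus one DEFINE_EVENT per event name. A sketch — the class name is illustrative, and the argument order follows the call sites in this patch:

	DECLARE_EVENT_CLASS(ufshcd_profiling_template,
		TP_PROTO(const char *dev_name, int err, s64 usecs,
			 int link_state, int pwr_mode),
		TP_ARGS(dev_name, err, usecs, link_state, pwr_mode),
		TP_STRUCT__entry(
			__string(dev_name, dev_name)
			__field(int, err)
			__field(s64, usecs)
			__field(int, link_state)
			__field(int, pwr_mode)
		),
		TP_fast_assign(
			__assign_str(dev_name, dev_name);
			__entry->err = err;
			__entry->usecs = usecs;
			__entry->link_state = link_state;
			__entry->pwr_mode = pwr_mode;
		),
		TP_printk("%s: took %lld usecs, err %d, link %d, power mode %d",
			__get_str(dev_name), __entry->usecs, __entry->err,
			__entry->link_state, __entry->pwr_mode)
	);

	DEFINE_EVENT(ufshcd_profiling_template, ufshcd_init,
		TP_PROTO(const char *dev_name, int err, s64 usecs,
			 int link_state, int pwr_mode),
		TP_ARGS(dev_name, err, usecs, link_state, pwr_mode));

	/* ...and likewise for ufshcd_system_suspend, ufshcd_system_resume,
	 * ufshcd_runtime_suspend and ufshcd_runtime_resume. */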
@@ -5817,11 +5837,14 @@ out:
 			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
 				clk_disable_unprepare(clki->clk);
 		}
-	} else if (on) {
+	} else if (!ret && on) {
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
 	}
+
 	return ret;
 }
 
@@ -6304,6 +6327,7 @@ disable_clks:
 	__ufshcd_setup_clocks(hba, false, true);
 
 	hba->clk_gating.state = CLKS_OFF;
+	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 	/*
 	 * Disable the host irq as host controller as there won't be any
 	 * host controller transaction expected till resume.
@@ -6436,6 +6460,7 @@ out:
 int ufshcd_system_suspend(struct ufs_hba *hba)
 {
 	int ret = 0;
+	ktime_t start = ktime_get();
 
 	if (!hba || !hba->is_powered)
 		return 0;
@@ -6462,6 +6487,9 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
 
 	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
 out:
+	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->uic_link_state, hba->curr_dev_pwr_mode);
 	if (!ret)
 		hba->is_sys_suspended = true;
 	return ret;
@@ -6477,6 +6505,9 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
 
 int ufshcd_system_resume(struct ufs_hba *hba)
 {
+	int ret = 0;
+	ktime_t start = ktime_get();
+
 	if (!hba)
 		return -EINVAL;
 
@@ -6485,9 +6516,14 @@ int ufshcd_system_resume(struct ufs_hba *hba)
 		 * Let the runtime resume take care of resuming
 		 * if runtime suspended.
 		 */
-		return 0;
-
-	return ufshcd_resume(hba, UFS_SYSTEM_PM);
+		goto out;
+	else
+		ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->uic_link_state, hba->curr_dev_pwr_mode);
+	return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
 
@@ -6501,13 +6537,21 @@ EXPORT_SYMBOL(ufshcd_system_resume);
 */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
+	int ret = 0;
+	ktime_t start = ktime_get();
+
 	if (!hba)
 		return -EINVAL;
 
 	if (!hba->is_powered)
-		return 0;
-
-	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
+		goto out;
+	else
+		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->uic_link_state, hba->curr_dev_pwr_mode);
+	return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
@@ -6534,13 +6578,21 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
 */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
+	int ret = 0;
+	ktime_t start = ktime_get();
+
 	if (!hba)
 		return -EINVAL;
 
 	if (!hba->is_powered)
-		return 0;
-
-	return ufshcd_resume(hba, UFS_RUNTIME_PM);
+		goto out;
+	else
+		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->uic_link_state, hba->curr_dev_pwr_mode);
+	return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -6684,6 +6736,11 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 					clki->max_freq, ret);
 				break;
 			}
+			trace_ufshcd_clk_scaling(dev_name(hba->dev),
+					"scaled up", clki->name,
+					clki->curr_freq,
+					clki->max_freq);
+
 			clki->curr_freq = clki->max_freq;
 
 		} else if (!scale_up && clki->min_freq) {
@@ -6696,6 +6753,10 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 					clki->min_freq, ret);
 				break;
 			}
+			trace_ufshcd_clk_scaling(dev_name(hba->dev),
+					"scaled down", clki->name,
+					clki->curr_freq,
+					clki->min_freq);
 			clki->curr_freq = clki->min_freq;
 		}
 	}
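Finally, the clock-scaling call sites pass a direction string, the clock name, and the old and new frequencies (clki->curr_freq is read before it is updated, so it carries the previous value). The matching event could look as follows — field names here are illustrative:

	TRACE_EVENT(ufshcd_clk_scaling,
		TP_PROTO(const char *dev_name, const char *state, const char *clk,
			 u32 prev_freq, u32 curr_freq),
		TP_ARGS(dev_name, state, clk, prev_freq, curr_freq),
		TP_STRUCT__entry(
			__string(dev_name, dev_name)
			__string(state, state)
			__string(clk, clk)
			__field(u32, prev_freq)
			__field(u32, curr_freq)
		),
		TP_fast_assign(
			__assign_str(dev_name, dev_name);
			__assign_str(state, state);
			__assign_str(clk, clk);
			__entry->prev_freq = prev_freq;
			__entry->curr_freq = curr_freq;
		),
		TP_printk("%s: %s %s from %u to %u Hz",
			__get_str(dev_name), __get_str(state), __get_str(clk),
			__entry->prev_freq, __entry->curr_freq)
	);

Assuming the header sets TRACE_SYSTEM to ufs, all of these events then appear under events/ufs/ in tracefs and can be enabled individually at runtime, without rebuilding the driver.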