@@ -41,7 +41,11 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 	if (IS_ERR(opp))
 		return PTR_ERR(opp);
 
-	clk_set_rate(gpu->core_clk, *freq);
+	if (gpu->funcs->gpu_set_freq)
+		gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
+	else
+		clk_set_rate(gpu->core_clk, *freq);
+
 	dev_pm_opp_put(opp);
 
 	return 0;
@@ -51,16 +55,14 @@ static int msm_devfreq_get_dev_status(struct device *dev,
 		struct devfreq_dev_status *status)
 {
 	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
-	u64 cycles;
-	u32 freq = ((u32) status->current_frequency) / 1000000;
 	ktime_t time;
 
-	status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
-	gpu->funcs->gpu_busy(gpu, &cycles);
-
-	status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
+	if (gpu->funcs->gpu_get_freq)
+		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
+	else
+		status->current_frequency = clk_get_rate(gpu->core_clk);
 
-	gpu->devfreq.busy_cycles = cycles;
+	status->busy_time = gpu->funcs->gpu_busy(gpu);
 
 	time = ktime_get();
 	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
@@ -73,7 +75,10 @@ static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
 {
 	struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
 
-	*freq = (unsigned long) clk_get_rate(gpu->core_clk);
+	if (gpu->funcs->gpu_get_freq)
+		*freq = gpu->funcs->gpu_get_freq(gpu);
+	else
+		*freq = clk_get_rate(gpu->core_clk);
 
 	return 0;
 }
@@ -88,7 +93,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
 static void msm_devfreq_init(struct msm_gpu *gpu)
 {
 	/* We need target support to do devfreq */
-	if (!gpu->funcs->gpu_busy || !gpu->core_clk)
+	if (!gpu->funcs->gpu_busy)
 		return;
 
 	msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -186,6 +191,14 @@ static int disable_axi(struct msm_gpu *gpu)
 	return 0;
 }
 
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
+{
+	gpu->devfreq.busy_cycles = 0;
+	gpu->devfreq.time = ktime_get();
+
+	devfreq_resume_device(gpu->devfreq.devfreq);
+}
+
 int msm_gpu_pm_resume(struct msm_gpu *gpu)
 {
 	int ret;
@@ -204,12 +217,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
-	if (gpu->devfreq.devfreq) {
-		gpu->devfreq.busy_cycles = 0;
-		gpu->devfreq.time = ktime_get();
-
-		devfreq_resume_device(gpu->devfreq.devfreq);
-	}
+	msm_gpu_resume_devfreq(gpu);
 
 	gpu->needs_hw_init = true;
 
@@ -222,8 +230,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	if (gpu->devfreq.devfreq)
-		devfreq_suspend_device(gpu->devfreq.devfreq);
+	devfreq_suspend_device(gpu->devfreq.devfreq);
 
 	ret = disable_axi(gpu);
 	if (ret)
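
Note (not part of the patch): a minimal sketch of how a target could populate the
new hooks, for readers following the devfreq callbacks above. The my_* names and
the placeholder bookkeeping are made up for illustration; only the hook names
(gpu_busy, gpu_get_freq, gpu_set_freq) and the signatures implied by their call
sites in the diff are taken from the patch. When the hooks are set, the devfreq
callbacks use them instead of clk_set_rate()/clk_get_rate() on core_clk.

	/*
	 * Hypothetical backend sketch, assuming the hooks live in
	 * struct msm_gpu_funcs as suggested by the gpu->funcs-> call sites.
	 */
	static u64 my_cur_freq;		/* placeholder for real hardware state */

	static void my_gpu_set_freq(struct msm_gpu *gpu, u64 freq)
	{
		/* program the requested rate in hardware/firmware */
		my_cur_freq = freq;
	}

	static unsigned long my_gpu_get_freq(struct msm_gpu *gpu)
	{
		/* report the rate the GPU is actually running at */
		return (unsigned long)my_cur_freq;
	}

	static unsigned long my_gpu_busy(struct msm_gpu *gpu)
	{
		/* busy time since the previous call, in the same units as
		 * status->total_time in the callback above (microseconds) */
		return 0;
	}

	static const struct msm_gpu_funcs my_funcs = {
		/* ...other required hooks elided... */
		.gpu_busy = my_gpu_busy,
		.gpu_get_freq = my_gpu_get_freq,
		.gpu_set_freq = my_gpu_set_freq,
	};

Since msm_gpu_resume_devfreq() is non-static in the hunk above, a target that
handles its own power sequencing could presumably call it directly after
power-up rather than relying on msm_gpu_pm_resume(); that usage is an
inference, not something shown in this diff.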