@@ -39,6 +39,7 @@
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
+#include <linux/iopoll.h>
 #include <linux/ktime.h>
 #include <linux/rwsem.h>
 #include <linux/wait.h>
@@ -49,7 +50,7 @@ struct cppc_pcc_data {
 	struct mbox_chan *pcc_channel;
 	void __iomem *pcc_comm_addr;
 	bool pcc_channel_acquired;
-	ktime_t deadline;
+	unsigned int deadline_us;
 	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
 
 	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
@@ -156,6 +157,9 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
 show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
+show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
+show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
+
 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 
@@ -183,6 +187,8 @@ static struct attribute *cppc_attrs[] = {
 	&lowest_perf.attr,
 	&lowest_nonlinear_perf.attr,
 	&nominal_perf.attr,
+	&nominal_freq.attr,
+	&lowest_freq.attr,
 	NULL
 };
 
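With the attribute additions above, the two new capability values surface as per-CPU sysfs files alongside the existing perf attributes, e.g. /sys/devices/system/cpu/cpu0/acpi_cppc/lowest_freq and .../nominal_freq. The values are in MHz, since that is how _CPC encodes frequencies.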
@@ -193,42 +199,31 @@ static struct kobj_type cppc_ktype = {
 
 static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 {
-	int ret = -EIO, status = 0;
+	int ret, status;
 	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
 		pcc_ss_data->pcc_comm_addr;
-	ktime_t next_deadline = ktime_add(ktime_get(),
-					  pcc_ss_data->deadline);
 
 	if (!pcc_ss_data->platform_owns_pcc)
 		return 0;
 
-	/* Retry in case the remote processor was too slow to catch up. */
-	while (!ktime_after(ktime_get(), next_deadline)) {
-		/*
-		 * Per spec, prior to boot the PCC space wil be initialized by
-		 * platform and should have set the command completion bit when
-		 * PCC can be used by OSPM
-		 */
-		status = readw_relaxed(&generic_comm_base->status);
-		if (status & PCC_CMD_COMPLETE_MASK) {
-			ret = 0;
-			if (chk_err_bit && (status & PCC_ERROR_MASK))
-				ret = -EIO;
-			break;
-		}
-		/*
-		 * Reducing the bus traffic in case this loop takes longer than
-		 * a few retries.
-		 */
-		udelay(3);
-	}
+	/*
+	 * Poll PCC status register every 3us(delay_us) for maximum of
+	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
+	 */
+	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
+					status & PCC_CMD_COMPLETE_MASK, 3,
+					pcc_ss_data->deadline_us);
 
-	if (likely(!ret))
+	if (likely(!ret)) {
 		pcc_ss_data->platform_owns_pcc = false;
-	else
-		pr_err("PCC check channel failed for ss: %d. Status=%x\n",
-		       pcc_ss_id, status);
+		if (chk_err_bit && (status & PCC_ERROR_MASK))
+			ret = -EIO;
+	}
+
+	if (unlikely(ret))
+		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
+			pcc_ss_id, ret);
 
 	return ret;
 }
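For readers unfamiliar with linux/iopoll.h (newly included above): readw_relaxed_poll_timeout() implements the same retry loop the removed code did by hand. A simplified, illustrative expansion of the call above — the real macro additionally re-reads the register once more after the deadline and uses a usleep_range() based back-off:

	/*
	 * Annotation, not part of the patch: a rough equivalent of
	 * readw_relaxed_poll_timeout(&base->status, status,
	 *				status & PCC_CMD_COMPLETE_MASK, 3,
	 *				pcc_ss_data->deadline_us);
	 */
	static int check_pcc_chan_sketch(struct cppc_pcc_data *pcc_ss_data)
	{
		struct acpi_pcct_shared_memory __iomem *base =
			pcc_ss_data->pcc_comm_addr;
		ktime_t timeout = ktime_add_us(ktime_get(),
					       pcc_ss_data->deadline_us);
		u16 status;

		for (;;) {
			status = readw_relaxed(&base->status);	/* val = op(addr) */
			if (status & PCC_CMD_COMPLETE_MASK)	/* cond */
				return 0;
			if (ktime_compare(ktime_get(), timeout) > 0)
				return -ETIMEDOUT;
			usleep_range(1, 3);			/* delay_us == 3 */
		}
	}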
@@ -580,7 +575,7 @@ static int register_pcc_channel(int pcc_ss_idx)
 		 * So add an arbitrary amount of wait on top of Nominal.
 		 */
 		usecs_lat = NUM_RETRIES * cppc_ss->latency;
-		pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
 		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
 		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
 		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
@@ -613,7 +608,6 @@ bool __weak cpc_ffh_supported(void)
 	return false;
 }
 
-
 /**
  * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
  *
@@ -641,6 +635,34 @@ int pcc_data_alloc(int pcc_ss_id)
 
 	return 0;
 }
+
+/* Check if CPPC revision + num_ent combination is supported */
+static bool is_cppc_supported(int revision, int num_ent)
+{
+	int expected_num_ent;
+
+	switch (revision) {
+	case CPPC_V2_REV:
+		expected_num_ent = CPPC_V2_NUM_ENT;
+		break;
+	case CPPC_V3_REV:
+		expected_num_ent = CPPC_V3_NUM_ENT;
+		break;
+	default:
+		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
+			revision);
+		return false;
+	}
+
+	if (expected_num_ent != num_ent) {
+		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
+			num_ent, expected_num_ent, revision);
+		return false;
+	}
+
+	return true;
+}
+
 /*
  * An example CPC table looks like the following.
  *
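is_cppc_supported() relies on CPPC_V2_REV, CPPC_V3_REV, CPPC_V2_NUM_ENT and CPPC_V3_NUM_ENT, which come from the companion cppc_acpi.h hunk of this patch (not shown here). Per the ACPI spec, a v2 _CPC package carries 21 entries and v3 appends the LowestFrequency and NominalFrequency entries for 23, i.e. the header change is along these lines:

	/* From the matching include/acpi/cppc_acpi.h update (not shown above) */
	#define CPPC_V2_REV	2
	#define CPPC_V3_REV	3
	#define CPPC_V2_NUM_ENT	21
	#define CPPC_V3_NUM_ENT	23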
@@ -731,14 +753,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 			cpc_obj->type);
 		goto out_free;
 	}
-
-	/* Only support CPPCv2. Bail otherwise. */
-	if (num_ent != CPPC_NUM_ENT) {
-		pr_debug("Firmware exports %d entries. Expected: %d\n",
-				num_ent, CPPC_NUM_ENT);
-		goto out_free;
-	}
-
 	cpc_ptr->num_entries = num_ent;
 
 	/* Second entry should be revision. */
@@ -750,12 +764,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 			cpc_obj->type);
 		goto out_free;
 	}
+	cpc_ptr->version = cpc_rev;
 
-	if (cpc_rev != CPPC_REV) {
-		pr_debug("Firmware exports revision:%d. Expected:%d\n",
-				cpc_rev, CPPC_REV);
+	if (!is_cppc_supported(cpc_rev, num_ent))
 		goto out_free;
-	}
 
 	/* Iterate through remaining entries in _CPC */
 	for (i = 2; i < num_ent; i++) {
@@ -808,6 +820,18 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 		}
 	}
 	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
+
+	/*
+	 * Initialize the remaining cpc_regs as unsupported.
+	 * Example: In case FW exposes CPPC v2, the below loop will initialize
+	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
+	 */
+	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
+		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
+		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
+	}
+
+
 	/* Store CPU Logical ID */
 	cpc_ptr->cpu_id = pr->id;
 
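Note the indexing: cpc_regs[] skips the first two _CPC entries (NumEntries and Revision), so num_ent - 2 is the first register slot that firmware did not populate. Marking those slots as integer 0 makes the file's pre-existing CPC_SUPPORTED() macro report them as unsupported; quoted below from earlier in cppc_acpi.c for context:

	/* Evaluates to True if an optional cpc field is supported */
	#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?	\
					!!(cpc)->cpc_entry.int_value :	\
					!IS_NULL_REG(&(cpc)->cpc_entry.reg))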
@@ -1037,26 +1061,34 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 {
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
 	struct cpc_register_resource *highest_reg, *lowest_reg,
-		*lowest_non_linear_reg, *nominal_reg;
-	u64 high, low, nom, min_nonlinear;
+		*lowest_non_linear_reg, *nominal_reg,
+		*low_freq_reg = NULL, *nom_freq_reg = NULL;
+	u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0;
 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
-	struct cppc_pcc_data *pcc_ss_data;
+	struct cppc_pcc_data *pcc_ss_data = NULL;
 	int ret = 0, regs_in_pcc = 0;
 
-	if (!cpc_desc || pcc_ss_id < 0) {
+	if (!cpc_desc) {
 		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
 		return -ENODEV;
 	}
 
-	pcc_ss_data = pcc_data[pcc_ss_id];
 	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
 	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
 	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
 	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
+	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
+	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
 
 	/* Are any of the regs PCC ?*/
 	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
-		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
+		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
+		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
+		if (pcc_ss_id < 0) {
+			pr_debug("Invalid pcc_ss_id\n");
+			return -ENODEV;
+		}
+		pcc_ss_data = pcc_data[pcc_ss_id];
 		regs_in_pcc = 1;
 		down_write(&pcc_ss_data->pcc_lock);
 		/* Ring doorbell once to update PCC subspace */
@@ -1081,6 +1113,17 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	if (!high || !low || !nom || !min_nonlinear)
 		ret = -EFAULT;
 
+	/* Read optional lowest and nominal frequencies if present */
+	if (CPC_SUPPORTED(low_freq_reg))
+		cpc_read(cpunum, low_freq_reg, &low_f);
+
+	if (CPC_SUPPORTED(nom_freq_reg))
+		cpc_read(cpunum, nom_freq_reg, &nom_f);
+
+	perf_caps->lowest_freq = low_f;
+	perf_caps->nominal_freq = nom_f;
+
+
 out_err:
 	if (regs_in_pcc)
 		up_write(&pcc_ss_data->pcc_lock);
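One intended consumer of the new lowest_freq/nominal_freq fields (reported in MHz) is a cpufreq driver that must translate abstract CPPC performance levels into frequencies. A hypothetical sketch, interpolating linearly between the two (perf, freq) anchor points — the helper name and placement are illustrative, not part of this patch:

	/*
	 * Illustrative only: map a CPPC perf level to kHz using the two
	 * anchor points returned by cppc_get_perf_caps(). Assumes
	 * lowest_perf <= perf <= nominal_perf; needs linux/math64.h.
	 */
	static u64 cppc_perf_to_khz_sketch(struct cppc_perf_caps *caps, u64 perf)
	{
		u64 mul;

		if (!caps->lowest_freq || !caps->nominal_freq)
			return perf;	/* no freq data: treat perf as freq */

		mul = (caps->nominal_freq - caps->lowest_freq) * 1000ULL;
		return (u64)caps->lowest_freq * 1000ULL +
			div64_u64(mul * (perf - caps->lowest_perf),
				  caps->nominal_perf - caps->lowest_perf);
	}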
@@ -1101,16 +1144,15 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	struct cpc_register_resource *delivered_reg, *reference_reg,
 		*ref_perf_reg, *ctr_wrap_reg;
 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
-	struct cppc_pcc_data *pcc_ss_data;
+	struct cppc_pcc_data *pcc_ss_data = NULL;
 	u64 delivered, reference, ref_perf, ctr_wrap_time;
 	int ret = 0, regs_in_pcc = 0;
 
-	if (!cpc_desc || pcc_ss_id < 0) {
+	if (!cpc_desc) {
 		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
 		return -ENODEV;
 	}
 
-	pcc_ss_data = pcc_data[pcc_ss_id];
 	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
 	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
 	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
@@ -1126,6 +1168,11 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	/* Are any of the regs PCC ?*/
 	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
 		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
+		if (pcc_ss_id < 0) {
+			pr_debug("Invalid pcc_ss_id\n");
+			return -ENODEV;
+		}
+		pcc_ss_data = pcc_data[pcc_ss_id];
 		down_write(&pcc_ss_data->pcc_lock);
 		regs_in_pcc = 1;
 		/* Ring doorbell once to update PCC subspace */
@@ -1176,15 +1223,14 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
 	struct cpc_register_resource *desired_reg;
 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
-	struct cppc_pcc_data *pcc_ss_data;
+	struct cppc_pcc_data *pcc_ss_data = NULL;
 	int ret = 0;
 
-	if (!cpc_desc || pcc_ss_id < 0) {
+	if (!cpc_desc) {
 		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
 		return -ENODEV;
 	}
 
-	pcc_ss_data = pcc_data[pcc_ss_id];
 	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
 
 	/*
@@ -1195,6 +1241,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	 * achieve that goal here
 	 */
 	if (CPC_IN_PCC(desired_reg)) {
+		if (pcc_ss_id < 0) {
+			pr_debug("Invalid pcc_ss_id\n");
+			return -ENODEV;
+		}
+		pcc_ss_data = pcc_data[pcc_ss_id];
 		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
 		if (pcc_ss_data->platform_owns_pcc) {
 			ret = check_pcc_chan(pcc_ss_id, false);