author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2023-04-14 18:18:07 +0300
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2023-04-14 18:18:07 +0300
commit	684952212ca639bce124e673f9f644a09895359c (patch)
tree	d69b9291f10610ad4c71393ed71fabed91f07f70 /drivers/acpi
parent	4654e9f9f43993eb9ce383fa7c88d14b052b8cc3 (diff)
parent	11fa52fe619acfa945712d94a0bc27c0f5bc49de (diff)
download	linux-684952212ca639bce124e673f9f644a09895359c.tar.xz
Merge back cpufreq changes for 6.4-rc1.
Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/cppc_acpi.c	118
1 file changed, 111 insertions(+), 7 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index c51d3ccb4cca..7ff269a78c20 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1434,6 +1434,102 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
/**
+ * cppc_get_auto_sel_caps - Read autonomous selection register.
+ * @cpunum : CPU from which to read register.
+ * @perf_caps : struct where autonomous selection register value is updated.
+ */
+int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ struct cpc_register_resource *auto_sel_reg;
+ u64 auto_sel;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ return -ENODEV;
+ }
+
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+
+ if (!CPC_SUPPORTED(auto_sel_reg))
+ pr_warn_once("Autonomous mode is not supported!\n");
+
+ if (CPC_IN_PCC(auto_sel_reg)) {
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret = 0;
+
+ if (pcc_ss_id < 0)
+ return -ENODEV;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
+ cpc_read(cpunum, auto_sel_reg, &auto_sel);
+ perf_caps->auto_sel = (bool)auto_sel;
+ } else {
+ ret = -EIO;
+ }
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
+
+/**
+ * cppc_set_auto_sel - Write autonomous selection register.
+ * @cpu : CPU to which to write register.
+ * @enable : the desired value of the autonomous selection register to be updated.
+ */
+int cppc_set_auto_sel(int cpu, bool enable)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_register_resource *auto_sel_reg;
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret = -EINVAL;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+
+ if (CPC_IN_PCC(auto_sel_reg)) {
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+
+ if (CPC_SUPPORTED(auto_sel_reg)) {
+ ret = cpc_write(cpu, auto_sel_reg, enable);
+ if (ret)
+ return ret;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* After writing CPC, transfer the ownership of PCC to the platform. */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+ } else {
+ ret = -ENOTSUPP;
+ pr_debug("_CPC in PCC is not supported\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
+
+/**
* cppc_set_enable - Set to enable CPPC on the processor by writing the
* Continuous Performance Control package EnableRegister field.
* @cpu: CPU for which to enable CPPC register.
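A minimal usage sketch of the two helpers added in the hunk above (not part of this commit): a hypothetical driver routine, example_enable_auto_sel(), reads the current autonomous-selection state for a CPU and enables it only if the platform does not already report it as enabled. The calling context and error handling are assumptions.

#include <acpi/cppc_acpi.h>

/* Hypothetical example, not from this commit: enable autonomous selection
 * for @cpu via the new CPPC helpers.
 */
static int example_enable_auto_sel(int cpu)
{
	struct cppc_perf_caps caps = {};
	int ret;

	/* Reads AUTO_SEL_ENABLE (possibly over PCC) into caps.auto_sel. */
	ret = cppc_get_auto_sel_caps(cpu, &caps);
	if (ret)
		return ret;

	if (caps.auto_sel)
		return 0;

	/* Writes AUTO_SEL_ENABLE and hands PCC ownership to the platform. */
	return cppc_set_auto_sel(cpu, true);
}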
@@ -1488,7 +1584,7 @@ EXPORT_SYMBOL_GPL(cppc_set_enable);
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cpc_register_resource *desired_reg;
+ struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0;
@@ -1499,6 +1595,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
}
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+ min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
+ max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
/*
* This is Phase-I where we want to write to CPC registers
@@ -1507,7 +1605,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* Since read_lock can be acquired by multiple CPUs simultaneously we
* achieve that goal here
*/
- if (CPC_IN_PCC(desired_reg)) {
+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
if (pcc_ss_id < 0) {
pr_debug("Invalid pcc_ss_id\n");
return -ENODEV;
@@ -1530,13 +1628,19 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
cpc_desc->write_cmd_status = 0;
}
+ cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
+
/*
- * Skip writing MIN/MAX until Linux knows how to come up with
- * useful values.
+ * Only write MIN_PERF/MAX_PERF if min_perf and max_perf are not zero. Some
+ * drivers pass zero for min_perf and max_perf, but they do not mean to set
+ * those registers to zero; they simply do not want to write them at all.
*/
- cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
+ if (perf_ctrls->min_perf)
+ cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
+ if (perf_ctrls->max_perf)
+ cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
- if (CPC_IN_PCC(desired_reg))
+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
/*
* This is Phase-II where we transfer the ownership of PCC to Platform
@@ -1584,7 +1688,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* case during a CMD_READ and if there are pending writes it delivers
* the write command before servicing the read command
*/
- if (CPC_IN_PCC(desired_reg)) {
+ if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
/* Update only if there are pending write commands */
if (pcc_ss_data->pending_pcc_write_cmd)
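With the MIN_PERF/MAX_PERF handling added above, a caller can now bound the performance range in the same cppc_set_perf() call that sets the desired level. A hedged sketch (not from this commit; example_set_perf_window() and its parameters are hypothetical) of how a driver might fill struct cppc_perf_ctrls, relying on the convention that a zero min_perf or max_perf means "do not write that register":

#include <acpi/cppc_acpi.h>

/* Hypothetical example, not from this commit: set the desired performance
 * and, optionally, the min/max performance limits in one cppc_set_perf()
 * call. Passing 0 for min or max skips writing the corresponding register.
 */
static int example_set_perf_window(int cpu, u32 desired, u32 min, u32 max)
{
	struct cppc_perf_ctrls ctrls = {
		.desired_perf = desired,
		.min_perf = min,
		.max_perf = max,
	};

	return cppc_set_perf(cpu, &ctrls);
}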