Merge branches 'acpi-cppc', 'acpi-misc', 'acpi-battery' and 'acpi-ac'

* acpi-cppc:
  mailbox: PCC: erroneous error message when parsing ACPI PCCT
  ACPI / CPPC: Fix invalid PCC channel status errors
  ACPI / CPPC: Document CPPC sysfs interface
  cpufreq / CPPC: Support for CPPC v3
  ACPI / CPPC: Check for valid PCC subspace only if PCC is used
  ACPI / CPPC: Add support for CPPC v3

* acpi-misc:
  ACPI: Add missing prototype_for arch_post_acpi_subsys_init()
  ACPI: add missing newline to printk

* acpi-battery:
  ACPI / battery: Add quirk to avoid checking for PMIC with native driver
  ACPI / battery: Ignore AC state in handle_discharging on systems where it is broken
  ACPI / battery: Add handling for devices which wrongly report discharging state
  ACPI / battery: Remove initializer for unused ident dmi_system_id
  ACPI / AC: Remove initializer for unused ident dmi_system_id

* acpi-ac:
  ACPI / AC: Add quirk to avoid checking for PMIC with native driver

commit 6c128e798f
Documentation/acpi/cppc_sysfs.txt

@@ -0,0 +1,69 @@

Collaborative Processor Performance Control (CPPC)

CPPC, defined in the ACPI spec, describes a mechanism for the OS to manage the
performance of a logical processor on a contiguous and abstract performance
scale. CPPC exposes a set of registers to describe that abstract performance
scale, to request performance levels and to measure per-cpu delivered
performance.

For more details on CPPC please refer to the ACPI specification at:

http://uefi.org/specifications

Some of the CPPC registers are exposed via sysfs under:

/sys/devices/system/cpu/cpuX/acpi_cppc/

for each cpu X

--------------------------------------------------------------------------------

$ ls -lR /sys/devices/system/cpu/cpu0/acpi_cppc/
/sys/devices/system/cpu/cpu0/acpi_cppc/:
total 0
-r--r--r-- 1 root root 65536 Mar 5 19:38 feedback_ctrs
-r--r--r-- 1 root root 65536 Mar 5 19:38 highest_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_freq
-r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_nonlinear_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_freq
-r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 reference_perf
-r--r--r-- 1 root root 65536 Mar 5 19:38 wraparound_time

--------------------------------------------------------------------------------

* highest_perf : Highest performance of this processor (abstract scale).
* nominal_perf : Highest sustained performance of this processor (abstract scale).
* lowest_nonlinear_perf : Lowest performance of this processor with nonlinear
  power savings (abstract scale).
* lowest_perf : Lowest performance of this processor (abstract scale).

* lowest_freq : CPU frequency corresponding to lowest_perf (in MHz).
* nominal_freq : CPU frequency corresponding to nominal_perf (in MHz).
  The above frequencies should only be used to report processor performance in
  frequency instead of on the abstract scale. These values should not be used
  for any functional decisions.

* feedback_ctrs : Includes both the reference and delivered performance counters.
  The reference counter ticks up in proportion to the processor's reference
  performance. The delivered counter ticks up in proportion to the processor's
  delivered performance.
* wraparound_time : Minimum time for the feedback counters to wrap around (seconds).
* reference_perf : Performance level at which the reference performance counter
  accumulates (abstract scale).
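
As a quick illustration, here is a minimal userspace sketch (not part of this
patch) that dumps the capability attributes above for cpu0. It assumes each
attribute prints a single decimal value and that cpu0 exposes the acpi_cppc
directory shown earlier; error handling is kept deliberately short.

#include <stdio.h>

static long read_attr(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	/* Build the per-attribute path under the documented sysfs directory. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/acpi_cppc/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("highest_perf          = %ld\n", read_attr("highest_perf"));
	printf("nominal_perf          = %ld\n", read_attr("nominal_perf"));
	printf("lowest_nonlinear_perf = %ld\n", read_attr("lowest_nonlinear_perf"));
	printf("lowest_perf           = %ld\n", read_attr("lowest_perf"));
	printf("lowest_freq (MHz)     = %ld\n", read_attr("lowest_freq"));
	printf("nominal_freq (MHz)    = %ld\n", read_attr("nominal_freq"));
	return 0;
}

Reading the files with cat gives the same values; the program only gathers them
in one place.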
--------------------------------------------------------------------------------

Computing Average Delivered Performance

The following describes the steps to compute the average performance delivered
between two snapshots of the feedback counters taken at times T1 and T2:

T1: Read feedback_ctrs as fbc_t1
    Wait or run some workload
T2: Read feedback_ctrs as fbc_t2

delivered_counter_delta = fbc_t2[del] - fbc_t1[del]
reference_counter_delta = fbc_t2[ref] - fbc_t1[ref]

delivered_perf = (reference_perf x delivered_counter_delta) / reference_counter_delta
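A rough userspace sketch of this T1/T2 procedure follows (again, not part of
this patch). It assumes feedback_ctrs prints the two counters as
"ref:<value> del:<value>" and that reference_perf prints a single decimal
number; adjust the parsing if the files are formatted differently on your
kernel. It also ignores counter wraparound, so the interval must stay well
below wraparound_time.

#include <stdio.h>
#include <unistd.h>

#define CPPC_DIR "/sys/devices/system/cpu/cpu0/acpi_cppc/"

/* Read one feedback_ctrs snapshot; returns 0 on success. */
static int read_fb_ctrs(unsigned long long *ref, unsigned long long *del)
{
	FILE *f = fopen(CPPC_DIR "feedback_ctrs", "r");
	int n;

	if (!f)
		return -1;
	n = fscanf(f, "ref:%llu del:%llu", ref, del);	/* assumed format */
	fclose(f);
	return (n == 2) ? 0 : -1;
}

int main(void)
{
	unsigned long long ref1, del1, ref2, del2, reference_perf;
	FILE *f = fopen(CPPC_DIR "reference_perf", "r");

	if (!f || fscanf(f, "%llu", &reference_perf) != 1)
		return 1;
	fclose(f);

	if (read_fb_ctrs(&ref1, &del1))		/* snapshot at T1 */
		return 1;
	sleep(1);				/* wait or run some workload */
	if (read_fb_ctrs(&ref2, &del2))		/* snapshot at T2 */
		return 1;
	if (ref2 == ref1)			/* avoid dividing by zero */
		return 1;

	/* delivered_perf = (reference_perf x delivered delta) / reference delta */
	printf("average delivered perf: %llu\n",
	       reference_perf * (del2 - del1) / (ref2 - ref1));
	return 0;
}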
drivers/acpi/ac.c

@@ -87,6 +87,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);

static int ac_sleep_before_get_state_ms;
static int ac_check_pmic = 1;

static struct acpi_driver acpi_ac_driver = {
	.name = "ac",

@@ -310,21 +311,43 @@ static int acpi_ac_battery_notify(struct notifier_block *nb,
	return NOTIFY_OK;
}

static int thinkpad_e530_quirk(const struct dmi_system_id *d)
static int __init thinkpad_e530_quirk(const struct dmi_system_id *d)
{
	ac_sleep_before_get_state_ms = 1000;
	return 0;
}

static const struct dmi_system_id ac_dmi_table[] = {
static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d)
{
	ac_check_pmic = 0;
	return 0;
}

static const struct dmi_system_id ac_dmi_table[] __initconst = {
	{
		/* Thinkpad e530 */
		.callback = thinkpad_e530_quirk,
		.ident = "thinkpad e530",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
		},
	},
	{
		/* ECS EF20EA */
		.callback = ac_do_not_check_pmic_quirk,
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
		},
	},
	{
		/* Lenovo Ideapad Miix 320 */
		.callback = ac_do_not_check_pmic_quirk,
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
		},
	},
	{},
};

@@ -384,7 +407,6 @@ static int acpi_ac_add(struct acpi_device *device)
		kfree(ac);
	}

	dmi_check_system(ac_dmi_table);
	return result;
}

@@ -442,13 +464,17 @@ static int __init acpi_ac_init(void)
	if (acpi_disabled)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
		if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
				     acpi_ac_blacklist[i].hrv)) {
			pr_info(PREFIX "AC: found native %s PMIC, not loading\n",
				acpi_ac_blacklist[i].hid);
			return -ENODEV;
		}
	dmi_check_system(ac_dmi_table);

	if (ac_check_pmic) {
		for (i = 0; i < ARRAY_SIZE(acpi_ac_blacklist); i++)
			if (acpi_dev_present(acpi_ac_blacklist[i].hid, "1",
					     acpi_ac_blacklist[i].hrv)) {
				pr_info(PREFIX "AC: found native %s PMIC, not loading\n",
					acpi_ac_blacklist[i].hid);
				return -ENODEV;
			}
	}

#ifdef CONFIG_ACPI_PROCFS_POWER
	acpi_ac_dir = acpi_lock_ac_dir();
drivers/acpi/battery.c

@@ -74,6 +74,8 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
static int battery_ac_is_broken;
static int battery_check_pmic = 1;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");

@@ -215,6 +217,20 @@ static bool acpi_battery_is_degraded(struct acpi_battery *battery)
		battery->full_charge_capacity < battery->design_capacity;
}

static int acpi_battery_handle_discharging(struct acpi_battery *battery)
{
	/*
	 * Some devices wrongly report discharging if the battery's charge level
	 * was above the device's start charging threshold atm the AC adapter
	 * was plugged in and the device thus did not start a new charge cycle.
	 */
	if ((battery_ac_is_broken || power_supply_is_system_supplied()) &&
	    battery->rate_now == 0)
		return POWER_SUPPLY_STATUS_NOT_CHARGING;

	return POWER_SUPPLY_STATUS_DISCHARGING;
}

static int acpi_battery_get_property(struct power_supply *psy,
				     enum power_supply_property psp,
				     union power_supply_propval *val)

@@ -230,7 +246,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
			val->intval = acpi_battery_handle_discharging(battery);
		else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else if (acpi_battery_is_charged(battery))

@@ -1332,23 +1348,64 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
	return 0;
}

static int __init
battery_ac_is_broken_quirk(const struct dmi_system_id *d)
{
	battery_ac_is_broken = 1;
	return 0;
}

static int __init
battery_do_not_check_pmic_quirk(const struct dmi_system_id *d)
{
	battery_check_pmic = 0;
	return 0;
}

static const struct dmi_system_id bat_dmi_table[] __initconst = {
	{
		/* NEC LZ750/LS */
		.callback = battery_bix_broken_package_quirk,
		.ident = "NEC LZ750/LS",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
		},
	},
	{
		/* Acer Aspire V5-573G */
		.callback = battery_notification_delay_quirk,
		.ident = "Acer Aspire V5-573G",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
		},
	},
	{
		/* Point of View mobii wintab p800w */
		.callback = battery_ac_is_broken_quirk,
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
			DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
			/* Above matches are too generic, add bios-date match */
			DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
		},
	},
	{
		/* ECS EF20EA */
		.callback = battery_do_not_check_pmic_quirk,
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
		},
	},
	{
		/* Lenovo Ideapad Miix 320 */
		.callback = battery_do_not_check_pmic_quirk,
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "80XF"),
			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
		},
	},
	{},
};

@@ -1488,16 +1545,18 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
	unsigned int i;
	int result;

	for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
		if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
			pr_info(PREFIX ACPI_BATTERY_DEVICE_NAME
				": found native %s PMIC, not loading\n",
				acpi_battery_blacklist[i]);
			return;
		}

	dmi_check_system(bat_dmi_table);

	if (battery_check_pmic) {
		for (i = 0; i < ARRAY_SIZE(acpi_battery_blacklist); i++)
			if (acpi_dev_present(acpi_battery_blacklist[i], "1", -1)) {
				pr_info(PREFIX ACPI_BATTERY_DEVICE_NAME
					": found native %s PMIC, not loading\n",
					acpi_battery_blacklist[i]);
				return;
			}
	}

#ifdef CONFIG_ACPI_PROCFS_POWER
	acpi_battery_dir = acpi_lock_battery_dir();
	if (!acpi_battery_dir)
drivers/acpi/cppc_acpi.c

@@ -39,6 +39,7 @@

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

@@ -49,7 +50,7 @@ struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	ktime_t deadline;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */

@@ -156,6 +157,9 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

@@ -183,6 +187,8 @@ static struct attribute *cppc_attrs[] = {
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};

@@ -193,42 +199,31 @@ static struct kobj_type cppc_ktype = {

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret = -EIO, status = 0;
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(),
					  pcc_ss_data->deadline);

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space wil be initialized by
		 * platform and should have set the command completion bit when
		 * PCC can be used by OSPM
		 */
		status = readw_relaxed(&generic_comm_base->status);
		if (status & PCC_CMD_COMPLETE_MASK) {
			ret = 0;
			if (chk_err_bit && (status & PCC_ERROR_MASK))
				ret = -EIO;
			break;
		}
		/*
		 * Reducing the bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (likely(!ret))
		pcc_ss_data->platform_owns_pcc = false;
	else
		pr_err("PCC check channel failed for ss: %d. Status=%x\n",
		       pcc_ss_id, status);
	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}
@@ -580,7 +575,7 @@ static int register_pcc_channel(int pcc_ss_idx)
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

@@ -613,7 +608,6 @@ bool __weak cpc_ffh_supported(void)
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *

@@ -641,6 +635,34 @@ int pcc_data_alloc(int pcc_ss_id)

	return 0;
}

/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}

/*
 * An example CPC table looks like the following.
 *

@@ -731,14 +753,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
			 num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */

@@ -750,12 +764,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
				cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
			 cpc_rev, CPPC_REV);
	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {

@@ -808,6 +820,18 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;
@@ -1037,26 +1061,34 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg;
	u64 high, low, nom, min_nonlinear;
		*lowest_non_linear_reg, *nominal_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */

@@ -1081,6 +1113,17 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);

@@ -1101,16 +1144,15 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];

@@ -1126,6 +1168,11 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */

@@ -1176,15 +1223,14 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data;
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*

@@ -1195,6 +1241,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
drivers/acpi/reboot.c

@@ -8,8 +8,8 @@ void acpi_reboot(void)
{
	struct acpi_generic_address *rr;
	struct pci_bus *bus0;
	u8 reset_value;
	unsigned int devfn;
	u8 reset_value;

	if (acpi_disabled)
		return;

@@ -40,7 +40,7 @@ void acpi_reboot(void)
	/* Form PCI device/function pair. */
	devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
			  (rr->address >> 16) & 0xffff);
	printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG.");
	printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG.\n");
	/* Write the value that resets us. */
	pci_bus_write_config_byte(bus0, devfn,
				  (rr->address & 0xffff), reset_value);
drivers/cpufreq/cppc_cpufreq.c

@@ -42,9 +42,6 @@
 */
static struct cppc_cpudata **all_cpu_data;

/* Capture the max KHz from DMI */
static u64 cppc_dmi_max_khz;

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{

@@ -75,6 +72,64 @@ static u64 cppc_get_dmi_max_khz(void)
	return (1000 * mhz);
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa
 *
 * If the perf/freq point lies between Nominal and Lowest, we can treat
 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
 * and extrapolate the rest
 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion
 */
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
					     unsigned int perf)
{
	static u64 max_khz;
	struct cppc_perf_caps *caps = &cpu->perf_caps;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (perf >= caps->nominal_perf) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = cpu->perf_caps.highest_perf;
	}
	return (u64)perf * mul / div;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
					     unsigned int freq)
{
	static u64 max_khz;
	struct cppc_perf_caps *caps = &cpu->perf_caps;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (freq >= caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->lowest_perf;
			div = caps->lowest_freq;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = cpu->perf_caps.highest_perf;
		div = max_khz;
	}

	return (u64)freq * mul / div;
}
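For illustration, here is a tiny standalone sketch of the perf-above-nominal
branch of the conversion above, using hypothetical numbers (nominal_perf = 200,
nominal_freq = 2300 MHz, highest_perf = 300; none of these values come from the
patch) together with the MHz-to-kHz scaling that cppc_cpufreq_cpu_init() applies
to the CPPC frequencies:

/* Hypothetical numbers only; mirrors the perf >= nominal_perf branch above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t nominal_perf = 200;
	uint64_t nominal_freq_khz = 2300 * 1000;	/* 2300 MHz from _CPC, scaled to kHz */
	uint64_t highest_perf = 300;

	/* Scale by the perf:freq ratio at the nominal point. */
	uint64_t max_khz = highest_perf * nominal_freq_khz / nominal_perf;

	printf("highest_perf maps to %llu kHz\n",
	       (unsigned long long)max_khz);		/* 3450000 kHz */
	return 0;
}

Under these assumed numbers a highest_perf of 300 would be reported to cpufreq
as a 3.45 GHz policy->max.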
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)

@@ -86,7 +141,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,

	cpu = all_cpu_data[policy->cpu];

	desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
	desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu->perf_ctrls.desired_perf)
		return ret;

@@ -186,24 +241,24 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
		return ret;
	}

	cppc_dmi_max_khz = cppc_get_dmi_max_khz();
	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu->perf_caps.lowest_freq *= 1000;
	cpu->perf_caps.nominal_freq *= 1000;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
		cpu->perf_caps.highest_perf;
	policy->max = cppc_dmi_max_khz;
	policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
		cpu->perf_caps.highest_perf;
	policy->cpuinfo.max_freq = cppc_dmi_max_khz;
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
	policy->shared_type = cpu->shared_type;

@@ -229,7 +284,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
	cpu->cur_policy = policy;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_dmi_max_khz;
	policy->cur = cppc_cpufreq_perf_to_khz(cpu,
					cpu->perf_caps.highest_perf);
	cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;

	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
drivers/mailbox/pcc.c

@@ -373,33 +373,24 @@ static const struct mbox_chan_ops pcc_chan_ops = {
};

/**
 * parse_pcc_subspace - Parse the PCC table and verify PCC subspace
 *		entries. There should be one entry per PCC client.
 * parse_pcc_subspaces -- Count PCC subspaces defined
 * @header: Pointer to the ACPI subtable header under the PCCT.
 * @end: End of subtable entry.
 *
 * Return: 0 for Success, else errno.
 * Return: If we find a PCC subspace entry of a valid type, return 0.
 *	Otherwise, return -EINVAL.
 *
 * This gets called for each entry in the PCC table.
 */
static int parse_pcc_subspace(struct acpi_subtable_header *header,
		const unsigned long end)
{
	struct acpi_pcct_hw_reduced *pcct_ss;
	struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header;

	if (pcc_mbox_ctrl.num_chans <= MAX_PCC_SUBSPACES) {
		pcct_ss = (struct acpi_pcct_hw_reduced *) header;
	if (ss->header.type < ACPI_PCCT_TYPE_RESERVED)
		return 0;

		if ((pcct_ss->header.type !=
		     ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE)
		    && (pcct_ss->header.type !=
			ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2)) {
			pr_err("Incorrect PCC Subspace type detected\n");
			return -EINVAL;
		}
	}

	return 0;
	return -EINVAL;
}

/**
@@ -449,8 +440,8 @@ static int __init acpi_pcc_probe(void)
	struct acpi_table_header *pcct_tbl;
	struct acpi_subtable_header *pcct_entry;
	struct acpi_table_pcct *acpi_pcct_tbl;
	struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
	int count, i, rc;
	int sum = 0;
	acpi_status status = AE_OK;

	/* Search for PCCT */

@@ -459,43 +450,41 @@ static int __init acpi_pcc_probe(void)
	if (ACPI_FAILURE(status) || !pcct_tbl)
		return -ENODEV;

	count = acpi_table_parse_entries(ACPI_SIG_PCCT,
			sizeof(struct acpi_table_pcct),
			ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE,
			parse_pcc_subspace, MAX_PCC_SUBSPACES);
	sum += (count > 0) ? count : 0;
	/* Set up the subtable handlers */
	for (i = ACPI_PCCT_TYPE_GENERIC_SUBSPACE;
	     i < ACPI_PCCT_TYPE_RESERVED; i++) {
		proc[i].id = i;
		proc[i].count = 0;
		proc[i].handler = parse_pcc_subspace;
	}

	count = acpi_table_parse_entries(ACPI_SIG_PCCT,
			sizeof(struct acpi_table_pcct),
			ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2,
			parse_pcc_subspace, MAX_PCC_SUBSPACES);
	sum += (count > 0) ? count : 0;

	if (sum == 0 || sum >= MAX_PCC_SUBSPACES) {
		pr_err("Error parsing PCC subspaces from PCCT\n");
	count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
			sizeof(struct acpi_table_pcct), proc,
			ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
	if (count == 0 || count > MAX_PCC_SUBSPACES) {
		pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
		return -EINVAL;
	}

	pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) *
			sum, GFP_KERNEL);
	pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) * count, GFP_KERNEL);
	if (!pcc_mbox_channels) {
		pr_err("Could not allocate space for PCC mbox channels\n");
		return -ENOMEM;
	}

	pcc_doorbell_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL);
	pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
	if (!pcc_doorbell_vaddr) {
		rc = -ENOMEM;
		goto err_free_mbox;
	}

	pcc_doorbell_ack_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL);
	pcc_doorbell_ack_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
	if (!pcc_doorbell_ack_vaddr) {
		rc = -ENOMEM;
		goto err_free_db_vaddr;
	}

	pcc_doorbell_irq = kcalloc(sum, sizeof(int), GFP_KERNEL);
	pcc_doorbell_irq = kcalloc(count, sizeof(int), GFP_KERNEL);
	if (!pcc_doorbell_irq) {
		rc = -ENOMEM;
		goto err_free_db_ack_vaddr;

@@ -509,18 +498,24 @@ static int __init acpi_pcc_probe(void)
	if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
		pcc_mbox_ctrl.txdone_irq = true;

	for (i = 0; i < sum; i++) {
	for (i = 0; i < count; i++) {
		struct acpi_generic_address *db_reg;
		struct acpi_pcct_hw_reduced *pcct_ss;
		struct acpi_pcct_subspace *pcct_ss;
		pcc_mbox_channels[i].con_priv = pcct_entry;

		pcct_ss = (struct acpi_pcct_hw_reduced *) pcct_entry;
		if (pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
		    pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
			struct acpi_pcct_hw_reduced *pcct_hrss;

		if (pcc_mbox_ctrl.txdone_irq) {
			rc = pcc_parse_subspace_irq(i, pcct_ss);
			if (rc < 0)
				goto err;
			pcct_hrss = (struct acpi_pcct_hw_reduced *) pcct_entry;

			if (pcc_mbox_ctrl.txdone_irq) {
				rc = pcc_parse_subspace_irq(i, pcct_hrss);
				if (rc < 0)
					goto err;
			}
		}
		pcct_ss = (struct acpi_pcct_subspace *) pcct_entry;

		/* If doorbell is in system memory cache the virt address */
		db_reg = &pcct_ss->doorbell_register;

@@ -531,7 +526,7 @@ static int __init acpi_pcc_probe(void)
			((unsigned long) pcct_entry + pcct_entry->length);
	}

	pcc_mbox_ctrl.num_chans = sum;
	pcc_mbox_ctrl.num_chans = count;

	pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
include/acpi/cppc_acpi.h

@@ -20,14 +20,16 @@
#include <acpi/pcc.h>
#include <acpi/processor.h>

/* Only support CPPCv2 for now. */
#define CPPC_NUM_ENT	21
#define CPPC_REV	2
/* Support CPPCv2 and CPPCv3 */
#define CPPC_V2_REV	2
#define CPPC_V3_REV	3
#define CPPC_V2_NUM_ENT	21
#define CPPC_V3_NUM_ENT	23

#define PCC_CMD_COMPLETE_MASK	(1 << 0)
#define PCC_ERROR_MASK		(1 << 2)

#define MAX_CPC_REG_ENT 19
#define MAX_CPC_REG_ENT 21

/* CPPC specific PCC commands. */
#define CMD_READ 0

@@ -91,6 +93,8 @@ enum cppc_regs {
	AUTO_ACT_WINDOW,
	ENERGY_PERF,
	REFERENCE_PERF,
	LOWEST_FREQ,
	NOMINAL_FREQ,
};

/*

@@ -104,6 +108,8 @@ struct cppc_perf_caps {
	u32 nominal_perf;
	u32 lowest_perf;
	u32 lowest_nonlinear_perf;
	u32 lowest_freq;
	u32 nominal_freq;
};

struct cppc_perf_ctrls {
include/linux/acpi.h

@@ -578,6 +578,7 @@ int acpi_match_platform_list(const struct acpi_platform_list *plat);

extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
extern void arch_post_acpi_subsys_init(void);

extern int acpi_nvs_register(__u64 start, __u64 size);