Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (65 commits)
  ACPI: suppress power button event on S3 resume
  ACPI: resolve merge conflict between sem2mutex and processor_perflib.c
  ACPI: use for_each_possible_cpu() instead of for_each_cpu()
  ACPI: delete newly added debugging macros in processor_perflib.c
  ACPI: UP build fix for bugzilla-5737
  Enable P-state software coordination via _PDC
  P-state software coordination for speedstep-centrino
  P-state software coordination for acpi-cpufreq
  P-state software coordination for ACPI core
  ACPI: create acpi_thermal_resume()
  ACPI: create acpi_fan_suspend()/acpi_fan_resume()
  ACPI: pass pm_message_t from acpi_device_suspend() to root_suspend()
  ACPI: create acpi_device_suspend()/acpi_device_resume()
  ACPI: replace spin_lock_irq with mutex for ec poll mode
  ACPI: Allow a WAN module enable/disable on a Thinkpad X60.
  sem2mutex: acpi, acpi_link_lock
  ACPI: delete unused acpi_bus_drivers_lock
  sem2mutex: drivers/acpi/processor_perflib.c
  ACPI add ia64 exports to build acpi_memhotplug as a module
  ACPI: asus_acpi_init(): propagate correct return value
  ...

Manual resolve of conflicts in:

	arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
	arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
	include/acpi/processor.h
Linus Torvalds 2006-06-23 07:52:36 -07:00
commit 37224470c8
177 changed files with 6455 additions and 5134 deletions

View File

@ -147,6 +147,9 @@ running once the system is up.
acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA
Format: <irq>,<irq>...
acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS
Format: To spoof as Windows 98: ="Microsoft Windows"
acpi_osi= [HW,ACPI] empty param disables _OSI
acpi_serialize [HW,ACPI] force serialization of AML methods

View File

@ -217,7 +217,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_madt *madt = NULL;
if (!phys_addr || !size)
if (!phys_addr || !size || !cpu_has_apic)
return -EINVAL;
madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@ -623,9 +623,9 @@ extern u32 pmtmr_ioport;
static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
{
struct fadt_descriptor_rev2 *fadt = NULL;
struct fadt_descriptor *fadt = NULL;
fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;

View File

@ -47,7 +47,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;

View File

@ -48,12 +48,13 @@ MODULE_LICENSE("GPL");
struct cpufreq_acpi_io {
struct acpi_processor_performance acpi_data;
struct acpi_processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
};
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
@ -104,64 +105,43 @@ acpi_processor_set_performance (
{
u16 port = 0;
u8 bit_width = 0;
int ret;
u32 value = 0;
int i = 0;
struct cpufreq_freqs cpufreq_freqs;
cpumask_t saved_mask;
int ret = 0;
u32 value = 0;
int retval;
struct acpi_processor_performance *perf;
dprintk("acpi_processor_set_performance\n");
/*
* TBD: Use something other than set_cpus_allowed.
* As set_cpus_allowed is a bit racy,
* with any other set_cpus_allowed for this process.
*/
saved_mask = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
if (smp_processor_id() != cpu) {
return (-EAGAIN);
}
if (state == data->acpi_data.state) {
retval = 0;
perf = data->acpi_data;
if (state == perf->state) {
if (unlikely(data->resume)) {
dprintk("Called after resume, resetting to P%d\n", state);
data->resume = 0;
} else {
dprintk("Already at target state (P%d)\n", state);
retval = 0;
goto migrate_end;
return (retval);
}
}
dprintk("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
/* cpufreq frequency struct */
cpufreq_freqs.cpu = cpu;
cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
cpufreq_freqs.new = data->freq_table[state].frequency;
/* notify cpufreq */
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
dprintk("Transitioning from P%d to P%d\n", perf->state, state);
/*
* First we write the target state's 'control' value to the
* control_register.
*/
port = data->acpi_data.control_register.address;
bit_width = data->acpi_data.control_register.bit_width;
value = (u32) data->acpi_data.states[state].control;
port = perf->control_register.address;
bit_width = perf->control_register.bit_width;
value = (u32) perf->states[state].control;
dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
ret = acpi_processor_write_port(port, bit_width, value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
retval = ret;
goto migrate_end;
return (ret);
}
/*
@ -177,48 +157,35 @@ acpi_processor_set_performance (
* before giving up.
*/
port = data->acpi_data.status_register.address;
bit_width = data->acpi_data.status_register.bit_width;
port = perf->status_register.address;
bit_width = perf->status_register.bit_width;
dprintk("Looking for 0x%08x from port 0x%04x\n",
(u32) data->acpi_data.states[state].status, port);
(u32) perf->states[state].status, port);
for (i=0; i<100; i++) {
for (i = 0; i < 100; i++) {
ret = acpi_processor_read_port(port, bit_width, &value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
retval = ret;
goto migrate_end;
return (ret);
}
if (value == (u32) data->acpi_data.states[state].status)
if (value == (u32) perf->states[state].status)
break;
udelay(10);
}
} else {
value = (u32) data->acpi_data.states[state].status;
value = (u32) perf->states[state].status;
}
/* notify cpufreq */
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
unsigned int tmp = cpufreq_freqs.new;
cpufreq_freqs.new = cpufreq_freqs.old;
cpufreq_freqs.old = tmp;
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
if (unlikely(value != (u32) perf->states[state].status)) {
printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
retval = -ENODEV;
goto migrate_end;
return (retval);
}
dprintk("Transition successful after %d microseconds\n", i * 10);
data->acpi_data.state = state;
retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
perf->state = state;
return (retval);
}
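
The hunk above mostly re-points the register accesses at the cached *perf structure; the underlying protocol is unchanged: write the target state's control value to the control port, then poll the status port (up to 100 reads, 10 microseconds apart) for the target state's status value. A consolidated sketch of that sequence, with a hypothetical function name, assuming the driver file's existing headers and the acpi_processor_{write,read}_port() helpers visible in the diff, and leaving out the zero-width status register special case:

/*
 * Sketch only: the same write-then-poll sequence as the hunk above,
 * not the literal patched function.
 */
static int pstate_port_transition(struct acpi_processor_performance *perf,
                                  unsigned int state)
{
        u16 port;
        u8 bit_width;
        u32 value = 0;
        int i, ret;

        /* 1) Write the target state's control value to the control register */
        port = perf->control_register.address;
        bit_width = perf->control_register.bit_width;
        ret = acpi_processor_write_port(port, bit_width,
                                        (u32) perf->states[state].control);
        if (ret)
                return ret;

        /* 2) Poll the status register, up to 100 reads 10us apart, until it
         *    reports the target state's status value */
        port = perf->status_register.address;
        bit_width = perf->status_register.bit_width;
        for (i = 0; i < 100; i++) {
                ret = acpi_processor_read_port(port, bit_width, &value);
                if (ret)
                        return ret;
                if (value == (u32) perf->states[state].status)
                        return 0;
                udelay(10);
        }

        return -ENODEV; /* transition never became visible */
}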
@ -230,8 +197,17 @@ acpi_cpufreq_target (
unsigned int relation)
{
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
cpumask_t online_policy_cpus;
cpumask_t saved_mask;
cpumask_t set_mask;
cpumask_t covered_cpus;
unsigned int cur_state = 0;
unsigned int next_state = 0;
unsigned int result = 0;
unsigned int j;
unsigned int tmp;
dprintk("acpi_cpufreq_setpolicy\n");
@ -240,11 +216,95 @@ acpi_cpufreq_target (
target_freq,
relation,
&next_state);
if (result)
if (unlikely(result))
return (result);
result = acpi_processor_set_performance (data, policy->cpu, next_state);
perf = data->acpi_data;
cur_state = perf->state;
freqs.old = data->freq_table[cur_state].frequency;
freqs.new = data->freq_table[next_state].frequency;
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
online_policy_cpus = policy->cpus;
#endif
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
/*
* We need to call driver->target() on all or any CPU in
* policy->cpus, depending on policy->shared_type.
*/
saved_mask = current->cpus_allowed;
cpus_clear(covered_cpus);
for_each_cpu_mask(j, online_policy_cpus) {
/*
* Support for SMP systems.
* Make sure we are running on CPU that wants to change freq
*/
cpus_clear(set_mask);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
cpus_or(set_mask, set_mask, online_policy_cpus);
else
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
result = -EAGAIN;
break;
}
result = acpi_processor_set_performance (data, j, next_state);
if (result) {
result = -EAGAIN;
break;
}
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
cpu_set(j, covered_cpus);
}
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
if (unlikely(result)) {
/*
* We have failed halfway through the frequency change.
* We have sent callbacks to online_policy_cpus and
* acpi_processor_set_performance() has been called on
* covered_cpus. Best effort undo...
*/
if (!cpus_empty(covered_cpus)) {
for_each_cpu_mask(j, covered_cpus) {
policy->cpu = j;
acpi_processor_set_performance (data,
j,
cur_state);
}
}
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
set_cpus_allowed(current, saved_mask);
return (result);
}
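
This rewritten target routine is the heart of the P-state software coordination work being merged: the cpufreq notifications and the actual transition now cover every online CPU in the policy, and policy->shared_type decides whether one CPU may act for the whole domain. A condensed sketch of that flow, using a hypothetical function name, the driver's own struct cpufreq_acpi_io, and its existing headers, with the best-effort rollback reduced to a comment:

/*
 * Condensed sketch of the coordination flow added above; error rollback
 * (restoring cur_state on covered CPUs and reversing the notifications)
 * is only hinted at in comments.
 */
static int coordinated_target(struct cpufreq_policy *policy,
                              struct cpufreq_acpi_io *data,
                              unsigned int next_state)
{
        struct acpi_processor_performance *perf = data->acpi_data;
        cpumask_t online_policy_cpus, set_mask;
        cpumask_t saved_mask = current->cpus_allowed;
        struct cpufreq_freqs freqs;
        unsigned int j;
        int ret = 0;

        /* cpufreq holds the hotplug lock, so the online map is stable */
        cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);

        freqs.old = data->freq_table[perf->state].frequency;
        freqs.new = data->freq_table[next_state].frequency;

        /* 1) Announce the change on every affected CPU */
        for_each_cpu_mask(j, online_policy_cpus) {
                freqs.cpu = j;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }

        /* 2) Perform the transition: once for SHARED_TYPE_ANY domains,
         *    otherwise once per CPU, migrating there first */
        for_each_cpu_mask(j, online_policy_cpus) {
                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
                        set_mask = online_policy_cpus;
                else
                        set_mask = cpumask_of_cpu(j);
                set_cpus_allowed(current, set_mask);

                ret = acpi_processor_set_performance(data, j, next_state);
                if (ret)
                        break;  /* real code also rolls covered CPUs back */
                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
                        break;
        }

        /* 3) Complete the notification pair on every affected CPU */
        for_each_cpu_mask(j, online_policy_cpus) {
                freqs.cpu = j;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }

        set_cpus_allowed(current, saved_mask);
        return ret;
}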
@ -270,30 +330,65 @@ acpi_cpufreq_guess_freq (
struct cpufreq_acpi_io *data,
unsigned int cpu)
{
struct acpi_processor_performance *perf = data->acpi_data;
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
unsigned long freqn = perf->states[0].core_frequency * 1000;
for (i=0; i < (data->acpi_data.state_count - 1); i++) {
for (i = 0; i < (perf->state_count - 1); i++) {
freq = freqn;
freqn = data->acpi_data.states[i+1].core_frequency * 1000;
freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
data->acpi_data.state = i;
perf->state = i;
return (freq);
}
}
data->acpi_data.state = data->acpi_data.state_count - 1;
perf->state = perf->state_count - 1;
return (freqn);
} else
} else {
/* assume CPU is at P0... */
data->acpi_data.state = 0;
return data->acpi_data.states[0].core_frequency * 1000;
perf->state = 0;
return perf->states[0].core_frequency * 1000;
}
}
/*
* acpi_cpufreq_early_init - initialize ACPI P-States library
*
* Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
* in order to determine correct frequency and voltage pairings. We can
* do _PDC and _PSD and find out the processor dependency for the
* actual init that will happen later...
*/
static int acpi_cpufreq_early_init_acpi(void)
{
struct acpi_processor_performance *data;
unsigned int i, j;
dprintk("acpi_cpufreq_early_init\n");
for_each_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
return (-ENOMEM);
}
acpi_perf_data[i] = data;
}
/* Do initialization in ACPI core */
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
}
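
The new early-init step exists so that per-CPU acpi_processor_performance blocks are in place before the _PDC/_PSD preregistration runs; the allocation itself is a plain allocate-or-roll-back loop, repeated almost verbatim in speedstep-centrino further down. A compact sketch of that pattern, factored into a hypothetical helper and relying on the driver's existing headers:

/*
 * Sketch of the allocate-or-roll-back loop; for_each_cpu() iterates
 * possible CPUs here, as in the hunk above.
 */
static int alloc_perf_data(struct acpi_processor_performance **table)
{
        unsigned int i, j;

        for_each_cpu(i) {
                table[i] = kzalloc(sizeof(struct acpi_processor_performance),
                                   GFP_KERNEL);
                if (!table[i]) {
                        /* undo every allocation made so far */
                        for_each_cpu(j) {
                                kfree(table[j]);
                                table[j] = NULL;
                        }
                        return -ENOMEM;
                }
        }
        return 0;
}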
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@ -303,41 +398,51 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
if (!acpi_perf_data[cpu])
return (-ENODEV);
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
return (-ENOMEM);
data->acpi_data = acpi_perf_data[cpu];
acpi_io_data[cpu] = data;
result = acpi_processor_register_performance(&data->acpi_data, cpu);
result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
goto err_free;
perf = data->acpi_data;
policy->cpus = perf->shared_cpu_map;
policy->shared_type = perf->shared_type;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
}
/* capability check */
if (data->acpi_data.state_count <= 1) {
if (perf->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
(perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
dprintk("Unsupported address space [%d, %d]\n",
(u32) (data->acpi_data.control_register.space_id),
(u32) (data->acpi_data.status_register.space_id));
(u32) (perf->control_register.space_id),
(u32) (perf->status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
@ -345,9 +450,9 @@ acpi_cpufreq_cpu_init (
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i=0; i<data->acpi_data.state_count; i++) {
if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
for (i=0; i<perf->state_count; i++) {
if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
@ -355,11 +460,11 @@ acpi_cpufreq_cpu_init (
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
/* table init */
for (i=0; i<=data->acpi_data.state_count; i++)
for (i=0; i<=perf->state_count; i++)
{
data->freq_table[i].index = i;
if (i<data->acpi_data.state_count)
data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
if (i<perf->state_count)
data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
else
data->freq_table[i].frequency = CPUFREQ_TABLE_END;
}
@ -374,12 +479,12 @@ acpi_cpufreq_cpu_init (
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
for (i = 0; i < perf->state_count; i++)
dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
(i == data->acpi_data.state?'*':' '), i,
(u32) data->acpi_data.states[i].core_frequency,
(u32) data->acpi_data.states[i].power,
(u32) data->acpi_data.states[i].transition_latency);
(i == perf->state?'*':' '), i,
(u32) perf->states[i].core_frequency,
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
@ -394,7 +499,7 @@ acpi_cpufreq_cpu_init (
err_freqfree:
kfree(data->freq_table);
err_unreg:
acpi_processor_unregister_performance(&data->acpi_data, cpu);
acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
@ -415,7 +520,7 @@ acpi_cpufreq_cpu_exit (
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
kfree(data);
}
@ -462,7 +567,10 @@ acpi_cpufreq_init (void)
dprintk("acpi_cpufreq_init\n");
result = cpufreq_register_driver(&acpi_cpufreq_driver);
result = acpi_cpufreq_early_init_acpi();
if (!result)
result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
}
@ -471,10 +579,15 @@ acpi_cpufreq_init (void)
static void __exit
acpi_cpufreq_exit (void)
{
unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
for_each_cpu(i) {
kfree(acpi_perf_data[i]);
acpi_perf_data[i] = NULL;
}
return;
}

View File

@ -347,7 +347,36 @@ static unsigned int get_cur_freq(unsigned int cpu)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
static struct acpi_processor_performance p;
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
/*
* centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
* library
*
* Before doing the actual init, we need to do _PSD related setup whenever
* supported by the BIOS. These are handled by this early_init routine.
*/
static int centrino_cpu_early_init_acpi(void)
{
unsigned int i, j;
struct acpi_processor_performance *data;
for_each_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
return (-ENOMEM);
}
acpi_perf_data[i] = data;
}
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
}
/*
* centrino_cpu_init_acpi - register with ACPI P-States library
@ -361,46 +390,51 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
struct acpi_processor_performance *p;
p = acpi_perf_data[cpu];
/* register with ACPI core */
if (acpi_processor_register_performance(&p, cpu)) {
dprintk("obtaining ACPI data failed\n");
if (acpi_processor_register_performance(p, cpu)) {
dprintk(PFX "obtaining ACPI data failed\n");
return -EIO;
}
policy->cpus = p->shared_cpu_map;
policy->shared_type = p->shared_type;
/* verify the acpi_data */
if (p.state_count <= 1) {
if (p->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
dprintk("Invalid control/status registers (%x - %x)\n",
p.control_register.space_id, p.status_register.space_id);
p->control_register.space_id, p->status_register.space_id);
result = -EIO;
goto err_unreg;
}
for (i=0; i<p.state_count; i++) {
if (p.states[i].control != p.states[i].status) {
for (i=0; i<p->state_count; i++) {
if (p->states[i].control != p->states[i].status) {
dprintk("Different control (%llu) and status values (%llu)\n",
p.states[i].control, p.states[i].status);
p->states[i].control, p->states[i].status);
result = -EINVAL;
goto err_unreg;
}
if (!p.states[i].core_frequency) {
if (!p->states[i].core_frequency) {
dprintk("Zero core frequency for state %u\n", i);
result = -EINVAL;
goto err_unreg;
}
if (p.states[i].core_frequency > p.states[0].core_frequency) {
if (p->states[i].core_frequency > p->states[0].core_frequency) {
dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
p.states[i].core_frequency, p.states[0].core_frequency);
p.states[i].core_frequency = 0;
p->states[i].core_frequency, p->states[0].core_frequency);
p->states[i].core_frequency = 0;
continue;
}
}
@ -412,26 +446,26 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
centrino_model[cpu]->model_name=NULL;
centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
(p.state_count + 1), GFP_KERNEL);
(p->state_count + 1), GFP_KERNEL);
if (!centrino_model[cpu]->op_points) {
result = -ENOMEM;
goto err_kfree;
}
for (i=0; i<p.state_count; i++) {
centrino_model[cpu]->op_points[i].index = p.states[i].control;
centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
for (i=0; i<p->state_count; i++) {
centrino_model[cpu]->op_points[i].index = p->states[i].control;
centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
dprintk("adding state %i with frequency %u and control value %04x\n",
i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
}
centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
cur_freq = get_cur_freq(cpu);
for (i=0; i<p.state_count; i++) {
if (!p.states[i].core_frequency) {
for (i=0; i<p->state_count; i++) {
if (!p->states[i].core_frequency) {
dprintk("skipping state %u\n", i);
centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
@ -447,7 +481,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
p.state = i;
p->state = i;
}
/* notify BIOS that we exist */
@ -460,12 +494,13 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
err_kfree:
kfree(centrino_model[cpu]);
err_unreg:
acpi_processor_unregister_performance(&p, cpu);
dprintk("invalid ACPI data\n");
acpi_processor_unregister_performance(p, cpu);
dprintk(PFX "invalid ACPI data\n");
return (result);
}
#else
static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
static inline int centrino_cpu_early_init_acpi(void) { return 0; }
#endif
static int centrino_cpu_init(struct cpufreq_policy *policy)
@ -551,10 +586,15 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
if (!centrino_model[cpu]->model_name) {
dprintk("unregistering and freeing ACPI data\n");
acpi_processor_unregister_performance(&p, cpu);
kfree(centrino_model[cpu]->op_points);
kfree(centrino_model[cpu]);
static struct acpi_processor_performance *p;
if (acpi_perf_data[cpu]) {
p = acpi_perf_data[cpu];
dprintk("unregistering and freeing ACPI data\n");
acpi_processor_unregister_performance(p, cpu);
kfree(centrino_model[cpu]->op_points);
kfree(centrino_model[cpu]);
}
}
#endif
@ -588,63 +628,128 @@ static int centrino_target (struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int newstate = 0;
unsigned int msr, oldmsr, h, cpu = policy->cpu;
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
cpumask_t online_policy_cpus;
cpumask_t saved_mask;
int retval;
cpumask_t set_mask;
cpumask_t covered_cpus;
int retval = 0;
unsigned int j, k, first_cpu, tmp;
if (centrino_model[cpu] == NULL)
if (unlikely(centrino_model[cpu] == NULL))
return -ENODEV;
/*
* Support for SMP systems.
* Make sure we are running on the CPU that wants to change frequency
*/
if (unlikely(cpufreq_frequency_table_target(policy,
centrino_model[cpu]->op_points,
target_freq,
relation,
&newstate))) {
return -EINVAL;
}
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
online_policy_cpus = policy->cpus;
#endif
saved_mask = current->cpus_allowed;
set_cpus_allowed(current, policy->cpus);
if (!cpu_isset(smp_processor_id(), policy->cpus)) {
dprintk("couldn't limit to CPUs in this domain\n");
return(-EAGAIN);
first_cpu = 1;
cpus_clear(covered_cpus);
for_each_cpu_mask(j, online_policy_cpus) {
/*
* Support for SMP systems.
* Make sure we are running on CPU that wants to change freq
*/
cpus_clear(set_mask);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
cpus_or(set_mask, set_mask, online_policy_cpus);
else
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
if (first_cpu) {
/* We haven't started the transition yet. */
goto migrate_end;
}
break;
}
msr = centrino_model[cpu]->op_points[newstate].index;
if (first_cpu) {
rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (msr == (oldmsr & 0xffff)) {
dprintk("no change needed - msr was and needs "
"to be %x\n", oldmsr);
retval = 0;
goto migrate_end;
}
freqs.old = extract_clock(oldmsr, cpu, 0);
freqs.new = extract_clock(msr, cpu, 0);
dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
for_each_cpu_mask(k, online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs,
CPUFREQ_PRECHANGE);
}
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
msr &= 0xffff;
oldmsr |= msr;
}
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
cpu_set(j, covered_cpus);
}
if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
relation, &newstate)) {
retval = -EINVAL;
goto migrate_end;
for_each_cpu_mask(k, online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
msr = centrino_model[cpu]->op_points[newstate].index;
rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
* We have sent callbacks to policy->cpus and
* MSRs have already been written on covered_cpus.
* Best effort undo...
*/
if (msr == (oldmsr & 0xffff)) {
retval = 0;
dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
goto migrate_end;
if (!cpus_empty(covered_cpus)) {
for_each_cpu_mask(j, covered_cpus) {
set_cpus_allowed(current, cpumask_of_cpu(j));
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
}
}
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
freqs.cpu = cpu;
freqs.old = extract_clock(oldmsr, cpu, 0);
freqs.new = extract_clock(msr, cpu, 0);
dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* all but 16 LSB are "reserved", so treat them with
care */
oldmsr &= ~0xffff;
msr &= 0xffff;
oldmsr |= msr;
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
return (retval);
return 0;
}
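
On the Centrino side the transition itself is a single PERF_CTL MSR update; the "all but 16 LSB are reserved" comment is the key constraint. A minimal sketch of that read-modify-write, with a hypothetical function name, assuming the driver's existing includes and the MSR named in the hunk:

/*
 * Sketch: splice the requested control value into the low 16 bits of
 * MSR_IA32_PERF_CTL and write everything else back unchanged.
 */
static void write_perf_ctl(unsigned int control)
{
        unsigned int lo, hi;

        rdmsr(MSR_IA32_PERF_CTL, lo, hi);          /* current value */
        lo = (lo & ~0xffff) | (control & 0xffff);  /* new P-state in low 16 bits */
        wrmsr(MSR_IA32_PERF_CTL, lo, hi);          /* reserved bits preserved */
}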
static struct freq_attr* centrino_attr[] = {
@ -686,12 +791,25 @@ static int __init centrino_init(void)
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
centrino_cpu_early_init_acpi();
return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
unsigned int j;
#endif
cpufreq_unregister_driver(&centrino_driver);
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
#endif
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");

View File

@ -77,6 +77,7 @@ choice
config IA64_GENERIC
bool "generic"
select ACPI
select PCI
select NUMA
select ACPI_NUMA
help

View File

@ -1999,7 +1999,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
ACPI_MEM_FREE(dev_info);
kfree(dev_info);
/*
* default anything not caught above or specified on cmdline to 4k

View File

@ -68,8 +68,6 @@ EXPORT_SYMBOL(pm_power_off);
unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;
static unsigned int __initdata acpi_madt_rev;
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
@ -243,6 +241,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
return iosapic_init(iosapic->address, iosapic->global_irq_base);
}
static unsigned int __initdata acpi_madt_rev;
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
const unsigned long end)

View File

@ -671,9 +671,11 @@ int add_memory(u64 start, u64 size)
return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
int remove_memory(u64 start, u64 size)
{
return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif

View File

@ -299,6 +299,7 @@ config X86_64_ACPI_NUMA
bool "ACPI NUMA detection"
depends on NUMA
select ACPI
select PCI
select ACPI_NUMA
default y
help

View File

@ -4,5 +4,6 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
endif

View File

@ -1,72 +0,0 @@
/*
* arch/x86_64/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);

View File

@ -10,9 +10,8 @@ menu "ACPI (Advanced Configuration and Power Interface) Support"
config ACPI
bool "ACPI Support"
depends on IA64 || X86
depends on PCI
select PM
select PCI
default y
---help---
Advanced Configuration and Power Interface (ACPI) support for

View File

@ -74,7 +74,7 @@ struct acpi_memory_device {
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
u64 start_addr; /* Memory Range start physical addr */
u64 end_addr; /* Memory Range end physical addr */
u64 length; /* Memory Range length */
};
static int
@ -97,12 +97,11 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
if (ACPI_SUCCESS(status)) {
if (address64.resource_type == ACPI_MEMORY_RANGE) {
/* Populate the structure */
mem_device->caching =
address64.info.mem.caching;
mem_device->caching = address64.info.mem.caching;
mem_device->write_protect =
address64.info.mem.write_protect;
mem_device->start_addr = address64.minimum;
mem_device->end_addr = address64.maximum;
mem_device->length = address64.address_length;
}
}
@ -199,8 +198,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
*/
result = add_memory(mem_device->start_addr,
(mem_device->end_addr - mem_device->start_addr) + 1);
result = add_memory(mem_device->start_addr, mem_device->length);
if (result) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
mem_device->state = MEMORY_INVALID_STATE;
@ -249,7 +247,7 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
int result;
u64 start = mem_device->start_addr;
u64 len = mem_device->end_addr - start + 1;
u64 len = mem_device->length;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");

View File

@ -817,7 +817,7 @@ typedef int (proc_writefunc) (struct file * file, const char __user * buffer,
unsigned long count, void *data);
static int
__init asus_proc_add(char *name, proc_writefunc * writefunc,
asus_proc_add(char *name, proc_writefunc * writefunc,
proc_readfunc * readfunc, mode_t mode,
struct acpi_device *device)
{
@ -836,7 +836,7 @@ __init asus_proc_add(char *name, proc_writefunc * writefunc,
return 0;
}
static int __init asus_hotk_add_fs(struct acpi_device *device)
static int asus_hotk_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *proc;
mode_t mode;
@ -954,7 +954,7 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
* This function is used to initialize the hotk with right values. In this
* method, we can make all the detection we want, and modify the hotk struct
*/
static int __init asus_hotk_get_info(void)
static int asus_hotk_get_info(void)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
@ -970,7 +970,7 @@ static int __init asus_hotk_get_info(void)
* HID), this bit will be moved. A global variable asus_info contains
* the DSDT header.
*/
status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt);
status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
if (ACPI_FAILURE(status))
printk(KERN_WARNING " Couldn't get the DSDT table header\n");
else
@ -1101,7 +1101,7 @@ static int __init asus_hotk_get_info(void)
return AE_OK;
}
static int __init asus_hotk_check(void)
static int asus_hotk_check(void)
{
int result = 0;
@ -1119,7 +1119,9 @@ static int __init asus_hotk_check(void)
return result;
}
static int __init asus_hotk_add(struct acpi_device *device)
static int asus_hotk_found;
static int asus_hotk_add(struct acpi_device *device)
{
acpi_status status = AE_OK;
int result;
@ -1180,6 +1182,8 @@ static int __init asus_hotk_add(struct acpi_device *device)
}
}
asus_hotk_found = 1;
end:
if (result) {
kfree(hotk);
@ -1226,12 +1230,24 @@ static int __init asus_acpi_init(void)
asus_proc_dir->owner = THIS_MODULE;
result = acpi_bus_register_driver(&asus_hotk_driver);
if (result < 1) {
acpi_bus_unregister_driver(&asus_hotk_driver);
if (result < 0) {
remove_proc_entry(PROC_ASUS, acpi_root_dir);
return -ENODEV;
}
/*
* This is a bit of a kludge. We only want this module loaded
* for ASUS systems, but there's currently no way to probe the
* ACPI namespace for ASUS HIDs. So we just return failure if
* we didn't find one, which will cause the module to be
* unloaded.
*/
if (!asus_hotk_found) {
acpi_bus_unregister_driver(&asus_hotk_driver);
remove_proc_entry(PROC_ASUS, acpi_root_dir);
return result;
}
return 0;
}

View File

@ -43,7 +43,7 @@ ACPI_MODULE_NAME("acpi_bus")
extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
#endif
FADT_DESCRIPTOR acpi_fadt;
struct fadt_descriptor acpi_fadt;
EXPORT_SYMBOL(acpi_fadt);
struct acpi_device *acpi_root;
@ -205,12 +205,14 @@ int acpi_bus_set_power(acpi_handle handle, int state)
* Get device's current power state if it's unknown
* This means device power state isn't initialized or previous setting failed
*/
if (device->power.state == ACPI_STATE_UNKNOWN)
acpi_bus_get_power(device->handle, &device->power.state);
if (state == device->power.state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
state));
return_VALUE(0);
if (!device->flags.force_power_state) {
if (device->power.state == ACPI_STATE_UNKNOWN)
acpi_bus_get_power(device->handle, &device->power.state);
if (state == device->power.state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
state));
return_VALUE(0);
}
}
if (!device->power.states[state].flags.valid) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Device does not support D%d\n",
@ -596,6 +598,8 @@ void __init acpi_early_init(void)
if (acpi_disabled)
return_VOID;
printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
@ -617,7 +621,7 @@ void __init acpi_early_init(void)
/*
* Get a separate copy of the FADT for use by other drivers.
*/
status = acpi_get_table(ACPI_TABLE_FADT, 1, &buffer);
status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get the FADT\n");
goto error0;
@ -743,8 +747,6 @@ static int __init acpi_init(void)
ACPI_FUNCTION_TRACE("acpi_init");
printk(KERN_INFO PREFIX "Subsystem revision %08x\n", ACPI_CA_VERSION);
if (acpi_disabled) {
printk(KERN_INFO PREFIX "Interpreter disabled.\n");
return_VALUE(-ENODEV);

View File

@ -87,7 +87,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
union acpi_operand_object *second_desc = NULL;
u32 flags;
ACPI_FUNCTION_TRACE("ds_create_buffer_field");
ACPI_FUNCTION_TRACE(ds_create_buffer_field);
/* Get the name_string argument */
@ -210,7 +210,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
acpi_status status;
acpi_integer position;
ACPI_FUNCTION_TRACE_PTR("ds_get_field_names", info);
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
/* First field starts at bit zero */
@ -342,7 +342,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
ACPI_FUNCTION_TRACE_PTR("ds_create_field", op);
ACPI_FUNCTION_TRACE_PTR(ds_create_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@ -399,7 +399,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_namespace_node *node;
u8 type = 0;
ACPI_FUNCTION_TRACE_PTR("ds_init_field_objects", op);
ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
switch (walk_state->opcode) {
case AML_FIELD_OP:
@ -425,6 +425,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
* Walk the list of entries in the field_list
*/
while (arg) {
/* Ignore OFFSET and ACCESSAS terms here */
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
@ -481,7 +482,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
ACPI_FUNCTION_TRACE_PTR("ds_create_bank_field", op);
ACPI_FUNCTION_TRACE_PTR(ds_create_bank_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@ -554,7 +555,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
ACPI_FUNCTION_TRACE_PTR("ds_create_index_field", op);
ACPI_FUNCTION_TRACE_PTR(ds_create_index_field, op);
/* First arg is the name of the Index register (must already exist) */

View File

@ -184,7 +184,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
*
* RETURN: Status
*
* DESCRIPTION: Walk the namespace starting at "start_node" and perform any
* DESCRIPTION: Walk the namespace starting at "StartNode" and perform any
* necessary initialization on the objects found therein
*
******************************************************************************/
@ -196,7 +196,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
acpi_status status;
struct acpi_init_walk_info info;
ACPI_FUNCTION_TRACE("ds_initialize_objects");
ACPI_FUNCTION_TRACE(ds_initialize_objects);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
@ -213,7 +213,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
acpi_ds_init_one_object, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,

View File

@ -81,6 +81,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
/* Invoke the global exception handler */
if (acpi_gbl_exception_handler) {
/* Exit the interpreter, allow handler to execute methods */
acpi_ex_exit_interpreter();
@ -100,6 +101,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
}
#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
/* Display method locals/args if disassembler is present */
acpi_dm_dump_method_info(status, walk_state, walk_state->op);
@ -132,7 +134,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR("ds_begin_method_execution", method_node);
ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
if (!method_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
@ -168,11 +170,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
/*
* Get a unit from the method semaphore. This releases the
* interpreter if we block
* interpreter if we block (then reacquires it)
*/
status =
acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
@ -183,7 +188,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
if (!obj_desc->method.owner_id) {
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
goto cleanup;
}
}
@ -193,6 +198,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
*/
obj_desc->method.thread_count++;
return_ACPI_STATUS(status);
cleanup:
/* On error, must signal the method semaphore if present */
if (obj_desc->method.semaphore) {
(void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
@ -218,10 +231,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_namespace_node *method_node;
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_parameter_info info;
struct acpi_evaluate_info *info;
u32 i;
ACPI_FUNCTION_TRACE_PTR("ds_call_control_method", this_walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Execute method %p, currentstate=%p\n",
@ -240,25 +253,31 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
/* Init for new method, wait on concurrency semaphore */
/* Init for new method, possibly wait on concurrency semaphore */
status = acpi_ds_begin_method_execution(method_node, obj_desc,
this_walk_state->method_node);
if (ACPI_FAILURE(status)) {
goto cleanup;
return_ACPI_STATUS(status);
}
/*
* 1) Parse the method. All "normal" methods are parsed for each execution.
* Internal methods (_OSI, etc.) do not require parsing.
*/
if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
/* 1) Parse: Create a new walk state for the preempting walk */
/* Create a new walk state for the parse */
next_walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
obj_desc, NULL);
if (!next_walk_state) {
return_ACPI_STATUS(AE_NO_MEMORY);
status = AE_NO_MEMORY;
goto cleanup;
}
/* Create and init a Root Node */
/* Create and init a parse tree root */
op = acpi_ps_create_scope_op();
if (!op) {
@ -271,17 +290,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
obj_desc->method.aml_length,
NULL, 1);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(next_walk_state);
acpi_ps_delete_parse_tree(op);
goto cleanup;
}
/* Begin AML parse */
/* Begin AML parse (deletes next_walk_state) */
status = acpi_ps_parse_aml(next_walk_state);
acpi_ps_delete_parse_tree(op);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
}
/* 2) Execute: Create a new state for the preempting walk */
/* 2) Begin method execution. Create a new walk state */
next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
NULL, obj_desc, thread);
@ -289,6 +311,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
status = AE_NO_MEMORY;
goto cleanup;
}
/*
* The resolved arguments were put on the previous walk state's operand
* stack. Operands on the previous walk state stack always
@ -296,12 +319,24 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
*/
this_walk_state->operands[this_walk_state->num_operands] = NULL;
info.parameters = &this_walk_state->operands[0];
info.parameter_type = ACPI_PARAM_ARGS;
/*
* Allocate and initialize the evaluation information block
* TBD: this is somewhat inefficient, should change interface to
* ds_init_aml_walk. For now, keeps this struct off the CPU stack
*/
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
info->parameters = &this_walk_state->operands[0];
info->parameter_type = ACPI_PARAM_ARGS;
status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, &info, 3);
obj_desc->method.aml_length, info, 3);
ACPI_FREE(info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@ -323,6 +358,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
"Starting nested execution, newstate=%p\n",
next_walk_state));
/* Invoke an internal method if necessary */
if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
status = obj_desc->method.implementation(next_walk_state);
}
@ -330,16 +367,14 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
cleanup:
/* Decrement the thread count on the method parse tree */
if (next_walk_state && (next_walk_state->method_desc)) {
next_walk_state->method_desc->method.thread_count--;
/* On error, we must terminate the method properly */
acpi_ds_terminate_control_method(obj_desc, next_walk_state);
if (next_walk_state) {
acpi_ds_delete_walk_state(next_walk_state);
}
/* On error, we must delete the new walk state */
acpi_ds_terminate_control_method(next_walk_state);
acpi_ds_delete_walk_state(next_walk_state);
return_ACPI_STATUS(status);
}
@ -362,25 +397,33 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
union acpi_operand_object *return_desc)
{
acpi_status status;
int same_as_implicit_return;
ACPI_FUNCTION_TRACE_PTR("ds_restart_control_method", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"****Restart [%4.4s] Op %p return_value_from_callee %p\n",
"****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
(char *)&walk_state->method_node->name,
walk_state->method_call_op, return_desc));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
" return_from_this_method_used?=%X res_stack %p Walk %p\n",
" ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
walk_state->return_used,
walk_state->results, walk_state));
/* Did the called method return a value? */
if (return_desc) {
/* Is the implicit return object the same as the return desc? */
same_as_implicit_return =
(walk_state->implicit_return_obj == return_desc);
/* Are we actually going to use the return value? */
if (walk_state->return_used) {
/* Save the return value from the previous method */
status = acpi_ds_result_push(return_desc, walk_state);
@ -397,18 +440,23 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
}
/*
* The following code is the
* optional support for a so-called "implicit return". Some AML code
* assumes that the last value of the method is "implicitly" returned
* to the caller. Just save the last result as the return value.
* The following code is the optional support for the so-called
* "implicit return". Some AML code assumes that the last value of the
* method is "implicitly" returned to the caller, in the absence of an
* explicit return value.
*
* Just save the last result of the method as the return value.
*
* NOTE: this is optional because the ASL language does not actually
* support this behavior.
*/
else if (!acpi_ds_do_implicit_return
(return_desc, walk_state, FALSE)) {
(return_desc, walk_state, FALSE)
|| same_as_implicit_return) {
/*
* Delete the return value if it will not be used by the
* calling method
* calling method or remove one reference if the explicit return
* is the same as the implicit return value.
*/
acpi_ut_remove_reference(return_desc);
}
@ -421,7 +469,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
* FUNCTION: acpi_ds_terminate_control_method
*
* PARAMETERS: walk_state - State of the method
* PARAMETERS: method_desc - Method object
* walk_state - State associated with the method
*
* RETURN: None
*
@ -431,95 +480,100 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
******************************************************************************/
void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
struct acpi_walk_state *walk_state)
{
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *method_node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ds_terminate_control_method", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
if (!walk_state) {
/* method_desc is required, walk_state is optional */
if (!method_desc) {
return_VOID;
}
/* The current method object was saved in the walk state */
if (walk_state) {
obj_desc = walk_state->method_desc;
if (!obj_desc) {
return_VOID;
/* Delete all arguments and locals */
acpi_ds_method_data_delete_all(walk_state);
}
/* Delete all arguments and locals */
acpi_ds_method_data_delete_all(walk_state);
/*
* Lock the parser while we terminate this method.
* If this is the last thread executing the method,
* we have additional cleanup to perform
*/
status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER);
status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
if (ACPI_FAILURE(status)) {
return_VOID;
}
/* Signal completion of the execution of this method if necessary */
if (walk_state->method_desc->method.semaphore) {
if (method_desc->method.semaphore) {
status =
acpi_os_signal_semaphore(walk_state->method_desc->method.
semaphore, 1);
acpi_os_signal_semaphore(method_desc->method.semaphore, 1);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not signal method semaphore"));
/* Ignore error and continue cleanup */
/* Ignore error and continue */
ACPI_EXCEPTION((AE_INFO, status,
"Could not signal method semaphore"));
}
}
/*
* There are no more threads executing this method. Perform
* additional cleanup.
*
* The method Node is stored in the walk state
*/
method_node = walk_state->method_node;
if (walk_state) {
/*
* Delete any objects created by this method during execution.
* The method Node is stored in the walk state
*/
method_node = walk_state->method_node;
/* Lock namespace for possible update */
/* Lock namespace for possible update */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
}
/*
* Delete any namespace entries created immediately underneath
* the method
*/
if (method_node && method_node->child) {
acpi_ns_delete_namespace_subtree(method_node);
}
/*
* Delete any namespace entries created anywhere else within
* the namespace by the execution of this method
*/
acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*
* Delete any namespace entries created immediately underneath
* the method
*/
if (method_node->child) {
acpi_ns_delete_namespace_subtree(method_node);
}
/* Decrement the thread count on the method */
/*
* Delete any namespace entries created anywhere else within
* the namespace by the execution of this method
*/
acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
owner_id);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (method_desc->method.thread_count) {
method_desc->method.thread_count--;
} else {
ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
}
/* Are there any other threads currently executing this method? */
if (walk_state->method_desc->method.thread_count) {
if (method_desc->method.thread_count) {
/*
* Additional threads. Do not release the owner_id in this case,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"*** Completed execution of one thread, %d threads remaining\n",
walk_state->method_desc->method.
thread_count));
method_desc->method.thread_count));
} else {
/* This is the only executing thread for this method */
@ -533,22 +587,20 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
* This code is here because we must wait until the last thread exits
* before creating the synchronization semaphore.
*/
if ((walk_state->method_desc->method.concurrency == 1) &&
(!walk_state->method_desc->method.semaphore)) {
if ((method_desc->method.concurrency == 1) &&
(!method_desc->method.semaphore)) {
status = acpi_os_create_semaphore(1, 1,
&walk_state->
method_desc->method.
&method_desc->method.
semaphore);
}
/* No more threads, we can free the owner_id */
acpi_ut_release_owner_id(&walk_state->method_desc->method.
owner_id);
acpi_ut_release_owner_id(&method_desc->method.owner_id);
}
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
(void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
return_VOID;
}
@ -581,7 +633,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node);
/* Parameter Validation */
@ -590,7 +642,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Parsing [%4.4s] **** named_obj=%p\n",
"**** Parsing [%4.4s] **** NamedObj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */
@ -669,7 +721,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
"**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n",
acpi_ut_get_node_name(node), node, op));
/*

View File

@ -81,7 +81,7 @@ acpi_ds_method_data_get_type(u16 opcode,
* special data types.
*
* NOTES: walk_state fields are initialized to zero by the
* ACPI_MEM_CALLOCATE().
* ACPI_ALLOCATE_ZEROED().
*
* A pseudo-Namespace Node is assigned to each argument and local
* so that ref_of() can return a pointer to the Node.
@ -92,7 +92,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
{
u32 i;
ACPI_FUNCTION_TRACE("ds_method_data_init");
ACPI_FUNCTION_TRACE(ds_method_data_init);
/* Init the method arguments */
@ -100,10 +100,10 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
NAMEOF_ARG_NTE);
walk_state->arguments[i].name.integer |= (i << 24);
walk_state->arguments[i].descriptor = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].type = ACPI_TYPE_ANY;
walk_state->arguments[i].flags = ANOBJ_END_OF_PEER_LIST |
ANOBJ_METHOD_ARG;
walk_state->arguments[i].flags =
ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
}
/* Init the method locals */
@ -113,11 +113,11 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
NAMEOF_LOCAL_NTE);
walk_state->local_variables[i].name.integer |= (i << 24);
walk_state->local_variables[i].descriptor =
walk_state->local_variables[i].descriptor_type =
ACPI_DESC_TYPE_NAMED;
walk_state->local_variables[i].type = ACPI_TYPE_ANY;
walk_state->local_variables[i].flags = ANOBJ_END_OF_PEER_LIST |
ANOBJ_METHOD_LOCAL;
walk_state->local_variables[i].flags =
ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
}
return_VOID;
@ -140,7 +140,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
{
u32 index;
ACPI_FUNCTION_TRACE("ds_method_data_delete_all");
ACPI_FUNCTION_TRACE(ds_method_data_delete_all);
/* Detach the locals */
@ -199,7 +199,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
acpi_status status;
u32 index = 0;
ACPI_FUNCTION_TRACE_PTR("ds_method_data_init_args", params);
ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);
if (!params) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@ -251,7 +251,7 @@ acpi_ds_method_data_get_node(u16 opcode,
struct acpi_walk_state *walk_state,
struct acpi_namespace_node **node)
{
ACPI_FUNCTION_TRACE("ds_method_data_get_node");
ACPI_FUNCTION_TRACE(ds_method_data_get_node);
/*
* Method Locals and Arguments are supported
@ -318,10 +318,10 @@ acpi_ds_method_data_set_value(u16 opcode,
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE("ds_method_data_set_value");
ACPI_FUNCTION_TRACE(ds_method_data_set_value);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"new_obj %p Opcode %X, Refs=%d [%s]\n", object,
"NewObj %p Opcode %X, Refs=%d [%s]\n", object,
opcode, object->common.reference_count,
acpi_ut_get_type_name(object->common.type)));
@ -336,7 +336,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* Increment ref count so object can't be deleted while installed.
* NOTE: We do not copy the object in order to preserve the call by
* reference semantics of ACPI Control Method invocation.
* (See ACPI specification 2.0_c)
* (See ACPI Specification 2.0_c)
*/
acpi_ut_add_reference(object);
@ -351,7 +351,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_get_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
* Index - which local_var or argument to get
* Index - Which local_var or argument to get
* walk_state - Current walk state object
* dest_desc - Where Arg or Local value is returned
*
@ -372,7 +372,7 @@ acpi_ds_method_data_get_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
ACPI_FUNCTION_TRACE("ds_method_data_get_value");
ACPI_FUNCTION_TRACE(ds_method_data_get_value);
/* Validate the object descriptor */
@ -459,7 +459,7 @@ acpi_ds_method_data_get_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_delete_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
* Index - which local_var or argument to delete
* Index - Which local_var or argument to delete
* walk_state - Current walk state object
*
* RETURN: None
@ -477,7 +477,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
ACPI_FUNCTION_TRACE("ds_method_data_delete_value");
ACPI_FUNCTION_TRACE(ds_method_data_delete_value);
/* Get the namespace node for the arg/local */
@ -538,7 +538,7 @@ acpi_ds_store_object_to_local(u16 opcode,
union acpi_operand_object *current_obj_desc;
union acpi_operand_object *new_obj_desc;
ACPI_FUNCTION_TRACE("ds_store_object_to_local");
ACPI_FUNCTION_TRACE(ds_store_object_to_local);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Opcode=%X Index=%d Obj=%p\n",
opcode, index, obj_desc));
@ -614,7 +614,7 @@ acpi_ds_store_object_to_local(u16 opcode,
&& (current_obj_desc->reference.opcode ==
AML_REF_OF_OP)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Arg (%p) is an obj_ref(Node), storing in node %p\n",
"Arg (%p) is an ObjRef(Node), storing in node %p\n",
new_obj_desc,
current_obj_desc));
@ -688,7 +688,7 @@ acpi_ds_method_data_get_type(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
ACPI_FUNCTION_TRACE("ds_method_data_get_type");
ACPI_FUNCTION_TRACE(ds_method_data_get_type);
/* Get the namespace node for the arg/local */
@ -701,6 +701,7 @@ acpi_ds_method_data_get_type(u16 opcode,
object = acpi_ns_get_attached_object(node);
if (!object) {
/* Uninitialized local/arg, return TYPE_ANY */
return_VALUE(ACPI_TYPE_ANY);
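
Two mechanical conversions repeat throughout this file and the ones that follow: the debug macros (ACPI_FUNCTION_TRACE, ACPI_FUNCTION_NAME, ACPI_FUNCTION_TRACE_PTR) now take a bare identifier instead of a quoted string, and the memory macros are renamed (ACPI_MEM_CALLOCATE becomes ACPI_ALLOCATE_ZEROED, ACPI_MEM_FREE becomes ACPI_FREE). A minimal sketch of how the trace macro can accept the bare name is shown below; the real definitions live in the ACPICA headers, so treat this only as an illustration of the presumed stringification:

/* Illustrative only -- assumes the macro stringifies its argument with '#' */
#define ACPI_FUNCTION_NAME(name) \
        static const char *_acpi_function_name = #name;

/* ACPI_FUNCTION_TRACE(ds_method_data_init) therefore still records the
 * string "ds_method_data_init" without the caller quoting it. */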

View File

@ -81,7 +81,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_TRACE("ds_build_internal_object");
ACPI_FUNCTION_TRACE(ds_build_internal_object);
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
@ -103,6 +103,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
common.
node)));
if (ACPI_FAILURE(status)) {
/* Check if we are resolving a named reference within a package */
if ((status == AE_NOT_FOUND)
@ -186,7 +187,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *byte_list;
u32 byte_list_length = 0;
ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj");
ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
/*
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
@ -195,6 +196,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
*/
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
@ -243,7 +245,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
"Buffer defined with zero length in AML, creating\n"));
} else {
obj_desc->buffer.pointer =
ACPI_MEM_CALLOCATE(obj_desc->buffer.length);
ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
if (!obj_desc->buffer.pointer) {
acpi_ut_delete_object_desc(obj_desc);
return_ACPI_STATUS(AE_NO_MEMORY);
@ -291,7 +293,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
acpi_native_uint i;
ACPI_FUNCTION_TRACE("ds_build_internal_package_obj");
ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
/* Find the parent of a possibly nested package */
@ -339,9 +341,10 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* individual objects). Add an extra pointer slot so
* that the list is always null terminated.
*/
obj_desc->package.elements = ACPI_MEM_CALLOCATE(((acpi_size) obj_desc->
package.count +
1) * sizeof(void *));
obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
obj_desc->package.
count +
1) * sizeof(void *));
if (!obj_desc->package.elements) {
acpi_ut_delete_object_desc(obj_desc);
@ -355,6 +358,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
arg = arg->common.next;
for (i = 0; arg; i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
/* Object (package or buffer) is already built */
obj_desc->package.elements[i] =
@ -396,7 +400,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
acpi_status status;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE_PTR("ds_create_node", op);
ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
/*
* Because of the execution pass through the non-control-method
@ -408,6 +412,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
}
if (!op->common.value.arg) {
/* No arguments, there is nothing to do */
return_ACPI_STATUS(AE_OK);
@ -464,11 +469,12 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ds_init_object_from_op");
ACPI_FUNCTION_TRACE(ds_init_object_from_op);
obj_desc = *ret_obj_desc;
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
/* Unknown opcode */
return_ACPI_STATUS(AE_TYPE);
@ -626,6 +632,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default: /* Other literals, etc.. */
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;

View File

@ -91,7 +91,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE("ds_execute_arguments");
ACPI_FUNCTION_TRACE(ds_execute_arguments);
/*
* Allocate a new parser op to be the root of the parsed tree
@ -193,7 +193,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ds_get_buffer_field_arguments", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@ -206,7 +206,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_BUFFER_FIELD, node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] buffer_field Arg Init\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
acpi_ut_get_node_name(node)));
/* Execute the AML code for the term_arg arguments */
@ -235,7 +235,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ds_get_buffer_arguments", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@ -279,7 +279,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ds_get_package_arguments", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@ -324,7 +324,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
acpi_status status;
union acpi_operand_object *extra_desc;
ACPI_FUNCTION_TRACE_PTR("ds_get_region_arguments", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@ -342,8 +342,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_REGION, node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[%4.4s] op_region Arg Init at AML %p\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
acpi_ut_get_node_name(node),
extra_desc->extra.aml_start));
@ -352,6 +351,28 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Validate the region address/length via the host OS */
status = acpi_os_validate_address(obj_desc->region.space_id,
obj_desc->region.address,
(acpi_size) obj_desc->region.length);
if (ACPI_FAILURE(status)) {
/*
* Invalid address/length. We will emit an error message and mark
* the region as invalid, so that it will cause an additional error if
* it is ever used. Then return AE_OK.
*/
ACPI_EXCEPTION((AE_INFO, status,
"During address validation of OpRegion [%4.4s]",
node->name.ascii));
obj_desc->common.flags |= AOPOBJ_INVALID;
status = AE_OK;
}
return_ACPI_STATUS(status);
}
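
The new acpi_os_validate_address() call above lets the host OS veto an operation region's address range at definition time. On failure the region is only flagged, not rejected, so the table still loads and the error surfaces if the region is ever accessed. A condensed sketch of that policy, using the same names as the hunk above, purely for readability:

status = acpi_os_validate_address(obj_desc->region.space_id,
                                  obj_desc->region.address,
                                  (acpi_size) obj_desc->region.length);
if (ACPI_FAILURE(status)) {
        obj_desc->common.flags |= AOPOBJ_INVALID;  /* fail later, on first use */
        status = AE_OK;                            /* keep loading the table */
}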
@ -411,7 +432,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
u8 field_flags;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ds_init_buffer_field", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ds_init_buffer_field, obj_desc);
/* Host object must be a Buffer */
@ -457,7 +478,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
if (bit_count == 0) {
ACPI_ERROR((AE_INFO,
"Attempt to create_field of length zero"));
"Attempt to CreateField of length zero"));
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
@ -595,7 +616,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
ACPI_FUNCTION_TRACE_PTR("ds_eval_buffer_field_operands", op);
ACPI_FUNCTION_TRACE_PTR(ds_eval_buffer_field_operands, op);
/*
* This is where we evaluate the address and length fields of the
@ -627,7 +648,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state->num_operands,
"after acpi_ex_resolve_operands");
"after AcpiExResolveOperands");
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
@ -640,6 +661,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
/* Initialize the Buffer Field */
if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
/* NOTE: Slightly different operands for this opcode */
status =
@ -685,7 +707,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
ACPI_FUNCTION_TRACE_PTR("ds_eval_region_operands", op);
ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op);
/*
* This is where we evaluate the address and length fields of the
@ -718,7 +740,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
acpi_ps_get_opcode_name(op->common.aml_opcode),
1, "after acpi_ex_resolve_operands");
1, "after AcpiExResolveOperands");
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
@ -744,7 +766,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
operand_desc->integer.value;
acpi_ut_remove_reference(operand_desc);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "rgn_obj %p Addr %8.8X%8.8X Len %X\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
@ -780,7 +802,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
union acpi_operand_object *arg_desc;
u32 length;
ACPI_FUNCTION_TRACE("ds_eval_data_object_operands");
ACPI_FUNCTION_TRACE(ds_eval_data_object_operands);
/* The first operand (for all of these data objects) is the length */
@ -874,7 +896,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
ACPI_FUNCTION_NAME("ds_exec_begin_control_op");
ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
op->common.aml_opcode, walk_state));
@ -952,7 +974,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
ACPI_FUNCTION_NAME("ds_exec_end_control_op");
ACPI_FUNCTION_NAME(ds_exec_end_control_op);
switch (op->common.aml_opcode) {
case AML_IF_OP:
@ -984,6 +1006,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
if (walk_state->control_state->common.value) {
/* Predicate was true, go back and evaluate it again! */
status = AE_CTRL_PENDING;
@ -1014,6 +1037,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
* has been bubbled up the tree
*/
if (op->common.value.arg) {
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
@ -1047,6 +1071,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
walk_state->return_desc = walk_state->operands[0];
} else if ((walk_state->results) &&
(walk_state->results->results.num_results > 0)) {
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
@ -1095,7 +1120,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
}
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Completed RETURN_OP State=%p, ret_val=%p\n",
"Completed RETURN_OP State=%p, RetVal=%p\n",
walk_state, walk_state->return_desc));
/* End the control method execution right now */

View File

@ -68,7 +68,7 @@ ACPI_MODULE_NAME("dsutils")
******************************************************************************/
void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
{
ACPI_FUNCTION_NAME("ds_clear_implicit_return");
ACPI_FUNCTION_NAME(ds_clear_implicit_return);
/*
* Slack must be enabled for this feature
@ -115,7 +115,7 @@ u8
acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
struct acpi_walk_state *walk_state, u8 add_reference)
{
ACPI_FUNCTION_NAME("ds_do_implicit_return");
ACPI_FUNCTION_NAME(ds_do_implicit_return);
/*
* Slack must be enabled for this feature, and we must
@ -171,7 +171,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
{
const struct acpi_opcode_info *parent_info;
ACPI_FUNCTION_TRACE_PTR("ds_is_result_used", op);
ACPI_FUNCTION_TRACE_PTR(ds_is_result_used, op);
/* Must have both an Op and a Result Object */
@ -202,6 +202,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*/
if ((!op->common.parent) ||
(op->common.parent->common.aml_opcode == AML_SCOPE_OP)) {
/* No parent, the return value cannot possibly be used */
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@ -340,7 +341,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj);
ACPI_FUNCTION_TRACE_PTR(ds_delete_result_if_not_used, result_obj);
if (!op) {
ACPI_ERROR((AE_INFO, "Null Op"));
@ -352,6 +353,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
}
if (!acpi_ds_is_result_used(op, walk_state)) {
/* Must pop the result stack (obj_desc should be equal to result_obj) */
status = acpi_ds_result_pop(&obj_desc, walk_state);
@ -382,7 +384,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
u32 i;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR("ds_resolve_operands", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_resolve_operands, walk_state);
/*
* Attempt to resolve each of the valid operands
@ -417,7 +419,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
{
u32 i;
ACPI_FUNCTION_TRACE_PTR("ds_clear_operands", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_clear_operands, walk_state);
/* Remove a reference on each operand on the stack */
@ -465,7 +467,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
acpi_interpreter_mode interpreter_mode;
const struct acpi_opcode_info *op_info;
ACPI_FUNCTION_TRACE_PTR("ds_create_operand", arg);
ACPI_FUNCTION_TRACE_PTR(ds_create_operand, arg);
/* A valid name must be looked up in the namespace */
@ -498,7 +500,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
*/
if ((walk_state->deferred_node) &&
(walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
&& (arg_index != 0)) {
&& (arg_index ==
(u32) ((walk_state->opcode ==
AML_CREATE_FIELD_OP) ? 3 : 2))) {
obj_desc =
ACPI_CAST_PTR(union acpi_operand_object,
walk_state->deferred_node);
@ -521,6 +525,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
&& (parent_op->common.aml_opcode != AML_REGION_OP)
&& (parent_op->common.aml_opcode !=
AML_INT_NAMEPATH_OP)) {
/* Enter name into namespace if not found */
interpreter_mode = ACPI_IMODE_LOAD_PASS2;
@ -572,7 +577,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/* Free the namestring created above */
ACPI_MEM_FREE(name_string);
ACPI_FREE(name_string);
/* Check status from the lookup */
@ -696,7 +701,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arg;
u32 arg_count = 0;
ACPI_FUNCTION_TRACE_PTR("ds_create_operands", first_arg);
ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
/* For all arguments in the list... */

View File

@ -49,7 +49,6 @@
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acdebug.h>
#include <acpi/acdisasm.h>
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswexec")
@ -93,7 +92,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
union acpi_operand_object *local_obj_desc = NULL;
ACPI_FUNCTION_TRACE_PTR("ds_get_predicate_value", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_get_predicate_value, walk_state);
walk_state->control_state->common.state = 0;
@ -123,7 +122,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (!obj_desc) {
ACPI_ERROR((AE_INFO,
"No predicate obj_desc=%p State=%p",
"No predicate ObjDesc=%p State=%p",
obj_desc, walk_state));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
@ -140,7 +139,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
"Bad predicate (not an integer) obj_desc=%p State=%p Type=%X",
"Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
@ -214,7 +213,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
u32 opcode_class;
ACPI_FUNCTION_TRACE_PTR("ds_exec_begin_op", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_exec_begin_op, walk_state);
op = walk_state->op;
if (!op) {
@ -296,7 +295,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
case AML_CLASS_NAMED_OBJECT:
if (walk_state->walk_type == ACPI_WALK_METHOD) {
if (walk_state->walk_type & ACPI_WALK_METHOD) {
/*
* Found a named object declaration during method execution;
* we must enter this object into the namespace. The created
@ -354,7 +353,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
union acpi_parse_object *next_op;
union acpi_parse_object *first_arg;
ACPI_FUNCTION_TRACE_PTR("ds_exec_end_op", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
op = walk_state->op;
op_type = walk_state->op_info->type;
@ -409,6 +408,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
* being the object_type and size_of operators.
*/
if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) {
/* Resolve all operands */
status = acpi_ex_resolve_operands(walk_state->opcode,
@ -423,7 +423,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
acpi_ps_get_opcode_name
(walk_state->opcode),
walk_state->num_operands,
"after ex_resolve_operands");
"after ExResolveOperands");
}
}
@ -437,7 +437,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
acpi_gbl_op_type_dispatch[op_type] (walk_state);
} else {
/*
* Treat constructs of the form "Store(local_x,local_x)" as noops when the
* Treat constructs of the form "Store(LocalX,LocalX)" as noops when the
* Local is uninitialized.
*/
if ((status == AE_AML_UNINITIALIZED_LOCAL) &&
@ -548,6 +548,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
*/
status = acpi_ds_resolve_operands(walk_state);
if (ACPI_FAILURE(status)) {
/* On error, clear all resolved operands */
acpi_ds_clear_operands(walk_state);
@ -569,7 +570,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_CREATE_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Executing create_field Buffer/Index Op=%p\n",
"Executing CreateField Buffer/Index Op=%p\n",
op));
status = acpi_ds_load2_end_op(walk_state);
@ -584,7 +585,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_CREATE_OBJECT:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Executing create_object (Buffer/Package) Op=%p\n",
"Executing CreateObject (Buffer/Package) Op=%p\n",
op));
switch (op->common.parent->common.aml_opcode) {
@ -657,7 +658,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
if (op->common.aml_opcode == AML_REGION_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Executing op_region Address/Length Op=%p\n",
"Executing OpRegion Address/Length Op=%p\n",
op));
status =
@ -722,6 +723,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
cleanup:
if (walk_state->result_obj) {
/* Break to debugger to display result */
ACPI_DEBUGGER_EXEC(acpi_db_display_result_object

View File

@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
char *path;
u32 flags;
ACPI_FUNCTION_TRACE("ds_load1_begin_op");
ACPI_FUNCTION_TRACE(ds_load1_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@ -178,12 +178,12 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* Target of Scope() not found. Generate an External for it, and
* insert the name into the namespace.
*/
acpi_dm_add_to_external_list(path);
acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
status =
acpi_ns_lookup(walk_state->scope_info, path,
object_type, ACPI_IMODE_LOAD_PASS1,
ACPI_NS_SEARCH_PARENT, walk_state,
&(node));
&node);
}
#endif
if (ACPI_FAILURE(status)) {
@ -261,6 +261,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
*/
if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
@ -300,10 +301,41 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
status =
acpi_ns_lookup(walk_state->scope_info, path, object_type,
ACPI_IMODE_LOAD_PASS1, flags, walk_state,
&(node));
&node);
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(path, status);
return_ACPI_STATUS(status);
if (status == AE_ALREADY_EXISTS) {
/* The name already exists in this scope */
if (node->flags & ANOBJ_IS_EXTERNAL) {
/*
* Allow one create on an object or segment that was
* previously declared External
*/
node->flags &= ~ANOBJ_IS_EXTERNAL;
node->type = (u8) object_type;
/* Just retyped a node, probably will need to open a scope */
if (acpi_ns_opens_scope(object_type)) {
status =
acpi_ds_scope_stack_push
(node, object_type,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS
(status);
}
}
status = AE_OK;
}
}
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(path, status);
return_ACPI_STATUS(status);
}
}
break;
}
@ -311,6 +343,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* Common exit */
if (!op) {
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
@ -359,7 +392,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
acpi_object_type object_type;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ds_load1_end_op");
ACPI_FUNCTION_TRACE(ds_load1_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@ -413,6 +446,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
#endif
if (op->common.aml_opcode == AML_NAME_OP) {
/* For Name opcode, get the object type from the argument */
if (op->common.value.arg) {
@ -445,7 +479,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p named_obj=%p\n",
"LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
@ -511,7 +545,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
acpi_object_type object_type;
char *buffer_ptr;
ACPI_FUNCTION_TRACE("ds_load2_begin_op");
ACPI_FUNCTION_TRACE(ds_load2_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@ -521,6 +555,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if ((walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
/* We are executing a while loop outside of a method */
status = acpi_ds_exec_begin_op(walk_state, out_op);
@ -554,10 +589,12 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* Get the name we are going to enter or lookup in the namespace */
if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
/* For Namepath op, get the path string */
buffer_ptr = op->common.value.string;
if (!buffer_ptr) {
/* No name, just exit */
return_ACPI_STATUS(AE_OK);
@ -680,6 +717,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* All other opcodes */
if (op && op->common.node) {
/* This op/node was previously entered into the namespace */
node = op->common.node;
@ -705,6 +743,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
* Note: Name may already exist if we are executing a deferred opcode.
*/
if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
@ -727,6 +766,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (!op) {
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
@ -776,7 +816,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
u32 i;
#endif
ACPI_FUNCTION_TRACE("ds_load2_end_op");
ACPI_FUNCTION_TRACE(ds_load2_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
@ -870,7 +910,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Create-Load [%s] State=%p Op=%p named_obj=%p\n",
"Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state, op, node));
@ -1045,7 +1085,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p named_obj=%p\n",
"LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
@ -1090,7 +1130,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_CLASS_METHOD_CALL:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"RESOLVING-method_call: State=%p Op=%p named_obj=%p\n",
"RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
walk_state, op, node));
/*
@ -1104,7 +1144,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
ACPI_NS_DONT_OPEN_SCOPE, walk_state,
&(new_node));
if (ACPI_SUCCESS(status)) {
/*
* Make sure that what we found is indeed a method
* We didn't search for a method on purpose, to see if the name

View File

@ -63,9 +63,10 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
{
union acpi_generic_state *scope_info;
ACPI_FUNCTION_NAME("ds_scope_stack_clear");
ACPI_FUNCTION_NAME(ds_scope_stack_clear);
while (walk_state->scope_info) {
/* Pop a scope off the stack */
scope_info = walk_state->scope_info;
@ -102,9 +103,10 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
union acpi_generic_state *scope_info;
union acpi_generic_state *old_scope_info;
ACPI_FUNCTION_TRACE("ds_scope_stack_push");
ACPI_FUNCTION_TRACE(ds_scope_stack_push);
if (!node) {
/* Invalid scope */
ACPI_ERROR((AE_INFO, "Null scope parameter"));
@ -126,7 +128,7 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
/* Init new scope object */
scope_info->common.data_type = ACPI_DESC_TYPE_STATE_WSCOPE;
scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE;
scope_info->scope.node = node;
scope_info->common.value = (u16) type;
@ -176,7 +178,7 @@ acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state)
union acpi_generic_state *scope_info;
union acpi_generic_state *new_scope_info;
ACPI_FUNCTION_TRACE("ds_scope_stack_pop");
ACPI_FUNCTION_TRACE(ds_scope_stack_pop);
/*
* Pop scope info object off the stack.

View File

@ -66,7 +66,6 @@ void *acpi_ds_obj_stack_get_value(u32 index,
#endif
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
* FUNCTION: acpi_ds_result_remove
@ -88,7 +87,7 @@ acpi_ds_result_remove(union acpi_operand_object **object,
{
union acpi_generic_state *state;
ACPI_FUNCTION_NAME("ds_result_remove");
ACPI_FUNCTION_NAME(ds_result_remove);
state = walk_state->results;
if (!state) {
@ -128,7 +127,6 @@ acpi_ds_result_remove(union acpi_operand_object **object,
return (AE_OK);
}
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@ -152,7 +150,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
acpi_native_uint index;
union acpi_generic_state *state;
ACPI_FUNCTION_NAME("ds_result_pop");
ACPI_FUNCTION_NAME(ds_result_pop);
state = walk_state->results;
if (!state) {
@ -170,6 +168,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
state->results.num_results--;
for (index = ACPI_OBJ_NUM_OPERANDS; index; index--) {
/* Check for a valid result object */
if (state->results.obj_desc[index - 1]) {
@ -213,7 +212,7 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
acpi_native_uint index;
union acpi_generic_state *state;
ACPI_FUNCTION_NAME("ds_result_pop_from_bottom");
ACPI_FUNCTION_NAME(ds_result_pop_from_bottom);
state = walk_state->results;
if (!state) {
@ -278,7 +277,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
{
union acpi_generic_state *state;
ACPI_FUNCTION_NAME("ds_result_push");
ACPI_FUNCTION_NAME(ds_result_push);
state = walk_state->results;
if (!state) {
@ -331,14 +330,14 @@ acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
{
union acpi_generic_state *state;
ACPI_FUNCTION_NAME("ds_result_stack_push");
ACPI_FUNCTION_NAME(ds_result_stack_push);
state = acpi_ut_create_generic_state();
if (!state) {
return (AE_NO_MEMORY);
}
state->common.data_type = ACPI_DESC_TYPE_STATE_RESULT;
state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
acpi_ut_push_generic_state(&walk_state->results, state);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
@ -363,7 +362,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
{
union acpi_generic_state *state;
ACPI_FUNCTION_NAME("ds_result_stack_pop");
ACPI_FUNCTION_NAME(ds_result_stack_pop);
/* Check for stack underflow */
@ -376,7 +375,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
state = acpi_ut_pop_generic_state(&walk_state->results);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Result=%p remaining_results=%X State=%p\n",
"Result=%p RemainingResults=%X State=%p\n",
state, state->results.num_results, walk_state));
acpi_ut_delete_generic_state(state);
@ -400,7 +399,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
acpi_status
acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
{
ACPI_FUNCTION_NAME("ds_obj_stack_push");
ACPI_FUNCTION_NAME(ds_obj_stack_push);
/* Check for stack overflow */
@ -445,9 +444,10 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
{
u32 i;
ACPI_FUNCTION_NAME("ds_obj_stack_pop");
ACPI_FUNCTION_NAME(ds_obj_stack_pop);
for (i = 0; i < pop_count; i++) {
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
@ -491,9 +491,10 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
u32 i;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_NAME("ds_obj_stack_pop_and_delete");
ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
for (i = 0; i < pop_count; i++) {
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
@ -538,13 +539,13 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
*thread)
{
ACPI_FUNCTION_NAME("ds_get_current_walk_state");
ACPI_FUNCTION_NAME(ds_get_current_walk_state);
if (!thread) {
return (NULL);
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current walk_state %p\n",
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current WalkState %p\n",
thread->walk_state_list));
return (thread->walk_state_list);
@ -567,7 +568,7 @@ void
acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
struct acpi_thread_state *thread)
{
ACPI_FUNCTION_TRACE("ds_push_walk_state");
ACPI_FUNCTION_TRACE(ds_push_walk_state);
walk_state->next = thread->walk_state_list;
thread->walk_state_list = walk_state;
@ -593,11 +594,12 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
{
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE("ds_pop_walk_state");
ACPI_FUNCTION_TRACE(ds_pop_walk_state);
walk_state = thread->walk_state_list;
if (walk_state) {
/* Next walk state becomes the current walk state */
thread->walk_state_list = walk_state->next;
@ -618,7 +620,7 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
*
* PARAMETERS: owner_id - ID for object creation
* Origin - Starting point for this walk
* mth_desc - Method object
* method_desc - Method object
* Thread - Current thread state
*
* RETURN: Pointer to the new walk state.
@ -632,24 +634,24 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
union acpi_parse_object
*origin,
union acpi_operand_object
*mth_desc,
*method_desc,
struct acpi_thread_state
*thread)
{
struct acpi_walk_state *walk_state;
acpi_status status;
ACPI_FUNCTION_TRACE("ds_create_walk_state");
ACPI_FUNCTION_TRACE(ds_create_walk_state);
walk_state = ACPI_MEM_CALLOCATE(sizeof(struct acpi_walk_state));
walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
if (!walk_state) {
return_PTR(NULL);
}
walk_state->data_type = ACPI_DESC_TYPE_WALK;
walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;
walk_state->method_desc = method_desc;
walk_state->owner_id = owner_id;
walk_state->origin = origin;
walk_state->method_desc = mth_desc;
walk_state->thread = thread;
walk_state->parser_state.start_op = origin;
@ -664,7 +666,7 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
status = acpi_ds_result_stack_push(walk_state);
if (ACPI_FAILURE(status)) {
ACPI_MEM_FREE(walk_state);
ACPI_FREE(walk_state);
return_PTR(NULL);
}
@ -701,13 +703,13 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *method_node,
u8 * aml_start,
u32 aml_length,
struct acpi_parameter_info *info, u8 pass_number)
struct acpi_evaluate_info *info, u8 pass_number)
{
acpi_status status;
struct acpi_parse_state *parser_state = &walk_state->parser_state;
union acpi_parse_object *extra_op;
ACPI_FUNCTION_TRACE("ds_init_aml_walk");
ACPI_FUNCTION_TRACE(ds_init_aml_walk);
walk_state->parser_state.aml =
walk_state->parser_state.aml_start = aml_start;
@ -778,6 +780,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
}
if (parser_state->start_node) {
/* Push start scope on scope stack and make it current */
status =
@ -810,21 +813,24 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE_PTR("ds_delete_walk_state", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
if (!walk_state) {
return;
}
if (walk_state->data_type != ACPI_DESC_TYPE_WALK) {
if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
walk_state));
return;
}
/* There should not be any open scopes */
if (walk_state->parser_state.scope) {
ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
walk_state));
acpi_ps_cleanup_scope(&walk_state->parser_state);
}
/* Always must free any linked control states */
@ -854,7 +860,7 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
acpi_ut_delete_generic_state(state);
}
ACPI_MEM_FREE(walk_state);
ACPI_FREE(walk_state);
return_VOID;
}
@ -879,7 +885,7 @@ acpi_ds_result_insert(void *object,
{
union acpi_generic_state *state;
ACPI_FUNCTION_NAME("ds_result_insert");
ACPI_FUNCTION_NAME(ds_result_insert);
state = walk_state->results;
if (!state) {
@ -937,7 +943,7 @@ acpi_status acpi_ds_obj_stack_delete_all(struct acpi_walk_state * walk_state)
{
u32 i;
ACPI_FUNCTION_TRACE_PTR("ds_obj_stack_delete_all", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_delete_all, walk_state);
/* The stack size is configurable, but fixed */
@ -969,7 +975,7 @@ acpi_status
acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
struct acpi_walk_state *walk_state)
{
ACPI_FUNCTION_NAME("ds_obj_stack_pop_object");
ACPI_FUNCTION_NAME(ds_obj_stack_pop_object);
/* Check for stack underflow */
@ -1025,7 +1031,7 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
void *acpi_ds_obj_stack_get_value(u32 index, struct acpi_walk_state *walk_state)
{
ACPI_FUNCTION_TRACE_PTR("ds_obj_stack_get_value", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_get_value, walk_state);
/* Can't do it if the stack is empty */

View File

@ -116,7 +116,7 @@ union acpi_ec {
struct acpi_generic_address command_addr;
struct acpi_generic_address data_addr;
unsigned long global_lock;
spinlock_t lock;
struct semaphore sem;
} poll;
};
@ -323,7 +323,6 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
{
acpi_status status = AE_OK;
int result = 0;
unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_read");
@ -339,7 +338,10 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
return_VALUE(-ENODEV);
}
spin_lock_irqsave(&ec->poll.lock, flags);
if (down_interruptible(&ec->poll.sem)) {
result = -ERESTARTSYS;
goto end_nosem;
}
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ,
&ec->common.command_addr);
@ -358,8 +360,8 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
*data, address));
end:
spin_unlock_irqrestore(&ec->poll.lock, flags);
up(&ec->poll.sem);
end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@ -370,7 +372,6 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
{
int result = 0;
acpi_status status = AE_OK;
unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_write");
@ -384,7 +385,10 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
return_VALUE(-ENODEV);
}
spin_lock_irqsave(&ec->poll.lock, flags);
if (down_interruptible(&ec->poll.sem)) {
result = -ERESTARTSYS;
goto end_nosem;
}
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE,
&ec->common.command_addr);
@ -406,8 +410,8 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
data, address));
end:
spin_unlock_irqrestore(&ec->poll.lock, flags);
up(&ec->poll.sem);
end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@ -568,7 +572,6 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
{
int result = 0;
acpi_status status = AE_OK;
unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_query");
@ -589,7 +592,10 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
spin_lock_irqsave(&ec->poll.lock, flags);
if (down_interruptible(&ec->poll.sem)) {
result = -ERESTARTSYS;
goto end_nosem;
}
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY,
&ec->common.command_addr);
@ -602,8 +608,8 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
result = -ENODATA;
end:
spin_unlock_irqrestore(&ec->poll.lock, flags);
up(&ec->poll.sem);
end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@ -680,7 +686,6 @@ static void acpi_ec_gpe_poll_query(void *ec_cxt)
{
union acpi_ec *ec = (union acpi_ec *)ec_cxt;
u32 value = 0;
unsigned long flags = 0;
static char object_name[5] = { '_', 'Q', '0', '0', '\0' };
const char hex[] = { '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
@ -691,9 +696,11 @@ static void acpi_ec_gpe_poll_query(void *ec_cxt)
if (!ec_cxt)
goto end;
spin_lock_irqsave(&ec->poll.lock, flags);
if (down_interruptible (&ec->poll.sem)) {
return_VOID;
}
acpi_hw_low_level_read(8, &value, &ec->common.command_addr);
spin_unlock_irqrestore(&ec->poll.lock, flags);
up(&ec->poll.sem);
/* TBD: Implement asynch events!
* NOTE: All we care about are EC-SCI's. Other EC events are
@ -763,8 +770,7 @@ static u32 acpi_ec_gpe_poll_handler(void *data)
acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR);
status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
acpi_ec_gpe_query, ec);
status = acpi_os_execute(OSL_EC_POLL_HANDLER, acpi_ec_gpe_query, ec);
if (status == AE_OK)
return ACPI_INTERRUPT_HANDLED;
@ -799,7 +805,7 @@ static u32 acpi_ec_gpe_intr_handler(void *data)
if (value & ACPI_EC_FLAG_SCI) {
atomic_add(1, &ec->intr.pending_gpe);
status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
status = acpi_os_execute(OSL_EC_BURST_HANDLER,
acpi_ec_gpe_query, ec);
return status == AE_OK ?
ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
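
Both EC GPE handlers above switch from acpi_os_queue_for_execution() to the new acpi_os_execute() interface, which takes an execution-type value rather than a priority. The deferred call keeps the same shape:

/* old */ status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, acpi_ec_gpe_query, ec);
/* new */ status = acpi_os_execute(OSL_EC_BURST_HANDLER, acpi_ec_gpe_query, ec);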
@ -991,7 +997,6 @@ static int acpi_ec_poll_add(struct acpi_device *device)
int result = 0;
acpi_status status = AE_OK;
union acpi_ec *ec = NULL;
unsigned long uid;
ACPI_FUNCTION_TRACE("acpi_ec_add");
@ -1005,7 +1010,7 @@ static int acpi_ec_poll_add(struct acpi_device *device)
ec->common.handle = device->handle;
ec->common.uid = -1;
spin_lock_init(&ec->poll.lock);
init_MUTEX(&ec->poll.sem);
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
acpi_driver_data(device) = ec;
@ -1014,10 +1019,9 @@ static int acpi_ec_poll_add(struct acpi_device *device)
acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
&ec->common.global_lock);
/* If our UID matches the UID for the ECDT-enumerated EC,
we now have the *real* EC info, so kill the makeshift one. */
acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid);
if (ec_ecdt && ec_ecdt->common.uid == uid) {
/* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
if (ec_ecdt) {
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler);
@ -1062,7 +1066,6 @@ static int acpi_ec_intr_add(struct acpi_device *device)
int result = 0;
acpi_status status = AE_OK;
union acpi_ec *ec = NULL;
unsigned long uid;
ACPI_FUNCTION_TRACE("acpi_ec_add");
@ -1088,10 +1091,9 @@ static int acpi_ec_intr_add(struct acpi_device *device)
acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
&ec->common.global_lock);
/* If our UID matches the UID for the ECDT-enumerated EC,
we now have the *real* EC info, so kill the makeshift one. */
acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid);
if (ec_ecdt && ec_ecdt->common.uid == uid) {
/* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
if (ec_ecdt) {
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler);
@ -1300,7 +1302,7 @@ acpi_fake_ecdt_poll_callback(acpi_handle handle,
&ec_ecdt->common.gpe_bit);
if (ACPI_FAILURE(status))
return status;
spin_lock_init(&ec_ecdt->poll.lock);
init_MUTEX(&ec_ecdt->poll.sem);
ec_ecdt->common.global_lock = TRUE;
ec_ecdt->common.handle = handle;
@ -1416,7 +1418,7 @@ static int __init acpi_ec_poll_get_real_ecdt(void)
ec_ecdt->common.status_addr = ecdt_ptr->ec_control;
ec_ecdt->common.data_addr = ecdt_ptr->ec_data;
ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit;
spin_lock_init(&ec_ecdt->poll.lock);
init_MUTEX(&ec_ecdt->poll.sem);
/* use the GL just to be safe */
ec_ecdt->common.global_lock = TRUE;
ec_ecdt->common.uid = ecdt_ptr->uid;

View File

@ -68,7 +68,7 @@ acpi_status acpi_ev_initialize_events(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_initialize_events");
ACPI_FUNCTION_TRACE(ev_initialize_events);
/* Make sure we have ACPI tables */
@ -118,7 +118,7 @@ acpi_status acpi_ev_install_fadt_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_install_fadt_gpes");
ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
/* Namespace must be locked */
@ -157,7 +157,7 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_install_xrupt_handlers");
ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
/* Install the SCI handler */
@ -241,7 +241,7 @@ u32 acpi_ev_fixed_event_detect(void)
u32 fixed_enable;
acpi_native_uint i;
ACPI_FUNCTION_NAME("ev_fixed_event_detect");
ACPI_FUNCTION_NAME(ev_fixed_event_detect);
/*
* Read the fixed feature status and enable registers, as all the cases
@ -260,12 +260,14 @@ u32 acpi_ev_fixed_event_detect(void)
* Check for all possible Fixed Events and dispatch those that are active
*/
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
/* Both the status and enable bits must be on for this event */
if ((fixed_status & acpi_gbl_fixed_event_info[i].
status_bit_mask)
&& (fixed_enable & acpi_gbl_fixed_event_info[i].
enable_bit_mask)) {
/* Found an active (signalled) event */
int_status |= acpi_ev_fixed_event_dispatch((u32) i);

View File

@ -69,7 +69,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_set_gpe_type");
ACPI_FUNCTION_TRACE(ev_set_gpe_type);
/* Validate type and update register enable masks */
@ -115,7 +115,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info;
u8 register_bit;
ACPI_FUNCTION_TRACE("ev_update_gpe_enable_masks");
ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
@ -178,7 +178,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_enable_gpe");
ACPI_FUNCTION_TRACE(ev_enable_gpe);
/* Make sure HW enable masks are updated */
@ -207,6 +207,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
if (write_to_hardware) {
/* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
@ -243,7 +244,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_disable_gpe");
ACPI_FUNCTION_TRACE(ev_disable_gpe);
if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
return_ACPI_STATUS(AE_OK);
@ -313,6 +314,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
/* A NULL gpe_block means use the FADT-defined GPE block(s) */
if (!gpe_device) {
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
@ -380,10 +382,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
u32 status_reg;
u32 enable_reg;
acpi_cpu_flags flags;
acpi_cpu_flags hw_flags;
acpi_native_uint i;
acpi_native_uint j;
ACPI_FUNCTION_NAME("ev_gpe_detect");
ACPI_FUNCTION_NAME(ev_gpe_detect);
/* Check for the case where there are no GPEs */
@ -391,9 +394,12 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
return (int_status);
}
/* Examine all GPE blocks attached to this interrupt level */
/* We need to hold the GPE lock now, hardware lock in the loop */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Examine all GPE blocks attached to this interrupt level */
gpe_block = gpe_xrupt_list->gpe_block_list_head;
while (gpe_block) {
/*
@ -402,10 +408,13 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
* Find all currently active GP events.
*/
for (i = 0; i < gpe_block->register_count; i++) {
/* Get the next status/enable pair */
gpe_register_info = &gpe_block->register_info[i];
hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* Read the Status Register */
status =
@ -414,6 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&gpe_register_info->
status_address);
if (ACPI_FAILURE(status)) {
acpi_os_release_lock(acpi_gbl_hardware_lock,
hw_flags);
goto unlock_and_exit;
}
@ -424,6 +435,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&enable_reg,
&gpe_register_info->
enable_address);
acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
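
acpi_ev_gpe_detect() now uses two locks: the GPE list lock is held for the whole walk over the interrupt's GPE blocks, while the new hardware lock is taken only around each paired status/enable register read. Stripped of the register plumbing, the locking skeleton added above looks roughly like this:

flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);   /* protects the block list */

/* for each GPE register in each block on this interrupt level: */
hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* ... read the status register, then the matching enable register ... */
acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
/* ... dispatch any bits set in (status & enable) ... */

acpi_os_release_lock(acpi_gbl_gpe_lock, flags);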
@ -437,6 +450,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
enabled_status_byte = (u8) (status_reg & enable_reg);
if (!enabled_status_byte) {
/* No active GPEs in this register, move on */
continue;
@ -445,6 +459,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
/* Examine one GPE bit */
if (enabled_status_byte &
@ -483,9 +498,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
*
* RETURN: None
*
* DESCRIPTION: Perform the actual execution of a GPE control method. This
* function is called from an invocation of acpi_os_queue_for_execution
* (and therefore does NOT execute at interrupt level) so that
* DESCRIPTION: Perform the actual execution of a GPE control method. This
* function is called from an invocation of acpi_os_execute and
* therefore does NOT execute at interrupt level - so that
* the control method itself is not executed in the context of
* an interrupt handler.
*
@ -494,12 +509,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = (void *)context;
u32 gpe_number = 0;
acpi_status status;
struct acpi_gpe_event_info local_gpe_event_info;
struct acpi_parameter_info info;
struct acpi_evaluate_info *info;
ACPI_FUNCTION_TRACE("ev_asynch_execute_gpe_method");
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@ -535,22 +549,35 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
*/
if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD) {
/*
* Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
* control method that corresponds to this GPE
*/
info.node = local_gpe_event_info.dispatch.method_node;
info.parameters =
ACPI_CAST_PTR(union acpi_operand_object *, gpe_event_info);
info.parameter_type = ACPI_PARAM_GPE;
status = acpi_ns_evaluate_by_handle(&info);
/* Allocate the evaluation information block */
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
status = AE_NO_MEMORY;
} else {
/*
* Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
* control method that corresponds to this GPE
*/
info->prefix_node =
local_gpe_event_info.dispatch.method_node;
info->parameters =
ACPI_CAST_PTR(union acpi_operand_object *,
gpe_event_info);
info->parameter_type = ACPI_PARAM_GPE;
info->flags = ACPI_IGNORE_RETURN_VALUE;
status = acpi_ns_evaluate(info);
ACPI_FREE(info);
}
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating method [%4.4s] for GPE[%2X]",
"While evaluating GPE method [%4.4s]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
method_node), gpe_number));
method_node)));
}
}
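
The GPE method dispatch above also moves to the acpi_evaluate_info calling convention: instead of filling a stack-allocated acpi_parameter_info and calling acpi_ns_evaluate_by_handle(), the caller allocates an info block, fills it, runs acpi_ns_evaluate(), and frees it. Reduced to its essentials, with the fields exactly as used in the hunk above:

info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
        status = AE_NO_MEMORY;
} else {
        info->prefix_node = local_gpe_event_info.dispatch.method_node;
        info->parameters = ACPI_CAST_PTR(union acpi_operand_object *,
                                         gpe_event_info);
        info->parameter_type = ACPI_PARAM_GPE;
        info->flags = ACPI_IGNORE_RETURN_VALUE;

        status = acpi_ns_evaluate(info);   /* evaluate the _Lxx/_Exx method */
        ACPI_FREE(info);                   /* caller owns and frees the block */
}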
@ -593,7 +620,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_gpe_dispatch");
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
/*
* If edge-triggered, clear the GPE status bit now. Note that
@ -669,9 +696,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
* Execute the method associated with the GPE
* NOTE: Level-triggered GPEs are cleared after the method completes.
*/
status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
acpi_ev_asynch_execute_gpe_method,
gpe_event_info);
status = acpi_os_execute(OSL_GPE_HANDLER,
acpi_ev_asynch_execute_gpe_method,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE[%2X] - event disabled",
@ -716,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*
* DESCRIPTION: Determine if a GPE is "wake-only".
*
* Called from Notify() code in interpreter when a "device_wake"
* Called from Notify() code in interpreter when a "DeviceWake"
* Notify comes in.
*
******************************************************************************/
@ -726,7 +753,7 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_check_for_wake_only_gpe");
ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */

View File

@ -131,14 +131,14 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
*
******************************************************************************/
acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_walk_gpe_list");
ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@ -146,10 +146,12 @@ acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
while (gpe_xrupt_info) {
/* Walk all Gpe Blocks attached to this interrupt level */
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
/* One callback per GPE block */
status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
@ -190,11 +192,12 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_native_uint i;
acpi_native_uint j;
ACPI_FUNCTION_TRACE("ev_delete_gpe_handlers");
ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
@ -204,7 +207,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
ACPI_MEM_FREE(gpe_event_info->dispatch.handler);
ACPI_FREE(gpe_event_info->dispatch.handler);
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
@ -248,7 +251,7 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
u8 type;
acpi_status status;
ACPI_FUNCTION_TRACE("ev_save_method_info");
ACPI_FUNCTION_TRACE(ev_save_method_info);
/*
* _Lxx and _Exx GPE method support
@ -279,9 +282,9 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
default:
/* Unknown method type, just ignore it! */
ACPI_ERROR((AE_INFO,
"Unknown GPE method type: %s (name not of form _Lxx or _Exx)",
name));
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
name));
return_ACPI_STATUS(AE_OK);
}
@ -289,11 +292,12 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
if (gpe_number == ACPI_UINT32_MAX) {
/* Conversion failed; invalid method, just ignore it */
ACPI_ERROR((AE_INFO,
"Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
name));
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
name));
return_ACPI_STATUS(AE_OK);
}
@ -364,13 +368,14 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
u32 gpe_number;
acpi_status status;
ACPI_FUNCTION_TRACE("ev_match_prw_and_gpe");
ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
/* Check for a _PRW method under this device */
status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
ACPI_BTYPE_PACKAGE, &pkg_desc);
if (ACPI_FAILURE(status)) {
/* Ignore all errors from _PRW, we don't want to abort the subsystem */
return_ACPI_STATUS(AE_OK);
@ -394,6 +399,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
obj_desc = pkg_desc->package.elements[0];
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
/* Use FADT-defined GPE device (from definition of _PRW) */
target_gpe_device = acpi_gbl_fadt_gpe_device;
@ -402,6 +408,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
gpe_number = (u32) obj_desc->integer.value;
} else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
/* Package contains a GPE reference and GPE number within a GPE block */
if ((obj_desc->package.count < 2) ||
@ -482,7 +489,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_get_gpe_xrupt_block");
ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
/* No need for lock since we are not changing any list elements here */
@ -497,7 +504,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
/* Not found, must allocate a new xrupt descriptor */
gpe_xrupt = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_xrupt_info));
gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
if (!gpe_xrupt) {
return_PTR(NULL);
}
@ -556,7 +563,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_delete_gpe_xrupt");
ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
/* We never want to remove the SCI interrupt handler */
@ -588,7 +595,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
/* Free the block */
ACPI_MEM_FREE(gpe_xrupt);
ACPI_FREE(gpe_xrupt);
return_ACPI_STATUS(AE_OK);
}
@ -614,7 +621,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_install_gpe_block");
ACPI_FUNCTION_TRACE(ev_install_gpe_block);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@ -667,7 +674,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_install_gpe_block");
ACPI_FUNCTION_TRACE(ev_install_gpe_block);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@ -679,6 +686,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
if (!gpe_block->previous && !gpe_block->next) {
/* This is the last gpe_block on this interrupt */
status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
@ -704,9 +712,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
/* Free the gpe_block */
ACPI_MEM_FREE(gpe_block->register_info);
ACPI_MEM_FREE(gpe_block->event_info);
ACPI_MEM_FREE(gpe_block);
ACPI_FREE(gpe_block->register_info);
ACPI_FREE(gpe_block->event_info);
ACPI_FREE(gpe_block);
unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@ -736,17 +744,17 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
acpi_native_uint j;
acpi_status status;
ACPI_FUNCTION_TRACE("ev_create_gpe_info_blocks");
ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
/* Allocate the GPE register information block */
gpe_register_info = ACPI_MEM_CALLOCATE((acpi_size) gpe_block->
register_count *
sizeof(struct
acpi_gpe_register_info));
gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
register_count *
sizeof(struct
acpi_gpe_register_info));
if (!gpe_register_info) {
ACPI_ERROR((AE_INFO,
"Could not allocate the gpe_register_info table"));
"Could not allocate the GpeRegisterInfo table"));
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -754,13 +762,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
gpe_event_info = ACPI_MEM_CALLOCATE(((acpi_size) gpe_block->
register_count *
ACPI_GPE_REGISTER_WIDTH) *
sizeof(struct acpi_gpe_event_info));
gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
register_count *
ACPI_GPE_REGISTER_WIDTH) *
sizeof(struct
acpi_gpe_event_info));
if (!gpe_event_info) {
ACPI_ERROR((AE_INFO,
"Could not allocate the gpe_event_info table"));
"Could not allocate the GpeEventInfo table"));
status = AE_NO_MEMORY;
goto error_exit;
}
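
The ACPI_MEM_CALLOCATE/ACPI_MEM_FREE pair becomes ACPI_ALLOCATE_ZEROED/ACPI_FREE throughout this series; the allocate-zeroed, free-partial-work-on-error shape is unchanged. A small standalone sketch, with calloc/free standing in for the ACPICA macros (stand-ins only, not the real definitions):

#include <stdlib.h>

/* Stand-ins for the ACPICA macros, for illustration only. */
#define ACPI_ALLOCATE_ZEROED(size)  calloc(1, (size))
#define ACPI_FREE(ptr)              free(ptr)

struct gpe_register_info { unsigned char base_gpe_number; };
struct gpe_event_info    { unsigned char flags; };

/* Allocate both info tables; on failure free whatever was already allocated. */
static int create_gpe_info_blocks(unsigned int register_count,
                                  struct gpe_register_info **regs,
                                  struct gpe_event_info **events)
{
        *regs = ACPI_ALLOCATE_ZEROED(register_count * sizeof(**regs));
        if (!*regs)
                return -1;

        /* Eight distinct GPEs per register, zero-initialized. */
        *events = ACPI_ALLOCATE_ZEROED(register_count * 8 * sizeof(**events));
        if (!*events) {
                ACPI_FREE(*regs);
                *regs = NULL;
                return -1;
        }
        return 0;
}
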
@ -780,6 +789,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
this_event = gpe_event_info;
for (i = 0; i < gpe_block->register_count; i++) {
/* Init the register_info for this GPE register (8 GPEs) */
this_register->base_gpe_number =
@ -839,10 +849,10 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
error_exit:
if (gpe_register_info) {
ACPI_MEM_FREE(gpe_register_info);
ACPI_FREE(gpe_register_info);
}
if (gpe_event_info) {
ACPI_MEM_FREE(gpe_event_info);
ACPI_FREE(gpe_event_info);
}
return_ACPI_STATUS(status);
@ -878,7 +888,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
ACPI_FUNCTION_TRACE("ev_create_gpe_block");
ACPI_FUNCTION_TRACE(ev_create_gpe_block);
if (!register_count) {
return_ACPI_STATUS(AE_OK);
@ -886,7 +896,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Allocate a new GPE block */
gpe_block = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_block_info));
gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
if (!gpe_block) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -906,7 +916,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
*/
status = acpi_ev_create_gpe_info_blocks(gpe_block);
if (ACPI_FAILURE(status)) {
ACPI_MEM_FREE(gpe_block);
ACPI_FREE(gpe_block);
return_ACPI_STATUS(status);
}
@ -914,7 +924,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
if (ACPI_FAILURE(status)) {
ACPI_MEM_FREE(gpe_block);
ACPI_FREE(gpe_block);
return_ACPI_STATUS(status);
}
@ -971,7 +981,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_native_uint i;
acpi_native_uint j;
ACPI_FUNCTION_TRACE("ev_initialize_gpe_block");
ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
@ -1013,6 +1023,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < 8; j++) {
/* Get the info block for this particular GPE */
gpe_event_info =
@ -1040,7 +1051,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not enable GPEs in gpe_block %p",
ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
gpe_block));
}
@ -1066,7 +1077,7 @@ acpi_status acpi_ev_gpe_initialize(void)
u32 gpe_number_max = 0;
acpi_status status;
ACPI_FUNCTION_TRACE("ev_gpe_initialize");
ACPI_FUNCTION_TRACE(ev_gpe_initialize);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@ -1099,6 +1110,7 @@ acpi_status acpi_ev_gpe_initialize(void)
* particular block is not supported.
*/
if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) {
/* GPE block 0 exists (has both length and address > 0) */
register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);
@ -1121,6 +1133,7 @@ acpi_status acpi_ev_gpe_initialize(void)
}
if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) {
/* GPE block 1 exists (has both length and address > 0) */
register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);
@ -1168,6 +1181,7 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Exit if there are no GPE registers */
if ((register_count0 + register_count1) == 0) {
/* GPEs are not required by ACPI, this is OK */
ACPI_DEBUG_PRINT((ACPI_DB_INIT,


@ -49,12 +49,13 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
/* Names for Notify() values, used for debug output */
#ifdef ACPI_DEBUG_OUTPUT
static const char *acpi_notify_value_names[] = {
"Bus Check",
"Device Check",
"Device Wake",
"Eject request",
"Eject Request",
"Device Check Light",
"Frequency Mismatch",
"Bus Mode Mismatch",
@ -124,7 +125,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
union acpi_generic_state *notify_info;
acpi_status status = AE_OK;
ACPI_FUNCTION_NAME("ev_queue_notify_request");
ACPI_FUNCTION_NAME(ev_queue_notify_request);
/*
* For value 3 (Ejection Request), some device method may need to be run.
@ -150,6 +151,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
/* We have the notify object, Get the right handler */
switch (node->type) {
@ -184,14 +186,15 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
return (AE_NO_MEMORY);
}
notify_info->common.data_type = ACPI_DESC_TYPE_STATE_NOTIFY;
notify_info->common.descriptor_type =
ACPI_DESC_TYPE_STATE_NOTIFY;
notify_info->notify.node = node;
notify_info->notify.value = (u16) notify_value;
notify_info->notify.handler_obj = handler_obj;
status = acpi_os_queue_for_execution(OSD_PRIORITY_HIGH,
acpi_ev_notify_dispatch,
notify_info);
status =
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
notify_info);
if (ACPI_FAILURE(status)) {
acpi_ut_delete_generic_state(notify_info);
}
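
acpi_os_queue_for_execution(OSD_PRIORITY_*, fn, ctx) is replaced by acpi_os_execute(type, fn, ctx), where the first argument names the kind of deferred work rather than a priority. A rough caller-side sketch, assuming the acpi_os_execute()/OSL_NOTIFY_HANDLER interface exactly as it appears in the hunk above (hypothetical example_ functions):

/* Deferred routine: runs later, outside the caller's context. */
static void ACPI_SYSTEM_XFACE example_notify_dispatch(void *context)
{
        union acpi_generic_state *notify_info = context;

        /* ... call the global or per-device notify handler ... */

        acpi_ut_delete_generic_state(notify_info);
}

static acpi_status example_queue_notify(union acpi_generic_state *notify_info)
{
        /* The execution type replaces the old OSD_PRIORITY_* argument. */
        return acpi_os_execute(OSL_NOTIFY_HANDLER,
                               example_notify_dispatch, notify_info);
}
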
@ -240,6 +243,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
* to the device.
*/
if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
/* Global system notification handler */
if (acpi_gbl_system_notify.handler) {
@ -297,6 +301,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
/* Signal threads that are waiting for the lock */
if (acpi_gbl_global_lock_thread_count) {
/* Send sufficient units to the semaphore */
status =
@ -335,15 +340,16 @@ static u32 acpi_ev_global_lock_handler(void *context)
*/
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
if (acquired) {
/* Got the lock, now wake all threads waiting for it */
acpi_gbl_global_lock_acquired = TRUE;
/* Run the Global Lock thread which will signal all waiting threads */
status = acpi_os_queue_for_execution(OSD_PRIORITY_HIGH,
acpi_ev_global_lock_thread,
context);
status =
acpi_os_execute(OSL_GLOBAL_LOCK_HANDLER,
acpi_ev_global_lock_thread, context);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not queue Global Lock thread"));
@ -371,7 +377,7 @@ acpi_status acpi_ev_init_global_lock_handler(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_init_global_lock_handler");
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
acpi_gbl_global_lock_present = TRUE;
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
@ -413,7 +419,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
acpi_status status = AE_OK;
u8 acquired = FALSE;
ACPI_FUNCTION_TRACE("ev_acquire_global_lock");
ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
#ifndef ACPI_APPLICATION
/* Make sure that we actually have a global lock */
@ -439,6 +445,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
if (acquired) {
/* We got the lock */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@ -458,8 +465,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
* Acquire the global lock semaphore first.
* Since this wait will block, we must release the interpreter
*/
status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
timeout);
status =
acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
timeout);
return_ACPI_STATUS(status);
}
@ -480,7 +488,7 @@ acpi_status acpi_ev_release_global_lock(void)
u8 pending = FALSE;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ev_release_global_lock");
ACPI_FUNCTION_TRACE(ev_release_global_lock);
if (!acpi_gbl_global_lock_thread_count) {
ACPI_WARNING((AE_INFO,
@ -492,6 +500,7 @@ acpi_status acpi_ev_release_global_lock(void)
acpi_gbl_global_lock_thread_count--;
if (acpi_gbl_global_lock_thread_count) {
/* There are still some threads holding the lock, cannot release */
return_ACPI_STATUS(AE_OK);
@ -533,7 +542,7 @@ void acpi_ev_terminate(void)
acpi_native_uint i;
acpi_status status;
ACPI_FUNCTION_TRACE("ev_terminate");
ACPI_FUNCTION_TRACE(ev_terminate);
if (acpi_gbl_events_initialized) {
/*
@ -573,7 +582,7 @@ void acpi_ev_terminate(void)
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
status = acpi_disable();
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO, "acpi_disable failed"));
ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
}
}
return_VOID;


@ -83,7 +83,7 @@ acpi_status acpi_ev_install_region_handlers(void)
acpi_status status;
acpi_native_uint i;
ACPI_FUNCTION_TRACE("ev_install_region_handlers");
ACPI_FUNCTION_TRACE(ev_install_region_handlers);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@ -153,7 +153,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_status status;
acpi_native_uint i;
ACPI_FUNCTION_TRACE("ev_initialize_op_regions");
ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@ -164,6 +164,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
* Run the _REG methods for op_regions in each default address space
*/
for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
/* TBD: Make sure handler is the DEFAULT handler, otherwise
* _REG will have already been run.
*/
@ -192,12 +193,12 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
{
struct acpi_parameter_info info;
union acpi_operand_object *params[3];
struct acpi_evaluate_info *info;
union acpi_operand_object *args[3];
union acpi_operand_object *region_obj2;
acpi_status status;
ACPI_FUNCTION_TRACE("ev_execute_reg_method");
ACPI_FUNCTION_TRACE(ev_execute_reg_method);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@ -208,48 +209,60 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
return_ACPI_STATUS(AE_OK);
}
/*
* The _REG method has two arguments:
*
* Arg0, Integer: Operation region space ID
* Same value as region_obj->Region.space_id
* Arg1, Integer: connection status
* 1 for connecting the handler,
* 0 for disconnecting the handler
* Passed as a parameter
*/
params[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!params[0]) {
/* Allocate and initialize the evaluation information block */
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
params[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!params[1]) {
info->prefix_node = region_obj2->extra.method_REG;
info->pathname = NULL;
info->parameters = args;
info->parameter_type = ACPI_PARAM_ARGS;
info->flags = ACPI_IGNORE_RETURN_VALUE;
/*
* The _REG method has two arguments:
*
* Arg0 - Integer:
* Operation region space ID Same value as region_obj->Region.space_id
*
* Arg1 - Integer:
* connection status 1 for connecting the handler, 0 for disconnecting
* the handler (Passed as a parameter)
*/
args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!args[0]) {
status = AE_NO_MEMORY;
goto cleanup;
goto cleanup1;
}
args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!args[1]) {
status = AE_NO_MEMORY;
goto cleanup2;
}
/* Setup the parameter objects */
params[0]->integer.value = region_obj->region.space_id;
params[1]->integer.value = function;
params[2] = NULL;
info.node = region_obj2->extra.method_REG;
info.parameters = params;
info.parameter_type = ACPI_PARAM_ARGS;
args[0]->integer.value = region_obj->region.space_id;
args[1]->integer.value = function;
args[2] = NULL;
/* Execute the method, no return value */
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, info.node, NULL));
status = acpi_ns_evaluate_by_handle(&info);
(ACPI_TYPE_METHOD, info->prefix_node, NULL));
acpi_ut_remove_reference(params[1]);
status = acpi_ns_evaluate(info);
acpi_ut_remove_reference(args[1]);
cleanup:
acpi_ut_remove_reference(params[0]);
cleanup2:
acpi_ut_remove_reference(args[0]);
cleanup1:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
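
The on-stack acpi_parameter_info gives way to a heap-allocated struct acpi_evaluate_info. The general calling pattern, as a sketch only (error handling trimmed, field names taken from the hunk above):

static acpi_status example_run_method(struct acpi_namespace_node *method_node,
                                      union acpi_operand_object **args)
{
        struct acpi_evaluate_info *info;
        acpi_status status;

        info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
        if (!info)
                return AE_NO_MEMORY;

        info->prefix_node = method_node;        /* node given directly */
        info->pathname = NULL;
        info->parameters = args;                /* NULL-terminated argument list */
        info->parameter_type = ACPI_PARAM_ARGS;
        info->flags = ACPI_IGNORE_RETURN_VALUE; /* caller discards any result */

        status = acpi_ns_evaluate(info);

        ACPI_FREE(info);
        return status;
}
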
@ -261,7 +274,8 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
* Function - Read or Write operation
* Address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
* Value - Pointer to in or out value
* Value - Pointer to in or out value, must be
* full 64-bit acpi_integer
*
* RETURN: Status
*
@ -274,7 +288,7 @@ acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 function,
acpi_physical_address address,
u32 bit_width, void *value)
u32 bit_width, acpi_integer * value)
{
acpi_status status;
acpi_status status2;
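
With the prototype change above, an operation-region handler always exchanges data through a full 64-bit acpi_integer regardless of access width. A sketch of a handler written against that convention (assuming the handler argument order implied by this dispatch prototype; example_ name is hypothetical):

static acpi_status
example_region_handler(u32 function, acpi_physical_address address,
                       u32 bit_width, acpi_integer *value,
                       void *handler_context, void *region_context)
{
        if (function == ACPI_READ) {
                *value = 0;     /* only the low bit_width bits are meaningful */
                /* ... read bit_width bits from 'address' into *value ... */
        } else {                /* ACPI_WRITE */
                /* ... write the low bit_width bits of *value to 'address' ... */
        }
        return AE_OK;
}
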
@ -284,7 +298,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
void *region_context = NULL;
ACPI_FUNCTION_TRACE("ev_address_space_dispatch");
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@ -315,6 +329,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
*/
region_setup = handler_desc->address_space.setup;
if (!region_setup) {
/* No initialization routine, exit with error */
ACPI_ERROR((AE_INFO,
@ -361,9 +376,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
if (region_obj2->extra.region_context) {
/* The handler for this region was already installed */
ACPI_MEM_FREE(region_context);
ACPI_FREE(region_context);
} else {
/*
* Save the returned context for use in all accesses to
@ -386,9 +402,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
if (!
(handler_desc->address_space.
hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* For handlers other than the default (supplied) handlers, we must
* exit the interpreter because the handler *might* block -- we don't
@ -409,9 +424,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
space_id)));
}
if (!
(handler_desc->address_space.
hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* We just returned from a non-default handler, we must re-enter the
* interpreter
@ -451,7 +465,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
acpi_status status;
ACPI_FUNCTION_TRACE("ev_detach_region");
ACPI_FUNCTION_TRACE(ev_detach_region);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@ -463,6 +477,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
handler_obj = region_obj->region.handler;
if (!handler_obj) {
/* This region has no handler, all done */
return_VOID;
@ -474,6 +489,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
last_obj_ptr = &handler_obj->address_space.region_list;
while (obj_desc) {
/* Is this the correct Region? */
if (obj_desc == region_obj) {
@ -583,7 +599,7 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
u8 acpi_ns_is_locked)
{
ACPI_FUNCTION_TRACE("ev_attach_region");
ACPI_FUNCTION_TRACE(ev_attach_region);
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Adding Region [%4.4s] %p to address handler %p [%s]\n",
@ -636,7 +652,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_NAME("ev_install_handler");
ACPI_FUNCTION_NAME(ev_install_handler);
handler_obj = (union acpi_operand_object *)context;
@ -666,6 +682,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
/* No object, just exit */
return (AE_OK);
@ -674,10 +691,12 @@ acpi_ev_install_handler(acpi_handle obj_handle,
/* Devices are handled different than regions */
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) {
/* Check if this Device already has a handler for this address space */
next_handler_obj = obj_desc->device.handler;
while (next_handler_obj) {
/* Found a handler, is it for the same address space? */
if (next_handler_obj->address_space.space_id ==
@ -764,9 +783,9 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
union acpi_operand_object *handler_obj;
acpi_status status;
acpi_object_type type;
u16 flags = 0;
u8 flags = 0;
ACPI_FUNCTION_TRACE("ev_install_space_handler");
ACPI_FUNCTION_TRACE(ev_install_space_handler);
/*
* This registration is valid for only the types below
@ -839,6 +858,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
/* Walk the handler list for this device */
while (handler_obj) {
/* Same space_id indicates a handler already installed */
if (handler_obj->address_space.space_id == space_id) {
@ -921,7 +941,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
/* Init handler obj */
handler_obj->address_space.space_id = (u8) space_id;
handler_obj->address_space.hflags = flags;
handler_obj->address_space.handler_flags = flags;
handler_obj->address_space.region_list = NULL;
handler_obj->address_space.node = node;
handler_obj->address_space.handler = handler;
@ -979,7 +999,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_execute_reg_methods");
ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
/*
* Run all _REG methods for all Operation Regions for this
@ -1001,7 +1021,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
*
* PARAMETERS: walk_namespace callback
*
* DESCRIPTION: Run _REg method for region objects of the requested space_iD
* DESCRIPTION: Run _REG method for region objects of the requested space_iD
*
******************************************************************************/
@ -1035,6 +1055,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
/* No object, just exit */
return (AE_OK);


@ -71,11 +71,22 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
(union acpi_operand_object *)handle;
struct acpi_mem_space_context *local_region_context;
ACPI_FUNCTION_TRACE("ev_system_memory_region_setup");
ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
if (*region_context) {
ACPI_MEM_FREE(*region_context);
local_region_context =
(struct acpi_mem_space_context *)*region_context;
/* Delete a cached mapping if present */
if (local_region_context->mapped_length) {
acpi_os_unmap_memory(local_region_context->
mapped_logical_address,
local_region_context->
mapped_length);
}
ACPI_FREE(local_region_context);
*region_context = NULL;
}
return_ACPI_STATUS(AE_OK);
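
Deactivation of a SystemMemory region now also drops any cached virtual mapping before the per-region context is freed. The teardown shape, as a sketch over the fields used above (hypothetical example_ helper):

static void example_memory_region_teardown(struct acpi_mem_space_context *ctx)
{
        /* Delete a cached mapping if one is present. */
        if (ctx->mapped_length) {
                acpi_os_unmap_memory(ctx->mapped_logical_address,
                                     ctx->mapped_length);
                ctx->mapped_length = 0;
        }
        ACPI_FREE(ctx);
}
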
@ -84,7 +95,7 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
/* Create a new context */
local_region_context =
ACPI_MEM_CALLOCATE(sizeof(struct acpi_mem_space_context));
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context));
if (!(local_region_context)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -118,7 +129,7 @@ acpi_ev_io_space_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
ACPI_FUNCTION_TRACE("ev_io_space_region_setup");
ACPI_FUNCTION_TRACE(ev_io_space_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
*region_context = NULL;
@ -161,7 +172,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
(union acpi_operand_object *)handle;
struct acpi_device_id object_hID;
ACPI_FUNCTION_TRACE("ev_pci_config_region_setup");
ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
handler_obj = region_obj->region.handler;
if (!handler_obj) {
@ -178,7 +189,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*region_context = NULL;
if (function == ACPI_REGION_DEACTIVATE) {
if (pci_id) {
ACPI_MEM_FREE(pci_id);
ACPI_FREE(pci_id);
}
return_ACPI_STATUS(status);
}
@ -199,6 +210,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
* handlers with that device.
*/
if (handler_obj->address_space.node == acpi_gbl_root_node) {
/* Start search from the parent object */
pci_root_node = parent_node;
@ -220,6 +232,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
PCI_EXPRESS_ROOT_HID_STRING,
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))))
{
/* Install a handler for this PCI root bridge */
status =
@ -235,7 +248,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
} else {
ACPI_EXCEPTION((AE_INFO,
status,
"Could not install pci_config handler for Root Bridge %4.4s",
"Could not install PciConfig handler for Root Bridge %4.4s",
acpi_ut_get_node_name
(pci_root_node)));
}
@ -262,7 +275,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Region is still not initialized. Create a new context */
pci_id = ACPI_MEM_CALLOCATE(sizeof(struct acpi_pci_id));
pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id));
if (!pci_id) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -337,7 +350,7 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
ACPI_FUNCTION_TRACE("ev_pci_bar_region_setup");
ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup);
return_ACPI_STATUS(AE_OK);
}
@ -364,7 +377,7 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
ACPI_FUNCTION_TRACE("ev_cmos_region_setup");
ACPI_FUNCTION_TRACE(ev_cmos_region_setup);
return_ACPI_STATUS(AE_OK);
}
@ -389,7 +402,7 @@ acpi_ev_default_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
ACPI_FUNCTION_TRACE("ev_default_region_setup");
ACPI_FUNCTION_TRACE(ev_default_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
*region_context = NULL;
@ -435,7 +448,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
union acpi_operand_object *region_obj2;
ACPI_FUNCTION_TRACE_U32("ev_initialize_region", acpi_ns_locked);
ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
if (!region_obj) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -462,8 +475,9 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* Find any "_REG" method associated with this region definition */
status = acpi_ns_search_node(*reg_name_ptr, node,
ACPI_TYPE_METHOD, &method_node);
status =
acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
&method_node);
if (ACPI_SUCCESS(status)) {
/*
* The _REG method is optional and there can be only one per region
@ -478,11 +492,13 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
* ie: acpi_gbl_root_node->parent_entry being set to NULL
*/
while (node) {
/* Check to see if a handler exists */
handler_obj = NULL;
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
/* Can only be a handler if the object exists */
switch (node->type) {
@ -507,10 +523,12 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
while (handler_obj) {
/* Is this handler of the correct type? */
if (handler_obj->address_space.space_id ==
space_id) {
/* Found correct handler */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
@ -571,7 +589,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* If we get here, there is no handler for this region */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"No handler for region_type %s(%X) (region_obj %p)\n",
"No handler for RegionType %s(%X) (RegionObj %p)\n",
acpi_ut_get_region_name(space_id), space_id,
region_obj));


@ -69,7 +69,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
ACPI_FUNCTION_TRACE("ev_sci_xrupt_handler");
ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
@ -108,7 +108,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
ACPI_FUNCTION_TRACE("ev_gpe_xrupt_handler");
ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
@ -140,7 +140,7 @@ u32 acpi_ev_install_sci_handler(void)
{
u32 status = AE_OK;
ACPI_FUNCTION_TRACE("ev_install_sci_handler");
ACPI_FUNCTION_TRACE(ev_install_sci_handler);
status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int,
acpi_ev_sci_xrupt_handler,
@ -171,7 +171,7 @@ acpi_status acpi_ev_remove_sci_handler(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_remove_sci_handler");
ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
/* Just let the OS remove the handler and disable the level */


@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@ -68,7 +66,7 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
{
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_install_exception_handler");
ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@ -90,6 +88,8 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif /* ACPI_FUTURE_USAGE */
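
The EXPORT_SYMBOL(foo); lines become ACPI_EXPORT_SYMBOL(foo) with no trailing semicolon because the wrapper macro is expected to supply it on Linux and expand to nothing on other ACPICA hosts. A plausible definition, offered as an assumption about the platform headers rather than something shown in this diff:

#ifdef EXPORT_SYMBOL                            /* Linux build */
#define ACPI_EXPORT_SYMBOL(symbol)      EXPORT_SYMBOL(symbol);
#else                                           /* other ACPICA hosts */
#define ACPI_EXPORT_SYMBOL(symbol)
#endif
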
/*******************************************************************************
@ -107,14 +107,13 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
* event.
*
******************************************************************************/
acpi_status
acpi_install_fixed_event_handler(u32 event,
acpi_event_handler handler, void *context)
{
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_install_fixed_event_handler");
ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
/* Parameter validation */
@ -161,7 +160,7 @@ acpi_install_fixed_event_handler(u32 event,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_install_fixed_event_handler);
ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
/*******************************************************************************
*
@ -175,13 +174,12 @@ EXPORT_SYMBOL(acpi_install_fixed_event_handler);
* DESCRIPTION: Disables the event and unregisters the event handler.
*
******************************************************************************/
acpi_status
acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("acpi_remove_fixed_event_handler");
ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
/* Parameter validation */
@ -216,7 +214,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_remove_fixed_event_handler);
ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
/*******************************************************************************
*
@ -235,7 +233,6 @@ EXPORT_SYMBOL(acpi_remove_fixed_event_handler);
* DESCRIPTION: Install a handler for notifies on an ACPI device
*
******************************************************************************/
acpi_status
acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
@ -246,7 +243,7 @@ acpi_install_notify_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_install_notify_handler");
ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
/* Parameter validation */
@ -275,6 +272,7 @@ acpi_install_notify_handler(acpi_handle device,
* only one <external> global handler can be regsitered (per notify type).
*/
if (device == ACPI_ROOT_OBJECT) {
/* Make sure the handler is not already installed */
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
@ -317,6 +315,7 @@ acpi_install_notify_handler(acpi_handle device,
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
/* Object exists - make sure there's no handler */
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
@ -370,6 +369,7 @@ acpi_install_notify_handler(acpi_handle device,
}
if (handler_type == ACPI_ALL_NOTIFY) {
/* Extra ref if installed in both */
acpi_ut_add_reference(notify_obj);
@ -381,7 +381,7 @@ acpi_install_notify_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_install_notify_handler);
ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
/*******************************************************************************
*
@ -399,7 +399,6 @@ EXPORT_SYMBOL(acpi_install_notify_handler);
* DESCRIPTION: Remove a handler for notifies on an ACPI device
*
******************************************************************************/
acpi_status
acpi_remove_notify_handler(acpi_handle device,
u32 handler_type, acpi_notify_handler handler)
@ -409,7 +408,7 @@ acpi_remove_notify_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_remove_notify_handler");
ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
/* Parameter validation */
@ -535,7 +534,7 @@ acpi_remove_notify_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_remove_notify_handler);
ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
/*******************************************************************************
*
@ -554,7 +553,6 @@ EXPORT_SYMBOL(acpi_remove_notify_handler);
* DESCRIPTION: Install a handler for a General Purpose Event.
*
******************************************************************************/
acpi_status
acpi_install_gpe_handler(acpi_handle gpe_device,
u32 gpe_number,
@ -565,7 +563,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_install_gpe_handler");
ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
/* Parameter validation */
@ -596,7 +594,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Allocate and init handler object */
handler = ACPI_MEM_CALLOCATE(sizeof(struct acpi_handler_info));
handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
if (!handler) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
@ -630,7 +628,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_install_gpe_handler);
ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
/*******************************************************************************
*
@ -646,7 +644,6 @@ EXPORT_SYMBOL(acpi_install_gpe_handler);
* DESCRIPTION: Remove a handler for a General Purpose acpi_event.
*
******************************************************************************/
acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, acpi_event_handler address)
@ -656,7 +653,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler");
ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);
/* Parameter validation */
@ -724,14 +721,14 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
/* Now we can free the handler object */
ACPI_MEM_FREE(handler);
ACPI_FREE(handler);
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_remove_gpe_handler);
ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
/*******************************************************************************
*
@ -746,7 +743,6 @@ EXPORT_SYMBOL(acpi_remove_gpe_handler);
* DESCRIPTION: Acquire the ACPI Global Lock
*
******************************************************************************/
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
{
acpi_status status;
@ -771,7 +767,7 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
return (status);
}
EXPORT_SYMBOL(acpi_acquire_global_lock);
ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
/*******************************************************************************
*
@ -784,7 +780,6 @@ EXPORT_SYMBOL(acpi_acquire_global_lock);
* DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
*
******************************************************************************/
acpi_status acpi_release_global_lock(u32 handle)
{
acpi_status status;
@ -797,4 +792,4 @@ acpi_status acpi_release_global_lock(u32 handle)
return (status);
}
EXPORT_SYMBOL(acpi_release_global_lock);
ACPI_EXPORT_SYMBOL(acpi_release_global_lock)


@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>
@ -65,7 +63,7 @@ acpi_status acpi_enable(void)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("acpi_enable");
ACPI_FUNCTION_TRACE(acpi_enable);
/* Make sure we have the FADT */
@ -94,6 +92,8 @@ acpi_status acpi_enable(void)
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_enable)
/*******************************************************************************
*
* FUNCTION: acpi_disable
@ -105,12 +105,11 @@ acpi_status acpi_enable(void)
* DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode.
*
******************************************************************************/
acpi_status acpi_disable(void)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("acpi_disable");
ACPI_FUNCTION_TRACE(acpi_disable);
if (!acpi_gbl_FADT) {
ACPI_WARNING((AE_INFO, "No FADT information present!"));
@ -137,6 +136,8 @@ acpi_status acpi_disable(void)
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_disable)
/*******************************************************************************
*
* FUNCTION: acpi_enable_event
@ -149,13 +150,12 @@ acpi_status acpi_disable(void)
* DESCRIPTION: Enable an ACPI event (fixed)
*
******************************************************************************/
acpi_status acpi_enable_event(u32 event, u32 flags)
{
acpi_status status = AE_OK;
u32 value;
ACPI_FUNCTION_TRACE("acpi_enable_event");
ACPI_FUNCTION_TRACE(acpi_enable_event);
/* Decode the Fixed Event */
@ -193,7 +193,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_enable_event);
ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
@ -208,13 +208,12 @@ EXPORT_SYMBOL(acpi_enable_event);
* DESCRIPTION: Set the type of an individual GPE
*
******************************************************************************/
acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE("acpi_set_gpe_type");
ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
/* Ensure that we have a valid GPE number */
@ -236,7 +235,7 @@ acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_set_gpe_type);
ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
/*******************************************************************************
*
@ -252,13 +251,12 @@ EXPORT_SYMBOL(acpi_set_gpe_type);
* DESCRIPTION: Enable an ACPI event (general purpose)
*
******************************************************************************/
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE("acpi_enable_gpe");
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
/* Use semaphore lock if not executing at interrupt level */
@ -288,7 +286,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_enable_gpe);
ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
/*******************************************************************************
*
@ -304,13 +302,12 @@ EXPORT_SYMBOL(acpi_enable_gpe);
* DESCRIPTION: Disable an ACPI event (general purpose)
*
******************************************************************************/
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE("acpi_disable_gpe");
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
/* Use semaphore lock if not executing at interrupt level */
@ -338,6 +335,8 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
/*******************************************************************************
*
* FUNCTION: acpi_disable_event
@ -350,13 +349,12 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
* DESCRIPTION: Disable an ACPI event (fixed)
*
******************************************************************************/
acpi_status acpi_disable_event(u32 event, u32 flags)
{
acpi_status status = AE_OK;
u32 value;
ACPI_FUNCTION_TRACE("acpi_disable_event");
ACPI_FUNCTION_TRACE(acpi_disable_event);
/* Decode the Fixed Event */
@ -392,7 +390,7 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_disable_event);
ACPI_EXPORT_SYMBOL(acpi_disable_event)
/*******************************************************************************
*
@ -405,12 +403,11 @@ EXPORT_SYMBOL(acpi_disable_event);
* DESCRIPTION: Clear an ACPI event (fixed)
*
******************************************************************************/
acpi_status acpi_clear_event(u32 event)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("acpi_clear_event");
ACPI_FUNCTION_TRACE(acpi_clear_event);
/* Decode the Fixed Event */
@ -429,7 +426,7 @@ acpi_status acpi_clear_event(u32 event)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_clear_event);
ACPI_EXPORT_SYMBOL(acpi_clear_event)
/*******************************************************************************
*
@ -444,13 +441,12 @@ EXPORT_SYMBOL(acpi_clear_event);
* DESCRIPTION: Clear an ACPI event (general purpose)
*
******************************************************************************/
acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE("acpi_clear_gpe");
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
/* Use semaphore lock if not executing at interrupt level */
@ -478,6 +474,8 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@ -492,12 +490,11 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
* DESCRIPTION: Obtains and returns the current status of the event
*
******************************************************************************/
acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("acpi_get_event_status");
ACPI_FUNCTION_TRACE(acpi_get_event_status);
if (!event_status) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -518,6 +515,8 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_event_status)
/*******************************************************************************
*
* FUNCTION: acpi_get_gpe_status
@ -533,7 +532,6 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
* DESCRIPTION: Get status of an event (general purpose)
*
******************************************************************************/
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
u32 gpe_number, u32 flags, acpi_event_status * event_status)
@ -541,7 +539,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE("acpi_get_gpe_status");
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
/* Use semaphore lock if not executing at interrupt level */
@ -570,6 +568,8 @@ acpi_get_gpe_status(acpi_handle gpe_device,
}
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@ -586,7 +586,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
* DESCRIPTION: Create and Install a block of GPE registers
*
******************************************************************************/
acpi_status
acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_generic_address *gpe_block_address,
@ -597,7 +596,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_namespace_node *node;
struct acpi_gpe_block_info *gpe_block;
ACPI_FUNCTION_TRACE("acpi_install_gpe_block");
ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -636,6 +635,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
/* No object, create a new one */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
@ -665,7 +665,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_install_gpe_block);
ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
/*******************************************************************************
*
@ -678,14 +678,13 @@ EXPORT_SYMBOL(acpi_install_gpe_block);
* DESCRIPTION: Remove a previously installed block of GPE registers
*
******************************************************************************/
acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
{
union acpi_operand_object *obj_desc;
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE("acpi_remove_gpe_block");
ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
if (!gpe_device) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -721,4 +720,4 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_remove_gpe_block);
ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)


@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@ -75,7 +73,7 @@ acpi_install_address_space_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_install_address_space_handler");
ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);
/* Parameter validation */
@ -114,7 +112,7 @@ acpi_install_address_space_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_install_address_space_handler);
ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
/*******************************************************************************
*
@ -129,7 +127,6 @@ EXPORT_SYMBOL(acpi_install_address_space_handler);
* DESCRIPTION: Remove a previously installed handler.
*
******************************************************************************/
acpi_status
acpi_remove_address_space_handler(acpi_handle device,
acpi_adr_space_type space_id,
@ -142,7 +139,7 @@ acpi_remove_address_space_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_remove_address_space_handler");
ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);
/* Parameter validation */
@ -176,9 +173,11 @@ acpi_remove_address_space_handler(acpi_handle device,
handler_obj = obj_desc->device.handler;
last_obj_ptr = &obj_desc->device.handler;
while (handler_obj) {
/* We have a handler, see if user requested this one */
if (handler_obj->address_space.space_id == space_id) {
/* Matched space_id, first dereference this in the Regions */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
@ -229,7 +228,7 @@ acpi_remove_address_space_handler(acpi_handle device,
/* The handler does not exist */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Unable to remove address handler %p for %s(%X), dev_node %p, obj %p\n",
"Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
handler, acpi_ut_get_region_name(space_id), space_id,
node, obj_desc));
@ -240,4 +239,4 @@ acpi_remove_address_space_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_remove_address_space_handler);
ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)


@ -82,7 +82,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
struct acpi_table_desc table_info;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE("ex_add_table");
ACPI_FUNCTION_TRACE(ex_add_table);
/* Create an object to be the table handle */
@ -100,7 +100,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
ACPI_MEMSET(&table_info, 0, sizeof(struct acpi_table_desc));
table_info.type = ACPI_TABLE_SSDT;
table_info.type = ACPI_TABLE_ID_SSDT;
table_info.pointer = table;
table_info.length = (acpi_size) table->length;
table_info.allocation = ACPI_MEM_ALLOCATED;
@ -110,6 +110,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
/* Table already exists, just return the handle */
return_ACPI_STATUS(AE_OK);
@ -121,6 +122,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
status = acpi_ns_load_table(table_info.installed_desc, parent_node);
if (ACPI_FAILURE(status)) {
/* Uninstall table on error */
(void)acpi_tb_uninstall_table(table_info.installed_desc);
@ -160,7 +162,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *parameter_node = NULL;
union acpi_operand_object *ddb_handle;
ACPI_FUNCTION_TRACE("ex_load_table_op");
ACPI_FUNCTION_TRACE(ex_load_table_op);
#if 0
/*
@ -169,6 +171,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
*/
status = acpi_tb_match_signature(operand[0]->string.pointer, NULL);
if (status == AE_OK) {
/* Signature matched -- don't allow override */
return_ACPI_STATUS(AE_ALREADY_EXISTS);
@ -211,9 +214,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
* location within the namespace where the table will be loaded.
*/
status =
acpi_ns_get_node_by_path(operand[3]->string.pointer,
start_node, ACPI_NS_SEARCH_PARENT,
&parent_node);
acpi_ns_get_node(start_node, operand[3]->string.pointer,
ACPI_NS_SEARCH_PARENT, &parent_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@ -234,9 +236,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* Find the node referenced by the parameter_path_string */
status =
acpi_ns_get_node_by_path(operand[4]->string.pointer,
start_node, ACPI_NS_SEARCH_PARENT,
&parameter_node);
acpi_ns_get_node(start_node, operand[4]->string.pointer,
ACPI_NS_SEARCH_PARENT, &parameter_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@ -252,6 +253,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* Parameter Data (optional) */
if (parameter_node) {
/* Store the parameter data into the optional parameter object */
status = acpi_ex_store(operand[5],
@ -294,9 +296,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
struct acpi_table_header *table_ptr = NULL;
acpi_physical_address address;
struct acpi_table_header table_header;
acpi_integer temp;
u32 i;
ACPI_FUNCTION_TRACE("ex_load_op");
ACPI_FUNCTION_TRACE(ex_load_op);
/* Object can be either an op_region or a Field */
@ -322,7 +325,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
address = obj_desc->region.address;
/* Get the table length from the table header */
/* Get part of the table header to get the table length */
table_header.length = 0;
for (i = 0; i < 8; i++) {
@ -330,11 +333,14 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
(acpi_physical_address)
(i + address), 8,
((u8 *) &
table_header) + i);
&temp);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get the one valid byte of the returned 64-bit value */
ACPI_CAST_PTR(u8, &table_header)[i] = (u8) temp;
}
/* Sanity check the table length */
@ -345,7 +351,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* Allocate a buffer for the entire table */
table_ptr = ACPI_MEM_ALLOCATE(table_header.length);
table_ptr = ACPI_ALLOCATE(table_header.length);
if (!table_ptr) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -357,11 +363,14 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
(acpi_physical_address)
(i + address), 8,
((u8 *) table_ptr +
i));
&temp);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Get the one valid byte of the returned 64-bit value */
ACPI_CAST_PTR(u8, table_ptr)[i] = (u8) temp;
}
break;
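
Both loops above now read through the region handler one byte at a time into a 64-bit temporary and keep only the low byte of each returned value. The pattern, isolated as a sketch (assuming the dispatch interface shown earlier in this diff; example_ name is hypothetical):

static acpi_status
example_read_bytes(union acpi_operand_object *region_obj,
                   acpi_physical_address address, u8 *dest, u32 count)
{
        acpi_integer temp;
        acpi_status status;
        u32 i;

        for (i = 0; i < count; i++) {
                status = acpi_ev_address_space_dispatch(region_obj, ACPI_READ,
                                                        address + i, 8, &temp);
                if (ACPI_FAILURE(status))
                        return status;

                /* One valid byte per 8-bit read of the 64-bit return value. */
                dest[i] = (u8) temp;
        }
        return AE_OK;
}
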
@ -407,12 +416,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* The table must be either an SSDT or a PSDT */
if ((!ACPI_STRNCMP(table_ptr->signature,
acpi_gbl_table_data[ACPI_TABLE_PSDT].signature,
acpi_gbl_table_data[ACPI_TABLE_PSDT].sig_length)) &&
(!ACPI_STRNCMP(table_ptr->signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) {
if ((!ACPI_COMPARE_NAME(table_ptr->signature, PSDT_SIG)) &&
(!ACPI_COMPARE_NAME(table_ptr->signature, SSDT_SIG))) {
ACPI_ERROR((AE_INFO,
"Table has invalid signature [%4.4s], must be SSDT or PSDT",
table_ptr->signature));
@ -424,6 +429,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
status = acpi_ex_add_table(table_ptr, acpi_gbl_root_node, &ddb_handle);
if (ACPI_FAILURE(status)) {
/* On error, table_ptr was deallocated above */
return_ACPI_STATUS(status);
@ -442,7 +448,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
cleanup:
if (ACPI_FAILURE(status)) {
ACPI_MEM_FREE(table_ptr);
ACPI_FREE(table_ptr);
}
return_ACPI_STATUS(status);
}
@ -465,7 +471,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
union acpi_operand_object *table_desc = ddb_handle;
struct acpi_table_desc *table_info;
ACPI_FUNCTION_TRACE("ex_unload_table");
ACPI_FUNCTION_TRACE(ex_unload_table);
/*
* Validate the handle


@ -79,7 +79,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
u32 count;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ex_convert_to_integer", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_INTEGER:
@ -199,7 +199,7 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
union acpi_operand_object *return_desc;
u8 *new_buf;
ACPI_FUNCTION_TRACE_PTR("ex_convert_to_buffer", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_BUFFER:
@ -319,6 +319,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
remainder = 0;
for (i = decimal_length; i > 0; i--) {
/* Divide by nth factor of 10 */
digit = integer;
@ -346,6 +347,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
hex_length = (acpi_native_uint) ACPI_MUL_2(data_width);
for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
/* Get one hex digit, most significant digits first */
string[k] =
@ -400,7 +402,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
u16 base = 16;
u8 separator = ',';
ACPI_FUNCTION_TRACE_PTR("ex_convert_to_string", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_STRING:
@ -567,7 +569,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_convert_to_target_type");
ACPI_FUNCTION_TRACE(ex_convert_to_target_type);
/* Default behavior */
@ -657,7 +659,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
"Unknown Target type ID 0x%X aml_opcode %X dest_type %s",
"Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,


@ -69,7 +69,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
struct acpi_namespace_node *alias_node;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_create_alias");
ACPI_FUNCTION_TRACE(ex_create_alias);
/* Get the source/alias operands (both namespace nodes) */
@ -164,7 +164,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
acpi_status status;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE("ex_create_event");
ACPI_FUNCTION_TRACE(ex_create_event);
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_EVENT);
if (!obj_desc) {
@ -216,7 +216,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE_PTR("ex_create_mutex", ACPI_WALK_OPERANDS);
ACPI_FUNCTION_TRACE_PTR(ex_create_mutex, ACPI_WALK_OPERANDS);
/* Create the new mutex object */
@ -243,8 +243,9 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
obj_desc->mutex.node =
(struct acpi_namespace_node *)walk_state->operands[0];
status = acpi_ns_attach_object(obj_desc->mutex.node,
obj_desc, ACPI_TYPE_MUTEX);
status =
acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
ACPI_TYPE_MUTEX);
cleanup:
/*
@ -280,7 +281,7 @@ acpi_ex_create_region(u8 * aml_start,
struct acpi_namespace_node *node;
union acpi_operand_object *region_obj2;
ACPI_FUNCTION_TRACE("ex_create_region");
ACPI_FUNCTION_TRACE(ex_create_region);
/* Get the Namespace Node */
@ -300,7 +301,7 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
ACPI_ERROR((AE_INFO, "Invalid address_space type %X",
ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
@ -364,7 +365,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
struct acpi_table_header *table;
union acpi_operand_object *region_obj2;
ACPI_FUNCTION_TRACE("ex_create_table_region");
ACPI_FUNCTION_TRACE(ex_create_table_region);
/* Get the Node from the object stack */
@ -452,7 +453,7 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ex_create_processor", walk_state);
ACPI_FUNCTION_TRACE_PTR(ex_create_processor, walk_state);
/* Create the processor object */
@ -464,9 +465,9 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
/* Initialize the processor object from the operands */
obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
obj_desc->processor.length = (u8) operand[3]->integer.value;
obj_desc->processor.address =
(acpi_io_address) operand[2]->integer.value;
obj_desc->processor.length = (u8) operand[3]->integer.value;
/* Install the processor object in the parent Node */
@ -499,7 +500,7 @@ acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state)
acpi_status status;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE_PTR("ex_create_power_resource", walk_state);
ACPI_FUNCTION_TRACE_PTR(ex_create_power_resource, walk_state);
/* Create the power resource object */
@ -549,7 +550,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_status status;
u8 method_flags;
ACPI_FUNCTION_TRACE_PTR("ex_create_method", walk_state);
ACPI_FUNCTION_TRACE_PTR(ex_create_method, walk_state);
/* Create a new method object */

View File

@ -61,6 +61,10 @@ static void acpi_ex_out_pointer(char *title, void *value);
static void acpi_ex_out_address(char *title, acpi_physical_address value);
static void
acpi_ex_dump_object(union acpi_operand_object *obj_desc,
struct acpi_exdump_info *info);
static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc);
static void
@ -119,7 +123,7 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
static struct acpi_exdump_info acpi_ex_dump_method[8] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "param_count"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.concurrency), "Concurrency"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.semaphore), "Semaphore"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
@ -263,12 +267,10 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
};
static struct acpi_exdump_info acpi_ex_dump_node[6] = {
static struct acpi_exdump_info acpi_ex_dump_node[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
{ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(reference_count),
"Reference Count"},
{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
};
@ -330,7 +332,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
if (!info) {
acpi_os_printf
("ex_dump_object: Display not implemented for object type %s\n",
("ExDumpObject: Display not implemented for object type %s\n",
acpi_ut_get_object_type_name(obj_desc));
return;
}
@ -454,7 +456,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
ACPI_FUNCTION_NAME("ex_dump_operand")
ACPI_FUNCTION_NAME(ex_dump_operand)
if (!
((ACPI_LV_EXEC & acpi_dbg_level)
@ -463,6 +465,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
}
if (!obj_desc) {
/* This could be a null element of a package */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n"));
@ -522,7 +525,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case AML_REF_OF_OP:
acpi_os_printf("Reference: (ref_of) %p\n",
acpi_os_printf("Reference: (RefOf) %p\n",
obj_desc->reference.object);
break;
@ -532,6 +535,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
obj_desc->reference.offset);
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
/* Value is an Integer */
acpi_os_printf(" value is [%8.8X%8.8x]",
@ -610,7 +614,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_PACKAGE:
acpi_os_printf("Package [Len %X] element_array %p\n",
acpi_os_printf("Package [Len %X] ElementArray %p\n",
obj_desc->package.count,
obj_desc->package.elements);
@ -662,13 +666,13 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_LOCAL_BANK_FIELD:
acpi_os_printf("bank_field\n");
acpi_os_printf("BankField\n");
break;
case ACPI_TYPE_LOCAL_REGION_FIELD:
acpi_os_printf
("region_field: Bits=%X acc_width=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
obj_desc->field.bit_length,
obj_desc->field.access_byte_width,
obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK,
@ -681,12 +685,12 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_LOCAL_INDEX_FIELD:
acpi_os_printf("index_field\n");
acpi_os_printf("IndexField\n");
break;
case ACPI_TYPE_BUFFER_FIELD:
acpi_os_printf("buffer_field: %X bits at byte %X bit %X of\n",
acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n",
obj_desc->buffer_field.bit_length,
obj_desc->buffer_field.base_byte_offset,
obj_desc->buffer_field.start_field_bit_offset);
@ -777,7 +781,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
{
acpi_native_uint i;
ACPI_FUNCTION_NAME("ex_dump_operands");
ACPI_FUNCTION_NAME(ex_dump_operands);
if (!ident) {
ident = "?";
@ -901,7 +905,7 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
acpi_os_printf("Could not convert name to pathname\n");
} else {
acpi_os_printf("%s\n", (char *)ret_buf.pointer);
ACPI_MEM_FREE(ret_buf.pointer);
ACPI_FREE(ret_buf.pointer);
}
} else if (obj_desc->reference.object) {
acpi_os_printf("\nReferenced Object: %p\n",
@ -1017,7 +1021,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
void
acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
{
ACPI_FUNCTION_TRACE("ex_dump_object_descriptor");
ACPI_FUNCTION_TRACE(ex_dump_object_descriptor);
if (!obj_desc) {
return_VOID;
@ -1046,7 +1050,7 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
acpi_os_printf
("ex_dump_object_descriptor: %p is not an ACPI operand object: [%s]\n",
("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
obj_desc, acpi_ut_get_descriptor_name(obj_desc));
return_VOID;
}

View File

@ -73,7 +73,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
void *buffer;
u8 locked;
ACPI_FUNCTION_TRACE_PTR("ex_read_data_from_field", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
/* Parameter validation */
@ -142,6 +142,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
length =
(acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
if (length > acpi_gbl_integer_byte_width) {
/* Field is too large for an Integer, create a Buffer instead */
buffer_desc = acpi_ut_create_buffer_object(length);
@ -163,11 +164,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"field_read [TO]: Obj %p, Type %X, Buf %p, byte_len %X\n",
"FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
obj_desc, ACPI_GET_OBJECT_TYPE(obj_desc), buffer,
(u32) length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"field_read [FROM]: bit_len %X, bit_off %X, byte_off %X\n",
"FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc->common_field.bit_length,
obj_desc->common_field.start_field_bit_offset,
obj_desc->common_field.base_byte_offset));
@ -219,7 +220,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
u8 locked;
union acpi_operand_object *buffer_desc;
ACPI_FUNCTION_TRACE_PTR("ex_write_data_to_field", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
/* Parameter validation */
@ -329,9 +330,10 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
if (length < required_length) {
/* We need to create a new buffer */
new_buffer = ACPI_MEM_CALLOCATE(required_length);
new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
if (!new_buffer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -347,14 +349,14 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"field_write [FROM]: Obj %p (%s:%X), Buf %p, byte_len %X\n",
"FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
source_desc,
acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
(source_desc)),
ACPI_GET_OBJECT_TYPE(source_desc), buffer, length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"field_write [TO]: Obj %p (%s:%X), bit_len %X, bit_off %X, byte_off %X\n",
"FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc,
acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)),
ACPI_GET_OBJECT_TYPE(obj_desc),
@ -375,7 +377,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
/* Free temporary buffer if we used one */
if (new_buffer) {
ACPI_MEM_FREE(new_buffer);
ACPI_FREE(new_buffer);
}
return_ACPI_STATUS(status);
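
The comment added to acpi_ex_read_data_from_field() above notes that a field too large for an Integer is returned as a Buffer instead. A small sketch of that size decision; the 8-byte integer width and the helper name are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define INTEGER_BYTE_WIDTH 8	/* 64-bit AML integers; an assumption for this sketch */

/* Fields wider than the interpreter integer come back as a Buffer. */
static const char *field_read_result_type(uint32_t bit_length)
{
	uint32_t byte_length = (bit_length + 7) / 8;	/* round bits up to bytes */

	return byte_length > INTEGER_BYTE_WIDTH ? "Buffer" : "Integer";
}

int main(void)
{
	printf("%s\n", field_read_result_type(32));	/* Integer */
	printf("%s\n", field_read_result_type(64));	/* Integer */
	printf("%s\n", field_read_result_type(96));	/* Buffer  */
	return 0;
}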

View File

@ -87,7 +87,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
acpi_status status = AE_OK;
union acpi_operand_object *rgn_desc;
ACPI_FUNCTION_TRACE_U32("ex_setup_region", field_datum_byte_offset);
ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
rgn_desc = obj_desc->common_field.region_obj;
@ -112,7 +112,18 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
}
/* Exit if Address/Length have been disallowed by the host OS */
if (rgn_desc->common.flags & AOPOBJ_INVALID) {
return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
}
/*
* Exit now for SMBus address space, it has a non-linear address space
* and the request cannot be directly validated
*/
if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) {
/* SMBus has a non-linear address space */
return_ACPI_STATUS(AE_OK);
@ -134,10 +145,10 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* length of one field datum (access width) must fit within the region.
* (Region length is specified in bytes)
*/
if (rgn_desc->region.length < (obj_desc->common_field.base_byte_offset +
field_datum_byte_offset +
obj_desc->common_field.
access_byte_width)) {
if (rgn_desc->region.length <
(obj_desc->common_field.base_byte_offset +
field_datum_byte_offset +
obj_desc->common_field.access_byte_width)) {
if (acpi_gbl_enable_interpreter_slack) {
/*
* Slack mode only: We will go ahead and allow access to this
@ -217,7 +228,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
union acpi_operand_object *rgn_desc;
acpi_physical_address address;
ACPI_FUNCTION_TRACE("ex_access_region");
ACPI_FUNCTION_TRACE(ex_access_region);
/*
* Ensure that the region operands are fully evaluated and verify
@ -246,7 +257,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
" Region [%s:%X], Width %X, byte_base %X, Offset %X at %8.8X%8.8X\n",
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id,
@ -352,7 +363,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
acpi_status status;
acpi_integer local_value;
ACPI_FUNCTION_TRACE_U32("ex_field_datum_io", field_datum_byte_offset);
ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset);
if (read_write == ACPI_READ) {
if (!value) {
@ -487,10 +498,11 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"I/O to Data Register: value_ptr %p\n",
"I/O to Data Register: ValuePtr %p\n",
value));
if (read_write == ACPI_READ) {
/* Read the datum from the data_register */
status =
@ -559,7 +571,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
acpi_integer merged_value;
acpi_integer current_value;
ACPI_FUNCTION_TRACE_U32("ex_write_with_update_rule", mask);
ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask);
/* Start with the new bits */
@ -568,6 +580,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
/* If the mask is all ones, we don't need to worry about the update rule */
if (mask != ACPI_INTEGER_MAX) {
/* Decode the update rule */
switch (obj_desc->common_field.
@ -614,7 +627,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_ERROR((AE_INFO,
"Unknown update_rule value: %X",
"Unknown UpdateRule value: %X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
@ -623,7 +636,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Mask %8.8X%8.8X, datum_offset %X, Width %X, Value %8.8X%8.8X, merged_value %8.8X%8.8X\n",
"Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
ACPI_FORMAT_UINT64(mask),
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
@ -666,7 +679,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
u32 field_datum_count;
u32 i;
ACPI_FUNCTION_TRACE("ex_extract_from_field");
ACPI_FUNCTION_TRACE(ex_extract_from_field);
/* Validate target buffer and clear it */
@ -704,6 +717,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
/* Read the rest of the field */
for (i = 1; i < field_datum_count; i++) {
/* Get next input datum from the field */
field_offset += obj_desc->common_field.access_byte_width;
@ -771,6 +785,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
{
acpi_status status;
acpi_integer mask;
acpi_integer width_mask;
acpi_integer merged_datum;
acpi_integer raw_datum = 0;
u32 field_offset = 0;
@ -780,7 +795,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
u32 field_datum_count;
u32 i;
ACPI_FUNCTION_TRACE("ex_insert_into_field");
ACPI_FUNCTION_TRACE(ex_insert_into_field);
/* Validate input buffer */
@ -795,15 +810,20 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Compute the number of datums (access width data items) */
width_mask =
ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width);
mask =
ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
datum_count =
ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
obj_desc->common_field.access_bit_width);
field_datum_count =
ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
obj_desc->common_field.start_field_bit_offset,
obj_desc->common_field.access_bit_width);
width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field.
start_field_bit_offset);
datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
obj_desc->common_field.access_bit_width);
field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
obj_desc->common_field.
start_field_bit_offset,
obj_desc->common_field.
access_bit_width);
/* Get initial Datum from the input buffer */
@ -817,6 +837,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Write the entire field */
for (i = 1; i < field_datum_count; i++) {
/* Write merged datum to the target field */
merged_datum &= mask;
@ -833,7 +854,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
merged_datum = raw_datum >>
(obj_desc->common_field.access_bit_width -
obj_desc->common_field.start_field_bit_offset);
mask = ACPI_INTEGER_MAX;
mask = width_mask;
if (i == datum_count) {
break;
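
Among the checks reworked in acpi_ex_setup_region() above is the requirement that one field datum (the access width) fit entirely within the region. A hedged sketch of that bounds test; the structure and function names are stand-ins, not the ACPICA types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the field layout used by acpi_ex_setup_region(). */
struct field_layout {
	uint32_t base_byte_offset;	/* field start within the region */
	uint32_t access_byte_width;	/* one field datum, in bytes */
};

/* A datum starting at field_datum_byte_offset must end within the region. */
static bool datum_fits_region(const struct field_layout *f,
			      uint32_t field_datum_byte_offset,
			      uint32_t region_length)
{
	return region_length >= (f->base_byte_offset +
				 field_datum_byte_offset +
				 f->access_byte_width);
}

int main(void)
{
	struct field_layout f = { .base_byte_offset = 4, .access_byte_width = 2 };

	/* 8-byte region: a datum at offset 2 ends at byte 4 + 2 + 2 = 8, so it fits */
	printf("%d\n", datum_fits_region(&f, 2, 8));	/* 1 */
	/* a datum at offset 3 would end at byte 9, past the region */
	printf("%d\n", datum_fits_region(&f, 3, 8));	/* 0 */
	return 0;
}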

View File

@ -72,7 +72,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
union acpi_operand_object *reference_obj;
union acpi_operand_object *referenced_obj;
ACPI_FUNCTION_TRACE_PTR("ex_get_object_reference", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);
*return_desc = NULL;
@ -168,7 +168,7 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
acpi_size length1;
acpi_size new_length;
ACPI_FUNCTION_TRACE("ex_concat_template");
ACPI_FUNCTION_TRACE(ex_concat_template);
/*
* Find the end_tag descriptor in each resource template.
@ -250,7 +250,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
char *new_buf;
acpi_status status;
ACPI_FUNCTION_TRACE("ex_do_concatenate");
ACPI_FUNCTION_TRACE(ex_do_concatenate);
/*
* Convert the second operand if necessary. The first operand
@ -445,10 +445,24 @@ acpi_ex_do_math_op(u16 opcode, acpi_integer integer0, acpi_integer integer1)
case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */
/*
* We need to check if the shiftcount is larger than the integer bit
* width since the behavior of this is not well-defined in the C language.
*/
if (integer1 >= acpi_gbl_integer_bit_width) {
return (0);
}
return (integer0 << integer1);
case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */
/*
* We need to check if the shiftcount is larger than the integer bit
* width since the behavior of this is not well-defined in the C language.
*/
if (integer1 >= acpi_gbl_integer_bit_width) {
return (0);
}
return (integer0 >> integer1);
case AML_SUBTRACT_OP: /* Subtract (Integer0, Integer1, Result) */
@ -489,7 +503,7 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
acpi_status status = AE_OK;
u8 local_result = FALSE;
ACPI_FUNCTION_TRACE("ex_do_logical_numeric_op");
ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);
switch (opcode) {
case AML_LAND_OP: /* LAnd (Integer0, Integer1) */
@ -557,7 +571,7 @@ acpi_ex_do_logical_op(u16 opcode,
u8 local_result = FALSE;
int compare;
ACPI_FUNCTION_TRACE("ex_do_logical_op");
ACPI_FUNCTION_TRACE(ex_do_logical_op);
/*
* Convert the second operand if necessary. The first operand
@ -649,6 +663,7 @@ acpi_ex_do_logical_op(u16 opcode,
/* Length and all bytes must be equal */
if ((length0 == length1) && (compare == 0)) {
/* Length and all bytes match ==> TRUE */
local_result = TRUE;
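
The ShiftLeft/ShiftRight hunks above add a guard because shifting a 64-bit value by 64 or more bits is undefined behaviour in C, so the AML operators return 0 in that case. A minimal sketch of the guard; the constant and function names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define INTEGER_BIT_WIDTH 64	/* width of an AML integer in this sketch */

static uint64_t aml_shift_left(uint64_t operand, uint64_t count)
{
	if (count >= INTEGER_BIT_WIDTH)
		return 0;		/* avoid undefined behaviour in C */
	return operand << count;
}

static uint64_t aml_shift_right(uint64_t operand, uint64_t count)
{
	if (count >= INTEGER_BIT_WIDTH)
		return 0;
	return operand >> count;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)aml_shift_left(1, 3));	/* 8 */
	printf("%llu\n", (unsigned long long)aml_shift_left(1, 64));	/* 0, not UB */
	printf("%llu\n", (unsigned long long)aml_shift_right(16, 2));	/* 4 */
	return 0;
}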

View File

@ -61,7 +61,7 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* RETURN: None
*
* DESCRIPTION: Remove a mutex from the "acquired_mutex" list
* DESCRIPTION: Remove a mutex from the "AcquiredMutex" list
*
******************************************************************************/
@ -95,7 +95,7 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
*
* RETURN: None
*
* DESCRIPTION: Add a mutex to the "acquired_mutex" list for this walk
* DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk
*
******************************************************************************/
@ -144,7 +144,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
{
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ex_acquire_mutex", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -165,7 +165,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], incorrect sync_level",
"Cannot acquire Mutex [%4.4s], incorrect SyncLevel",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
@ -173,6 +173,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Support for multiple acquires by the owning thread */
if (obj_desc->mutex.owner_thread) {
/* Special case for Global Lock, allow all threads */
if ((obj_desc->mutex.owner_thread->thread_id ==
@ -192,6 +193,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
if (ACPI_FAILURE(status)) {
/* Includes failure from a timeout on time_desc */
return_ACPI_STATUS(status);
@ -232,7 +234,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
{
acpi_status status;
ACPI_FUNCTION_TRACE("ex_release_mutex");
ACPI_FUNCTION_TRACE(ex_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -277,7 +279,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*/
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], incorrect sync_level",
"Cannot release Mutex [%4.4s], incorrect SyncLevel",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
@ -286,6 +288,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
obj_desc->mutex.acquisition_depth--;
if (obj_desc->mutex.acquisition_depth != 0) {
/* Just decrement the depth and return */
return_ACPI_STATUS(AE_OK);
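
The SyncLevel messages above reflect the AML mutex ordering rule: a thread may only acquire a mutex whose sync level is at or above its current level, and the release check is the mirror image. A rough sketch of just that predicate, with types and names that are assumptions rather than the ACPICA structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Acquiring a lower-level mutex while holding a higher one would raise
 * AE_AML_MUTEX_ORDER; the release side enforces the reverse ordering. */
static bool may_acquire(uint8_t current_sync_level, uint8_t mutex_sync_level)
{
	return current_sync_level <= mutex_sync_level;
}

static bool may_release(uint8_t current_sync_level, uint8_t mutex_sync_level)
{
	return mutex_sync_level <= current_sync_level;
}

int main(void)
{
	printf("%d\n", may_acquire(0, 5));	/* 1: moving up is allowed */
	printf("%d\n", may_acquire(5, 3));	/* 0: would violate the ordering */
	printf("%d\n", may_release(5, 5));	/* 1 */
	return 0;
}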

View File

@ -77,7 +77,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
char *name_string;
u32 size_needed;
ACPI_FUNCTION_TRACE("ex_allocate_name_string");
ACPI_FUNCTION_TRACE(ex_allocate_name_string);
/*
* Allow room for all \ and ^ prefixes, all segments and a multi_name_prefix.
@ -85,6 +85,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
* This may actually be somewhat longer than needed.
*/
if (prefix_count == ACPI_UINT32_MAX) {
/* Special case for root */
size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
@ -97,7 +98,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
* Allocate a buffer for the name.
* This buffer must be deleted by the caller!
*/
name_string = ACPI_MEM_ALLOCATE(size_needed);
name_string = ACPI_ALLOCATE(size_needed);
if (!name_string) {
ACPI_ERROR((AE_INFO,
"Could not allocate size %d", size_needed));
@ -119,11 +120,13 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
/* Set up Dual or Multi prefixes if needed */
if (num_name_segs > 2) {
/* Set up multi prefixes */
*temp_ptr++ = AML_MULTI_NAME_PREFIX_OP;
*temp_ptr++ = (char)num_name_segs;
} else if (2 == num_name_segs) {
/* Set up dual prefixes */
*temp_ptr++ = AML_DUAL_NAME_PREFIX;
@ -159,7 +162,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
u32 index;
char char_buf[5];
ACPI_FUNCTION_TRACE("ex_name_segment");
ACPI_FUNCTION_TRACE(ex_name_segment);
/*
* If first character is a digit, then we know that we aren't looking at a
@ -176,7 +179,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
for (index = 0;
(index < ACPI_NAME_SIZE)
&& (acpi_ut_valid_acpi_character(*aml_address)); index++) {
&& (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
}
@ -184,6 +187,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
/* Valid name segment */
if (index == 4) {
/* Found 4 valid characters */
char_buf[4] = '\0';
@ -249,11 +253,12 @@ acpi_ex_get_name_string(acpi_object_type data_type,
u32 prefix_count = 0;
u8 has_prefix = FALSE;
ACPI_FUNCTION_TRACE_PTR("ex_get_name_string", aml_address);
ACPI_FUNCTION_TRACE_PTR(ex_get_name_string, aml_address);
if (ACPI_TYPE_LOCAL_REGION_FIELD == data_type ||
ACPI_TYPE_LOCAL_BANK_FIELD == data_type ||
ACPI_TYPE_LOCAL_INDEX_FIELD == data_type) {
/* Disallow prefixes for types associated with field_unit names */
name_string = acpi_ex_allocate_name_string(0, 1);
@ -272,7 +277,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_ROOT_PREFIX:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"root_prefix(\\) at %p\n",
"RootPrefix(\\) at %p\n",
aml_address));
/*
@ -290,7 +295,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
do {
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"parent_prefix (^) at %p\n",
"ParentPrefix (^) at %p\n",
aml_address));
aml_address++;
@ -314,7 +319,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_DUAL_NAME_PREFIX:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"dual_name_prefix at %p\n",
"DualNamePrefix at %p\n",
aml_address));
aml_address++;
@ -341,7 +346,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_MULTI_NAME_PREFIX_OP:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"multi_name_prefix at %p\n",
"MultiNamePrefix at %p\n",
aml_address));
/* Fetch count of segments remaining in name path */
@ -377,7 +382,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
if (prefix_count == ACPI_UINT32_MAX) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"name_seg is \"\\\" followed by NULL\n"));
"NameSeg is \"\\\" followed by NULL\n"));
}
/* Consume the NULL byte */
@ -410,6 +415,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
}
if (AE_CTRL_PENDING == status && has_prefix) {
/* Ran out of segments after processing a prefix */
ACPI_ERROR((AE_INFO, "Malformed Name at %p", name_string));
@ -418,7 +424,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
if (ACPI_FAILURE(status)) {
if (name_string) {
ACPI_MEM_FREE(name_string);
ACPI_FREE(name_string);
}
return_ACPI_STATUS(status);
}
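
acpi_ex_allocate_name_string(), touched above, sizes its buffer for every \ or ^ prefix, four bytes per name segment, a possible dual/multi-name prefix, and the terminator. A small sketch of that sizing arithmetic; the helper is hypothetical and the non-root formula is inferred from the surrounding context.

#include <stdint.h>
#include <stdio.h>

#define ACPI_NAME_SIZE 4

/* Sketch of the buffer sizing; not copied from the ACPICA source. */
static uint32_t name_string_size(uint32_t prefix_count, uint32_t num_segments)
{
	if (prefix_count == UINT32_MAX)		/* special case for the root prefix (\) */
		return 1 + (ACPI_NAME_SIZE * num_segments) + 2 + 1;

	return prefix_count + (ACPI_NAME_SIZE * num_segments) + 2 + 1;
}

int main(void)
{
	printf("%u\n", name_string_size(UINT32_MAX, 2));	/* \ + 8 + 2 + NUL = 12 */
	printf("%u\n", name_string_size(3, 1));			/* ^^^ + 4 + 2 + NUL = 10 */
	return 0;
}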

View File

@ -89,7 +89,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object *return_desc = NULL;
ACPI_FUNCTION_TRACE_STR("ex_opcode_0A_0T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@ -150,7 +150,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
union acpi_operand_object **operand = &walk_state->operands[0];
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_0T_0R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@ -216,7 +216,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object **operand = &walk_state->operands[0];
ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_1T_0R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@ -264,7 +264,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
acpi_integer power_of_ten;
acpi_integer digit;
ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_1T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@ -322,8 +322,9 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
/* Since the bit position is one-based, subtract from 33 (65) */
return_desc->integer.value = temp32 == 0 ? 0 :
(ACPI_INTEGER_BIT_SIZE + 1) - temp32;
return_desc->integer.value =
temp32 ==
0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
break;
case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */
@ -342,6 +343,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
for (i = 0;
(i < acpi_gbl_integer_nybble_width) && (digit > 0);
i++) {
/* Get the least significant 4-bit BCD digit */
temp32 = ((u32) digit) & 0xF;
@ -487,6 +489,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_DECIMAL);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@ -497,6 +500,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_HEX);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@ -506,6 +510,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@ -516,6 +521,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_integer(operand[0], &return_desc,
ACPI_ANY_BASE);
if (return_desc == operand[0]) {
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@ -541,6 +547,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
}
if (ACPI_SUCCESS(status)) {
/* Store the return value computed above into the target object */
status = acpi_ex_store(return_desc, operand[1], walk_state);
@ -548,16 +555,18 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
cleanup:
if (!walk_state->result_obj) {
walk_state->result_obj = return_desc;
}
/* Delete return object on error */
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(return_desc);
}
/* Save return object on success */
else if (!walk_state->result_obj) {
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS(status);
}
@ -582,7 +591,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
u32 type;
acpi_integer value;
ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_0T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@ -625,6 +634,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
temp_desc = operand[0];
if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) ==
ACPI_DESC_TYPE_OPERAND) {
/* Internal reference object - prevent deletion */
acpi_ut_add_reference(temp_desc);
@ -689,6 +699,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Allocate a descriptor to hold the type. */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@ -777,8 +788,25 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/* Check for a method local or argument, or standalone String */
if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
ACPI_DESC_TYPE_NAMED) {
temp_desc =
acpi_ns_get_attached_object((struct
acpi_namespace_node *)
operand[0]);
if (temp_desc
&&
((ACPI_GET_OBJECT_TYPE(temp_desc) ==
ACPI_TYPE_STRING)
|| (ACPI_GET_OBJECT_TYPE(temp_desc) ==
ACPI_TYPE_LOCAL_REFERENCE))) {
operand[0] = temp_desc;
acpi_ut_add_reference(temp_desc);
} else {
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
} else {
switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
case ACPI_TYPE_LOCAL_REFERENCE:
/*
@ -827,26 +855,35 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
break;
case ACPI_TYPE_STRING:
break;
default:
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
}
if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
ACPI_DESC_TYPE_NAMED) {
if (ACPI_GET_OBJECT_TYPE(operand[0]) ==
ACPI_TYPE_STRING) {
/*
* This is a deref_of (String). The string is a reference
* to a named ACPI object.
*
* 1) Find the owning Node
* 2) Dereference the node to an actual object. Could be a
* Field, so we need to resolve the node to a value.
*/
status =
acpi_ns_get_node_by_path(operand[0]->string.
pointer,
walk_state->
scope_info->scope.
node,
ACPI_NS_SEARCH_PARENT,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&return_desc));
acpi_ns_get_node(walk_state->scope_info->
scope.node,
operand[0]->string.pointer,
ACPI_NS_SEARCH_PARENT,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&return_desc));
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@ -857,11 +894,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
(struct acpi_namespace_node, &return_desc),
walk_state);
goto cleanup;
default:
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
}
@ -937,13 +969,12 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_ut_add_reference
(return_desc);
}
break;
default:
ACPI_ERROR((AE_INFO,
"Unknown Index target_type %X in obj %p",
"Unknown Index TargetType %X in obj %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@ -957,7 +988,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
ACPI_DESC_TYPE_NAMED) {
return_desc =
acpi_ns_get_attached_object((struct
acpi_namespace_node
@ -972,7 +1002,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
"Unknown opcode in ref(%p) - %X",
"Unknown opcode in reference(%p) - %X",
operand[0],
operand[0]->reference.opcode));
@ -998,6 +1028,11 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_ut_remove_reference(return_desc);
}
walk_state->result_obj = return_desc;
/* Save return object on success */
else {
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS(status);
}
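
The reformatted FindSetRightBit result above is a one-based bit position: the operand is shifted left until it empties, and the shift count is subtracted from 65 (33 for 32-bit integers). A standalone sketch of that computation, not the ACPICA code.

#include <stdint.h>
#include <stdio.h>

#define INTEGER_BIT_SIZE 64

static uint64_t find_set_right_bit(uint64_t value)
{
	uint32_t shifts;

	/* Count the left shifts needed to clear the value */
	for (shifts = 0; value && shifts < INTEGER_BIT_SIZE; shifts++)
		value <<= 1;

	/* Bit positions are one-based, so subtract from 65 (33 for 32-bit) */
	return shifts == 0 ? 0 : (INTEGER_BIT_SIZE + 1) - shifts;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)find_set_right_bit(0));	/* 0 */
	printf("%llu\n", (unsigned long long)find_set_right_bit(8));	/* 4 */
	printf("%llu\n", (unsigned long long)find_set_right_bit(6));	/* 2 */
	return 0;
}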

View File

@ -92,7 +92,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
u32 value;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_0T_0R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the opcode */
@ -121,7 +121,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
#ifdef ACPI_GPE_NOTIFY_CHECK
/*
* GPE method wake/notify check. Here, we want to ensure that we
* don't receive any "device_wake" Notifies from a GPE _Lxx or _Exx
* don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
* GPE method during system runtime. If we do, the GPE is marked
* as "wake-only" and disabled.
*
@ -138,6 +138,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
acpi_ev_check_for_wake_only_gpe(walk_state->
gpe_event_info);
if (ACPI_FAILURE(status)) {
/* AE_WAKE_ONLY_GPE only error, means ignore this notify */
return_ACPI_STATUS(AE_OK)
@ -185,7 +186,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
union acpi_operand_object *return_desc2 = NULL;
acpi_status status;
ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_2T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_2T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
@ -252,6 +253,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
acpi_ut_remove_reference(return_desc2);
if (ACPI_FAILURE(status)) {
/* Delete the return object */
acpi_ut_remove_reference(return_desc1);
@ -281,12 +283,13 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
acpi_size length;
ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_1T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
if (walk_state->op_info->flags & AML_MATH) {
/* All simple math opcodes (add, etc.) */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@ -383,54 +386,70 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
/* Initialize the Index reference object */
index = operand[1]->integer.value;
return_desc->reference.offset = (u32) index;
return_desc->reference.opcode = AML_INDEX_OP;
/* At this point, the Source operand is a Package, Buffer, or String */
/*
* At this point, the Source operand is a String, Buffer, or Package.
* Verify that the index is within range.
*/
switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
case ACPI_TYPE_STRING:
if (ACPI_GET_OBJECT_TYPE(operand[0]) == ACPI_TYPE_PACKAGE) {
/* Object to be indexed is a Package */
if (index >= operand[0]->package.count) {
ACPI_ERROR((AE_INFO,
"Index value (%X%8.8X) beyond package end (%X)",
ACPI_FORMAT_UINT64(index),
operand[0]->package.count));
status = AE_AML_PACKAGE_LIMIT;
goto cleanup;
}
return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
return_desc->reference.object = operand[0];
return_desc->reference.where =
&operand[0]->package.elements[index];
} else {
/* Object to be indexed is a Buffer/String */
if (index >= operand[0]->buffer.length) {
ACPI_ERROR((AE_INFO,
"Index value (%X%8.8X) beyond end of buffer (%X)",
ACPI_FORMAT_UINT64(index),
operand[0]->buffer.length));
status = AE_AML_BUFFER_LIMIT;
goto cleanup;
if (index >= operand[0]->string.length) {
status = AE_AML_STRING_LIMIT;
}
return_desc->reference.target_type =
ACPI_TYPE_BUFFER_FIELD;
return_desc->reference.object = operand[0];
break;
case ACPI_TYPE_BUFFER:
if (index >= operand[0]->buffer.length) {
status = AE_AML_BUFFER_LIMIT;
}
return_desc->reference.target_type =
ACPI_TYPE_BUFFER_FIELD;
break;
case ACPI_TYPE_PACKAGE:
if (index >= operand[0]->package.count) {
status = AE_AML_PACKAGE_LIMIT;
}
return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
return_desc->reference.where =
&operand[0]->package.elements[index];
break;
default:
status = AE_AML_INTERNAL;
goto cleanup;
}
/* Failure means that the Index was beyond the end of the object */
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Index (%X%8.8X) is beyond end of object",
ACPI_FORMAT_UINT64(index)));
goto cleanup;
}
/*
* Add a reference to the target package/buffer/string for the life
* of the index.
* Save the target object and add a reference to it for the life
* of the index
*/
return_desc->reference.object = operand[0];
acpi_ut_add_reference(operand[0]);
/* Complete the Index reference object */
return_desc->reference.opcode = AML_INDEX_OP;
return_desc->reference.offset = (u32) index;
/* Store the reference to the Target */
status = acpi_ex_store(return_desc, operand[2], walk_state);
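
The rewritten Index handling above validates the index against a type-specific limit before the reference is completed. A compact sketch of that dispatch; the enum, struct, and status strings are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

enum obj_type { OBJ_STRING, OBJ_BUFFER, OBJ_PACKAGE };

struct aml_object {
	enum obj_type type;
	uint32_t length;	/* string/buffer length or package element count */
};

/* The limit that applies depends on whether the source is a String,
 * Buffer, or Package. */
static const char *check_index(const struct aml_object *obj, uint64_t index)
{
	switch (obj->type) {
	case OBJ_STRING:
		return index >= obj->length ? "AE_AML_STRING_LIMIT" : "AE_OK";
	case OBJ_BUFFER:
		return index >= obj->length ? "AE_AML_BUFFER_LIMIT" : "AE_OK";
	case OBJ_PACKAGE:
		return index >= obj->length ? "AE_AML_PACKAGE_LIMIT" : "AE_OK";
	}
	return "AE_AML_INTERNAL";
}

int main(void)
{
	struct aml_object buf = { OBJ_BUFFER, 4 };
	struct aml_object pkg = { OBJ_PACKAGE, 2 };

	printf("%s\n", check_index(&buf, 3));	/* AE_OK */
	printf("%s\n", check_index(&buf, 4));	/* AE_AML_BUFFER_LIMIT */
	printf("%s\n", check_index(&pkg, 5));	/* AE_AML_PACKAGE_LIMIT */
	return 0;
}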
@ -495,7 +514,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
u8 logical_result = FALSE;
ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_0T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Create the internal return object */
@ -509,6 +528,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
/* Execute the Opcode */
if (walk_state->op_info->flags & AML_LOGICAL_NUMERIC) {
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_numeric_op(walk_state->opcode,
@ -518,6 +538,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
value, &logical_result);
goto store_logical_result;
} else if (walk_state->op_info->flags & AML_LOGICAL) {
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_op(walk_state->opcode, operand[0],

View File

@ -88,20 +88,19 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
struct acpi_signal_fatal_info *fatal;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_STR("ex_opcode_3A_0T_0R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
case AML_FATAL_OP: /* Fatal (fatal_type fatal_code fatal_arg) */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"fatal_op: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
"FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
(u32) operand[0]->integer.value,
(u32) operand[1]->integer.value,
(u32) operand[2]->integer.value));
fatal =
ACPI_MEM_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
if (fatal) {
fatal->type = (u32) operand[0]->integer.value;
fatal->code = (u32) operand[1]->integer.value;
@ -114,7 +113,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
/* Might return while OS is shutting down, just continue */
ACPI_MEM_FREE(fatal);
ACPI_FREE(fatal);
break;
default:
@ -151,7 +150,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
acpi_integer index;
acpi_size length;
ACPI_FUNCTION_TRACE_STR("ex_opcode_3A_1T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
@ -196,7 +195,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* Always allocate a new buffer for the String */
buffer = ACPI_MEM_CALLOCATE((acpi_size) length + 1);
buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
@ -208,9 +207,10 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* If the requested length is zero, don't allocate a buffer */
if (length > 0) {
/* Allocate a new buffer for the Buffer */
buffer = ACPI_MEM_CALLOCATE(length);
buffer = ACPI_ALLOCATE_ZEROED(length);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
@ -225,6 +225,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
}
if (buffer) {
/* We have a buffer, copy the portion requested */
ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,

View File

@ -220,7 +220,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
acpi_integer index;
union acpi_operand_object *this_element;
ACPI_FUNCTION_TRACE_STR("ex_opcode_6A_0T_1R",
ACPI_FUNCTION_TRACE_STR(ex_opcode_6A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
@ -276,6 +276,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
* match was found.
*/
for (; index < operand[0]->package.count; index++) {
/* Get the current package element */
this_element = operand[0]->package.elements[index];

View File

@ -97,7 +97,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
u32 minimum_accesses = 0xFFFFFFFF;
u32 accesses;
ACPI_FUNCTION_TRACE("ex_generate_access");
ACPI_FUNCTION_TRACE(ex_generate_access);
/* Round Field start offset and length to "minimal" byte boundaries */
@ -146,7 +146,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
accesses = field_end_offset - field_start_offset;
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"access_width %d end is within region\n",
"AccessWidth %d end is within region\n",
access_byte_width));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@ -173,7 +173,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
}
} else {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"access_width %d end is NOT within region\n",
"AccessWidth %d end is NOT within region\n",
access_byte_width));
if (access_byte_width == 1) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@ -228,7 +228,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
u32 byte_alignment;
u32 bit_length;
ACPI_FUNCTION_TRACE("ex_decode_field_access");
ACPI_FUNCTION_TRACE(ex_decode_field_access);
access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK);
@ -322,7 +322,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
u32 byte_alignment;
u32 nearest_byte_address;
ACPI_FUNCTION_TRACE("ex_prep_common_field_object");
ACPI_FUNCTION_TRACE(ex_prep_common_field_object);
/*
* Note: the structure being initialized is the
@ -415,13 +415,13 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
u32 type;
acpi_status status;
ACPI_FUNCTION_TRACE("ex_prep_field_value");
ACPI_FUNCTION_TRACE(ex_prep_field_value);
/* Parameter validation */
if (info->field_type != ACPI_TYPE_LOCAL_INDEX_FIELD) {
if (!info->region_node) {
ACPI_ERROR((AE_INFO, "Null region_node"));
ACPI_ERROR((AE_INFO, "Null RegionNode"));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
@ -467,7 +467,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->field.region_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"region_field: bit_off %X, Off %X, Gran %X, Region %p\n",
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,
obj_desc->field.base_byte_offset,
obj_desc->field.access_byte_width,
@ -488,7 +488,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->bank_field.bank_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Bank Field: bit_off %X, Off %X, Gran %X, Region %p, bank_reg %p\n",
"Bank Field: BitOff %X, Off %X, Gran %X, Region %p, BankReg %p\n",
obj_desc->bank_field.start_field_bit_offset,
obj_desc->bank_field.base_byte_offset,
obj_desc->field.access_byte_width,
@ -519,16 +519,29 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->index_field.index_obj);
/*
* April 2006: Changed to match MS behavior
*
* The value written to the Index register is the byte offset of the
* target field
* Note: may change code to: ACPI_DIV_8 (Info->field_bit_position)
* target field in units of the granularity of the index_field
*
* Previously, the value was calculated as an index in terms of the
* width of the Data register, as below:
*
* obj_desc->index_field.Value = (u32)
* (Info->field_bit_position / ACPI_MUL_8 (
* obj_desc->Field.access_byte_width));
*
* February 2006: Tried value as a byte offset:
* obj_desc->index_field.Value = (u32)
* ACPI_DIV_8 (Info->field_bit_position);
*/
obj_desc->index_field.value = (u32)
(info->field_bit_position /
ACPI_MUL_8(obj_desc->field.access_byte_width));
obj_desc->index_field.value =
(u32) ACPI_ROUND_DOWN(ACPI_DIV_8(info->field_bit_position),
obj_desc->index_field.
access_byte_width);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"index_field: bit_off %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
"IndexField: BitOff %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
obj_desc->index_field.start_field_bit_offset,
obj_desc->index_field.base_byte_offset,
obj_desc->index_field.value,
@ -550,7 +563,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ns_get_type(info->field_node));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"Set named_obj %p [%4.4s], obj_desc %p\n",
"Set NamedObj %p [%4.4s], ObjDesc %p\n",
info->field_node,
acpi_ut_get_node_name(info->field_node), obj_desc));
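
The long comment above documents the April 2006 change: the value written to the Index register is now the target field's byte offset rounded down to the IndexField's access width, where it was previously an index expressed in Data-register widths. A worked sketch of the arithmetic; the helper name is hypothetical.

#include <stdint.h>
#include <stdio.h>

static uint32_t index_register_value(uint32_t field_bit_position,
				     uint32_t access_byte_width)
{
	uint32_t byte_offset = field_bit_position / 8;		/* ACPI_DIV_8 */

	return byte_offset - (byte_offset % access_byte_width);	/* ACPI_ROUND_DOWN */
}

int main(void)
{
	/* A field starting at bit 56 with 4-byte access granularity:
	 * byte offset 7, rounded down to the 4-byte boundary -> 4. */
	printf("%u\n", index_register_value(56, 4));	/* 4 */

	/* The older behaviour (an index in Data-register widths) would have
	 * written 56 / (8 * 4) = 1 instead. */
	printf("%u\n", (56 / 8) / 4);			/* 1 */
	return 0;
}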

View File

@ -81,7 +81,7 @@ acpi_ex_system_memory_space_handler(u32 function,
u32 remainder;
#endif
ACPI_FUNCTION_TRACE("ex_system_memory_space_handler");
ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);
/* Validate and translate the bit width */
@ -103,7 +103,7 @@ acpi_ex_system_memory_space_handler(u32 function,
break;
default:
ACPI_ERROR((AE_INFO, "Invalid system_memory width %d",
ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
@ -135,6 +135,7 @@ acpi_ex_system_memory_space_handler(u32 function,
* Delete the existing mapping and create a new one.
*/
if (mem_info->mapped_length) {
/* Valid mapping, delete it */
acpi_os_unmap_memory(mem_info->mapped_logical_address,
@ -181,8 +182,8 @@ acpi_ex_system_memory_space_handler(u32 function,
(acpi_integer) mem_info->mapped_physical_address);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"system_memory %d (%d width) Address=%8.8X%8.8X\n",
function, bit_width, ACPI_FORMAT_UINT64(address)));
"System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
bit_width, function, ACPI_FORMAT_UINT64(address)));
/*
* Perform the memory read or write
@ -283,11 +284,11 @@ acpi_ex_system_io_space_handler(u32 function,
acpi_status status = AE_OK;
u32 value32;
ACPI_FUNCTION_TRACE("ex_system_io_space_handler");
ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"system_iO %d (%d width) Address=%8.8X%8.8X\n",
function, bit_width, ACPI_FORMAT_UINT64(address)));
"System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
bit_width, function, ACPI_FORMAT_UINT64(address)));
/* Decode the function parameter */
@ -342,7 +343,7 @@ acpi_ex_pci_config_space_handler(u32 function,
struct acpi_pci_id *pci_id;
u16 pci_register;
ACPI_FUNCTION_TRACE("ex_pci_config_space_handler");
ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
/*
* The arguments to acpi_os(Read|Write)pci_configuration are:
@ -360,7 +361,7 @@ acpi_ex_pci_config_space_handler(u32 function,
pci_register = (u16) (u32) address;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"pci_config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
"Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
function, bit_width, pci_id->segment, pci_id->bus,
pci_id->device, pci_id->function, pci_register));
@ -414,7 +415,7 @@ acpi_ex_cmos_space_handler(u32 function,
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_cmos_space_handler");
ACPI_FUNCTION_TRACE(ex_cmos_space_handler);
return_ACPI_STATUS(status);
}
@ -446,7 +447,7 @@ acpi_ex_pci_bar_space_handler(u32 function,
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_pci_bar_space_handler");
ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);
return_ACPI_STATUS(status);
}
@ -476,23 +477,16 @@ acpi_ex_data_table_space_handler(u32 function,
acpi_integer * value,
void *handler_context, void *region_context)
{
acpi_status status = AE_OK;
u32 byte_width = ACPI_DIV_8(bit_width);
u32 i;
char *logical_addr_ptr;
ACPI_FUNCTION_TRACE("ex_data_table_space_handler");
logical_addr_ptr = ACPI_PHYSADDR_TO_PTR(address);
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
/* Perform the memory read or write */
switch (function) {
case ACPI_READ:
for (i = 0; i < byte_width; i++) {
((char *)value)[i] = logical_addr_ptr[i];
}
ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
ACPI_PHYSADDR_TO_PTR(address),
ACPI_DIV_8(bit_width));
break;
case ACPI_WRITE:
@ -501,5 +495,5 @@ acpi_ex_data_table_space_handler(u32 function,
return_ACPI_STATUS(AE_SUPPORT);
}
return_ACPI_STATUS(status);
return_ACPI_STATUS(AE_OK);
}
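
The DataTable handler above now copies the requested bytes with one ACPI_MEMCPY instead of a per-byte loop. A minimal sketch of that read path; the names are illustrative and the example assumes the table bytes are already mapped into virtual memory.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Copy bit_width/8 bytes from the mapped table address into the value. */
static void data_table_read(const void *table_address, uint32_t bit_width,
			    uint64_t *value)
{
	*value = 0;					/* clear unused high bytes */
	memcpy(value, table_address, bit_width / 8);	/* ACPI_DIV_8(bit_width) */
}

int main(void)
{
	const uint8_t table[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint64_t value;

	data_table_read(table, 32, &value);
	printf("0x%llx\n", (unsigned long long)value);	/* 0x44332211 on little-endian */
	return 0;
}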

View File

@ -87,7 +87,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
struct acpi_namespace_node *node;
acpi_object_type entry_type;
ACPI_FUNCTION_TRACE("ex_resolve_node_to_value");
ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
/*
* The stack pointer points to a struct acpi_namespace_node (Node). Get the
@ -97,12 +97,13 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
source_desc = acpi_ns_get_attached_object(node);
entry_type = acpi_ns_get_type((acpi_handle) node);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p source_desc=%p [%s]\n",
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
node, source_desc,
acpi_ut_get_type_name(entry_type)));
if ((entry_type == ACPI_TYPE_LOCAL_ALIAS) ||
(entry_type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
/* There is always exactly one level of indirection */
node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
@ -113,10 +114,11 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/*
* Several object types require no further processing:
* 1) Devices rarely have an attached object, return the Node
* 1) Device/Thermal objects don't have a "real" subobject, return the Node
* 2) Method locals and arguments have a pseudo-Node
*/
if (entry_type == ACPI_TYPE_DEVICE ||
if ((entry_type == ACPI_TYPE_DEVICE) ||
(entry_type == ACPI_TYPE_THERMAL) ||
(node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
return_ACPI_STATUS(AE_OK);
}
@ -141,6 +143,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
status = acpi_ds_get_package_arguments(source_desc);
if (ACPI_SUCCESS(status)) {
/* Return an additional reference to the object */
obj_desc = source_desc;
@ -158,6 +161,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
status = acpi_ds_get_buffer_arguments(source_desc);
if (ACPI_SUCCESS(status)) {
/* Return an additional reference to the object */
obj_desc = source_desc;
@ -199,7 +203,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_LOCAL_INDEX_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"field_read Node=%p source_desc=%p Type=%X\n",
"FieldRead Node=%p SourceDesc=%p Type=%X\n",
node, source_desc, entry_type));
status =
@ -213,7 +217,6 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
@ -240,6 +243,8 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* This is a ddb_handle */
/* Return an additional reference to the object */
case AML_REF_OF_OP:
obj_desc = source_desc;
acpi_ut_add_reference(obj_desc);
break;

View File

@ -78,7 +78,7 @@ acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
{
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ex_resolve_to_value", stack_ptr);
ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);
if (!stack_ptr || !*stack_ptr) {
ACPI_ERROR((AE_INFO, "Internal - null pointer"));
@ -144,7 +144,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
union acpi_operand_object *obj_desc;
u16 opcode;
ACPI_FUNCTION_TRACE("ex_resolve_object_to_value");
ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
stack_desc = *stack_ptr;
@ -190,7 +190,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Arg/Local %X] value_obj is %p\n",
"[Arg/Local %X] ValueObj is %p\n",
stack_desc->reference.offset,
obj_desc));
@ -239,7 +239,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
"Unknown target_type %X in Index/Reference obj %p",
"Unknown TargetType %X in Index/Reference obj %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
@ -257,10 +257,24 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
case AML_INT_NAMEPATH_OP: /* Reference to a named object */
/* Get the object pointed to by the namespace node */
/* Dereference the name */
if ((stack_desc->reference.node->type ==
ACPI_TYPE_DEVICE)
|| (stack_desc->reference.node->type ==
ACPI_TYPE_THERMAL)) {
/* These node types do not have 'real' subobjects */
*stack_ptr = (void *)stack_desc->reference.node;
} else {
/* Get the object pointed to by the namespace node */
*stack_ptr =
(stack_desc->reference.node)->object;
acpi_ut_add_reference(*stack_ptr);
}
*stack_ptr = (stack_desc->reference.node)->object;
acpi_ut_add_reference(*stack_ptr);
acpi_ut_remove_reference(stack_desc);
break;
@ -293,7 +307,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
case ACPI_TYPE_LOCAL_INDEX_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"field_read source_desc=%p Type=%X\n",
"FieldRead SourceDesc=%p Type=%X\n",
stack_desc,
ACPI_GET_OBJECT_TYPE(stack_desc)));
@ -337,7 +351,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
acpi_object_type type;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_ex_resolve_multiple");
ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);
/* Operand can be either a namespace node or an operand descriptor */
@ -382,10 +396,16 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
switch (obj_desc->reference.opcode) {
case AML_REF_OF_OP:
case AML_INT_NAMEPATH_OP:
/* Dereference the reference pointer */
node = obj_desc->reference.object;
if (obj_desc->reference.opcode == AML_REF_OF_OP) {
node = obj_desc->reference.object;
} else { /* AML_INT_NAMEPATH_OP */
node = obj_desc->reference.node;
}
/* All "References" point to a NS node */
@ -401,6 +421,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
/* No object, use the NS node type */
type = acpi_ns_get_type(node);
@ -432,6 +453,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
*/
obj_desc = *(obj_desc->reference.where);
if (!obj_desc) {
/* NULL package elements are allowed */
type = 0; /* Uninitialized */
@ -439,39 +461,6 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
}
break;
case AML_INT_NAMEPATH_OP:
/* Dereference the reference pointer */
node = obj_desc->reference.node;
/* All "References" point to a NS node */
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
ACPI_DESC_TYPE_NAMED) {
ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
node,
acpi_ut_get_descriptor_name(node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
/* Get the attached object */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
/* No object, use the NS node type */
type = acpi_ns_get_type(node);
goto exit;
}
/* Check for circular references */
if (obj_desc == operand) {
return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
}
break;
case AML_LOCAL_OP:
case AML_ARG_OP:
@ -513,7 +502,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
case AML_DEBUG_OP:
/* The Debug Object is of type "debug_object" */
/* The Debug Object is of type "DebugObject" */
type = ACPI_TYPE_DEBUG_OBJECT;
goto exit;

View File

@ -77,6 +77,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
ACPI_FUNCTION_ENTRY();
if (type_needed == ACPI_TYPE_ANY) {
/* All types OK, so we don't perform any typechecks */
return (AE_OK);
@ -143,7 +144,7 @@ acpi_ex_resolve_operands(u16 opcode,
acpi_object_type type_needed;
u16 target_op = 0;
ACPI_FUNCTION_TRACE_U32("ex_resolve_operands", opcode);
ACPI_FUNCTION_TRACE_U32(ex_resolve_operands, opcode);
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
@ -158,7 +159,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Opcode %X [%s] required_operand_types=%8.8X\n",
"Opcode %X [%s] RequiredOperandTypes=%8.8X\n",
opcode, op_info->name, arg_types));
/*
@ -224,6 +225,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
/* Decode the Reference */
op_info = acpi_ps_get_opcode_info(opcode);
@ -247,7 +249,7 @@ acpi_ex_resolve_operands(u16 opcode,
ACPI_DEBUG_ONLY_MEMBERS(ACPI_DEBUG_PRINT
((ACPI_DB_EXEC,
"Operand is a Reference, ref_opcode [%s]\n",
"Operand is a Reference, RefOpcode [%s]\n",
(acpi_ps_get_opcode_info
(obj_desc->
reference.
@ -332,6 +334,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (obj_desc->reference.opcode == AML_NAME_OP) {
/* Convert a named reference to the actual named object */
temp_node = obj_desc->reference.object;
@ -623,7 +626,7 @@ acpi_ex_resolve_operands(u16 opcode,
default:
ACPI_ERROR((AE_INFO,
"Needed [Region/region_field], found [%s] %p",
"Needed [Region/RegionField], found [%s] %p",
acpi_ut_get_object_type_name
(obj_desc), obj_desc));
@ -662,6 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (target_op == AML_DEBUG_OP) {
/* Allow store of any object to the Debug object */
break;

View File

@ -82,7 +82,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
{
u32 i;
ACPI_FUNCTION_TRACE_PTR("ex_do_debug_object", source_desc);
ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
level, " "));
@ -245,7 +245,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
acpi_status status = AE_OK;
union acpi_operand_object *ref_desc = dest_desc;
ACPI_FUNCTION_TRACE_PTR("ex_store", dest_desc);
ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc);
/* Validate parameters */
@ -297,7 +297,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
ACPI_DUMP_STACK_ENTRY(source_desc);
ACPI_DUMP_STACK_ENTRY(dest_desc);
ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ex_store",
ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ExStore",
2,
"Target is not a Reference or Constant object");
@ -396,7 +396,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
u8 value = 0;
u32 i;
ACPI_FUNCTION_TRACE("ex_store_object_to_index");
ACPI_FUNCTION_TRACE(ex_store_object_to_index);
/*
* Destination must be a reference pointer, and
@ -423,6 +423,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
}
if (obj_desc) {
/* Decrement reference count by the ref count of the parent package */
for (i = 0; i < ((union acpi_operand_object *)
@ -502,8 +503,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
break;
default:
ACPI_ERROR((AE_INFO,
"Target is not a Package or buffer_field"));
ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField"));
status = AE_AML_OPERAND_TYPE;
break;
}
@ -548,7 +548,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
union acpi_operand_object *new_desc;
acpi_object_type target_type;
ACPI_FUNCTION_TRACE_PTR("ex_store_object_to_node", source_desc);
ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);
/* Get current type of the node, and object attached to Node */
@ -572,6 +572,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
/* If no implicit conversion, drop into the default case below */
if ((!implicit_conversion) || (walk_state->opcode == AML_COPY_OP)) {
/* Force execution of default (no implicit conversion) */
target_type = ACPI_TYPE_ANY;

View File

@ -72,7 +72,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
union acpi_operand_object *source_desc = *source_desc_ptr;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_resolve_object");
ACPI_FUNCTION_TRACE(ex_resolve_object);
/* Ensure we have a Target that can be stored to */
@ -97,6 +97,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
*/
if (ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE) {
/* Resolve a reference object first */
status =
@ -121,6 +122,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
!((ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE)
&& (source_desc->reference.opcode == AML_LOAD_OP))) {
/* Conversion successful but still not a valid type */
ACPI_ERROR((AE_INFO,
@ -199,7 +201,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
union acpi_operand_object *actual_src_desc;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR("ex_store_object_to_object", source_desc);
ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);
actual_src_desc = source_desc;
if (!dest_desc) {
@ -289,6 +291,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
}
if (actual_src_desc != source_desc) {
/* Delete the intermediate (temporary) source object */
acpi_ut_remove_reference(actual_src_desc);

View File

@ -67,7 +67,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
u32 length;
u8 *buffer;
ACPI_FUNCTION_TRACE_PTR("ex_store_buffer_to_buffer", source_desc);
ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
/* We know that source_desc is a buffer by now */
@ -80,7 +80,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
*/
if ((target_desc->buffer.length == 0) ||
(target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
target_desc->buffer.pointer = ACPI_MEM_ALLOCATE(length);
target_desc->buffer.pointer = ACPI_ALLOCATE(length);
if (!target_desc->buffer.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -91,6 +91,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
/* Copy source buffer to target buffer */
if (length <= target_desc->buffer.length) {
/* Clear existing buffer and copy in the new one */
ACPI_MEMSET(target_desc->buffer.pointer, 0,
@ -102,7 +103,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
* NOTE: ACPI versions up to 3.0 specified that the buffer must be
* truncated if the string is smaller than the buffer. However, "other"
* implementations of ACPI never did this and thus became the de facto
* standard. ACPi 3.0_a changes this behavior such that the buffer
* standard. ACPI 3.0_a changes this behavior such that the buffer
* is no longer truncated.
*/
@ -113,6 +114,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
* copy must not truncate the original buffer.
*/
if (original_src_type == ACPI_TYPE_STRING) {
/* Set the new length of the target */
target_desc->buffer.length = length;
@ -156,7 +158,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
u32 length;
u8 *buffer;
ACPI_FUNCTION_TRACE_PTR("ex_store_string_to_string", source_desc);
ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
/* We know that source_desc is a string by now */
@ -183,13 +185,14 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
*/
if (target_desc->string.pointer &&
(!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
/* Only free if not a pointer into the DSDT */
ACPI_MEM_FREE(target_desc->string.pointer);
ACPI_FREE(target_desc->string.pointer);
}
target_desc->string.pointer = ACPI_MEM_CALLOCATE((acpi_size)
length + 1);
target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size)
length + 1);
if (!target_desc->string.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}

View File

@ -68,7 +68,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
acpi_status status;
acpi_status status2;
ACPI_FUNCTION_TRACE("ex_system_wait_semaphore");
ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
status = acpi_os_wait_semaphore(semaphore, 1, 0);
if (ACPI_SUCCESS(status)) {
@ -76,6 +76,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
}
if (status == AE_TIME) {
/* We must wait, so unlock the interpreter */
acpi_ex_exit_interpreter();
@ -90,6 +91,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
/* Report fatal error, could not acquire interpreter */
return_ACPI_STATUS(status2);
@ -191,7 +193,7 @@ acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR("ex_system_acquire_mutex", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -229,7 +231,7 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_system_release_mutex");
ACPI_FUNCTION_TRACE(ex_system_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -263,7 +265,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_system_signal_event");
ACPI_FUNCTION_TRACE(ex_system_signal_event);
if (obj_desc) {
status = acpi_os_signal_semaphore(obj_desc->event.semaphore, 1);
@ -293,7 +295,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE("ex_system_wait_event");
ACPI_FUNCTION_TRACE(ex_system_wait_event);
if (obj_desc) {
status =

View File

@ -87,9 +87,9 @@ acpi_status acpi_ex_enter_interpreter(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ex_enter_interpreter");
ACPI_FUNCTION_TRACE(ex_enter_interpreter);
status = acpi_ut_acquire_mutex(ACPI_MTX_EXECUTE);
status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
}
@ -123,9 +123,9 @@ void acpi_ex_exit_interpreter(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ex_exit_interpreter");
ACPI_FUNCTION_TRACE(ex_exit_interpreter);
status = acpi_ut_release_mutex(ACPI_MTX_EXECUTE);
status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
}
@ -189,11 +189,12 @@ u8 acpi_ex_acquire_global_lock(u32 field_flags)
u8 locked = FALSE;
acpi_status status;
ACPI_FUNCTION_TRACE("ex_acquire_global_lock");
ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
/* Only attempt lock if the always_lock bit is set */
if (field_flags & AML_FIELD_LOCK_RULE_MASK) {
/* We should attempt to get the lock, wait forever */
status = acpi_ev_acquire_global_lock(ACPI_WAIT_FOREVER);
@ -225,15 +226,17 @@ void acpi_ex_release_global_lock(u8 locked_by_me)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ex_release_global_lock");
ACPI_FUNCTION_TRACE(ex_release_global_lock);
/* Only attempt unlock if the caller locked it */
if (locked_by_me) {
/* OK, now release the lock */
status = acpi_ev_release_global_lock();
if (ACPI_FAILURE(status)) {
/* Report the error, but there isn't much else we can do */
ACPI_EXCEPTION((AE_INFO, status,
@ -263,7 +266,7 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base)
u32 num_digits;
acpi_integer current_value;
ACPI_FUNCTION_TRACE("ex_digits_needed");
ACPI_FUNCTION_TRACE(ex_digits_needed);
/* acpi_integer is unsigned, so we don't worry about a '-' prefix */

View File

@ -48,6 +48,8 @@ MODULE_LICENSE("GPL");
static int acpi_fan_add(struct acpi_device *device);
static int acpi_fan_remove(struct acpi_device *device, int type);
static int acpi_fan_suspend(struct acpi_device *device, int state);
static int acpi_fan_resume(struct acpi_device *device, int state);
static struct acpi_driver acpi_fan_driver = {
.name = ACPI_FAN_DRIVER_NAME,
@ -56,6 +58,8 @@ static struct acpi_driver acpi_fan_driver = {
.ops = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
.suspend = acpi_fan_suspend,
.resume = acpi_fan_resume,
},
};
@ -206,6 +210,10 @@ static int acpi_fan_add(struct acpi_device *device)
goto end;
}
device->flags.force_power_state = 1;
acpi_bus_set_power(device->handle, state);
device->flags.force_power_state = 0;
result = acpi_fan_add_fs(device);
if (result)
goto end;
@ -239,6 +247,38 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return_VALUE(0);
}
static int acpi_fan_suspend(struct acpi_device *device, int state)
{
if (!device)
return -EINVAL;
acpi_bus_set_power(device->handle, ACPI_STATE_D0);
return AE_OK;
}
static int acpi_fan_resume(struct acpi_device *device, int state)
{
int result = 0;
int power_state = 0;
if (!device)
return -EINVAL;
result = acpi_bus_get_power(device->handle, &power_state);
if (result) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Error reading fan power state\n"));
return result;
}
device->flags.force_power_state = 1;
acpi_bus_set_power(device->handle, power_state);
device->flags.force_power_state = 0;
return result;
}
static int __init acpi_fan_init(void)
{
int result = 0;

View File

@ -63,7 +63,7 @@ acpi_status acpi_hw_initialize(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("hw_initialize");
ACPI_FUNCTION_TRACE(hw_initialize);
/* We must have the ACPI tables by the time we get here */
@ -100,7 +100,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
acpi_status status;
u32 retry;
ACPI_FUNCTION_TRACE("hw_set_mode");
ACPI_FUNCTION_TRACE(hw_set_mode);
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
@ -198,7 +198,7 @@ u32 acpi_hw_get_mode(void)
acpi_status status;
u32 value;
ACPI_FUNCTION_TRACE("hw_get_mode");
ACPI_FUNCTION_TRACE(hw_get_mode);
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,

View File

@ -214,6 +214,7 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/* Disable all GPEs in this register */
status = acpi_hw_low_level_write(8, 0x00,
@ -250,6 +251,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/* Clear status on all GPEs in this register */
status = acpi_hw_low_level_write(8, 0xFF,
@ -368,7 +370,7 @@ acpi_status acpi_hw_disable_all_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("hw_disable_all_gpes");
ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
@ -391,7 +393,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("hw_enable_all_runtime_gpes");
ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block);
return_ACPI_STATUS(status);
@ -413,7 +415,7 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("hw_enable_all_wakeup_gpes");
ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block);
return_ACPI_STATUS(status);

View File

@ -43,8 +43,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@ -63,23 +61,21 @@ ACPI_MODULE_NAME("hwregs")
* DESCRIPTION: Clears all fixed and general purpose status bits
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
* NOTE: TBD: Flags parameter is obsolete, to be removed
*
******************************************************************************/
acpi_status acpi_hw_clear_acpi_status(u32 flags)
{
acpi_status status;
acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE("hw_clear_acpi_status");
ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
ACPI_BITMASK_ALL_FIXED_STATUS,
(u16) acpi_gbl_FADT->xpm1a_evt_blk.address));
if (flags & ACPI_MTX_LOCK) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS,
@ -104,9 +100,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
unlock_and_exit:
if (flags & ACPI_MTX_LOCK) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
}
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
@ -129,10 +123,9 @@ acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
{
acpi_status status = AE_OK;
struct acpi_parameter_info info;
char *sleep_state_name;
struct acpi_evaluate_info *info;
ACPI_FUNCTION_TRACE("acpi_get_sleep_type_data");
ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
/* Validate parameters */
@ -140,34 +133,39 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Evaluate the namespace object containing the values for this state */
/* Allocate the evaluation information block */
info.parameters = NULL;
info.return_object = NULL;
sleep_state_name =
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
info->pathname =
ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
status = acpi_ns_evaluate_by_name(sleep_state_name, &info);
/* Evaluate the namespace object containing the values for this state */
status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"%s while evaluating sleep_state [%s]\n",
"%s while evaluating SleepState [%s]\n",
acpi_format_exception(status),
sleep_state_name));
info->pathname));
return_ACPI_STATUS(status);
goto cleanup;
}
/* Must have a return object */
if (!info.return_object) {
if (!info->return_object) {
ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
sleep_state_name));
info->pathname));
status = AE_NOT_EXIST;
}
/* It must be of type Package */
else if (ACPI_GET_OBJECT_TYPE(info.return_object) != ACPI_TYPE_PACKAGE) {
else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
"Sleep State return object is not a Package"));
status = AE_AML_OPERAND_TYPE;
@ -180,7 +178,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
* by BIOS vendors seems to be to have 2 or more elements, at least
* one per sleep type (A/B).
*/
else if (info.return_object->package.count < 2) {
else if (info->return_object->package.count < 2) {
ACPI_ERROR((AE_INFO,
"Sleep State return package does not have at least two elements"));
status = AE_AML_NO_OPERAND;
@ -188,39 +186,42 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
/* The first two elements must both be of type Integer */
else if ((ACPI_GET_OBJECT_TYPE(info.return_object->package.elements[0])
else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0])
!= ACPI_TYPE_INTEGER) ||
(ACPI_GET_OBJECT_TYPE(info.return_object->package.elements[1])
(ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
!= ACPI_TYPE_INTEGER)) {
ACPI_ERROR((AE_INFO,
"Sleep State return package elements are not both Integers (%s, %s)",
acpi_ut_get_object_type_name(info.return_object->
acpi_ut_get_object_type_name(info->return_object->
package.elements[0]),
acpi_ut_get_object_type_name(info.return_object->
acpi_ut_get_object_type_name(info->return_object->
package.elements[1])));
status = AE_AML_OPERAND_TYPE;
} else {
/* Valid _Sx_ package size, type, and value */
*sleep_type_a = (u8)
(info.return_object->package.elements[0])->integer.value;
(info->return_object->package.elements[0])->integer.value;
*sleep_type_b = (u8)
(info.return_object->package.elements[1])->integer.value;
(info->return_object->package.elements[1])->integer.value;
}
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating sleep_state [%s], bad Sleep object %p type %s",
sleep_state_name, info.return_object,
acpi_ut_get_object_type_name(info.
"While evaluating SleepState [%s], bad Sleep object %p type %s",
info->pathname, info->return_object,
acpi_ut_get_object_type_name(info->
return_object)));
}
acpi_ut_remove_reference(info.return_object);
acpi_ut_remove_reference(info->return_object);
cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_get_sleep_type_data);
ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
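
For reference, the package layout these checks expect might look like the hypothetical _S3 object below; the values are invented for illustration, only the "first two elements are Integers" shape matters:

/*
 * Hypothetical ASL, shown only to illustrate the checks above:
 *
 *     Name (_S3, Package () { 0x05, 0x05, 0x00, 0x00 })
 *
 * With such an object, acpi_get_sleep_type_data(3, &a, &b) would hand
 * back a = 0x05 and b = 0x05 (one SLP_TYP value per PM1 control block).
 */
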
/*******************************************************************************
*
@ -233,13 +234,12 @@ EXPORT_SYMBOL(acpi_get_sleep_type_data);
* DESCRIPTION: Map register_id into a register bitmask.
*
******************************************************************************/
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
{
ACPI_FUNCTION_ENTRY();
if (register_id > ACPI_BITREG_MAX) {
ACPI_ERROR((AE_INFO, "Invalid bit_register ID: %X",
ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
register_id));
return (NULL);
}
@ -260,6 +260,8 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
*
* DESCRIPTION: ACPI bit_register read function.
*
* NOTE: TBD: Flags parameter is obsolete, to be removed
*
******************************************************************************/
acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
@ -268,7 +270,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
struct acpi_bit_register_info *bit_reg_info;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_get_register");
ACPI_FUNCTION_TRACE(acpi_get_register);
/* Get the info structure corresponding to the requested ACPI Register */
@ -277,24 +279,14 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (flags & ACPI_MTX_LOCK) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/* Read from the register */
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
status = acpi_hw_register_read(ACPI_MTX_LOCK,
bit_reg_info->parent_register,
&register_value);
if (flags & ACPI_MTX_LOCK) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
}
if (ACPI_SUCCESS(status)) {
/* Normalize the value that was read */
register_value =
@ -311,7 +303,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_get_register);
ACPI_EXPORT_SYMBOL(acpi_get_register)
/*******************************************************************************
*
@ -326,31 +318,28 @@ EXPORT_SYMBOL(acpi_get_register);
*
* DESCRIPTION: ACPI Bit Register write function.
*
* NOTE: TBD: Flags parameter is obsolete, to be removed
*
******************************************************************************/
acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
{
u32 register_value = 0;
struct acpi_bit_register_info *bit_reg_info;
acpi_status status;
acpi_cpu_flags lock_flags;
ACPI_FUNCTION_TRACE_U32("acpi_set_register", register_id);
ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
/* Get the info structure corresponding to the requested ACPI Register */
bit_reg_info = acpi_hw_get_bit_register_info(register_id);
if (!bit_reg_info) {
ACPI_ERROR((AE_INFO, "Bad ACPI HW register_id: %X",
ACPI_ERROR((AE_INFO, "Bad ACPI HW RegisterId: %X",
register_id));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (flags & ACPI_MTX_LOCK) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* Always do a register read first so we can insert the new bits */
@ -458,9 +447,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
unlock_and_exit:
if (flags & ACPI_MTX_LOCK) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
}
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
/* Normalize the value that was read */
@ -474,7 +461,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_set_register);
ACPI_EXPORT_SYMBOL(acpi_set_register)
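
A hypothetical caller of the two public bit-register interfaces above could look like the sketch below; it mirrors the acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ...) call that appears later in this patch, and the flags argument is the obsolete ACPI_MTX_* parameter the NOTEs mention (sketch only, not part of the patch):

/* Sketch: read the wake status bit and, if set, write 1 to clear it */
static void example_clear_wake_status(void)
{
	u32 wake_status = 0;
	acpi_status status;

	status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &wake_status,
				   ACPI_MTX_DO_NOT_LOCK);
	if (ACPI_SUCCESS(status) && wake_status)
		(void)acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1,
					ACPI_MTX_DO_NOT_LOCK);
}
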
/******************************************************************************
*
@ -490,21 +477,18 @@ EXPORT_SYMBOL(acpi_set_register);
* given offset.
*
******************************************************************************/
acpi_status
acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
{
u32 value1 = 0;
u32 value2 = 0;
acpi_status status;
acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE("hw_register_read");
ACPI_FUNCTION_TRACE(hw_register_read);
if (ACPI_MTX_LOCK == use_lock) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
@ -582,7 +566,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
if (ACPI_SUCCESS(status)) {
@ -610,14 +594,12 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
{
acpi_status status;
acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE("hw_register_write");
ACPI_FUNCTION_TRACE(hw_register_write);
if (ACPI_MTX_LOCK == use_lock) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
@ -707,7 +689,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
return_ACPI_STATUS(status);
@ -733,7 +715,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
u64 address;
acpi_status status;
ACPI_FUNCTION_NAME("hw_low_level_read");
ACPI_FUNCTION_NAME(hw_low_level_read);
/*
* Must have a valid pointer to a GAS structure, and
@ -805,7 +787,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
u64 address;
acpi_status status;
ACPI_FUNCTION_NAME("hw_low_level_write");
ACPI_FUNCTION_NAME(hw_low_level_write);
/*
* Must have a valid pointer to a GAS structure, and

View File

@ -42,7 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#define _COMPONENT ACPI_HARDWARE
@ -64,7 +63,7 @@ acpi_status
acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
{
ACPI_FUNCTION_TRACE("acpi_set_firmware_waking_vector");
ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
/* Set the vector */
@ -79,6 +78,8 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
/*******************************************************************************
*
* FUNCTION: acpi_get_firmware_waking_vector
@ -92,13 +93,12 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
* DESCRIPTION: Access function for the firmware_waking_vector field in FACS
*
******************************************************************************/
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
{
ACPI_FUNCTION_TRACE("acpi_get_firmware_waking_vector");
ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
if (!physical_address) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -118,6 +118,8 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
#endif
/*******************************************************************************
@ -134,14 +136,13 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
* various OS-specific tasks between the two steps.
*
******************************************************************************/
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
{
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
ACPI_FUNCTION_TRACE("acpi_enter_sleep_state_prep");
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
/*
* _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
@ -206,6 +207,8 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
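
The comment above describes a two-step entry sequence: a preparation call first, then the actual state transition with interrupts disabled. A minimal sketch of a caller performing both steps (illustrative only, not part of this patch; the OS-specific work in between is reduced to a comment):

static acpi_status example_enter_s3(void)
{
	acpi_status status;

	status = acpi_enter_sleep_state_prep(ACPI_STATE_S3);
	if (ACPI_FAILURE(status))
		return status;

	/* OS-specific work goes here: quiesce devices, save state, ... */

	local_irq_disable();	/* second step must run with interrupts off */
	return acpi_enter_sleep_state(ACPI_STATE_S3);
}
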
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state
@ -218,7 +221,6 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
u32 PM1Acontrol;
@ -228,7 +230,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
u32 in_value;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_enter_sleep_state");
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
@ -378,7 +380,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(AE_OK);
}
EXPORT_SYMBOL(acpi_enter_sleep_state);
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
/*******************************************************************************
*
@ -392,13 +394,12 @@ EXPORT_SYMBOL(acpi_enter_sleep_state);
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
{
u32 in_value;
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_enter_sleep_state_s4bios");
ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
status =
acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
@ -443,7 +444,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
return_ACPI_STATUS(AE_OK);
}
EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios);
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
/*******************************************************************************
*
@ -457,7 +458,6 @@ EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios);
* Called with interrupts ENABLED.
*
******************************************************************************/
acpi_status acpi_leave_sleep_state(u8 sleep_state)
{
struct acpi_object_list arg_list;
@ -468,7 +468,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
u32 PM1Acontrol;
u32 PM1Bcontrol;
ACPI_FUNCTION_TRACE("acpi_leave_sleep_state");
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
/*
* Set SLP_TYPE and SLP_EN to state S0.
@ -490,6 +490,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
ACPI_REGISTER_PM1_CONTROL,
&PM1Acontrol);
if (ACPI_SUCCESS(status)) {
/* Clear SLP_EN and SLP_TYP fields */
PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
@ -583,3 +584,5 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)

View File

@ -42,7 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#define _COMPONENT ACPI_HARDWARE
@ -61,13 +60,13 @@ ACPI_MODULE_NAME("hwtimer")
******************************************************************************/
acpi_status acpi_get_timer_resolution(u32 * resolution)
{
ACPI_FUNCTION_TRACE("acpi_get_timer_resolution");
ACPI_FUNCTION_TRACE(acpi_get_timer_resolution);
if (!resolution) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (0 == acpi_gbl_FADT->tmr_val_ext) {
if (acpi_gbl_FADT->tmr_val_ext == 0) {
*resolution = 24;
} else {
*resolution = 32;
@ -76,6 +75,8 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
/******************************************************************************
*
* FUNCTION: acpi_get_timer
@ -87,12 +88,11 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
* DESCRIPTION: Obtains current value of ACPI PM Timer (in ticks).
*
******************************************************************************/
acpi_status acpi_get_timer(u32 * ticks)
{
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_get_timer");
ACPI_FUNCTION_TRACE(acpi_get_timer);
if (!ticks) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -103,7 +103,7 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_get_timer);
ACPI_EXPORT_SYMBOL(acpi_get_timer)
/******************************************************************************
*
@ -133,7 +133,6 @@ EXPORT_SYMBOL(acpi_get_timer);
* 2**32 Ticks / 3,600,000 Ticks/Sec = 1193 sec or 19.88 minutes
*
******************************************************************************/
acpi_status
acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
{
@ -141,7 +140,7 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
u32 delta_ticks;
acpi_integer quotient;
ACPI_FUNCTION_TRACE("acpi_get_timer_duration");
ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
if (!time_elapsed) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -154,7 +153,8 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
if (start_ticks < end_ticks) {
delta_ticks = end_ticks - start_ticks;
} else if (start_ticks > end_ticks) {
if (0 == acpi_gbl_FADT->tmr_val_ext) {
if (acpi_gbl_FADT->tmr_val_ext == 0) {
/* 24-bit Timer */
delta_ticks =
@ -183,4 +183,4 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_get_timer_duration);
ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
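
The rollover arithmetic in the acpi_get_timer_duration() comment above (2**32 ticks at roughly 3,600,000 ticks/sec is about 20 minutes; a 24-bit timer wraps in under 5 seconds) reduces to the sketch below when ACPI's divide helpers are left out (illustrative only, not part of this patch):

/* Sketch: elapsed microseconds between two PM timer readings */
static u32 example_pm_timer_elapsed_us(u32 start_ticks, u32 end_ticks,
				       u8 timer_is_32bit)
{
	u32 delta_ticks;

	if (start_ticks < end_ticks) {
		delta_ticks = end_ticks - start_ticks;
	} else if (start_ticks > end_ticks) {
		/* The timer rolled over between the two readings */
		if (timer_is_32bit)
			delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
		else	/* 24-bit timer, tmr_val_ext == 0 */
			delta_ticks = (0x00FFFFFF - start_ticks) + end_ticks;
	} else {
		return 0;
	}

	/* ticks * 1,000,000 us/sec / ~3,600,000 ticks/sec */
	return (u32)(((u64)delta_ticks * 1000000) / 3600000);
}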

View File

@ -723,6 +723,8 @@ get_parms(char *config_record,
goto do_fail;
count = tmp1 - tmp;
*action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
if (!*action_handle)
goto do_fail;
strncpy(*action_handle, tmp, count);
*(*action_handle + count) = 0;

View File

@ -567,6 +567,69 @@ static int bluetooth_write(char *buf)
return 0;
}
static int wan_supported;
static int wan_init(void)
{
wan_supported = hkey_handle &&
acpi_evalf(hkey_handle, NULL, "GWAN", "qv");
return 0;
}
static int wan_status(void)
{
int status;
if (!wan_supported ||
!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
status = 0;
return status;
}
static int wan_read(char *p)
{
int len = 0;
int status = wan_status();
if (!wan_supported)
len += sprintf(p + len, "status:\t\tnot supported\n");
else if (!(status & 1))
len += sprintf(p + len, "status:\t\tnot installed\n");
else {
len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 1));
len += sprintf(p + len, "commands:\tenable, disable\n");
}
return len;
}
static int wan_write(char *buf)
{
int status = wan_status();
char *cmd;
int do_cmd = 0;
if (!wan_supported)
return -ENODEV;
while ((cmd = next_cmd(&buf))) {
if (strlencmp(cmd, "enable") == 0) {
status |= 2;
} else if (strlencmp(cmd, "disable") == 0) {
status &= ~2;
} else
return -EINVAL;
do_cmd = 1;
}
if (do_cmd && !acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
return 0;
}
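
As the new wan_read()/wan_write() helpers use it, the GWAN/SWAN status word carries an "installed" bit and an "enabled" bit; the sketch below spells that interpretation out with invented names (illustrative only, not part of the patch):

#define EXAMPLE_WAN_INSTALLED	0x01	/* bit 0: WAN module present */
#define EXAMPLE_WAN_ENABLED	0x02	/* bit 1: radio enabled, toggled via SWAN */

static int example_wan_set_radio(int status, int enable)
{
	if (!(status & EXAMPLE_WAN_INSTALLED))
		return -ENODEV;		/* no WAN hardware present */

	if (enable)
		status |= EXAMPLE_WAN_ENABLED;
	else
		status &= ~EXAMPLE_WAN_ENABLED;

	return status;			/* value handed to the SWAN method */
}
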
static int video_supported;
static int video_orig_autosw;
@ -1562,6 +1625,13 @@ static struct ibm_struct ibms[] = {
.read = bluetooth_read,
.write = bluetooth_write,
},
{
.name = "wan",
.init = wan_init,
.read = wan_read,
.write = wan_write,
.experimental = 1,
},
{
.name = "video",
.init = video_init,

View File

@ -123,42 +123,47 @@ static struct acpi_driver acpi_motherboard_driver2 = {
},
};
static void __init acpi_request_region (struct acpi_generic_address *addr,
unsigned int length, char *desc)
{
if (!addr->address || !length)
return;
if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
request_region(addr->address, length, desc);
else if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
request_mem_region(addr->address, length, desc);
}
static void __init acpi_reserve_resources(void)
{
if (acpi_gbl_FADT->xpm1a_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
request_region(acpi_gbl_FADT->xpm1a_evt_blk.address,
acpi_gbl_FADT->pm1_evt_len, "PM1a_EVT_BLK");
acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
if (acpi_gbl_FADT->xpm1b_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
request_region(acpi_gbl_FADT->xpm1b_evt_blk.address,
acpi_gbl_FADT->pm1_evt_len, "PM1b_EVT_BLK");
acpi_request_region(&acpi_gbl_FADT->xpm1b_evt_blk,
acpi_gbl_FADT->pm1_evt_len, "ACPI PM1b_EVT_BLK");
if (acpi_gbl_FADT->xpm1a_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
request_region(acpi_gbl_FADT->xpm1a_cnt_blk.address,
acpi_gbl_FADT->pm1_cnt_len, "PM1a_CNT_BLK");
acpi_request_region(&acpi_gbl_FADT->xpm1a_cnt_blk,
acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1a_CNT_BLK");
if (acpi_gbl_FADT->xpm1b_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
request_region(acpi_gbl_FADT->xpm1b_cnt_blk.address,
acpi_gbl_FADT->pm1_cnt_len, "PM1b_CNT_BLK");
acpi_request_region(&acpi_gbl_FADT->xpm1b_cnt_blk,
acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1b_CNT_BLK");
if (acpi_gbl_FADT->xpm_tmr_blk.address && acpi_gbl_FADT->pm_tm_len == 4)
request_region(acpi_gbl_FADT->xpm_tmr_blk.address, 4, "PM_TMR");
if (acpi_gbl_FADT->pm_tm_len == 4)
acpi_request_region(&acpi_gbl_FADT->xpm_tmr_blk, 4, "ACPI PM_TMR");
if (acpi_gbl_FADT->xpm2_cnt_blk.address && acpi_gbl_FADT->pm2_cnt_len)
request_region(acpi_gbl_FADT->xpm2_cnt_blk.address,
acpi_gbl_FADT->pm2_cnt_len, "PM2_CNT_BLK");
acpi_request_region(&acpi_gbl_FADT->xpm2_cnt_blk,
acpi_gbl_FADT->pm2_cnt_len, "ACPI PM2_CNT_BLK");
/* Length of GPE blocks must be a non-negative multiple of 2 */
if (acpi_gbl_FADT->xgpe0_blk.address && acpi_gbl_FADT->gpe0_blk_len &&
!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
request_region(acpi_gbl_FADT->xgpe0_blk.address,
acpi_gbl_FADT->gpe0_blk_len, "GPE0_BLK");
if (!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
acpi_request_region(&acpi_gbl_FADT->xgpe0_blk,
acpi_gbl_FADT->gpe0_blk_len, "ACPI GPE0_BLK");
if (acpi_gbl_FADT->xgpe1_blk.address && acpi_gbl_FADT->gpe1_blk_len &&
!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
request_region(acpi_gbl_FADT->xgpe1_blk.address,
acpi_gbl_FADT->gpe1_blk_len, "GPE1_BLK");
if (!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
acpi_request_region(&acpi_gbl_FADT->xgpe1_blk,
acpi_gbl_FADT->gpe1_blk_len, "ACPI GPE1_BLK");
}
static int __init acpi_motherboard_init(void)

View File

@ -70,7 +70,7 @@ acpi_status acpi_ns_root_initialize(void)
union acpi_operand_object *obj_desc;
acpi_string val = NULL;
ACPI_FUNCTION_TRACE("ns_root_initialize");
ACPI_FUNCTION_TRACE(ns_root_initialize);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@ -98,6 +98,7 @@ acpi_status acpi_ns_root_initialize(void)
"Entering predefined entries into namespace\n"));
for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {
/* _OSI is optional for now, will be permanent later */
if (!ACPI_STRCMP(init_val->name, "_OSI")
@ -156,7 +157,7 @@ acpi_status acpi_ns_root_initialize(void)
#if defined (ACPI_ASL_COMPILER)
/* save the parameter count for the i_aSL compiler */
/* Save the parameter count for the i_aSL compiler */
new_node->value = obj_desc->method.param_count;
#else
@ -258,10 +259,8 @@ acpi_status acpi_ns_root_initialize(void)
/* Save a handle to "_GPE", it is always present */
if (ACPI_SUCCESS(status)) {
status =
acpi_ns_get_node_by_path("\\_GPE", NULL,
ACPI_NS_NO_UPSEARCH,
&acpi_gbl_fadt_gpe_device);
status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
&acpi_gbl_fadt_gpe_device);
}
return_ACPI_STATUS(status);
@ -310,17 +309,17 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
acpi_object_type type_to_check_for;
acpi_object_type this_search_type;
u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
u32 local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND |
ACPI_NS_SEARCH_PARENT);
u32 local_flags;
ACPI_FUNCTION_TRACE("ns_lookup");
ACPI_FUNCTION_TRACE(ns_lookup);
if (!return_node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
acpi_gbl_ns_lookup_count++;
local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
*return_node = ACPI_ENTRY_NOT_FOUND;
acpi_gbl_ns_lookup_count++;
if (!acpi_gbl_root_node) {
return_ACPI_STATUS(AE_NO_NAMESPACE);
@ -346,14 +345,17 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
return_ACPI_STATUS(AE_AML_INTERNAL);
}
/*
* This node might not be an actual "scope" node (such as a
* Device/Method, etc.) It could be a Package or other object node.
* Back up the tree to find the containing scope node.
*/
while (!acpi_ns_opens_scope(prefix_node->type) &&
prefix_node->type != ACPI_TYPE_ANY) {
prefix_node = acpi_ns_get_parent_node(prefix_node);
if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
/*
* This node might not be an actual "scope" node (such as a
* Device/Method, etc.) It could be a Package or other object node.
* Back up the tree to find the containing scope node.
*/
while (!acpi_ns_opens_scope(prefix_node->type) &&
prefix_node->type != ACPI_TYPE_ANY) {
prefix_node =
acpi_ns_get_parent_node(prefix_node);
}
}
}
@ -365,6 +367,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
* Begin examination of the actual pathname
*/
if (!pathname) {
/* A Null name_path is allowed and refers to the root */
num_segments = 0;
@ -389,6 +392,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
* to the current scope).
*/
if (*path == (u8) AML_ROOT_PREFIX) {
/* Pathname is fully qualified, start from the root */
this_node = acpi_gbl_root_node;
@ -416,6 +420,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node = prefix_node;
num_carats = 0;
while (*path == (u8) AML_PARENT_PREFIX) {
/* Name is fully qualified, no search rules apply */
search_parent_flag = ACPI_NS_NO_UPSEARCH;
@ -430,6 +435,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
num_carats++;
this_node = acpi_ns_get_parent_node(this_node);
if (!this_node) {
/* Current scope has no parent scope */
ACPI_ERROR((AE_INFO,
@ -569,6 +575,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
&this_node);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
/* Name not found in ACPI namespace */
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
@ -602,10 +609,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
(type_to_check_for != ACPI_TYPE_LOCAL_SCOPE) &&
(this_node->type != ACPI_TYPE_ANY) &&
(this_node->type != type_to_check_for)) {
/* Complain about a type mismatch */
ACPI_WARNING((AE_INFO,
"ns_lookup: Type mismatch on %4.4s (%s), searching for (%s)",
"NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
ACPI_CAST_PTR(char, &simple_name),
acpi_ut_get_type_name(this_node->type),
acpi_ut_get_type_name

View File

@ -47,9 +47,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsalloc")
/* Local prototypes */
static void acpi_ns_remove_reference(struct acpi_namespace_node *node);
/*******************************************************************************
*
* FUNCTION: acpi_ns_create_node
@ -61,14 +58,13 @@ static void acpi_ns_remove_reference(struct acpi_namespace_node *node);
* DESCRIPTION: Create a namespace node
*
******************************************************************************/
struct acpi_namespace_node *acpi_ns_create_node(u32 name)
{
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE("ns_create_node");
ACPI_FUNCTION_TRACE(ns_create_node);
node = ACPI_MEM_CALLOCATE(sizeof(struct acpi_namespace_node));
node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
if (!node) {
return_PTR(NULL);
}
@ -76,9 +72,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
node->name.integer = name;
node->reference_count = 1;
ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
return_PTR(node);
}
@ -100,7 +94,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
struct acpi_namespace_node *prev_node;
struct acpi_namespace_node *next_node;
ACPI_FUNCTION_TRACE_PTR("ns_delete_node", node);
ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node);
parent_node = acpi_ns_get_parent_node(node);
@ -115,6 +109,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
}
if (prev_node) {
/* Node is not first child, unlink it */
prev_node->peer = next_node->peer;
@ -125,6 +120,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
/* Node is first child (has no previous peer) */
if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
/* No peers at all */
parent_node->child = NULL;
@ -137,10 +133,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
/*
* Detach an object if there is one then delete the node
* Detach an object if there is one, then delete the node
*/
acpi_ns_detach_object(node);
ACPI_MEM_FREE(node);
(void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
return_VOID;
}
@ -171,7 +167,7 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
acpi_owner_id owner_id = 0;
struct acpi_namespace_node *child_node;
ACPI_FUNCTION_TRACE("ns_install_node");
ACPI_FUNCTION_TRACE(ns_install_node);
/*
* Get the owner ID from the Walk state
@ -216,14 +212,6 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
acpi_ut_get_type_name(parent_node->type),
parent_node));
/*
* Increment the reference count(s) of all parents up to
* the root!
*/
while ((node = acpi_ns_get_parent_node(node)) != NULL) {
node->reference_count++;
}
return_VOID;
}
@ -244,10 +232,9 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
{
struct acpi_namespace_node *child_node;
struct acpi_namespace_node *next_node;
struct acpi_namespace_node *node;
u8 flags;
ACPI_FUNCTION_TRACE_PTR("ns_delete_children", parent_node);
ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
if (!parent_node) {
return_VOID;
@ -264,6 +251,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
* Deallocate all children at this level
*/
do {
/* Get the things we need */
next_node = child_node->peer;
@ -289,26 +277,10 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
*/
acpi_ns_detach_object(child_node);
/*
* Decrement the reference count(s) of all parents up to
* the root! (counts were incremented when the node was created)
*/
node = child_node;
while ((node = acpi_ns_get_parent_node(node)) != NULL) {
node->reference_count--;
}
/* There should be only one reference remaining on this node */
if (child_node->reference_count != 1) {
ACPI_WARNING((AE_INFO,
"Existing references (%d) on node being deleted (%p)",
child_node->reference_count, child_node));
}
/* Now we can delete the node */
ACPI_MEM_FREE(child_node);
(void)acpi_os_release_object(acpi_gbl_namespace_cache,
child_node);
/* And move on to the next child in the list */
@ -341,7 +313,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
struct acpi_namespace_node *child_node = NULL;
u32 level = 1;
ACPI_FUNCTION_TRACE("ns_delete_namespace_subtree");
ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
if (!parent_node) {
return_VOID;
@ -352,11 +324,14 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
* to where we started.
*/
while (level > 0) {
/* Get the next node in this scope (NULL if none) */
child_node = acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
child_node);
child_node =
acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
child_node);
if (child_node) {
/* Found a child node - detach any attached object */
acpi_ns_detach_object(child_node);
@ -399,55 +374,6 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_remove_reference
*
* PARAMETERS: Node - Named node whose reference count is to be
* decremented
*
* RETURN: None.
*
* DESCRIPTION: Remove a Node reference. Decrements the reference count
* of all parent Nodes up to the root. Any node along
* the way that reaches zero references is freed.
*
******************************************************************************/
static void acpi_ns_remove_reference(struct acpi_namespace_node *node)
{
struct acpi_namespace_node *parent_node;
struct acpi_namespace_node *this_node;
ACPI_FUNCTION_ENTRY();
/*
* Decrement the reference count(s) of this node and all
* nodes up to the root, Delete anything with zero remaining references.
*/
this_node = node;
while (this_node) {
/* Prepare to move up to parent */
parent_node = acpi_ns_get_parent_node(this_node);
/* Decrement the reference count on this node */
this_node->reference_count--;
/* Delete the node if no more references */
if (!this_node->reference_count) {
/* Delete all children and delete the node */
acpi_ns_delete_children(this_node);
acpi_ns_delete_node(this_node);
}
this_node = parent_node;
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_delete_namespace_by_owner
@ -469,15 +395,15 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
u32 level;
struct acpi_namespace_node *parent_node;
ACPI_FUNCTION_TRACE_U32("ns_delete_namespace_by_owner", owner_id);
ACPI_FUNCTION_TRACE_U32(ns_delete_namespace_by_owner, owner_id);
if (owner_id == 0) {
return_VOID;
}
deletion_node = NULL;
parent_node = acpi_gbl_root_node;
child_node = NULL;
deletion_node = NULL;
level = 1;
/*
@ -494,12 +420,14 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
child_node);
if (deletion_node) {
acpi_ns_remove_reference(deletion_node);
acpi_ns_delete_children(deletion_node);
acpi_ns_delete_node(deletion_node);
deletion_node = NULL;
}
if (child_node) {
if (child_node->owner_id == owner_id) {
/* Found a matching child node - detach any attached object */
acpi_ns_detach_object(child_node);

View File

@ -75,7 +75,7 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
{
acpi_native_uint i;
ACPI_FUNCTION_NAME("ns_print_pathname");
ACPI_FUNCTION_NAME(ns_print_pathname);
if (!(acpi_dbg_level & ACPI_LV_NAMES)
|| !(acpi_dbg_layer & ACPI_NAMESPACE)) {
@ -123,7 +123,7 @@ void
acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
{
ACPI_FUNCTION_TRACE("ns_dump_pathname");
ACPI_FUNCTION_TRACE(ns_dump_pathname);
/* Do this only if the requested debug level and component are enabled */
@ -167,7 +167,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
u32 dbg_level;
u32 i;
ACPI_FUNCTION_NAME("ns_dump_one_object");
ACPI_FUNCTION_NAME(ns_dump_one_object);
/* Is output enabled? */
@ -191,6 +191,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
}
if (!(info->display_type & ACPI_DISPLAY_SHORT)) {
/* Indent the object according to the level */
acpi_os_printf("%2d%*s", (u32) level - 1, (int)level * 2, " ");
@ -203,6 +204,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
}
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
this_node->name.integer =
acpi_ut_repair_name(this_node->name.integer);
ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
this_node->name.integer));
}
@ -226,6 +230,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
case ACPI_DISPLAY_SUMMARY:
if (!obj_desc) {
/* No attached object, we are done */
acpi_os_printf("\n");
@ -419,6 +424,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
acpi_os_printf("O:%p", obj_desc);
if (!obj_desc) {
/* No attached object, we are done */
acpi_os_printf("\n");
@ -669,7 +675,7 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
{
acpi_handle search_handle = search_base;
ACPI_FUNCTION_TRACE("ns_dump_tables");
ACPI_FUNCTION_TRACE(ns_dump_tables);
if (!acpi_gbl_root_node) {
/*
@ -682,6 +688,7 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
}
if (ACPI_NS_ALL == search_base) {
/* Entire namespace */
search_handle = acpi_gbl_root_node;

View File

@ -74,7 +74,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
acpi_status status;
u32 i;
ACPI_FUNCTION_NAME("ns_dump_one_device");
ACPI_FUNCTION_NAME(ns_dump_one_device);
status =
acpi_ns_dump_one_object(obj_handle, level, context, return_value);
@ -92,7 +92,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
info->hardware_id.value,
ACPI_FORMAT_UINT64(info->address),
info->current_status));
ACPI_MEM_FREE(info);
ACPI_FREE(info);
}
return (status);
@ -115,7 +115,7 @@ void acpi_ns_dump_root_devices(void)
acpi_handle sys_bus_handle;
acpi_status status;
ACPI_FUNCTION_NAME("ns_dump_root_devices");
ACPI_FUNCTION_NAME(ns_dump_root_devices);
/* Only dump the table if tracing is enabled */

View File

@ -1,7 +1,6 @@
/*******************************************************************************
*
* Module Name: nseval - Object evaluation interfaces -- includes control
* method lookup and execution.
* Module Name: nseval - Object evaluation, includes control method execution
*
******************************************************************************/
@ -50,196 +49,14 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nseval")
/* Local prototypes */
static acpi_status
acpi_ns_execute_control_method(struct acpi_parameter_info *info);
static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info);
/*******************************************************************************
*
* FUNCTION: acpi_ns_evaluate_relative
* FUNCTION: acpi_ns_evaluate
*
* PARAMETERS: Pathname - Name of method to execute, If NULL, the
* handle is the object to execute
* Info - Method info block, contains:
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
* Params - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
*
* RETURN: Status
*
* DESCRIPTION: Evaluate the object or find and execute the requested method
*
* MUTEX: Locks Namespace
*
******************************************************************************/
acpi_status
acpi_ns_evaluate_relative(char *pathname, struct acpi_parameter_info *info)
{
acpi_status status;
struct acpi_namespace_node *node = NULL;
union acpi_generic_state *scope_info;
char *internal_path = NULL;
ACPI_FUNCTION_TRACE("ns_evaluate_relative");
/*
* Must have a valid object handle
*/
if (!info || !info->node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Build an internal name string for the method */
status = acpi_ns_internalize_name(pathname, &internal_path);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
scope_info = acpi_ut_create_generic_state();
if (!scope_info) {
goto cleanup1;
}
/* Get the prefix handle and Node */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
info->node = acpi_ns_map_handle_to_node(info->node);
if (!info->node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
status = AE_BAD_PARAMETER;
goto cleanup;
}
/* Lookup the name in the namespace */
scope_info->scope.node = info->node;
status = acpi_ns_lookup(scope_info, internal_path, ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
&node);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Object [%s] not found [%s]\n",
pathname, acpi_format_exception(status)));
goto cleanup;
}
/*
* Now that we have a handle to the object, we can attempt to evaluate it.
*/
ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n",
pathname, node, acpi_ns_get_attached_object(node)));
info->node = node;
status = acpi_ns_evaluate_by_handle(info);
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"*** Completed eval of object %s ***\n", pathname));
cleanup:
acpi_ut_delete_generic_state(scope_info);
cleanup1:
ACPI_MEM_FREE(internal_path);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_evaluate_by_name
*
* PARAMETERS: Pathname - Fully qualified pathname to the object
* Info - Method info block, contains:
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
* Params - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
*
* RETURN: Status
*
* DESCRIPTION: Evaluate the object or find and execute the requested method
* passing the given parameters
*
* MUTEX: Locks Namespace
*
******************************************************************************/
acpi_status
acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info)
{
acpi_status status;
char *internal_path = NULL;
ACPI_FUNCTION_TRACE("ns_evaluate_by_name");
/* Build an internal name string for the method */
status = acpi_ns_internalize_name(pathname, &internal_path);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Lookup the name in the namespace */
status = acpi_ns_lookup(NULL, internal_path, ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
&info->node);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Object at [%s] was not found, status=%.4X\n",
pathname, status));
goto cleanup;
}
/*
* Now that we have a handle to the object, we can attempt to evaluate it.
*/
ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n",
pathname, info->node,
acpi_ns_get_attached_object(info->node)));
status = acpi_ns_evaluate_by_handle(info);
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"*** Completed eval of object %s ***\n", pathname));
cleanup:
/* Cleanup */
if (internal_path) {
ACPI_MEM_FREE(internal_path);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_evaluate_by_handle
*
* PARAMETERS: Info - Method info block, contains:
* Node - Method/Object Node to execute
* PARAMETERS: Info - Evaluation info block, contains:
* prefix_node - Prefix or Method/Object Node to execute
* Pathname - Name of method to execute, If NULL, the
* Node is the object to execute
* Parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
@ -248,29 +65,21 @@ acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info)
* parameter_type - Type of Parameter list
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
* Flags - ACPI_IGNORE_RETURN_VALUE to delete return
*
* RETURN: Status
*
* DESCRIPTION: Evaluate object or execute the requested method passing the
* given parameters
* DESCRIPTION: Execute a control method or return the current value of an
* ACPI namespace object.
*
* MUTEX: Locks Namespace
* MUTEX: Locks interpreter
*
******************************************************************************/
acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info)
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ns_evaluate_by_handle");
/* Check if namespace has been initialized */
if (!acpi_gbl_root_node) {
return_ACPI_STATUS(AE_NO_NAMESPACE);
}
/* Parameter Validation */
ACPI_FUNCTION_TRACE(ns_evaluate);
if (!info) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -280,202 +89,120 @@ acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info)
info->return_object = NULL;
/* Get the prefix handle and Node */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
/*
* Get the actual namespace node for the target object. Handles these cases:
*
* 1) Null node, Pathname (absolute path)
* 2) Node, Pathname (path relative to Node)
* 3) Node, Null Pathname
*/
status = acpi_ns_get_node(info->prefix_node, info->pathname,
ACPI_NS_NO_UPSEARCH, &info->resolved_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
info->node = acpi_ns_map_handle_to_node(info->node);
if (!info->node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/*
* For a method alias, we must grab the actual method node so that proper
* scoping context will be established before execution.
*/
if (acpi_ns_get_type(info->node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) {
info->node =
if (acpi_ns_get_type(info->resolved_node) ==
ACPI_TYPE_LOCAL_METHOD_ALIAS) {
info->resolved_node =
ACPI_CAST_PTR(struct acpi_namespace_node,
info->node->object);
info->resolved_node->object);
}
ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n", info->pathname,
info->resolved_node,
acpi_ns_get_attached_object(info->resolved_node)));
/*
* Two major cases here:
* 1) The object is an actual control method -- execute it.
* 2) The object is not a method -- just return its current value
*
* In both cases, the namespace is unlocked by the acpi_ns* procedure
* 1) The object is a control method -- execute it
* 2) The object is not a method -- just return its current value
*/
if (acpi_ns_get_type(info->node) == ACPI_TYPE_METHOD) {
if (acpi_ns_get_type(info->resolved_node) == ACPI_TYPE_METHOD) {
/*
* Case 1) We have an actual control method to execute
* 1) Object is a control method - execute it
*/
status = acpi_ns_execute_control_method(info);
/* Verify that there is a method object associated with this node */
info->obj_desc =
acpi_ns_get_attached_object(info->resolved_node);
if (!info->obj_desc) {
ACPI_ERROR((AE_INFO,
"Control method has no attached sub-object"));
return_ACPI_STATUS(AE_NULL_OBJECT);
}
ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
ACPI_LV_INFO, _COMPONENT);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Method at AML address %p Length %X\n",
info->obj_desc->method.aml_start + 1,
info->obj_desc->method.aml_length - 1));
/*
* Any namespace deletion must acquire both the namespace and
* interpreter locks to ensure that no thread is using the portion of
* the namespace that is being deleted.
*
* Execute the method via the interpreter. The interpreter is locked
* here before calling into the AML parser
*/
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_ps_execute_method(info);
acpi_ex_exit_interpreter();
} else {
/*
* Case 2) Object is NOT a method, just return its current value
* 2) Object is not a method, return its current value
*/
status = acpi_ns_get_object_value(info);
}
/*
* Check if there is a return value on the stack that must be dealt with
*/
if (status == AE_CTRL_RETURN_VALUE) {
/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
/*
* Objects require additional resolution steps (e.g., the Node may be
* a field that must be read, etc.) -- we can't just grab the object
* out of the node.
*
* Use resolve_node_to_value() to get the associated value.
*
* NOTE: we can get away with passing in NULL for a walk state because
* resolved_node is guaranteed to not be a reference to either a method
* local or a method argument (because this interface is never called
* from a running method.)
*
* Even though we do not directly invoke the interpreter for object
* resolution, we must lock it because we could access an opregion.
* The opregion access code assumes that the interpreter is locked.
*/
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = AE_OK;
}
/* Function has a strange interface */
/*
* Namespace was unlocked by the handling acpi_ns* function, so we
* just return
*/
return_ACPI_STATUS(status);
}
status =
acpi_ex_resolve_node_to_value(&info->resolved_node, NULL);
acpi_ex_exit_interpreter();
/*******************************************************************************
*
* FUNCTION: acpi_ns_execute_control_method
*
* PARAMETERS: Info - Method info block, contains:
* Node - Method Node to execute
* obj_desc - Method object
* Parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
* parameter_type - Type of Parameter list
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
*
* RETURN: Status
*
* DESCRIPTION: Execute the requested method passing the given parameters
*
* MUTEX: Assumes namespace is locked
*
******************************************************************************/
static acpi_status
acpi_ns_execute_control_method(struct acpi_parameter_info *info)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ns_execute_control_method");
/* Verify that there is a method associated with this object */
info->obj_desc = acpi_ns_get_attached_object(info->node);
if (!info->obj_desc) {
ACPI_ERROR((AE_INFO, "No attached method object"));
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_NULL_OBJECT);
}
ACPI_DUMP_PATHNAME(info->node, "Execute Method:",
ACPI_LV_INFO, _COMPONENT);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Method at AML address %p Length %X\n",
info->obj_desc->method.aml_start + 1,
info->obj_desc->method.aml_length - 1));
/*
* Unlock the namespace before execution. This allows namespace access
* via the external Acpi* interfaces while a method is being executed.
* However, any namespace deletion must acquire both the namespace and
* interpreter locks to ensure that no thread is using the portion of the
* namespace that is being deleted.
*/
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* Execute the method via the interpreter. The interpreter is locked
* here before calling into the AML parser
*/
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_ps_execute_method(info);
acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_object_value
*
* PARAMETERS: Info - Method info block, contains:
* Node - Object's NS node
* return_object - Where to put object value (if
* any). If NULL, no value is returned.
*
* RETURN: Status
*
* DESCRIPTION: Return the current value of the object
*
* MUTEX: Assumes namespace is locked, leaves namespace unlocked
*
******************************************************************************/
static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info)
{
acpi_status status = AE_OK;
struct acpi_namespace_node *resolved_node = info->node;
ACPI_FUNCTION_TRACE("ns_get_object_value");
/*
* Objects require additional resolution steps (e.g., the Node may be a
* field that must be read, etc.) -- we can't just grab the object out of
* the node.
*/
/*
* Use resolve_node_to_value() to get the associated value. This call always
* deletes obj_desc (allocated above).
*
* NOTE: we can get away with passing in NULL for a walk state because
* obj_desc is guaranteed to not be a reference to either a method local or
* a method argument (because this interface can only be called from the
* acpi_evaluate external interface, never called from a running method.)
*
* Even though we do not directly invoke the interpreter for this, we must
* enter it because we could access an opregion. The opregion access code
* assumes that the interpreter is locked.
*
* We must release the namespace lock before entering the interpreter.
*/
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_ex_enter_interpreter();
if (ACPI_SUCCESS(status)) {
status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
/*
* If acpi_ex_resolve_node_to_value() succeeded, the return value was placed
* in resolved_node.
*/
acpi_ex_exit_interpreter();
if (ACPI_SUCCESS(status)) {
status = AE_CTRL_RETURN_VALUE;
info->return_object = ACPI_CAST_PTR
(union acpi_operand_object, resolved_node);
info->return_object =
ACPI_CAST_PTR(union acpi_operand_object,
info->resolved_node);
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Returning object %p [%s]\n",
info->return_object,
@ -484,7 +211,30 @@ static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info)
}
}
/* Namespace is unlocked */
/*
* Check if there is a return value that must be dealt with
*/
if (status == AE_CTRL_RETURN_VALUE) {
/* If caller does not want the return value, delete it */
if (info->flags & ACPI_IGNORE_RETURN_VALUE) {
acpi_ut_remove_reference(info->return_object);
info->return_object = NULL;
}
/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
status = AE_OK;
}
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"*** Completed evaluation of object %s ***\n",
info->pathname));
/*
* Namespace was unlocked by the handling acpi_ns* function, so we
* just return
*/
return_ACPI_STATUS(status);
}
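As a caller-side illustration of this consolidated interface, a minimal sketch follows (ACPICA-internal context assumed; the helper name and the choice of _STA are hypothetical, and the info-block fields are the ones documented above):

static acpi_status example_eval_relative(struct acpi_namespace_node *device_node)
{
	struct acpi_evaluate_info *info;
	acpi_status status;

	/* The evaluation info block replaces the old per-call parameter lists */

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		return (AE_NO_MEMORY);
	}

	info->prefix_node = device_node;	/* Case 2: Node plus relative Pathname */
	info->pathname = METHOD_NAME__STA;
	info->parameters = NULL;
	info->parameter_type = ACPI_PARAM_ARGS;
	info->flags = 0;			/* Keep any return object */

	status = acpi_ns_evaluate(info);
	if (ACPI_SUCCESS(status) && info->return_object) {

		/* Caller owns the return object. Note that the public
		 * acpi_evaluate_object() takes the interpreter lock before
		 * dropping this reference. */

		acpi_ut_remove_reference(info->return_object);
	}

	ACPI_FREE(info);
	return (status);
}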

View File

@ -58,6 +58,10 @@ static acpi_status
acpi_ns_init_one_device(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
static acpi_status
acpi_ns_find_ini_methods(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
/*******************************************************************************
*
* FUNCTION: acpi_ns_initialize_objects
@ -76,7 +80,7 @@ acpi_status acpi_ns_initialize_objects(void)
acpi_status status;
struct acpi_init_walk_info info;
ACPI_FUNCTION_TRACE("ns_initialize_objects");
ACPI_FUNCTION_TRACE(ns_initialize_objects);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
@ -93,7 +97,7 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_UINT32_MAX, acpi_ns_init_one_object,
&info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
@ -133,7 +137,7 @@ acpi_status acpi_ns_initialize_devices(void)
acpi_status status;
struct acpi_device_walk_info info;
ACPI_FUNCTION_TRACE("ns_initialize_devices");
ACPI_FUNCTION_TRACE(ns_initialize_devices);
/* Init counters */
@ -142,30 +146,46 @@ acpi_status acpi_ns_initialize_devices(void)
info.num_INI = 0;
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"Executing all Device _STA and_INI methods:"));
"Initializing Device/Processor/Thermal objects by executing _INI methods:"));
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Walk namespace for all objects */
/* Tree analysis: find all subtrees that contain _INI methods */
status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, TRUE,
ACPI_UINT32_MAX, FALSE,
acpi_ns_find_ini_methods, &info, NULL);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
/* Allocate the evaluation information block */
info.evaluate_info =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info.evaluate_info) {
status = AE_NO_MEMORY;
goto error_exit;
}
/* Walk namespace to execute all _INIs on present devices */
status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, FALSE,
acpi_ns_init_one_device, &info, NULL);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
ACPI_FREE(info.evaluate_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
goto error_exit;
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\n%hd Devices found - executed %hd _STA, %hd _INI methods\n",
info.device_count, info.num_STA, info.num_INI));
"\nExecuted %hd _INI methods requiring %hd _STA executions (examined %hd objects)\n",
info.num_INI, info.num_STA, info.device_count));
return_ACPI_STATUS(status);
error_exit:
ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
return_ACPI_STATUS(status);
}
/*******************************************************************************
@ -200,7 +220,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
(struct acpi_namespace_node *)obj_handle;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_NAME("ns_init_one_object");
ACPI_FUNCTION_NAME(ns_init_one_object);
info->object_count++;
@ -309,6 +329,72 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_find_ini_methods
*
* PARAMETERS: acpi_walk_callback
*
* RETURN: acpi_status
*
* DESCRIPTION: Called during namespace walk. Finds objects named _INI under
* device/processor/thermal objects, and marks the entire subtree
* with a SUBTREE_HAS_INI flag. This flag is used during the
* subsequent device initialization walk to avoid entire subtrees
* that do not contain an _INI.
*
******************************************************************************/
static acpi_status
acpi_ns_find_ini_methods(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value)
{
struct acpi_device_walk_info *info =
ACPI_CAST_PTR(struct acpi_device_walk_info, context);
struct acpi_namespace_node *node;
struct acpi_namespace_node *parent_node;
/* Keep count of device/processor/thermal objects */
node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
if ((node->type == ACPI_TYPE_DEVICE) ||
(node->type == ACPI_TYPE_PROCESSOR) ||
(node->type == ACPI_TYPE_THERMAL)) {
info->device_count++;
return (AE_OK);
}
/* We are only looking for methods named _INI */
if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
return (AE_OK);
}
/*
* The only _INI methods that we care about are those that are
* present under Device, Processor, and Thermal objects.
*/
parent_node = acpi_ns_get_parent_node(node);
switch (parent_node->type) {
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
/* Mark parent and bubble up the INI present flag to the root */
while (parent_node) {
parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
parent_node = acpi_ns_get_parent_node(parent_node);
}
break;
default:
break;
}
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_init_one_device
@ -327,119 +413,165 @@ static acpi_status
acpi_ns_init_one_device(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value)
{
struct acpi_device_walk_info *info =
(struct acpi_device_walk_info *)context;
struct acpi_parameter_info pinfo;
struct acpi_device_walk_info *walk_info =
ACPI_CAST_PTR(struct acpi_device_walk_info, context);
struct acpi_evaluate_info *info = walk_info->evaluate_info;
u32 flags;
acpi_status status;
struct acpi_namespace_node *ini_node;
struct acpi_namespace_node *device_node;
ACPI_FUNCTION_TRACE("ns_init_one_device");
ACPI_FUNCTION_TRACE(ns_init_one_device);
device_node = acpi_ns_map_handle_to_node(obj_handle);
if (!device_node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* We are interested in Devices, Processors and thermal_zones only */
/*
* We will run _STA/_INI on Devices, Processors and thermal_zones only
*/
device_node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
if ((device_node->type != ACPI_TYPE_DEVICE) &&
(device_node->type != ACPI_TYPE_PROCESSOR) &&
(device_node->type != ACPI_TYPE_THERMAL)) {
return_ACPI_STATUS(AE_OK);
}
if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
(!(acpi_dbg_level & ACPI_LV_INFO))) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
info->device_count++;
/*
* Check if the _INI method exists for this device -
* if _INI does not exist, there is no need to run _STA
* No _INI means device requires no initialization
* Because of an earlier namespace analysis, all subtrees that contain an
* _INI method are tagged.
*
* If this device subtree does not contain any _INI methods, we
* can exit now and stop traversing this entire subtree.
*/
status = acpi_ns_search_node(*ACPI_CAST_PTR(u32, METHOD_NAME__INI),
device_node, ACPI_TYPE_METHOD, &ini_node);
if (ACPI_FAILURE(status)) {
/* No _INI method found - move on to next device */
return_ACPI_STATUS(AE_OK);
}
/*
* Run _STA to determine if we can run _INI on the device -
* the device must be present before _INI can be run.
* However, _STA is not required - assume device present if no _STA
*/
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_METHOD,
device_node,
METHOD_NAME__STA));
pinfo.node = device_node;
pinfo.parameters = NULL;
pinfo.parameter_type = ACPI_PARAM_ARGS;
status = acpi_ut_execute_STA(pinfo.node, &flags);
if (ACPI_FAILURE(status)) {
/* Ignore error and move on to next device */
return_ACPI_STATUS(AE_OK);
}
if (flags != ACPI_UINT32_MAX) {
info->num_STA++;
}
if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
/* Don't look at children of a not present device */
if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) {
return_ACPI_STATUS(AE_CTRL_DEPTH);
}
/*
* The device is present and _INI exists. Run the _INI method.
* (We already have the _INI node from above)
* Run _STA to determine if this device is present and functioning. We
* must know this information for two important reasons (from ACPI spec):
*
* 1) We can only run _INI if the device is present.
* 2) We must abort the device tree walk on this subtree if the device is
* not present and is not functional (we will not examine the children)
*
* The _STA method is not required to be present under the device, we
* assume the device is present if _STA does not exist.
*/
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_METHOD,
pinfo.node,
METHOD_NAME__INI));
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node, METHOD_NAME__STA));
pinfo.node = ini_node;
status = acpi_ns_evaluate_by_handle(&pinfo);
status = acpi_ut_execute_STA(device_node, &flags);
if (ACPI_FAILURE(status)) {
/* Ignore error and move on to next device */
#ifdef ACPI_DEBUG_OUTPUT
char *scope_name = acpi_ns_get_external_pathname(ini_node);
return_ACPI_STATUS(AE_OK);
}
ACPI_WARNING((AE_INFO, "%s._INI failed: %s",
scope_name, acpi_format_exception(status)));
/*
* Flags == -1 means that _STA was not found. In this case, we assume that
* the device is both present and functional.
*
* From the ACPI spec, description of _STA:
*
* "If a device object (including the processor object) does not have an
* _STA object, then OSPM assumes that all of the above bits are set (in
* other words, the device is present, ..., and functioning)"
*/
if (flags != ACPI_UINT32_MAX) {
walk_info->num_STA++;
}
ACPI_MEM_FREE(scope_name);
#endif
} else {
/* Delete any return object (especially if implicit_return is enabled) */
/*
* Examine the PRESENT and FUNCTIONING status bits
*
* Note: ACPI spec does not seem to specify behavior for the present but
* not functioning case, so we assume functioning if present.
*/
if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
if (pinfo.return_object) {
acpi_ut_remove_reference(pinfo.return_object);
/* Device is not present, we must examine the Functioning bit */
if (flags & ACPI_STA_DEVICE_FUNCTIONING) {
/*
* Device is not present but is "functioning". In this case,
* we will not run _INI, but we continue to examine the children
* of this device.
*
* From the ACPI spec, description of _STA: (Note - no mention
* of whether to run _INI or not on the device in question)
*
* "_STA may return bit 0 clear (not present) with bit 3 set
* (device is functional). This case is used to indicate a valid
* device for which no device driver should be loaded (for example,
* a bridge device.) Children of this device may be present and
* valid. OSPM should continue enumeration below a device whose
* _STA returns this bit combination"
*/
return_ACPI_STATUS(AE_OK);
} else {
/*
* Device is not present and is not functioning. We must abort the
* walk of this subtree immediately -- don't look at the children
* of such a device.
*
* From the ACPI spec, description of _INI:
*
* "If the _STA method indicates that the device is not present,
* OSPM will not run the _INI and will not examine the children
* of the device for _INI methods"
*/
return_ACPI_STATUS(AE_CTRL_DEPTH);
}
/* Count of successful INIs */
info->num_INI++;
}
/*
* The device is present or is assumed present if no _STA exists.
* Run the _INI if it exists (not required to exist)
*
* Note: We know there is an _INI within this subtree, but it may not be
* under this particular device, it may be lower in the branch.
*/
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
info->prefix_node = device_node;
info->pathname = METHOD_NAME__INI;
info->parameters = NULL;
info->parameter_type = ACPI_PARAM_ARGS;
info->flags = ACPI_IGNORE_RETURN_VALUE;
status = acpi_ns_evaluate(info);
if (ACPI_SUCCESS(status)) {
walk_info->num_INI++;
if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
(!(acpi_dbg_level & ACPI_LV_INFO))) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
}
#ifdef ACPI_DEBUG_OUTPUT
else if (status != AE_NOT_FOUND) {
/* Ignore error and move on to next device */
char *scope_name =
acpi_ns_get_external_pathname(info->resolved_node);
ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
scope_name));
ACPI_FREE(scope_name);
}
#endif
/* Ignore errors from above */
status = AE_OK;
/*
* The _INI method has been run if present; call the Global Initialization
* Handler for this device.
*/
if (acpi_gbl_init_handler) {
/* External initialization handler is present, call it */
status =
acpi_gbl_init_handler(pinfo.node, ACPI_INIT_DEVICE_INI);
acpi_gbl_init_handler(device_node, ACPI_INIT_DEVICE_INI);
}
return_ACPI_STATUS(AE_OK);
return_ACPI_STATUS(status);
}
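The _STA handling above boils down to a small decision table; the following condensed sketch (helper name hypothetical) maps the two status bits onto the walk actions used in this function:

/* Summarizes the _STA decision made in acpi_ns_init_one_device:
 *  - present                      -> run _INI, keep walking children
 *  - not present but functioning  -> skip _INI, keep walking children
 *  - neither                      -> AE_CTRL_DEPTH prunes the subtree
 */
static acpi_status example_sta_to_walk_action(u32 sta_flags, u8 *run_ini)
{
	*run_ini = FALSE;

	if (sta_flags & ACPI_STA_DEVICE_PRESENT) {
		*run_ini = TRUE;
		return (AE_OK);
	}

	if (sta_flags & ACPI_STA_DEVICE_FUNCTIONING) {
		return (AE_OK);
	}

	return (AE_CTRL_DEPTH);
}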

View File

@ -77,13 +77,14 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
{
acpi_status status;
ACPI_FUNCTION_TRACE("ns_load_table");
ACPI_FUNCTION_TRACE(ns_load_table);
/* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */
if (!
(acpi_gbl_table_data[table_desc->type].
flags & ACPI_TABLE_EXECUTABLE)) {
/* Just ignore this table */
return_ACPI_STATUS(AE_OK);
@ -168,7 +169,7 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
acpi_status status;
struct acpi_table_desc *table_desc;
ACPI_FUNCTION_TRACE("ns_load_table_by_type");
ACPI_FUNCTION_TRACE(ns_load_table_by_type);
status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (ACPI_FAILURE(status)) {
@ -180,11 +181,11 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
* DSDT (one), SSDT/PSDT (multiple)
*/
switch (table_type) {
case ACPI_TABLE_DSDT:
case ACPI_TABLE_ID_DSDT:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace load: DSDT\n"));
table_desc = acpi_gbl_table_lists[ACPI_TABLE_DSDT].next;
table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_DSDT].next;
/* If table already loaded into namespace, just return */
@ -200,8 +201,8 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
}
break;
case ACPI_TABLE_SSDT:
case ACPI_TABLE_PSDT:
case ACPI_TABLE_ID_SSDT:
case ACPI_TABLE_ID_PSDT:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Namespace load: %d SSDT or PSDTs\n",
@ -258,7 +259,7 @@ acpi_status acpi_ns_load_namespace(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_load_name_space");
ACPI_FUNCTION_TRACE(acpi_load_name_space);
/* There must be at least a DSDT installed */
@ -271,15 +272,15 @@ acpi_status acpi_ns_load_namespace(void)
* Load the namespace. The DSDT is required,
* but the SSDT and PSDT tables are optional.
*/
status = acpi_ns_load_table_by_type(ACPI_TABLE_DSDT);
status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Ignore exceptions from these */
(void)acpi_ns_load_table_by_type(ACPI_TABLE_SSDT);
(void)acpi_ns_load_table_by_type(ACPI_TABLE_PSDT);
(void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_SSDT);
(void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_PSDT);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"ACPI Namespace successfully loaded at root %p\n",
@ -314,7 +315,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
acpi_handle dummy;
u32 level;
ACPI_FUNCTION_TRACE("ns_delete_subtree");
ACPI_FUNCTION_TRACE(ns_delete_subtree);
parent_handle = start_handle;
child_handle = NULL;
@ -325,6 +326,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
* to where we started.
*/
while (level > 0) {
/* Attempt to get the next object in this scope */
status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle,
@ -335,6 +337,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
/* Did we get a new object? */
if (ACPI_SUCCESS(status)) {
/* Check if this object has any children */
if (ACPI_SUCCESS
@ -392,7 +395,7 @@ acpi_status acpi_ns_unload_namespace(acpi_handle handle)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ns_unload_name_space");
ACPI_FUNCTION_TRACE(ns_unload_name_space);
/* Parameter validation */

View File

@ -48,11 +48,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
/* Local prototypes */
static void
acpi_ns_build_external_path(struct acpi_namespace_node *node,
acpi_size size, char *name_buffer);
/*******************************************************************************
*
* FUNCTION: acpi_ns_build_external_path
@ -67,8 +62,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
* DESCRIPTION: Generate a full pathname
*
******************************************************************************/
static void
void
acpi_ns_build_external_path(struct acpi_namespace_node *node,
acpi_size size, char *name_buffer)
{
@ -138,7 +132,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
char *name_buffer;
acpi_size size;
ACPI_FUNCTION_TRACE_PTR("ns_get_external_pathname", node);
ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
/* Calculate required buffer size based on depth below root */
@ -146,7 +140,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
/* Allocate a buffer to be returned to caller */
name_buffer = ACPI_MEM_CALLOCATE(size);
name_buffer = ACPI_ALLOCATE_ZEROED(size);
if (!name_buffer) {
ACPI_ERROR((AE_INFO, "Allocation failure"));
return_PTR(NULL);
@ -219,7 +213,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
struct acpi_namespace_node *node;
acpi_size required_size;
ACPI_FUNCTION_TRACE_PTR("ns_handle_to_pathname", target_handle);
ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
node = acpi_ns_map_handle_to_node(target_handle);
if (!node) {

View File

@ -76,19 +76,21 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
union acpi_operand_object *last_obj_desc;
acpi_object_type object_type = ACPI_TYPE_ANY;
ACPI_FUNCTION_TRACE("ns_attach_object");
ACPI_FUNCTION_TRACE(ns_attach_object);
/*
* Parameter validation
*/
if (!node) {
/* Invalid handle */
ACPI_ERROR((AE_INFO, "Null named_obj handle"));
ACPI_ERROR((AE_INFO, "Null NamedObj handle"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (!object && (ACPI_TYPE_ANY != type)) {
/* Null object */
ACPI_ERROR((AE_INFO,
@ -97,6 +99,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
}
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
/* Not a name handle */
ACPI_ERROR((AE_INFO, "Invalid handle %p [%s]",
@ -108,7 +111,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
if (node->object == object) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Obj %p already installed in name_obj %p\n",
"Obj %p already installed in NameObj %p\n",
object, node));
return_ACPI_STATUS(AE_OK);
@ -201,7 +204,7 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
{
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE("ns_detach_object");
ACPI_FUNCTION_TRACE(ns_detach_object);
obj_desc = node->object;
@ -252,7 +255,7 @@ union acpi_operand_object *acpi_ns_get_attached_object(struct
acpi_namespace_node
*node)
{
ACPI_FUNCTION_TRACE_PTR("ns_get_attached_object", node);
ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node);
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node ptr"));
@ -287,7 +290,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
acpi_operand_object
*obj_desc)
{
ACPI_FUNCTION_TRACE_PTR("ns_get_secondary_object", obj_desc);
ACPI_FUNCTION_TRACE_PTR(ns_get_secondary_object, obj_desc);
if ((!obj_desc) ||
(ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) ||

View File

@ -62,13 +62,13 @@ ACPI_MODULE_NAME("nsparse")
*
******************************************************************************/
acpi_status
acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc * table_desc)
acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
{
union acpi_parse_object *parse_root;
acpi_status status;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE("ns_one_complete_parse");
ACPI_FUNCTION_TRACE(ns_one_complete_parse);
/* Create and init a Root Node */
@ -124,7 +124,7 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
{
acpi_status status;
ACPI_FUNCTION_TRACE("ns_parse_table");
ACPI_FUNCTION_TRACE(ns_parse_table);
/*
* AML Parse, pass 1

View File

@ -56,16 +56,16 @@ acpi_ns_search_parent_tree(u32 target_name,
/*******************************************************************************
*
* FUNCTION: acpi_ns_search_node
* FUNCTION: acpi_ns_search_one_scope
*
* PARAMETERS: target_name - Ascii ACPI name to search for
* Node - Starting node where search will begin
* parent_node - Starting node where search will begin
* Type - Object type to match
* return_node - Where the matched Named obj is returned
*
* RETURN: Status
*
* DESCRIPTION: Search a single level of the namespace. Performs a
* DESCRIPTION: Search a single level of the namespace. Performs a
* simple search of the specified level, and does not add
* entries or search parents.
*
@ -75,35 +75,40 @@ acpi_ns_search_parent_tree(u32 target_name,
*
* All namespace searching is linear in this implementation, but
* could be easily modified to support any improved search
* algorithm. However, the linear search was chosen for simplicity
* algorithm. However, the linear search was chosen for simplicity
* and because the trees are small and the other interpreter
* execution overhead is relatively high.
*
* Note: CPU execution analysis has shown that the AML interpreter spends
* a very small percentage of its time searching the namespace. Therefore,
* the linear search seems to be sufficient, as there would seem to be
* little value in improving the search.
*
******************************************************************************/
acpi_status
acpi_ns_search_node(u32 target_name,
struct acpi_namespace_node *node,
acpi_object_type type,
struct acpi_namespace_node **return_node)
acpi_ns_search_one_scope(u32 target_name,
struct acpi_namespace_node *parent_node,
acpi_object_type type,
struct acpi_namespace_node **return_node)
{
struct acpi_namespace_node *next_node;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE("ns_search_node");
ACPI_FUNCTION_TRACE(ns_search_one_scope);
#ifdef ACPI_DEBUG_OUTPUT
if (ACPI_LV_NAMES & acpi_dbg_level) {
char *scope_name;
scope_name = acpi_ns_get_external_pathname(node);
scope_name = acpi_ns_get_external_pathname(parent_node);
if (scope_name) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Searching %s (%p) For [%4.4s] (%s)\n",
scope_name, node, ACPI_CAST_PTR(char,
&target_name),
scope_name, parent_node,
ACPI_CAST_PTR(char, &target_name),
acpi_ut_get_type_name(type)));
ACPI_MEM_FREE(scope_name);
ACPI_FREE(scope_name);
}
}
#endif
@ -112,32 +117,33 @@ acpi_ns_search_node(u32 target_name,
* Search for name at this namespace level, which is to say that we
* must search for the name among the children of this object
*/
next_node = node->child;
while (next_node) {
node = parent_node->child;
while (node) {
/* Check for match against the name */
if (next_node->name.integer == target_name) {
if (node->name.integer == target_name) {
/* Resolve a control method alias if any */
if (acpi_ns_get_type(next_node) ==
if (acpi_ns_get_type(node) ==
ACPI_TYPE_LOCAL_METHOD_ALIAS) {
next_node =
node =
ACPI_CAST_PTR(struct acpi_namespace_node,
next_node->object);
node->object);
}
/*
* Found matching entry.
*/
/* Found matching entry */
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n",
ACPI_CAST_PTR(char, &target_name),
acpi_ut_get_type_name(next_node->
type),
next_node,
acpi_ut_get_node_name(node), node));
acpi_ut_get_type_name(node->type),
node,
acpi_ut_get_node_name(parent_node),
parent_node));
*return_node = next_node;
*return_node = node;
return_ACPI_STATUS(AE_OK);
}
@ -145,7 +151,8 @@ acpi_ns_search_node(u32 target_name,
* The last entry in the list points back to the parent,
* so a flag is used to indicate the end-of-list
*/
if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
if (node->flags & ANOBJ_END_OF_PEER_LIST) {
/* Searched entire list, we are done */
break;
@ -153,7 +160,7 @@ acpi_ns_search_node(u32 target_name,
/* Didn't match name, move on to the next peer object */
next_node = next_node->peer;
node = node->peer;
}
/* Searched entire namespace level, not found */
@ -162,7 +169,8 @@ acpi_ns_search_node(u32 target_name,
"Name [%4.4s] (%s) not found in search in scope [%4.4s] %p first child %p\n",
ACPI_CAST_PTR(char, &target_name),
acpi_ut_get_type_name(type),
acpi_ut_get_node_name(node), node, node->child));
acpi_ut_get_node_name(parent_node), parent_node,
parent_node->child));
return_ACPI_STATUS(AE_NOT_FOUND);
}
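The integer compare in the loop above works because an ACPI NameSeg is exactly four bytes, so the whole name fits in one u32; a small sketch of the idea (helper name hypothetical, using the same ACPICA macros seen here):

/* One integer compare replaces a strncmp() in this hot search path.
 * ACPI_MOVE_32_TO_32 copies the four bytes even from an unaligned source. */
static u8 example_name_matches(struct acpi_namespace_node *node, char *ascii_name)
{
	u32 target;

	ACPI_MOVE_32_TO_32(&target, ascii_name);
	return (u8) (node->name.integer == target);
}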
@ -179,14 +187,14 @@ acpi_ns_search_node(u32 target_name,
* RETURN: Status
*
* DESCRIPTION: Called when a name has not been found in the current namespace
* level. Before adding it or giving up, ACPI scope rules require
* level. Before adding it or giving up, ACPI scope rules require
* searching enclosing scopes in cases identified by acpi_ns_local().
*
* "A name is located by finding the matching name in the current
* name space, and then in the parent name space. If the parent
* name space does not contain the name, the search continues
* recursively until either the name is found or the name space
* does not have a parent (the root of the name space). This
* does not have a parent (the root of the name space). This
* indicates that the name is not found" (From ACPI Specification,
* section 5.3)
*
@ -201,7 +209,7 @@ acpi_ns_search_parent_tree(u32 target_name,
acpi_status status;
struct acpi_namespace_node *parent_node;
ACPI_FUNCTION_TRACE("ns_search_parent_tree");
ACPI_FUNCTION_TRACE(ns_search_parent_tree);
parent_node = acpi_ns_get_parent_node(node);
@ -235,20 +243,19 @@ acpi_ns_search_parent_tree(u32 target_name,
*/
while (parent_node) {
/*
* Search parent scope. Use TYPE_ANY because we don't care about the
* Search parent scope. Use TYPE_ANY because we don't care about the
* object type at this point, we only care about the existence of
* the actual name we are searching for. Typechecking comes later.
* the actual name we are searching for. Typechecking comes later.
*/
status = acpi_ns_search_node(target_name, parent_node,
status =
acpi_ns_search_one_scope(target_name, parent_node,
ACPI_TYPE_ANY, return_node);
if (ACPI_SUCCESS(status)) {
return_ACPI_STATUS(status);
}
/*
* Not found here, go up another level
* (until we reach the root)
*/
/* Not found here, go up another level (until we reach the root) */
parent_node = acpi_ns_get_parent_node(parent_node);
}
@ -273,7 +280,7 @@ acpi_ns_search_parent_tree(u32 target_name,
* RETURN: Status
*
* DESCRIPTION: Search for a name segment in a single namespace level,
* optionally adding it if it is not found. If the passed
* optionally adding it if it is not found. If the passed
* Type is not Any and the type previously stored in the
* entry was Any (i.e. unknown), update the stored type.
*
@ -293,29 +300,46 @@ acpi_ns_search_and_enter(u32 target_name,
acpi_status status;
struct acpi_namespace_node *new_node;
ACPI_FUNCTION_TRACE("ns_search_and_enter");
ACPI_FUNCTION_TRACE(ns_search_and_enter);
/* Parameter validation */
if (!node || !target_name || !return_node) {
ACPI_ERROR((AE_INFO,
"Null param: Node %p Name %X return_node %p",
"Null parameter: Node %p Name %X ReturnNode %p",
node, target_name, return_node));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Name must consist of printable characters */
/*
* Name must consist of valid ACPI characters. We will repair the name if
* necessary rather than abort, since we want all namespace names to be
* printable. A warning message is appropriate.
*
* This issue came up because there are in fact machines that exhibit
* this problem, and we want to be able to enable ACPI support for them,
* even though there are a few bad names.
*/
if (!acpi_ut_valid_acpi_name(target_name)) {
ACPI_ERROR((AE_INFO, "Bad character in ACPI Name: %X",
target_name));
return_ACPI_STATUS(AE_BAD_CHARACTER);
target_name = acpi_ut_repair_name(target_name);
/* Report warning only if in strict mode or debug mode */
if (!acpi_gbl_enable_interpreter_slack) {
ACPI_WARNING((AE_INFO,
"Found bad character(s) in name, repaired: [%4.4s]\n",
ACPI_CAST_PTR(char, &target_name)));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"Found bad character(s) in name, repaired: [%4.4s]\n",
ACPI_CAST_PTR(char, &target_name)));
}
}
/* Try to find the name in the namespace level specified by the caller */
*return_node = ACPI_ENTRY_NOT_FOUND;
status = acpi_ns_search_node(target_name, node, type, return_node);
status = acpi_ns_search_one_scope(target_name, node, type, return_node);
if (status != AE_NOT_FOUND) {
/*
* If we found it AND the request specifies that a find is an error,
@ -325,18 +349,16 @@ acpi_ns_search_and_enter(u32 target_name,
status = AE_ALREADY_EXISTS;
}
/*
* Either found it or there was an error
* -- finished either way
*/
/* Either found it or there was an error: finished either way */
return_ACPI_STATUS(status);
}
/*
* The name was not found. If we are NOT performing the first pass
* The name was not found. If we are NOT performing the first pass
* (name entry) of loading the namespace, search the parent tree (all the
* way to the root if necessary.) We don't want to perform the parent
* search when the namespace is actually being loaded. We want to perform
* search when the namespace is actually being loaded. We want to perform
* the search when namespace references are being resolved (load pass 2)
* and during the execution phase.
*/
@ -354,9 +376,8 @@ acpi_ns_search_and_enter(u32 target_name,
}
}
/*
* In execute mode, just search, never add names. Exit now.
*/
/* In execute mode, just search, never add names. Exit now */
if (interpreter_mode == ACPI_IMODE_EXECUTE) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"%4.4s Not found in %p [Not adding]\n",
@ -371,11 +392,18 @@ acpi_ns_search_and_enter(u32 target_name,
if (!new_node) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
#ifdef ACPI_ASL_COMPILER
/*
* Node is an object defined by an External() statement
*/
if (flags & ACPI_NS_EXTERNAL) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
}
#endif
/* Install the new object into the parent's list of children */
acpi_ns_install_node(walk_state, node, new_node, type);
*return_node = new_node;
return_ACPI_STATUS(AE_OK);
}
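A valid ACPI NameSeg is four characters drawn from 'A'-'Z', '0'-'9' and '_', and may not begin with a digit; a hypothetical repair routine in that spirit (illustrative only, the real acpi_ut_repair_name may choose a different filler character) could look like:

static u32 example_repair_name(u32 name)
{
	char *c = ACPI_CAST_PTR(char, &name);
	u32 i;

	for (i = 0; i < ACPI_NAME_SIZE; i++) {
		u8 valid = (c[i] >= 'A' && c[i] <= 'Z') ||
			   (c[i] >= '0' && c[i] <= '9' && i != 0) ||
			   (c[i] == '_');

		if (!valid) {
			c[i] = '_';	/* Overwrite the bad byte with a printable filler */
		}
	}

	return (name);
}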

View File

@ -78,15 +78,17 @@ acpi_ns_report_error(char *module_name,
char *internal_name, acpi_status lookup_status)
{
acpi_status status;
u32 bad_name;
char *name = NULL;
acpi_ut_report_error(module_name, line_number);
acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
if (lookup_status == AE_BAD_CHARACTER) {
/* There is a non-ascii character in the name */
acpi_os_printf("[0x%4.4X] (NON-ASCII)",
*(ACPI_CAST_PTR(u32, internal_name)));
ACPI_MOVE_32_TO_32(&bad_name, internal_name);
acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
} else {
/* Convert path to external format */
@ -102,7 +104,7 @@ acpi_ns_report_error(char *module_name,
}
if (name) {
ACPI_MEM_FREE(name);
ACPI_FREE(name);
}
}
@ -137,11 +139,12 @@ acpi_ns_report_method_error(char *module_name,
acpi_status status;
struct acpi_namespace_node *node = prefix_node;
acpi_ut_report_error(module_name, line_number);
acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
if (path) {
status = acpi_ns_get_node_by_path(path, prefix_node,
ACPI_NS_NO_UPSEARCH, &node);
status =
acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
&node);
if (ACPI_FAILURE(status)) {
acpi_os_printf("[Could not get node by pathname]");
}
@ -185,7 +188,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node, char *message)
}
acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
ACPI_MEM_FREE(buffer.pointer);
ACPI_FREE(buffer.pointer);
}
}
@ -239,7 +242,7 @@ static u8 acpi_ns_valid_path_separator(char sep)
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
{
ACPI_FUNCTION_TRACE("ns_get_type");
ACPI_FUNCTION_TRACE(ns_get_type);
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node parameter"));
@ -264,9 +267,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
u32 acpi_ns_local(acpi_object_type type)
{
ACPI_FUNCTION_TRACE("ns_local");
ACPI_FUNCTION_TRACE(ns_local);
if (!acpi_ut_valid_object_type(type)) {
/* Type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
@ -363,7 +367,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
char *result = NULL;
acpi_native_uint i;
ACPI_FUNCTION_TRACE("ns_build_internal_name");
ACPI_FUNCTION_TRACE(ns_build_internal_name);
/* Setup the correct prefixes, counts, and pointers */
@ -411,6 +415,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
for (i = 0; i < ACPI_NAME_SIZE; i++) {
if (acpi_ns_valid_path_separator(*external_name) ||
(*external_name == 0)) {
/* Pad the segment with underscore(s) if segment is short */
result[i] = '_';
@ -473,7 +478,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
struct acpi_namestring_info info;
acpi_status status;
ACPI_FUNCTION_TRACE("ns_internalize_name");
ACPI_FUNCTION_TRACE(ns_internalize_name);
if ((!external_name) || (*external_name == 0) || (!converted_name)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -486,7 +491,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
/* We need a segment to store the internal name */
internal_name = ACPI_MEM_CALLOCATE(info.length);
internal_name = ACPI_ALLOCATE_ZEROED(info.length);
if (!internal_name) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -496,7 +501,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
info.internal_name = internal_name;
status = acpi_ns_build_internal_name(&info);
if (ACPI_FAILURE(status)) {
ACPI_MEM_FREE(internal_name);
ACPI_FREE(internal_name);
return_ACPI_STATUS(status);
}
@ -533,7 +538,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
acpi_native_uint i = 0;
acpi_native_uint j = 0;
ACPI_FUNCTION_TRACE("ns_externalize_name");
ACPI_FUNCTION_TRACE(ns_externalize_name);
if (!internal_name_length || !internal_name || !converted_name) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -628,7 +633,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
/*
* Build converted_name
*/
*converted_name = ACPI_MEM_CALLOCATE(required_length);
*converted_name = ACPI_ALLOCATE_ZEROED(required_length);
if (!(*converted_name)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -681,13 +686,9 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
ACPI_FUNCTION_ENTRY();
/*
* Simple implementation.
* Simple implementation
*/
if (!handle) {
return (NULL);
}
if (handle == ACPI_ROOT_OBJECT) {
if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
return (acpi_gbl_root_node);
}
@ -697,7 +698,7 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
return (NULL);
}
return ((struct acpi_namespace_node *)handle);
return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}
/*******************************************************************************
@ -752,7 +753,7 @@ void acpi_ns_terminate(void)
{
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE("ns_terminate");
ACPI_FUNCTION_TRACE(ns_terminate);
/*
* 1) Free the entire namespace -- all nodes and objects
@ -792,9 +793,10 @@ void acpi_ns_terminate(void)
u32 acpi_ns_opens_scope(acpi_object_type type)
{
ACPI_FUNCTION_TRACE_STR("ns_opens_scope", acpi_ut_get_type_name(type));
ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
if (!acpi_ut_valid_object_type(type)) {
/* type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
@ -806,12 +808,12 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_node_by_path
* FUNCTION: acpi_ns_get_node
*
* PARAMETERS: *Pathname - Name to be found, in external (ASL) format. The
* \ (backslash) and ^ (carat) prefixes, and the
* . (period) to separate segments are supported.
* start_node - Root of subtree to be searched, or NS_ALL for the
* prefix_node - Root of subtree to be searched, or NS_ALL for the
* root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
@ -827,23 +829,29 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
******************************************************************************/
acpi_status
acpi_ns_get_node_by_path(char *pathname,
struct acpi_namespace_node *start_node,
u32 flags, struct acpi_namespace_node **return_node)
acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
char *pathname,
u32 flags, struct acpi_namespace_node **return_node)
{
union acpi_generic_state scope_info;
acpi_status status;
char *internal_path = NULL;
char *internal_path;
ACPI_FUNCTION_TRACE_PTR("ns_get_node_by_path", pathname);
ACPI_FUNCTION_TRACE_PTR(ns_get_node, pathname);
if (pathname) {
/* Convert path to internal representation */
status = acpi_ns_internalize_name(pathname, &internal_path);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
if (!pathname) {
*return_node = prefix_node;
if (!prefix_node) {
*return_node = acpi_gbl_root_node;
}
return_ACPI_STATUS(AE_OK);
}
/* Convert path to internal representation */
status = acpi_ns_internalize_name(pathname, &internal_path);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Must lock namespace during lookup */
@ -855,26 +863,23 @@ acpi_ns_get_node_by_path(char *pathname,
/* Setup lookup scope (search starting point) */
scope_info.scope.node = start_node;
scope_info.scope.node = prefix_node;
/* Lookup the name in the namespace */
status = acpi_ns_lookup(&scope_info, internal_path,
ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
(flags | ACPI_NS_DONT_OPEN_SCOPE),
NULL, return_node);
status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE,
(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
return_node);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s, %s\n",
internal_path,
acpi_format_exception(status)));
pathname, acpi_format_exception(status)));
}
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
cleanup:
if (internal_path) {
ACPI_MEM_FREE(internal_path);
}
ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
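The three lookup cases (absolute path, relative path, and null path) map directly onto this helper; a short sketch of the call patterns (the node variable and pathnames are hypothetical examples):

static void example_get_node_patterns(struct acpi_namespace_node *sb_node)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* 1) Null prefix node with an absolute (root-prefixed) pathname */
	status = acpi_ns_get_node(NULL, "\\_SB.PCI0", ACPI_NS_NO_UPSEARCH, &node);

	/* 2) Prefix node with a pathname relative to it */
	status = acpi_ns_get_node(sb_node, "PCI0.LPCB", ACPI_NS_NO_UPSEARCH, &node);

	/* 3) Null pathname: the prefix node itself is returned */
	status = acpi_ns_get_node(sb_node, NULL, ACPI_NS_NO_UPSEARCH, &node);

	(void)status;
	(void)node;
}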
@ -960,9 +965,10 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
{
struct acpi_namespace_node *parent_node;
ACPI_FUNCTION_TRACE("ns_find_parent_name");
ACPI_FUNCTION_TRACE(ns_find_parent_name);
if (child_node) {
/* Valid entry. Get the parent Node */
parent_node = acpi_ns_get_parent_node(child_node);

View File

@ -76,6 +76,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
ACPI_FUNCTION_ENTRY();
if (!child_node) {
/* It's really the parent's _scope_ that we want */
if (parent_node->child) {
@ -92,6 +93,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
/* If any type is OK, we are done */
if (type == ACPI_TYPE_ANY) {
/* next_node is NULL if we are at the end-of-list */
return (next_node);
@ -100,6 +102,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
/* Must search for the node -- but within this scope only */
while (next_node) {
/* If type matches, we are done */
if (next_node->type == type) {
@ -161,7 +164,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
acpi_object_type child_type;
u32 level;
ACPI_FUNCTION_TRACE("ns_walk_namespace");
ACPI_FUNCTION_TRACE(ns_walk_namespace);
/* Special case for the namespace Root Node */
@ -182,6 +185,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
* bubbled up to (and passed) the original parent handle (start_entry)
*/
while (level > 0) {
/* Get the next node in this scope. Null if not found */
status = AE_OK;

View File

@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acinterp.h>
@ -51,6 +49,7 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
* FUNCTION: acpi_evaluate_object_typed
@ -71,18 +70,17 @@ ACPI_MODULE_NAME("nsxfeval")
* be valid (non-null)
*
******************************************************************************/
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *external_params,
struct acpi_buffer *return_buffer,
struct acpi_object_list * external_params,
struct acpi_buffer * return_buffer,
acpi_object_type return_type)
{
acpi_status status;
u8 must_free = FALSE;
ACPI_FUNCTION_TRACE("acpi_evaluate_object_typed");
ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
/* Return buffer must be valid */
@ -110,6 +108,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
}
if (return_buffer->length == 0) {
/* Error because caller specifically asked for a return value */
ACPI_ERROR((AE_INFO, "No return value"));
@ -131,6 +130,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
acpi_ut_get_type_name(return_type)));
if (must_free) {
/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
acpi_os_free(return_buffer->pointer);
@ -140,6 +140,8 @@ acpi_evaluate_object_typed(acpi_handle handle,
return_buffer->length = 0;
return_ACPI_STATUS(AE_TYPE);
}
ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@ -161,7 +163,6 @@ acpi_evaluate_object_typed(acpi_handle handle,
* be valid (non-null)
*
******************************************************************************/
acpi_status
acpi_evaluate_object(acpi_handle handle,
acpi_string pathname,
@ -170,51 +171,61 @@ acpi_evaluate_object(acpi_handle handle,
{
acpi_status status;
acpi_status status2;
struct acpi_parameter_info info;
struct acpi_evaluate_info *info;
acpi_size buffer_space_needed;
u32 i;
ACPI_FUNCTION_TRACE("acpi_evaluate_object");
ACPI_FUNCTION_TRACE(acpi_evaluate_object);
info.node = handle;
info.parameters = NULL;
info.return_object = NULL;
info.parameter_type = ACPI_PARAM_ARGS;
/* Allocate and initialize the evaluation information block */
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
info->pathname = pathname;
info->parameter_type = ACPI_PARAM_ARGS;
/* Convert and validate the device handle */
info->prefix_node = acpi_ns_map_handle_to_node(handle);
if (!info->prefix_node) {
status = AE_BAD_PARAMETER;
goto cleanup;
}
/*
* If there are parameters to be passed to the object
* (which must be a control method), the external objects
* must be converted to internal objects
* If there are parameters to be passed to a control method, the external
* objects must all be converted to internal objects
*/
if (external_params && external_params->count) {
/*
* Allocate a new parameter block for the internal objects
* Add 1 to count to allow for null terminated internal list
*/
info.parameters = ACPI_MEM_CALLOCATE(((acpi_size)
external_params->count +
1) * sizeof(void *));
if (!info.parameters) {
return_ACPI_STATUS(AE_NO_MEMORY);
info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
external_params->
count +
1) * sizeof(void *));
if (!info->parameters) {
status = AE_NO_MEMORY;
goto cleanup;
}
/*
* Convert each external object in the list to an
* internal object
*/
/* Convert each external object in the list to an internal object */
for (i = 0; i < external_params->count; i++) {
status =
acpi_ut_copy_eobject_to_iobject(&external_params->
pointer[i],
&info.
&info->
parameters[i]);
if (ACPI_FAILURE(status)) {
acpi_ut_delete_internal_object_list(info.
parameters);
return_ACPI_STATUS(status);
goto cleanup;
}
}
info.parameters[external_params->count] = NULL;
info->parameters[external_params->count] = NULL;
}
/*
@ -224,43 +235,31 @@ acpi_evaluate_object(acpi_handle handle,
* 3) Valid handle
*/
if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
/*
* The path is fully qualified, just evaluate by name
*/
status = acpi_ns_evaluate_by_name(pathname, &info);
/* The path is fully qualified, just evaluate by name */
info->prefix_node = NULL;
status = acpi_ns_evaluate(info);
} else if (!handle) {
/*
* A handle is optional iff a fully qualified pathname
* is specified. Since we've already handled fully
* qualified names above, this is an error
* A handle is optional iff a fully qualified pathname is specified.
* Since we've already handled fully qualified names above, this is
* an error
*/
if (!pathname) {
ACPI_ERROR((AE_INFO,
"Both Handle and Pathname are NULL"));
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Both Handle and Pathname are NULL"));
} else {
ACPI_ERROR((AE_INFO,
"Handle is NULL and Pathname is relative"));
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Null Handle with relative pathname [%s]",
pathname));
}
status = AE_BAD_PARAMETER;
} else {
/*
* We get here if we have a handle -- and if we have a
* pathname it is relative. The handle will be validated
* in the lower procedures
*/
if (!pathname) {
/*
* The null pathname case means the handle is for
* the actual object to be evaluated
*/
status = acpi_ns_evaluate_by_handle(&info);
} else {
/*
* Both a Handle and a relative Pathname
*/
status = acpi_ns_evaluate_relative(pathname, &info);
}
/* We have a namespace a node and a possible relative path */
status = acpi_ns_evaluate(info);
}
/*
@ -268,10 +267,10 @@ acpi_evaluate_object(acpi_handle handle,
* copy the return value to an external object.
*/
if (return_buffer) {
if (!info.return_object) {
if (!info->return_object) {
return_buffer->length = 0;
} else {
if (ACPI_GET_DESCRIPTOR_TYPE(info.return_object) ==
if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
ACPI_DESC_TYPE_NAMED) {
/*
* If we received a NS Node as a return object, this means that
@ -282,19 +281,19 @@ acpi_evaluate_object(acpi_handle handle,
* support for various types at a later date if necessary.
*/
status = AE_TYPE;
info.return_object = NULL; /* No need to delete a NS Node */
info->return_object = NULL; /* No need to delete a NS Node */
return_buffer->length = 0;
}
if (ACPI_SUCCESS(status)) {
/*
* Find out how large a buffer is needed
* to contain the returned object
*/
/* Get the size of the returned object */
status =
acpi_ut_get_object_size(info.return_object,
acpi_ut_get_object_size(info->return_object,
&buffer_space_needed);
if (ACPI_SUCCESS(status)) {
/* Validate/Allocate/Clear caller buffer */
status =
@ -303,7 +302,8 @@ acpi_evaluate_object(acpi_handle handle,
buffer_space_needed);
if (ACPI_FAILURE(status)) {
/*
* Caller's buffer is too small or a new one can't be allocated
* Caller's buffer is too small or a new one can't
* be allocated
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Needed buffer size %X, %s\n",
@ -312,12 +312,11 @@ acpi_evaluate_object(acpi_handle handle,
acpi_format_exception
(status)));
} else {
/*
* We have enough space for the object, build it
*/
/* We have enough space for the object, build it */
status =
acpi_ut_copy_iobject_to_eobject
(info.return_object,
(info->return_object,
return_buffer);
}
}
@ -325,35 +324,37 @@ acpi_evaluate_object(acpi_handle handle,
}
}
if (info.return_object) {
if (info->return_object) {
/*
* Delete the internal return object. NOTE: Interpreter
* must be locked to avoid race condition.
* Delete the internal return object. NOTE: Interpreter must be
* locked to avoid race condition.
*/
status2 = acpi_ex_enter_interpreter();
if (ACPI_SUCCESS(status2)) {
/*
* Delete the internal return object. (Or at least
* decrement the reference count by one)
*/
acpi_ut_remove_reference(info.return_object);
/* Remove one reference on the return object (should delete it) */
acpi_ut_remove_reference(info->return_object);
acpi_ex_exit_interpreter();
}
}
/*
* Free the input parameter list (if we created one),
*/
if (info.parameters) {
cleanup:
/* Free the input parameter list (if we created one) */
if (info->parameters) {
/* Free the allocated parameter block */
acpi_ut_delete_internal_object_list(info.parameters);
acpi_ut_delete_internal_object_list(info->parameters);
}
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_evaluate_object);
ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
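As a rough caller-side sketch of the interface whose internals change above (illustrative only: the method name "_XMP", the wrapper function, and freeing the returned buffer with acpi_os_free() are assumptions, not part of this commit):

#include <linux/kernel.h>
#include <acpi/acpi.h>

static acpi_status example_eval_method(acpi_handle handle)
{
	union acpi_object arg;
	struct acpi_object_list args = { 1, &arg };
	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = 1;

	/* "_XMP" is a placeholder name, not a real ACPI method */
	status = acpi_evaluate_object(handle, "_XMP", &args, &result);
	if (ACPI_FAILURE(status))
		return status;

	obj = result.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		printk(KERN_DEBUG "method returned %llu\n",
		       (unsigned long long)obj->integer.value);

	acpi_os_free(result.pointer);	/* buffer was allocated by ACPICA */
	return AE_OK;
}

The argument list is optional, and a fully qualified pathname may be passed with a NULL handle, matching the validation logic above.
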
/*******************************************************************************
*
@ -384,7 +385,6 @@ EXPORT_SYMBOL(acpi_evaluate_object);
* function, etc.
*
******************************************************************************/
acpi_status
acpi_walk_namespace(acpi_object_type type,
acpi_handle start_object,
@ -394,7 +394,7 @@ acpi_walk_namespace(acpi_object_type type,
{
acpi_status status;
ACPI_FUNCTION_TRACE("acpi_walk_namespace");
ACPI_FUNCTION_TRACE(acpi_walk_namespace);
/* Parameter validation */
@ -421,7 +421,7 @@ acpi_walk_namespace(acpi_object_type type,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_walk_namespace);
ACPI_EXPORT_SYMBOL(acpi_walk_namespace)
/*******************************************************************************
*
@ -436,7 +436,6 @@ EXPORT_SYMBOL(acpi_walk_namespace);
* on that.
*
******************************************************************************/
static acpi_status
acpi_ns_get_device_callback(acpi_handle obj_handle,
u32 nesting_level,
@ -473,6 +472,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
}
if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
/* Don't examine children of the device if not present */
return (AE_CTRL_DEPTH);
@ -489,6 +489,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
}
if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) {
/* Get the list of Compatible IDs */
status = acpi_ut_execute_CID(node, &cid);
@ -505,11 +506,11 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
sizeof(struct
acpi_compatible_id)) !=
0) {
ACPI_MEM_FREE(cid);
ACPI_FREE(cid);
return (AE_OK);
}
}
ACPI_MEM_FREE(cid);
ACPI_FREE(cid);
}
}
@ -551,7 +552,7 @@ acpi_get_devices(char *HID,
acpi_status status;
struct acpi_get_devices_info info;
ACPI_FUNCTION_TRACE("acpi_get_devices");
ACPI_FUNCTION_TRACE(acpi_get_devices);
/* Parameter validation */
@ -563,9 +564,9 @@ acpi_get_devices(char *HID,
* We're going to call their callback from OUR callback, so we need
* to know what it is, and their context parameter.
*/
info.hid = HID;
info.context = context;
info.user_function = user_function;
info.hid = HID;
/*
* Lock the namespace around the walk.
@ -578,9 +579,8 @@ acpi_get_devices(char *HID,
return_ACPI_STATUS(status);
}
status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE,
ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
ACPI_NS_WALK_UNLOCK,
status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ns_get_device_callback, &info,
return_value);
@ -588,7 +588,7 @@ acpi_get_devices(char *HID,
return_ACPI_STATUS(status);
}
EXPORT_SYMBOL(acpi_get_devices);
ACPI_EXPORT_SYMBOL(acpi_get_devices)
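A minimal caller sketch for this interface (hypothetical HID string and function names; the callback follows the acpi_walk_callback shape used by the device callback above):

#include <acpi/acpi.h>

/* Hypothetical hardware ID; any EISA/ACPI ID string works here */
#define EXAMPLE_HID "PNP0000"

static acpi_status example_device_cb(acpi_handle handle, u32 level,
				     void *context, void **retval)
{
	int *count = context;

	(*count)++;
	return AE_OK;		/* AE_CTRL_DEPTH would skip the subtree */
}

static int example_count_devices(void)
{
	int count = 0;

	acpi_get_devices(EXAMPLE_HID, example_device_cb, &count, NULL);
	return count;
}
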
/*******************************************************************************
*
@ -603,7 +603,6 @@ EXPORT_SYMBOL(acpi_get_devices);
* DESCRIPTION: Attach arbitrary data and handler to a namespace node.
*
******************************************************************************/
acpi_status
acpi_attach_data(acpi_handle obj_handle,
acpi_object_handler handler, void *data)
@ -637,6 +636,8 @@ acpi_attach_data(acpi_handle obj_handle,
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_attach_data)
/*******************************************************************************
*
* FUNCTION: acpi_detach_data
@ -649,7 +650,6 @@ acpi_attach_data(acpi_handle obj_handle,
* DESCRIPTION: Remove data that was previously attached to a node.
*
******************************************************************************/
acpi_status
acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
{
@ -682,6 +682,8 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_detach_data)
/*******************************************************************************
*
* FUNCTION: acpi_get_data
@ -695,7 +697,6 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
* DESCRIPTION: Retrieve data that was previously attached to a namespace node.
*
******************************************************************************/
acpi_status
acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
{
@ -727,3 +728,5 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_get_data)

View File

@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@ -114,9 +112,8 @@ acpi_get_handle(acpi_handle parent,
/*
* Find the Node and convert to a handle
*/
status =
acpi_ns_get_node_by_path(pathname, prefix_node, ACPI_NS_NO_UPSEARCH,
&node);
status = acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH,
&node);
*ret_handle = NULL;
if (ACPI_SUCCESS(status)) {
@ -126,7 +123,7 @@ acpi_get_handle(acpi_handle parent,
return (status);
}
EXPORT_SYMBOL(acpi_get_handle);
ACPI_EXPORT_SYMBOL(acpi_get_handle)
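A minimal usage sketch (illustrative function name; "\_SB" is simply a common absolute namespace path):

#include <acpi/acpi.h>

static acpi_handle example_lookup_sb(void)
{
	acpi_handle handle = NULL;

	/* The parent handle may be NULL because the path is fully qualified */
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
		return NULL;
	return handle;
}
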
/******************************************************************************
*
@ -143,7 +140,6 @@ EXPORT_SYMBOL(acpi_get_handle);
* complementary functions.
*
******************************************************************************/
acpi_status
acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
{
@ -162,6 +158,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
}
if (name_type == ACPI_FULL_PATHNAME) {
/* Get the full pathname (From the namespace root) */
status = acpi_ns_handle_to_pathname(handle, buffer);
@ -203,7 +200,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
return (status);
}
EXPORT_SYMBOL(acpi_get_name);
ACPI_EXPORT_SYMBOL(acpi_get_name)
/******************************************************************************
*
@ -219,7 +216,6 @@ EXPORT_SYMBOL(acpi_get_name);
* control methods (Such as in the case of a device.)
*
******************************************************************************/
acpi_status
acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
{
@ -241,7 +237,7 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
return (status);
}
info = ACPI_MEM_CALLOCATE(sizeof(struct acpi_device_info));
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info));
if (!info) {
return (AE_NO_MEMORY);
}
@ -345,11 +341,11 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
}
cleanup:
ACPI_MEM_FREE(info);
ACPI_FREE(info);
if (cid_list) {
ACPI_MEM_FREE(cid_list);
ACPI_FREE(cid_list);
}
return (status);
}
EXPORT_SYMBOL(acpi_get_object_info);
ACPI_EXPORT_SYMBOL(acpi_get_object_info)

View File

@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@ -101,7 +99,7 @@ acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
return (status);
}
EXPORT_SYMBOL(acpi_get_type);
ACPI_EXPORT_SYMBOL(acpi_get_type)
/*******************************************************************************
*
@ -116,7 +114,6 @@ EXPORT_SYMBOL(acpi_get_type);
* Handle.
*
******************************************************************************/
acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
{
struct acpi_namespace_node *node;
@ -162,7 +159,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
return (status);
}
EXPORT_SYMBOL(acpi_get_parent);
ACPI_EXPORT_SYMBOL(acpi_get_parent)
/*******************************************************************************
*
@ -181,7 +178,6 @@ EXPORT_SYMBOL(acpi_get_parent);
* Scope is returned.
*
******************************************************************************/
acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
@ -206,6 +202,7 @@ acpi_get_next_object(acpi_object_type type,
/* If null handle, use the parent */
if (!child) {
/* Start search at the beginning of the specified scope */
parent_node = acpi_ns_map_handle_to_node(parent);
@ -242,4 +239,4 @@ acpi_get_next_object(acpi_object_type type,
return (status);
}
EXPORT_SYMBOL(acpi_get_next_object);
ACPI_EXPORT_SYMBOL(acpi_get_next_object)

View File

@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/kthread.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
@ -600,23 +601,41 @@ static void acpi_os_execute_deferred(void *context)
return_VOID;
}
acpi_status
acpi_os_queue_for_execution(u32 priority,
static int acpi_os_execute_thread(void *context)
{
struct acpi_os_dpc *dpc = (struct acpi_os_dpc *)context;
if (dpc) {
dpc->function(dpc->context);
kfree(dpc);
}
do_exit(0);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_execute
*
* PARAMETERS: Type - Type of the callback
* Function - Function to be executed
* Context - Function parameters
*
* RETURN: Status
*
* DESCRIPTION: Depending on type, either queues function for deferred execution or
* immediately executes function on a separate thread.
*
******************************************************************************/
acpi_status acpi_os_execute(acpi_execute_type type,
acpi_osd_exec_callback function, void *context)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct work_struct *task;
ACPI_FUNCTION_TRACE("os_queue_for_execution");
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
struct task_struct *p;
if (!function)
return_ACPI_STATUS(AE_BAD_PARAMETER);
return AE_BAD_PARAMETER;
/*
* Allocate/initialize DPC structure. Note that this memory will be
* freed by the callee. The kernel handles the tq_struct list in a
@ -627,30 +646,37 @@ acpi_os_queue_for_execution(u32 priority,
* We can save time and code by allocating the DPC and tq_structs
* from the same memory.
*/
dpc =
kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
GFP_ATOMIC);
if (type == OSL_NOTIFY_HANDLER) {
dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_KERNEL);
} else {
dpc = kmalloc(sizeof(struct acpi_os_dpc) +
sizeof(struct work_struct), GFP_ATOMIC);
}
if (!dpc)
return_ACPI_STATUS(AE_NO_MEMORY);
return AE_NO_MEMORY;
dpc->function = function;
dpc->context = context;
task = (void *)(dpc + 1);
INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
if (!queue_work(kacpid_wq, task)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Call to queue_work() failed.\n"));
kfree(dpc);
status = AE_ERROR;
if (type == OSL_NOTIFY_HANDLER) {
p = kthread_create(acpi_os_execute_thread, dpc, "kacpid_notify");
if (!IS_ERR(p)) {
wake_up_process(p);
} else {
status = AE_NO_MEMORY;
kfree(dpc);
}
} else {
task = (void *)(dpc + 1);
INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
if (!queue_work(kacpid_wq, task)) {
status = AE_ERROR;
kfree(dpc);
}
}
return_ACPI_STATUS(status);
return status;
}
EXPORT_SYMBOL(acpi_os_queue_for_execution);
EXPORT_SYMBOL(acpi_os_execute);
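A caller-side sketch of the new interface (illustrative names; only the OSL_NOTIFY_HANDLER case takes the kthread path added above, all other execute types still go through the kacpid workqueue):

#include <acpi/acpi.h>

static void example_deferred_fn(void *context)
{
	/* Runs in process context and may sleep */
}

static acpi_status example_defer(void *context)
{
	return acpi_os_execute(OSL_NOTIFY_HANDLER, example_deferred_fn, context);
}
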
void acpi_os_wait_events_complete(void *context)
{
@ -769,9 +795,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
if (in_atomic())
timeout = 0;
switch (timeout) {
/*
* No Wait:
@ -896,14 +919,6 @@ u8 acpi_os_writable(void *ptr, acpi_size len)
}
#endif
u32 acpi_os_get_thread_id(void)
{
if (!in_atomic())
return current->pid;
return 0;
}
acpi_status acpi_os_signal(u32 function, void *info)
{
switch (function) {
@ -1050,12 +1065,12 @@ void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags)
*
* FUNCTION: acpi_os_create_cache
*
* PARAMETERS: CacheName - Ascii name for the cache
* ObjectSize - Size of each cached object
* MaxDepth - Maximum depth of the cache (in objects)
* ReturnCache - Where the new cache object is returned
* PARAMETERS: name - Ascii name for the cache
* size - Size of each cached object
* depth - Maximum depth of the cache (in objects) <ignored>
* cache - Where the new cache object is returned
*
* RETURN: Status
* RETURN: status
*
* DESCRIPTION: Create a cache object
*
@ -1065,7 +1080,10 @@ acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
*cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
return AE_OK;
if (*cache == NULL)
return AE_ERROR;
else
return AE_OK;
}
/*******************************************************************************
@ -1134,16 +1152,63 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
*
* RETURN: Status
*
* DESCRIPTION: Get an object from the specified cache. If cache is empty,
* the object is allocated.
* DESCRIPTION: Return a zero-filled object.
*
******************************************************************************/
void *acpi_os_acquire_object(acpi_cache_t * cache)
{
void *object = kmem_cache_alloc(cache, GFP_KERNEL);
void *object = kmem_cache_zalloc(cache, GFP_KERNEL);
WARN_ON(!object);
return object;
}
/******************************************************************************
*
* FUNCTION: acpi_os_validate_interface
*
* PARAMETERS: interface - Requested interface to be validated
*
* RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
*
* DESCRIPTION: Match an interface string to the interfaces supported by the
* host. Strings originate from an AML call to the _OSI method.
*
*****************************************************************************/
acpi_status
acpi_os_validate_interface (char *interface)
{
return AE_SUPPORT;
}
/******************************************************************************
*
* FUNCTION: acpi_os_validate_address
*
* PARAMETERS: space_id - ACPI space ID
* address - Physical address
* length - Address length
*
* RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
* should return AE_AML_ILLEGAL_ADDRESS.
*
* DESCRIPTION: Validate a system address via the host OS. Used to validate
* the addresses accessed by AML operation regions.
*
*****************************************************************************/
acpi_status
acpi_os_validate_address (
u8 space_id,
acpi_physical_address address,
acpi_size length)
{
return AE_OK;
}
#endif

View File

@ -79,7 +79,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
acpi_native_uint byte_count;
u8 byte_zero_mask = 0x3F; /* Default [0:5] */
ACPI_FUNCTION_TRACE("ps_get_next_package_length");
ACPI_FUNCTION_TRACE(ps_get_next_package_length);
/*
* Byte 0 bits [6:7] contain the number of additional bytes
@ -128,7 +128,7 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
u8 *start = parser_state->aml;
u32 package_length;
ACPI_FUNCTION_TRACE("ps_get_next_package_end");
ACPI_FUNCTION_TRACE(ps_get_next_package_end);
/* Function below updates parser_state->Aml */
@ -157,7 +157,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
u8 *start = parser_state->aml;
u8 *end = parser_state->aml;
ACPI_FUNCTION_TRACE("ps_get_next_namestring");
ACPI_FUNCTION_TRACE(ps_get_next_namestring);
/* Point past any namestring prefix characters (backslash or carat) */
@ -237,7 +237,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_generic_state scope_info;
ACPI_FUNCTION_TRACE("ps_get_next_namepath");
ACPI_FUNCTION_TRACE(ps_get_next_namepath);
path = acpi_ps_get_next_namestring(parser_state);
acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
@ -275,6 +275,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
*/
if (ACPI_SUCCESS(status) &&
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
/* This name is actually a control method invocation */
method_desc = acpi_ns_get_attached_object(node);
@ -319,6 +320,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
* some not_found cases are allowed
*/
if (status == AE_NOT_FOUND) {
/* 1) not_found is ok during load pass 1/2 (allow forward references) */
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) !=
@ -354,6 +356,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
/* Report a control method execution error */
status = acpi_ds_method_error(status, walk_state);
@ -388,7 +391,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
u16 opcode;
u8 *aml = parser_state->aml;
ACPI_FUNCTION_TRACE_U32("ps_get_next_simple_arg", arg_type);
ACPI_FUNCTION_TRACE_U32(ps_get_next_simple_arg, arg_type);
switch (arg_type) {
case ARGP_BYTEDATA:
@ -453,7 +456,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
default:
ACPI_ERROR((AE_INFO, "Invalid arg_type %X", arg_type));
ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
return_VOID;
}
@ -484,7 +487,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
u16 opcode;
u32 name;
ACPI_FUNCTION_TRACE("ps_get_next_field");
ACPI_FUNCTION_TRACE(ps_get_next_field);
/* Determine field type */
@ -590,7 +593,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
u32 subop;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR("ps_get_next_arg", parser_state);
ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);
switch (arg_type) {
case ARGP_BYTEDATA:
@ -620,6 +623,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_FIELDLIST:
if (parser_state->aml < parser_state->pkg_end) {
/* Non-empty list */
while (parser_state->aml < parser_state->pkg_end) {
@ -645,6 +649,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_BYTELIST:
if (parser_state->aml < parser_state->pkg_end) {
/* Non-empty list */
arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
@ -673,6 +678,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
if (subop == 0 ||
acpi_ps_is_leading_char(subop) ||
acpi_ps_is_prefix_char(subop)) {
/* null_name or name_string */
arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
@ -703,6 +709,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_OBJLIST:
if (parser_state->aml < parser_state->pkg_end) {
/* Non-empty list of variable arguments, nothing returned */
walk_state->arg_count = ACPI_VAR_ARGS;
@ -711,7 +718,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO, "Invalid arg_type: %X", arg_type));
ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
status = AE_AML_OPERAND_TYPE;
break;
}

View File

@ -83,7 +83,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
struct acpi_parse_state *parser_state;
u8 *aml_op_start = NULL;
ACPI_FUNCTION_TRACE_PTR("ps_parse_loop", walk_state);
ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
if (walk_state->descending_callback == NULL) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@ -95,6 +95,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
/* We are restarting a preempted control method */
if (acpi_ps_has_completed_scope(parser_state)) {
@ -128,7 +129,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
}
ACPI_EXCEPTION((AE_INFO, status,
"get_predicate Failed"));
"GetPredicate Failed"));
return_ACPI_STATUS(status);
}
@ -143,6 +144,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Popped scope, Op=%p\n", op));
} else if (walk_state->prev_op) {
/* We were in the middle of an op */
op = walk_state->prev_op;
@ -156,6 +158,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
while ((parser_state->aml < parser_state->aml_end) || (op)) {
aml_op_start = parser_state->aml;
if (!op) {
/* Get the next opcode from the AML stream */
walk_state->aml_offset =
@ -213,6 +216,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Create Op structure and append to parent's argument list */
if (walk_state->op_info->flags & AML_NAMED) {
/* Allocate a new pre_op if necessary */
if (!pre_op) {
@ -371,7 +375,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (walk_state->op_info) {
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Opcode %4.4X [%s] Op %p Aml %p aml_offset %5.5X\n",
"Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
(u32) op->common.aml_opcode,
walk_state->op_info->name, op,
parser_state->aml,
@ -388,6 +392,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Are there any arguments that must be processed? */
if (walk_state->arg_types) {
/* Get arguments */
switch (op->common.aml_opcode) {
@ -742,7 +747,19 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
status2 =
acpi_ds_result_stack_pop
(walk_state);
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
acpi_ut_delete_generic_state
(acpi_ut_pop_generic_state
(&walk_state->control_state));
}
acpi_ps_pop_scope(parser_state, &op,
&walk_state->arg_types,
&walk_state->arg_count);
@ -762,6 +779,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
return_ACPI_STATUS(status2);
}
}
acpi_ps_pop_scope(parser_state, &op,
&walk_state->arg_types,
&walk_state->arg_count);
@ -853,6 +871,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
}
else if (ACPI_FAILURE(status)) {
/* First error is most important */
(void)

View File

@ -725,12 +725,13 @@ static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
ACPI_FUNCTION_NAME("ps_get_opcode_info");
ACPI_FUNCTION_NAME(ps_get_opcode_info);
/*
* Detect normal 8-bit opcode or extended 16-bit opcode
*/
if (!(opcode & 0xFF00)) {
/* Simple (8-bit) opcode: 0-255, can't index beyond table */
return (&acpi_gbl_aml_op_info
@ -739,6 +740,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
(((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
/* Valid extended (16-bit) opcode */
return (&acpi_gbl_aml_op_info
@ -779,7 +781,7 @@ char *acpi_ps_get_opcode_name(u16 opcode)
return (op->name);
#else
return ("AE_NOT_CONFIGURED");
return ("OpcodeName unavailable");
#endif
}

View File

@ -106,6 +106,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
opcode = (u16) ACPI_GET8(aml);
if (opcode == AML_EXTENDED_OP_PREFIX) {
/* Extended opcode, get the second opcode byte */
aml++;
@ -137,7 +138,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
const struct acpi_opcode_info *parent_info;
union acpi_parse_object *replacement_op = NULL;
ACPI_FUNCTION_TRACE_PTR("ps_complete_this_op", op);
ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
/* Check for null Op, can happen if AML code is corrupt */
@ -158,6 +159,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
if (op->common.parent) {
prev = op->common.parent->common.value.arg;
if (!prev) {
/* Nothing more to do */
goto cleanup;
@ -245,6 +247,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
/* We must unlink this op from the parent tree */
if (prev == op) {
/* This op is the first in the list */
if (replacement_op) {
@ -265,6 +268,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
else
while (prev) {
/* Traverse all siblings in the parent's argument list */
next = prev->common.next;
@ -329,7 +333,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
struct acpi_parse_state *parser_state = &walk_state->parser_state;
acpi_status status = AE_CTRL_PENDING;
ACPI_FUNCTION_TRACE_PTR("ps_next_parse_state", op);
ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);
switch (callback_status) {
case AE_CTRL_TERMINATE:
@ -449,10 +453,10 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
struct acpi_walk_state *previous_walk_state;
ACPI_FUNCTION_TRACE("ps_parse_aml");
ACPI_FUNCTION_TRACE(ps_parse_aml);
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Entered with walk_state=%p Aml=%p size=%X\n",
"Entered with WalkState=%p Aml=%p size=%X\n",
walk_state, walk_state->parser_state.aml,
walk_state->parser_state.aml_size));
@ -460,6 +464,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
thread = acpi_ut_create_thread_state();
if (!thread) {
acpi_ds_delete_walk_state(walk_state);
return_ACPI_STATUS(AE_NO_MEMORY);
}
@ -510,6 +515,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
} else if (status == AE_CTRL_TERMINATE) {
status = AE_OK;
} else if ((status != AE_OK) && (walk_state->method_desc)) {
/* Either the method parse or actual execution failed */
ACPI_ERROR_METHOD("Method parse/execution failed",
@ -550,20 +556,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
*/
if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) {
if (walk_state->method_desc) {
/* Decrement the thread count on the method parse tree */
if (walk_state->method_desc->method.
thread_count) {
walk_state->method_desc->method.
thread_count--;
} else {
ACPI_ERROR((AE_INFO,
"Invalid zero thread count in method"));
}
}
acpi_ds_terminate_control_method(walk_state);
acpi_ds_terminate_control_method(walk_state->
method_desc,
walk_state);
}
/* Delete this walk state and all linked control states */
@ -572,7 +567,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
previous_walk_state = walk_state;
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"return_value=%p, implicit_value=%p State=%p\n",
"ReturnValue=%p, ImplicitValue=%p State=%p\n",
walk_state->return_desc,
walk_state->implicit_return_obj, walk_state));
@ -633,12 +628,14 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
}
} else {
if (previous_walk_state->return_desc) {
/* Caller doesn't want it, must delete it */
acpi_ut_remove_reference(previous_walk_state->
return_desc);
}
if (previous_walk_state->implicit_return_obj) {
/* Caller doesn't want it, must delete it */
acpi_ut_remove_reference(previous_walk_state->

View File

@ -106,14 +106,14 @@ acpi_ps_init_scope(struct acpi_parse_state * parser_state,
{
union acpi_generic_state *scope;
ACPI_FUNCTION_TRACE_PTR("ps_init_scope", root_op);
ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
scope->common.data_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
scope->parse_scope.op = root_op;
scope->parse_scope.arg_count = ACPI_VAR_ARGS;
scope->parse_scope.arg_end = parser_state->aml_end;
@ -147,14 +147,14 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
{
union acpi_generic_state *scope;
ACPI_FUNCTION_TRACE_PTR("ps_push_scope", op);
ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
scope->common.data_type = ACPI_DESC_TYPE_STATE_PSCOPE;
scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
scope->parse_scope.op = op;
scope->parse_scope.arg_list = remaining_args;
scope->parse_scope.arg_count = arg_count;
@ -165,6 +165,7 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
acpi_ut_push_generic_state(&parser_state->scope, scope);
if (arg_count == ACPI_VAR_ARGS) {
/* Multiple arguments */
scope->parse_scope.arg_end = parser_state->pkg_end;
@ -199,14 +200,14 @@ acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
{
union acpi_generic_state *scope = parser_state->scope;
ACPI_FUNCTION_TRACE("ps_pop_scope");
ACPI_FUNCTION_TRACE(ps_pop_scope);
/* Only pop the scope if there is in fact a next scope */
if (scope->common.next) {
scope = acpi_ut_pop_generic_state(&parser_state->scope);
/* return to parsing previous op */
/* Return to parsing previous op */
*op = scope->parse_scope.op;
*arg_list = scope->parse_scope.arg_list;
@ -217,7 +218,7 @@ acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
acpi_ut_delete_generic_state(scope);
} else {
/* empty parse stack, prepare to fetch next opcode */
/* Empty parse stack, prepare to fetch next opcode */
*op = NULL;
*arg_list = 0;
@ -246,7 +247,7 @@ void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
{
union acpi_generic_state *scope;
ACPI_FUNCTION_TRACE_PTR("ps_cleanup_scope", parser_state);
ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);
if (!parser_state) {
return_VOID;

View File

@ -77,6 +77,7 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
/* Invalid opcode or ASCII character */
return (NULL);
@ -85,6 +86,7 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
/* Check if this opcode requires argument sub-objects */
if (!(op_info->flags & AML_HAS_ARGS)) {
/* Has no linked argument objects */
return (NULL);
@ -130,6 +132,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
/* Invalid opcode */
ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X",
@ -140,6 +143,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
/* Check if this opcode requires argument sub-objects */
if (!(op_info->flags & AML_HAS_ARGS)) {
/* Has no linked argument objects */
return;
@ -148,6 +152,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
/* Append the argument to the linked argument list */
if (op->common.value.arg) {
/* Append to existing argument list */
prev_arg = op->common.value.arg;
@ -222,12 +227,14 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
}
if (arg == origin) {
/* Reached parent of origin, end search */
return (NULL);
}
if (parent->common.next) {
/* Found sibling of parent */
return (parent->common.next);
@ -299,5 +306,4 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op)
return (child);
}
#endif
#endif /* ACPI_FUTURE_USAGE */

View File

@ -89,7 +89,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
{
ACPI_FUNCTION_ENTRY();
op->common.data_type = ACPI_DESC_TYPE_PARSER;
op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
@ -135,6 +135,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
/* Allocate the minimum required size object */
if (flags == ACPI_PARSEOP_GENERIC) {
/* The generic op (default) is by far the most common (16 to 1) */
op = acpi_os_acquire_object(acpi_gbl_ps_node_cache);
@ -171,7 +172,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
void acpi_ps_free_op(union acpi_parse_object *op)
{
ACPI_FUNCTION_NAME("ps_free_op");
ACPI_FUNCTION_NAME(ps_free_op);
if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n",

View File

@ -64,18 +64,21 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
union acpi_parse_object *next = NULL;
union acpi_parse_object *parent = NULL;
ACPI_FUNCTION_TRACE_PTR("ps_delete_parse_tree", subtree_root);
ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);
/* Visit all nodes in the subtree */
while (op) {
/* Check if we are not ascending */
if (op != parent) {
/* Look for an argument or child of the current op */
next = acpi_ps_get_arg(op, 0);
if (next) {
/* Still going downward in tree (Op is not completed yet) */
op = next;

View File

@ -50,14 +50,14 @@
ACPI_MODULE_NAME("psxface")
/* Local Prototypes */
static void acpi_ps_start_trace(struct acpi_parameter_info *info);
static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
static void acpi_ps_stop_trace(struct acpi_parameter_info *info);
static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info);
static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info);
static void
acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action);
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
/*******************************************************************************
*
@ -113,7 +113,7 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
*
******************************************************************************/
static void acpi_ps_start_trace(struct acpi_parameter_info *info)
static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
@ -125,7 +125,7 @@ static void acpi_ps_start_trace(struct acpi_parameter_info *info)
}
if ((!acpi_gbl_trace_method_name) ||
(acpi_gbl_trace_method_name != info->node->name.integer)) {
(acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
@ -158,7 +158,7 @@ static void acpi_ps_start_trace(struct acpi_parameter_info *info)
*
******************************************************************************/
static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
@ -170,7 +170,7 @@ static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
}
if ((!acpi_gbl_trace_method_name) ||
(acpi_gbl_trace_method_name != info->node->name.integer)) {
(acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
@ -212,22 +212,23 @@ static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
*
******************************************************************************/
acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ps_execute_method");
ACPI_FUNCTION_TRACE(ps_execute_method);
/* Validate the Info and method Node */
if (!info || !info->node) {
if (!info || !info->resolved_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
}
/* Init for new method, wait on concurrency semaphore */
status =
acpi_ds_begin_method_execution(info->node, info->obj_desc, NULL);
acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
NULL);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@ -248,7 +249,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Parse **** Entry=%p obj=%p\n",
info->node, info->obj_desc));
info->resolved_node, info->obj_desc));
info->pass_number = 1;
status = acpi_ps_execute_pass(info);
@ -261,7 +262,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Execution **** Entry=%p obj=%p\n",
info->node, info->obj_desc));
info->resolved_node, info->obj_desc));
info->pass_number = 3;
status = acpi_ps_execute_pass(info);
@ -286,8 +287,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
* a control exception code
*/
if (info->return_object) {
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Method returned obj_desc=%p\n",
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
info->return_object));
ACPI_DUMP_STACK_ENTRY(info->return_object);
@ -301,7 +301,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*
* FUNCTION: acpi_ps_update_parameter_list
*
* PARAMETERS: Info - See struct acpi_parameter_info
* PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: parameter_type and Parameters)
* Action - Add or Remove reference
*
@ -312,14 +312,16 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
******************************************************************************/
static void
acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
{
acpi_native_uint i;
if ((info->parameter_type == ACPI_PARAM_ARGS) && (info->parameters)) {
/* Update reference count for each parameter */
for (i = 0; info->parameters[i]; i++) {
/* Ignore errors, just do them all */
(void)acpi_ut_update_object_reference(info->
@ -333,7 +335,7 @@ acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
*
* FUNCTION: acpi_ps_execute_pass
*
* PARAMETERS: Info - See struct acpi_parameter_info
* PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: pass_number, Node, and obj_desc)
*
* RETURN: Status
@ -342,13 +344,13 @@ acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
*
******************************************************************************/
static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info)
static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info)
{
acpi_status status;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE("ps_execute_pass");
ACPI_FUNCTION_TRACE(ps_execute_pass);
/* Create and init a Root Node */
@ -367,7 +369,7 @@ static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info)
goto cleanup;
}
status = acpi_ds_init_aml_walk(walk_state, op, info->node,
status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
info->obj_desc->method.aml_start,
info->obj_desc->method.aml_length,
info->pass_number == 1 ? NULL : info,

View File

@ -38,6 +38,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@ -91,7 +92,7 @@ static struct {
int count;
struct list_head entries;
} acpi_link;
DECLARE_MUTEX(acpi_link_lock);
DEFINE_MUTEX(acpi_link_lock);
/* --------------------------------------------------------------------------
PCI Link Device Management
@ -641,19 +642,19 @@ acpi_pci_link_allocate_irq(acpi_handle handle,
return_VALUE(-1);
}
down(&acpi_link_lock);
mutex_lock(&acpi_link_lock);
if (acpi_pci_link_allocate(link)) {
up(&acpi_link_lock);
mutex_unlock(&acpi_link_lock);
return_VALUE(-1);
}
if (!link->irq.active) {
up(&acpi_link_lock);
mutex_unlock(&acpi_link_lock);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link active IRQ is 0!\n"));
return_VALUE(-1);
}
link->refcnt++;
up(&acpi_link_lock);
mutex_unlock(&acpi_link_lock);
if (triggering)
*triggering = link->irq.triggering;
@ -691,9 +692,9 @@ int acpi_pci_link_free_irq(acpi_handle handle)
return_VALUE(-1);
}
down(&acpi_link_lock);
mutex_lock(&acpi_link_lock);
if (!link->irq.initialized) {
up(&acpi_link_lock);
mutex_unlock(&acpi_link_lock);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link isn't initialized\n"));
return_VALUE(-1);
}
@ -716,7 +717,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
if (link->refcnt == 0) {
acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
}
up(&acpi_link_lock);
mutex_unlock(&acpi_link_lock);
return_VALUE(link->irq.active);
}
@ -747,7 +748,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
acpi_driver_data(device) = link;
down(&acpi_link_lock);
mutex_lock(&acpi_link_lock);
result = acpi_pci_link_get_possible(link);
if (result)
goto end;
@ -782,7 +783,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
end:
/* disable all links -- to be activated on use */
acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
up(&acpi_link_lock);
mutex_unlock(&acpi_link_lock);
if (result)
kfree(link);
@ -840,9 +841,9 @@ static int acpi_pci_link_remove(struct acpi_device *device, int type)
link = (struct acpi_pci_link *)acpi_driver_data(device);
down(&acpi_link_lock);
mutex_lock(&acpi_link_lock);
list_del(&link->node);
up(&acpi_link_lock);
mutex_unlock(&acpi_link_lock);
kfree(link);
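The pci_link changes above are a straight sem2mutex conversion; as a generic sketch of the pattern (names are illustrative, not from this file):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* was: static DECLARE_MUTEX(example_sem); */

static void example_critical_section(void)
{
	mutex_lock(&example_lock);	/* was: down(&example_sem); */
	/* ... touch the state the lock protects ... */
	mutex_unlock(&example_lock);	/* was: up(&example_sem); */
}
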

View File

@ -388,7 +388,7 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
/* Use the acpiid in MADT to map cpus in case of SMP */
#ifndef CONFIG_SMP
#define convert_acpiid_to_cpu(acpi_id) (0xff)
#define convert_acpiid_to_cpu(acpi_id) (-1)
#else
#ifdef CONFIG_IA64
@ -401,7 +401,7 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
#define ARCH_BAD_APICID (0xff)
#endif
static u8 convert_acpiid_to_cpu(u8 acpi_id)
static int convert_acpiid_to_cpu(u8 acpi_id)
{
u16 apic_id;
int i;
@ -427,7 +427,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
acpi_status status = 0;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
u8 cpu_index;
int cpu_index;
static int cpu0_initialized;
ACPI_FUNCTION_TRACE("acpi_processor_get_info");
@ -473,7 +473,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
cpu_index = convert_acpiid_to_cpu(pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if (!cpu0_initialized && (cpu_index == 0xff) &&
if (!cpu0_initialized && (cpu_index == -1) &&
(num_online_cpus() == 1)) {
cpu_index = 0;
}
@ -487,7 +487,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
* less than the max # of CPUs. They should be ignored _iff
* they are physically not present.
*/
if (cpu_index >= NR_CPUS) {
if (cpu_index == -1) {
if (ACPI_FAILURE
(acpi_processor_hotadd_init(pr->handle, &pr->id))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
@ -558,8 +558,8 @@ static int acpi_processor_start(struct acpi_device *device)
*/
if (processor_device_array[pr->id] != NULL &&
processor_device_array[pr->id] != (void *)device) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "BIOS reporting wrong ACPI id"
"for the processor\n"));
printk(KERN_WARNING "BIOS reported wrong ACPI id"
"for the processor\n");
return_VALUE(-ENODEV);
}
processor_device_array[pr->id] = (void *)device;
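The convert_acpiid_to_cpu() change above widens the return type from u8 to int so that -1 can serve as the "no matching CPU" sentinel; with a u8 return, -1 silently wraps to 0xff. A standalone demonstration (plain userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned char as_u8 = -1;	/* wraps to 0xff (255) */
	int as_int = -1;		/* keeps its sign */

	/* prints "u8: 255  int: -1" */
	printf("u8: %d  int: %d\n", as_u8, as_int);
	return 0;
}
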

View File

@ -54,10 +54,10 @@ ACPI_MODULE_NAME("acpi_processor")
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void);
static void (*pm_idle_save) (void) __read_mostly;
module_param(max_cstate, uint, 0644);
static unsigned int nocst = 0;
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
/*
@ -67,7 +67,7 @@ module_param(nocst, uint, 0000);
* 100 HZ: 0x0000000F: 4 jiffies = 40ms
* reduce history for more aggressive entry into C3
*/
static unsigned int bm_history =
static unsigned int bm_history __read_mostly =
(HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
/* --------------------------------------------------------------------------
@ -1081,7 +1081,7 @@ int acpi_processor_power_init(struct acpi_processor *pr,
struct acpi_device *device)
{
acpi_status status = 0;
static int first_run = 0;
static int first_run;
struct proc_dir_entry *entry = NULL;
unsigned int i;

View File

@ -34,6 +34,7 @@
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#endif
@ -48,7 +49,7 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")
static DECLARE_MUTEX(performance_sem);
static DEFINE_MUTEX(performance_mutex);
/*
* _PPC support is implemented as a CPUfreq policy notifier:
@ -72,7 +73,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
struct acpi_processor *pr;
unsigned int ppc = 0;
down(&performance_sem);
mutex_lock(&performance_mutex);
if (event != CPUFREQ_INCOMPATIBLE)
goto out;
@ -93,7 +94,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
core_frequency * 1000);
out:
up(&performance_sem);
mutex_unlock(&performance_mutex);
return 0;
}
@ -553,6 +554,230 @@ static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
}
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
static int acpi_processor_get_psd(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
struct acpi_buffer state = {0, NULL};
union acpi_object *psd = NULL;
struct acpi_psd_package *pdomain;
status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
return -ENODEV;
}
psd = (union acpi_object *) buffer.pointer;
if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
result = -EFAULT;
goto end;
}
if (psd->package.count != 1) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
result = -EFAULT;
goto end;
}
pdomain = &(pr->performance->domain_info);
state.length = sizeof(struct acpi_psd_package);
state.pointer = pdomain;
status = acpi_extract_package(&(psd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
result = -EFAULT;
goto end;
}
end:
acpi_os_free(buffer.pointer);
return result;
}
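For reference, _PSD returns a package of five integers, which the "NNNNN" format above unpacks into domain_info; a sketch of that layout (field types here are illustrative, the real struct acpi_psd_package is defined elsewhere in this patch series):

/* Package { NumEntries (5), Revision (0), Domain, CoordType, NumProcessors } */
struct example_psd_package {
	u64 num_entries;	/* expected to equal ACPI_PSD_REV0_ENTRIES */
	u64 revision;		/* expected to equal ACPI_PSD_REV0_REVISION */
	u64 domain;		/* dependency domain this CPU belongs to */
	u64 coord_type;		/* DOMAIN_COORD_TYPE_SW_ALL / _SW_ANY / _HW_ALL */
	u64 num_processors;	/* number of CPUs sharing the domain */
};
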
int acpi_processor_preregister_performance(
struct acpi_processor_performance **performance)
{
int count, count_target;
int retval = 0;
unsigned int i, j;
cpumask_t covered_cpus;
struct acpi_processor *pr;
struct acpi_psd_package *pdomain;
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
mutex_lock(&performance_mutex);
retval = 0;
/* Call _PSD for all CPUs */
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr) {
/* Look only at processors in ACPI namespace */
continue;
}
if (pr->performance) {
retval = -EBUSY;
continue;
}
if (!performance || !performance[i]) {
retval = -EINVAL;
continue;
}
pr->performance = performance[i];
cpu_set(i, pr->performance->shared_cpu_map);
if (acpi_processor_get_psd(pr)) {
retval = -EINVAL;
continue;
}
}
if (retval)
goto err_ret;
/*
* Now that we have _PSD data from all CPUs, lets setup P-state
* domain info.
*/
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr)
continue;
/* Basic validity check for domain info */
pdomain = &(pr->performance->domain_info);
if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
(pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
retval = -EINVAL;
goto err_ret;
}
if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
retval = -EINVAL;
goto err_ret;
}
}
cpus_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr)
continue;
if (cpu_isset(i, covered_cpus))
continue;
pdomain = &(pr->performance->domain_info);
cpu_set(i, pr->performance->shared_cpu_map);
cpu_set(i, covered_cpus);
if (pdomain->num_processors <= 1)
continue;
/* Validate the Domain info */
count_target = pdomain->num_processors;
count = 1;
if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) {
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
} else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) {
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
}
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = processors[j];
if (!match_pr)
continue;
match_pdomain = &(match_pr->performance->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
/* Here i and j are in the same domain */
if (match_pdomain->num_processors != count_target) {
retval = -EINVAL;
goto err_ret;
}
if (pdomain->coord_type != match_pdomain->coord_type) {
retval = -EINVAL;
goto err_ret;
}
cpu_set(j, covered_cpus);
cpu_set(j, pr->performance->shared_cpu_map);
count++;
}
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = processors[j];
if (!match_pr)
continue;
match_pdomain = &(match_pr->performance->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
match_pr->performance->shared_type =
pr->performance->shared_type;
match_pr->performance->shared_cpu_map =
pr->performance->shared_cpu_map;
}
}
err_ret:
if (retval) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
}
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr || !pr->performance)
continue;
/* Assume no coordination on any error parsing domain info */
if (retval) {
cpus_clear(pr->performance->shared_cpu_map);
cpu_set(i, pr->performance->shared_cpu_map);
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
}
pr->performance = NULL; /* Will be set for real in register */
}
mutex_unlock(&performance_mutex);
return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
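A caller-side sketch of the new preregister interface (illustrative names; roughly the shape of what a cpufreq driver would do before registering per-CPU performance data, with error unwinding omitted):

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <acpi/processor.h>

static struct acpi_processor_performance *example_perf[NR_CPUS];

static int example_preregister(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		example_perf[i] = kzalloc(sizeof(*example_perf[i]), GFP_KERNEL);
		if (!example_perf[i])
			return -ENOMEM;
	}

	/*
	 * Fills in shared_cpu_map/shared_type for every possible CPU, or
	 * falls back to "no coordination" when the _PSD data is inconsistent.
	 */
	return acpi_processor_preregister_performance(example_perf);
}
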
int
acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu)
@ -564,16 +789,16 @@ acpi_processor_register_performance(struct acpi_processor_performance
if (!(acpi_processor_ppc_status & PPC_REGISTERED))
return_VALUE(-EINVAL);
down(&performance_sem);
mutex_lock(&performance_mutex);
pr = processors[cpu];
if (!pr) {
up(&performance_sem);
mutex_unlock(&performance_mutex);
return_VALUE(-ENODEV);
}
if (pr->performance) {
up(&performance_sem);
mutex_unlock(&performance_mutex);
return_VALUE(-EBUSY);
}
@ -583,13 +808,13 @@ acpi_processor_register_performance(struct acpi_processor_performance
if (acpi_processor_get_performance_info(pr)) {
pr->performance = NULL;
up(&performance_sem);
mutex_unlock(&performance_mutex);
return_VALUE(-EIO);
}
acpi_cpufreq_add_file(pr);
up(&performance_sem);
mutex_unlock(&performance_mutex);
return_VALUE(0);
}
@ -603,11 +828,11 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");
down(&performance_sem);
mutex_lock(&performance_mutex);
pr = processors[cpu];
if (!pr) {
up(&performance_sem);
mutex_unlock(&performance_mutex);
return_VOID;
}
@ -617,7 +842,7 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
acpi_cpufreq_remove_file(pr);
up(&performance_sem);
mutex_unlock(&performance_mutex);
return_VOID;
}

Some files were not shown because too many files have changed in this diff.