Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
 obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y += cstate.o
+obj-y += cstate.o processor.o
 endif
 
@@ -464,7 +464,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	unsigned int irq;
 	unsigned int plat_gsi = gsi;
@@ -476,14 +476,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
 	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
 		extern void eisa_set_level_irq(unsigned int irq);
 
-		if (edge_level == ACPI_LEVEL_SENSITIVE)
+		if (triggering == ACPI_LEVEL_SENSITIVE)
 			eisa_set_level_irq(gsi);
 	}
 #endif
 
 #ifdef CONFIG_X86_IO_APIC
 	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-		plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
+		plat_gsi = mp_register_gsi(gsi, triggering, polarity);
 	}
 #endif
 	acpi_gsi_to_irq(plat_gsi, &irq);
@@ -14,64 +14,6 @@
 #include <acpi/processor.h>
 #include <asm/acpi.h>
 
-static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
-						*pow)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pow->pdc = obj_list;
-
-	return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
-				   unsigned int cpu)
-{
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-
-	pow->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL)
-		acpi_processor_power_init_intel_pdc(pow);
-
-	return;
-}
-
-EXPORT_SYMBOL(acpi_processor_power_init_pdc);
-
 /*
  * Initialize bm_flags based on the CPU cache properties
  * On SMP it depends on cache configuration
arch/i386/kernel/acpi/processor.c (new file, 75 lines)
@@ -0,0 +1,75 @@
+/*
+ * arch/i386/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
+
+	if (cpu_has(c, X86_FEATURE_EST))
+		buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *) buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	unsigned int cpu = pr->id;
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+
+	pr->pdc = NULL;
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+		init_intel_pdc(pr, c);
+
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
 }
 
 
-/*
- * acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
- * driver/acpi/processor.c
- */
-static void
-acpi_processor_cpu_init_pdc_est(
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	union acpi_object *obj;
-	u32 *buf;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-	dprintk("acpi_processor_cpu_init_pdc_est\n");
-
-	if (!cpu_has(c, X86_FEATURE_EST))
-		return;
-
-	/* Initialize pdc. It will be used later. */
-	if (!obj_list)
-		return;
-
-	if (!(obj_list->count && obj_list->pointer))
-		return;
-
-	obj = obj_list->pointer;
-	if ((obj->buffer.length == 12) && obj->buffer.pointer) {
-		buf = (u32 *)obj->buffer.pointer;
-		buf[0] = ACPI_PDC_REVISION_ID;
-		buf[1] = 1;
-		buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-		perf->pdc = obj_list;
-	}
-	return;
-}
-
-
-/* CPU specific PDC initialization */
-static void
-acpi_processor_cpu_init_pdc(
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-	dprintk("acpi_processor_cpu_init_pdc\n");
-	perf->pdc = NULL;
-	if (cpu_has(c, X86_FEATURE_EST))
-		acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
-	return;
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy *policy)
@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 
-	union acpi_object arg0 = {ACPI_TYPE_BUFFER};
-	u32 arg0_buf[3];
-	struct acpi_object_list arg_list = {1, &arg0};
-
 	dprintk("acpi_cpufreq_cpu_init\n");
-	/* setup arg_list for _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
 
 	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
 
 	acpi_io_data[cpu] = data;
 
-	acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
 	result = acpi_processor_register_performance(&data->acpi_data, cpu);
-	data->acpi_data.pdc = NULL;
 
 	if (result)
 		goto err_free;
@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
  */
 static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 {
-	union acpi_object arg0 = {ACPI_TYPE_BUFFER};
-	u32 arg0_buf[3];
-	struct acpi_object_list arg_list = {1, &arg0};
 	unsigned long cur_freq;
 	int result = 0, i;
 	unsigned int cpu = policy->cpu;
 
-	/* _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
-	arg0_buf[0] = ACPI_PDC_REVISION_ID;
-	arg0_buf[1] = 1;
-	arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
-
-	p.pdc = &arg_list;
-
 	/* register with ACPI core */
 	if (acpi_processor_register_performance(&p, cpu)) {
 		dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
@@ -1080,7 +1080,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 
 #define MAX_GSI_NUM 4096
 
-int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi (u32 gsi, int triggering, int polarity)
 {
 	int ioapic = -1;
 	int ioapic_pin = 0;
@@ -1129,7 +1129,7 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 
 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 
-	if (edge_level) {
+	if (triggering == ACPI_LEVEL_SENSITIVE) {
 		/*
 		 * For PCI devices assign IRQs in order, avoiding gaps
 		 * due to unused I/O APIC pins.
@@ -1151,8 +1151,8 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
 	}
 
 	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
 	return gsi;
 }
 
@@ -13,6 +13,11 @@ obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
 obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += acpi-processor.o
+endif
+
 obj-$(CONFIG_IA64_PALINFO) += palinfo.o
 obj-$(CONFIG_IOSAPIC) += iosapic.o
 obj-$(CONFIG_MODULES) += module.o
@@ -33,33 +33,33 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
 	struct acpi_resource_vendor *vendor;
 	struct acpi_vendor_descriptor *descriptor;
-	u32 length;
+	u32 byte_length;
 
-	if (resource->id != ACPI_RSTYPE_VENDOR)
+	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
 		return AE_OK;
 
 	vendor = (struct acpi_resource_vendor *)&resource->data;
-	descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
-	if (vendor->length <= sizeof(*info->descriptor) ||
+	descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
+	if (vendor->byte_length <= sizeof(*info->descriptor) ||
 	    descriptor->guid_id != info->descriptor->guid_id ||
 	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
 		return AE_OK;
 
-	length = vendor->length - sizeof(struct acpi_vendor_descriptor);
-	info->data = acpi_os_allocate(length);
+	byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
+	info->data = acpi_os_allocate(byte_length);
 	if (!info->data)
 		return AE_NO_MEMORY;
 
 	memcpy(info->data,
-	       vendor->reserved + sizeof(struct acpi_vendor_descriptor),
-	       length);
-	info->length = length;
+	       vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
+	       byte_length);
+	info->length = byte_length;
 	return AE_CTRL_TERMINATE;
 }
 
 acpi_status
 acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
-			  u8 ** data, u32 * length)
+			  u8 ** data, u32 * byte_length)
 {
 	struct acpi_vendor_info info;
 
@@ -72,7 +72,7 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
 		return AE_NOT_FOUND;
 
 	*data = info.data;
-	*length = info.length;
+	*byte_length = info.length;
 	return AE_OK;
 }
 
arch/ia64/kernel/acpi-processor.c (new file, 67 lines)
@@ -0,0 +1,67 @@
+/*
+ * arch/ia64/kernel/cpufreq/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *) buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	pr->pdc = NULL;
+	init_intel_pdc(pr);
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
@@ -567,16 +567,16 @@ void __init acpi_numa_arch_fixup(void)
  * success: return IRQ number (>=0)
  * failure: return < 0
  */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	if (has_8259 && gsi < 16)
 		return isa_irq_to_vector(gsi);
 
 	return iosapic_register_intr(gsi,
-				     (active_high_low ==
+				     (polarity ==
 				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
 				     IOSAPIC_POL_LOW,
-				     (edge_level ==
+				     (triggering ==
 				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
 				     IOSAPIC_LEVEL);
 }
@@ -1 +1,2 @@
 obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
+
@@ -269,48 +269,6 @@ acpi_cpufreq_verify (
 }
 
 
-/*
- * processor_init_pdc - let BIOS know about the SMP capabilities
- * of this driver
- * @perf: processor-specific acpi_io_data struct
- * @cpu: CPU being initialized
- *
- * To avoid issues with legacy OSes, some BIOSes require to be informed of
- * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
- * accordingly. Actual call to _PDC is done in driver/acpi/processor.c
- */
-static void
-processor_init_pdc (
-		struct acpi_processor_performance *perf,
-		unsigned int cpu,
-		struct acpi_object_list *obj_list
-		)
-{
-	union acpi_object *obj;
-	u32 *buf;
-
-	dprintk("processor_init_pdc\n");
-
-	perf->pdc = NULL;
-	/* Initialize pdc. It will be used later. */
-	if (!obj_list)
-		return;
-
-	if (!(obj_list->count && obj_list->pointer))
-		return;
-
-	obj = obj_list->pointer;
-	if ((obj->buffer.length == 12) && obj->buffer.pointer) {
-		buf = (u32 *)obj->buffer.pointer;
-		buf[0] = ACPI_PDC_REVISION_ID;
-		buf[1] = 1;
-		buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-		perf->pdc = obj_list;
-	}
-	return;
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy *policy)
@@ -320,14 +278,7 @@ acpi_cpufreq_cpu_init (
 	struct cpufreq_acpi_io *data;
 	unsigned int result = 0;
 
-	union acpi_object arg0 = {ACPI_TYPE_BUFFER};
-	u32 arg0_buf[3];
-	struct acpi_object_list arg_list = {1, &arg0};
-
 	dprintk("acpi_cpufreq_cpu_init\n");
-	/* setup arg_list for _PDC settings */
-	arg0.buffer.length = 12;
-	arg0.buffer.pointer = (u8 *) arg0_buf;
 
 	data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
@@ -337,9 +288,7 @@ acpi_cpufreq_cpu_init (
 
 	acpi_io_data[cpu] = data;
 
-	processor_init_pdc(&data->acpi_data, cpu, &arg_list);
 	result = acpi_processor_register_performance(&data->acpi_data, cpu);
-	data->acpi_data.pdc = NULL;
 
 	if (result)
 		goto err_free;
@@ -193,12 +193,12 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
 		goto free_resource;
 	}
 
-	min = addr->min_address_range;
+	min = addr->minimum;
 	max = min + addr->address_length - 1;
-	if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
+	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
 		sparse = 1;
 
-	space_nr = new_space(addr->address_translation_offset, sparse);
+	space_nr = new_space(addr->translation_offset, sparse);
 	if (space_nr == ~0)
 		goto free_name;
 
@@ -285,7 +285,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
 	if (addr.resource_type == ACPI_MEMORY_RANGE) {
 		flags = IORESOURCE_MEM;
 		root = &iomem_resource;
-		offset = addr.address_translation_offset;
+		offset = addr.translation_offset;
 	} else if (addr.resource_type == ACPI_IO_RANGE) {
 		flags = IORESOURCE_IO;
 		root = &ioport_resource;
@@ -298,7 +298,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
 	window = &info->controller->window[info->controller->windows++];
 	window->resource.name = info->name;
 	window->resource.flags = flags;
-	window->resource.start = addr.min_address_range + offset;
+	window->resource.start = addr.minimum + offset;
 	window->resource.end = window->resource.start + addr.address_length - 1;
 	window->resource.child = NULL;
 	window->offset = offset;
@@ -1,3 +1,8 @@
 obj-y := boot.o
 boot-y := ../../../i386/kernel/acpi/boot.o
 obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
+
+ifneq ($(CONFIG_ACPI_PROCESSOR),)
+obj-y += processor.o
+endif
+
arch/x86_64/kernel/acpi/processor.c (new file, 72 lines)
@@ -0,0 +1,72 @@
+/*
+ * arch/x86_64/kernel/acpi/processor.c
+ *
+ * Copyright (C) 2005 Intel Corporation
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+
+#include <acpi/processor.h>
+#include <asm/acpi.h>
+
+static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
+{
+	struct acpi_object_list *obj_list;
+	union acpi_object *obj;
+	u32 *buf;
+
+	/* allocate and initialize pdc. It will be used later. */
+	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+	if (!obj_list) {
+		printk(KERN_ERR "Memory allocation error\n");
+		return;
+	}
+
+	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+	if (!obj) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj_list);
+		return;
+	}
+
+	buf = kmalloc(12, GFP_KERNEL);
+	if (!buf) {
+		printk(KERN_ERR "Memory allocation error\n");
+		kfree(obj);
+		kfree(obj_list);
+		return;
+	}
+
+	buf[0] = ACPI_PDC_REVISION_ID;
+	buf[1] = 1;
+	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+
+	obj->type = ACPI_TYPE_BUFFER;
+	obj->buffer.length = 12;
+	obj->buffer.pointer = (u8 *) buf;
+	obj_list->count = 1;
+	obj_list->pointer = obj;
+	pr->pdc = obj_list;
+
+	return;
+}
+
+/* Initialize _PDC data based on the CPU vendor */
+void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+{
+	unsigned int cpu = pr->id;
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+
+	pr->pdc = NULL;
+	if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
+		init_intel_pdc(pr, c);
+
+	return;
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
@@ -2027,7 +2027,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 }
 
 
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
 {
 	struct IO_APIC_route_entry entry;
 	unsigned long flags;
@@ -2049,8 +2049,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 	entry.delivery_mode = INT_DELIVERY_MODE;
 	entry.dest_mode = INT_DEST_MODE;
 	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-	entry.trigger = edge_level;
-	entry.polarity = active_high_low;
+	entry.trigger = triggering;
+	entry.polarity = polarity;
 	entry.mask = 1; /* Disabled (masked) */
 
 	irq = gsi_irq_sharing(irq);
@@ -2065,9 +2065,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
 		"IRQ %d Mode:%i Active:%i)\n", ioapic,
 		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-		edge_level, active_high_low);
+		triggering, polarity);
 
-	ioapic_register_intr(irq, entry.vector, edge_level);
+	ioapic_register_intr(irq, entry.vector, triggering);
 
 	if (!ioapic && (irq < 16))
 		disable_8259A_irq(irq);
@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 
 #define MAX_GSI_NUM 4096
 
-int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
 {
 	int ioapic = -1;
 	int ioapic_pin = 0;
@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
 
 	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
 
-	if (edge_level) {
+	if (triggering == ACPI_LEVEL_SENSITIVE) {
 		/*
 		 * For PCI devices assign IRQs in order, avoiding gaps
 		 * due to unused I/O APIC pins.
@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
 	}
 
 	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
 	return gsi;
 }
 